Example 1
    def render(self, generate, session, logger, personality, archetype,
               pre_feature, post_feature, param_tmpl, **kwargs):
        dbpersonality = Personality.get_unique(session, archetype=archetype,
                                               name=personality, compel=True)

        plenary = PlenaryPersonalityBase(dbpersonality, logger=logger)
        if pre_feature:
            plenary = PlenaryPersonalityPreFeature(dbpersonality, logger=logger)

        if post_feature:
            plenary = PlenaryPersonalityPostFeature(dbpersonality, logger=logger)

        if param_tmpl:
            param_templates = get_parameters_by_tmpl(dbpersonality)
            if param_tmpl in param_templates:
                plenary = PlenaryPersonalityParameter(dbpersonality, param_tmpl,
                                                      param_templates[param_tmpl],
                                                      logger=logger)
            else:
                raise NotFoundException("No parameter template %s%s found." %
                                        (param_tmpl, TEMPLATE_EXTENSION))

        lines = []
        if generate:
            lines.append(plenary._generate_content())
        else:
            lines.append(plenary.read())

        return lines
Example 2
    def render(self, session, logger, personality, archetype, vmhost_capacity_function,
               vmhost_overcommit_memory, cluster_required, config_override,
               host_environment, grn, eon_id, leave_existing, **arguments):
        dbpersona = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)

        if vmhost_capacity_function is not None or \
                vmhost_overcommit_memory is not None:
            if not "esx" in dbpersona.cluster_infos:
                dbpersona.cluster_infos["esx"] = PersonalityESXClusterInfo()

        if vmhost_capacity_function:
            # Basic sanity tests to see if the function works
            try:
                global_vars = {'__builtins__': restricted_builtins}
                local_vars = {'memory': 10}
                capacity = eval(vmhost_capacity_function, global_vars,
                                local_vars)
            except Exception, err:
                raise ArgumentError("Failed to evaluate the function: %s" % err)
            if not isinstance(capacity, dict):
                raise ArgumentError("The function should return a dictonary.")
            for name, value in capacity.items():
                if not isinstance(name, str) or (not isinstance(value, int) and
                                                 not isinstance(value, float)):
                    raise ArgumentError("The function should return a dictionary "
                                        "with all keys being strings, and all "
                                        "values being numbers.")

            # TODO: Should this be mandatory? It is for now.
            if "memory" not in capacity:
                raise ArgumentError("The memory constraint is missing from "
                                    "the returned dictionary.")

            dbpersona.cluster_infos["esx"].vmhost_capacity_function = vmhost_capacity_function
Example 3
    def render(
        self, session, gateway, ip, netmask, prefixlen, network_environment, archetype, personality, **arguments
    ):
        dbnet_env = NetworkEnvironment.get_unique_or_default(session, network_environment)
        dbnetwork = get_net_id_from_ip(session, gateway, dbnet_env)

        if netmask:
            dest = IPv4Network("%s/%s" % (ip, netmask))
        else:
            dest = IPv4Network("%s/%s" % (ip, prefixlen))

        if personality:
            dbpersonality = Personality.get_unique(session, name=personality, archetype=archetype, compel=True)
        else:
            dbpersonality = None

        q = session.query(StaticRoute)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(gateway_ip=gateway)
        q = q.filter_by(dest_ip=dest.ip)
        q = q.filter_by(dest_cidr=dest.prefixlen)
        q = q.filter_by(personality=dbpersonality)

        try:
            dbroute = q.one()
        except NoResultFound:
            raise NotFoundException("Static Route to {0} using gateway {1} " "not found.".format(dest, gateway))

        session.delete(dbroute)
        session.flush()

        # TODO: refresh affected host templates
        return
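
The destination is built from the IP plus either a dotted netmask or a numeric prefix length. The example relies on the older ipaddr package; with the standard-library ipaddress module the equivalent parsing looks roughly like the sketch below (not the command's own code):

import ipaddress

def parse_destination(ip, netmask=None, prefixlen=None):
    # Accept either --netmask or --prefixlen, mirroring the branch above.
    suffix = netmask if netmask else prefixlen
    iface = ipaddress.ip_interface("%s/%s" % (ip, suffix))
    return iface.ip, iface.network.prefixlen

print(parse_destination("192.168.5.0", netmask="255.255.255.0"))  # (IPv4Address('192.168.5.0'), 24)
print(parse_destination("192.168.5.0", prefixlen=24))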
Example 4
    def render(self, generate, session, logger, personality, archetype,
               pre_feature, post_feature, param_tmpl, **kwargs):
        dbpersonality = Personality.get_unique(session,
                                               archetype=archetype,
                                               name=personality,
                                               compel=True)

        plenary = PlenaryPersonalityBase(dbpersonality, logger=logger)
        if pre_feature:
            plenary = PlenaryPersonalityPreFeature(dbpersonality,
                                                   logger=logger)

        if post_feature:
            plenary = PlenaryPersonalityPostFeature(dbpersonality,
                                                    logger=logger)

        if param_tmpl:
            param_templates = get_parameters_by_tmpl(dbpersonality)
            if param_tmpl in param_templates:
                plenary = PlenaryPersonalityParameter(
                    dbpersonality,
                    param_tmpl,
                    param_templates[param_tmpl],
                    logger=logger)
            else:
                raise NotFoundException("No parameter template %s%s found." %
                                        (param_tmpl, TEMPLATE_EXTENSION))

        lines = []
        if generate:
            lines.append(plenary._generate_content())
        else:
            lines.append(plenary.read())

        return lines
Example 5
    def render(self, generate, session, logger, personality, archetype,
               pre_feature, post_feature, param_tmpl, **kwargs):
        dbpersonality = Personality.get_unique(session, archetype=archetype,
                                               name=personality, compel=True)

        if pre_feature:
            plenary = PlenaryPersonalityPreFeature.get_plenary(dbpersonality,
                                                               logger=logger)
        elif post_feature:
            plenary = PlenaryPersonalityPostFeature.get_plenary(dbpersonality,
                                                                logger=logger)
        elif param_tmpl:
            param_templates = get_parameters_by_tmpl(dbpersonality)

            if param_tmpl in param_templates:
                values = param_templates[param_tmpl]
            else:
                values = {}

            ptmpl = ParameterTemplate(dbpersonality, param_tmpl, values)
            plenary = PlenaryPersonalityParameter(ptmpl, logger=logger)
        else:
            plenary = PlenaryPersonalityBase.get_plenary(dbpersonality,
                                                         logger=logger)

        if generate:
            return plenary._generate_content()
        else:
            return plenary.read()
Example 6
def validate_personality_config(session, archetype, personality):
    """
        Validates all the parameters on personality to validate
        if all required parameters have been set. All feature
        parameters are also validated.
    """
    dbpersonality = None
    if isinstance(personality, Personality):
        dbpersonality = personality
    else:
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)
    dbarchetype = dbpersonality.archetype

    error = []
    ## validate parameters
    param_definitions = []
    parameters = get_parameters(session, personality=dbpersonality)
    if dbarchetype.paramdef_holder:
        param_definitions = dbarchetype.paramdef_holder.param_definitions
        error += validate_required_parameter(param_definitions, parameters)

    ## features  for personalities
    for link in dbarchetype.features + dbpersonality.features:
        param_definitions = []
        if link.feature.paramdef_holder:
            param_definitions = link.feature.paramdef_holder.param_definitions
            tmp_error = validate_required_parameter(param_definitions, parameters, link)
            if tmp_error:
                error.append("Feature Binding : %s" % link.feature)
                error += tmp_error
    return error
Example 7
def validate_personality_config(session, archetype, personality):
    """
        Validates all the parameters on personality to validate
        if all required parameters have been set. All feature
        parameters are also validated.
    """
    dbpersonality = None
    if isinstance(personality, Personality):
        dbpersonality = personality
    else:
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=archetype,
                                               compel=True)
    dbarchetype = dbpersonality.archetype

    error = []
    ## validate parameters
    param_definitions = []
    parameters = get_parameters(session, personality=dbpersonality)
    if dbarchetype.paramdef_holder:
        param_definitions = dbarchetype.paramdef_holder.param_definitions
        error += validate_required_parameter(param_definitions, parameters)

    ## features  for personalities
    for link in dbarchetype.features + dbpersonality.features:
        param_definitions = []
        if link.feature.paramdef_holder:
            param_definitions = link.feature.paramdef_holder.param_definitions
            tmp_error = validate_required_parameter(param_definitions,
                                                    parameters, link)
            if tmp_error:
                error.append("Feature Binding : %s" % link.feature)
                error += tmp_error
    return error
Example 8
    def render(self, session, archetype, personality, other,
               other_archetype, **arguments):

        dbpersona = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)

        if not other_archetype:
            other_archetype = archetype

        db_other_persona = Personality.get_unique(session, name=other,
                                                  archetype=other_archetype,
                                                  compel=True)

        ret = defaultdict(dict)
        self.populate_data(session, dbpersona, "my", ret)
        self.populate_data(session, db_other_persona, "other", ret)

        return DiffData(dbpersona, db_other_persona, ret)
Example 9
 def render(self, session, service, archetype, personality, **arguments):
     dbpersonality = Personality.get_unique(session, name=personality,
                                            archetype=archetype, compel=True)
     dbservice = Service.get_unique(session, service, compel=True)
     if dbpersonality in dbservice.personalities:
         raise ArgumentError("Service %s is already required by personality "
                             "%s, archetype %s." % (service, personality,
                                                    archetype))
     dbservice.personalities.append(dbpersonality)
     return
Example 10
 def render(self, session, service, archetype, personality, **arguments):
     dbpersonality = Personality.get_unique(session,
                                            name=personality,
                                            archetype=archetype,
                                            compel=True)
     dbservice = Service.get_unique(session, service, compel=True)
     if dbpersonality in dbservice.personalities:
         raise ArgumentError(
             "Service %s is already required by personality "
             "%s, archetype %s." % (service, personality, archetype))
     dbservice.personalities.append(dbpersonality)
     return
Example 11
 def render(self, session, service, archetype, personality, **arguments):
     dbpersonality = Personality.get_unique(session, name=personality,
                                            archetype=archetype, compel=True)
     dbservice = Service.get_unique(session, service, compel=True)
     try:
         dbservice.personalities.remove(dbpersonality)
     except ValueError:
         raise NotFoundException("Service %s required for archetype "
                                 "%s, personality %s not found." %
                                 (service, archetype, personality))
     session.flush()
     return
Example 12
    def render(self, session, logger, hostname, cluster, personality,
               **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbhost = hostname_to_host(session, hostname)
        if not dbhost.cluster:
            raise ArgumentError(
                "{0} is not bound to a cluster.".format(dbhost))
        if dbhost.cluster != dbcluster:
            raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
                dbhost, dbhost.cluster, dbcluster))

        if personality:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbpersonality.cluster_required:
                raise ArgumentError("Cannot switch host to personality %s "
                                    "because that personality requires a "
                                    "cluster" % personality)
            dbhost.personality = dbpersonality
        elif dbhost.personality.cluster_required:
            raise ArgumentError("Host personality %s requires a cluster, "
                                "use --personality to change personality "
                                "when leaving the cluster." %
                                dbhost.personality.name)

        dbcluster.hosts.remove(dbhost)
        remove_service_addresses(dbcluster, dbhost)
        dbcluster.validate()

        session.flush()
        session.expire(dbhost, ['_cluster'])

        # Will need to write a cluster plenary and either write or
        # remove a host plenary.  Grab the domain key since the two
        # must be in the same domain.
        host_plenary = Plenary.get_plenary(dbhost, logger=logger)
        cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)
            cluster_plenary.write(locked=True)
            try:
                host_plenary.write(locked=True)
            except IncompleteError:
                host_plenary.cleanup(domain=dbhost.branch.name, locked=True)
        except:
            cluster_plenary.restore_stash()
            host_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example 13
    def render(self, session, logger, hostname, cluster,
               personality, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbhost = hostname_to_host(session, hostname)
        if not dbhost.cluster:
            raise ArgumentError("{0} is not bound to a cluster.".format(dbhost))
        if dbhost.cluster != dbcluster:
            raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
                                dbhost, dbhost.cluster, dbcluster))

        if personality:
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbpersonality.cluster_required:
                raise ArgumentError("Cannot switch host to personality %s "
                                    "because that personality requires a "
                                    "cluster" % personality)
            dbhost.personality = dbpersonality
        elif dbhost.personality.cluster_required:
            raise ArgumentError("Host personality %s requires a cluster, "
                                "use --personality to change personality "
                                "when leaving the cluster." %
                                dbhost.personality.name)

        dbcluster.hosts.remove(dbhost)
        remove_service_addresses(dbcluster, dbhost)
        dbcluster.validate()

        session.flush()
        session.expire(dbhost, ['_cluster'])

        # Will need to write a cluster plenary and either write or
        # remove a host plenary.  Grab the domain key since the two
        # must be in the same domain.
        host_plenary = Plenary.get_plenary(dbhost, logger=logger)
        cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)
            cluster_plenary.write(locked=True)
            try:
                host_plenary.write(locked=True)
            except IncompleteError:
                host_plenary.cleanup(domain=dbhost.branch.name, locked=True)
        except:
            cluster_plenary.restore_stash()
            host_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example 14
    def render(self, session, logger, personality, archetype, **arguments):
        dbpersona = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)

        dbhost = session.query(Host).filter_by(personality=dbpersona).first()
        dbcls = session.query(Cluster).filter_by(personality=dbpersona).first()
        if dbhost or dbcls:
            raise ArgumentError("{0} is still in use and cannot be deleted."
                                .format(dbpersona))

        plenary = PlenaryPersonality(dbpersona, logger=logger)
        session.delete(dbpersona)
        session.flush()
        plenary.remove()

        return
Example 15
    def render(self, session, logger, personality, archetype, **arguments):
        dbpersona = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)

        dbhost = session.query(Host).filter_by(personality=dbpersona).first()
        dbcls = session.query(Cluster).filter_by(personality=dbpersona).first()
        if dbhost or dbcls:
            raise ArgumentError("{0} is still in use and cannot be deleted."
                                .format(dbpersona))

        plenary = Plenary.get_plenary(dbpersona, logger=logger)
        session.delete(dbpersona)
        session.flush()
        plenary.remove()

        return
Example 16
def get_parameter_holder(session, archetype=None, personality=None,
                         auto_include=False):
    db_param_holder = None
    dbpersonality = None
    if isinstance(personality, Personality):
        dbpersonality = personality
    else:
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)
    db_param_holder = dbpersonality.paramholder

    if db_param_holder is None and auto_include:
        db_param_holder = PersonalityParameter(personality=dbpersonality)
        session.add(db_param_holder)

    return db_param_holder
Example 17
    def render(self, session, service, instance, archetype, personality,
               networkip, **kwargs):

        dbservice = Service.get_unique(session, service, compel=True)
        dblocation = get_location(session, **kwargs)
        dbinstance = get_service_instance(session, dbservice, instance)

        if networkip:
            dbnet_env = NetworkEnvironment.get_unique_or_default(session)
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
        else:
            dbnetwork = None

        if archetype is None and personality:
            # Can't get here with the standard aq client.
            raise ArgumentError("Specifying --personality requires you to "
                                "also specify --archetype.")

        kwargs = {}
        if archetype and personality:
            dbpersona = Personality.get_unique(session,
                                               name=personality,
                                               archetype=archetype,
                                               compel=True)

            map_class = PersonalityServiceMap
            query = session.query(map_class).filter_by(personality=dbpersona)

            kwargs["personality"] = dbpersona
        else:
            map_class = ServiceMap
            query = session.query(map_class)

        dbmap = query.filter_by(location=dblocation,
                                service_instance=dbinstance,
                                network=dbnetwork).first()

        if not dbmap:
            dbmap = map_class(service_instance=dbinstance,
                              location=dblocation,
                              network=dbnetwork,
                              **kwargs)

        session.add(dbmap)
        session.flush()

        return
Example 18
    def render(self, session, logger, hostname, cluster,
               personality, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbhost = hostname_to_host(session, hostname)
        if not dbhost.cluster:
            raise ArgumentError("{0} is not bound to a cluster.".format(dbhost))
        if dbhost.cluster != dbcluster:
            raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
                                dbhost, dbhost.cluster, dbcluster))

        if personality:
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbpersonality.cluster_required:
                raise ArgumentError("Cannot switch host to personality %s "
                                    "because that personality requires a "
                                    "cluster" % personality)
            dbhost.personality = dbpersonality
        elif dbhost.personality.cluster_required:
            raise ArgumentError("Host personality %s requires a cluster, "
                                "use --personality to change personality "
                                "when leaving the cluster." %
                                dbhost.personality.name)

        dbcluster.hosts.remove(dbhost)
        remove_service_addresses(dbcluster, dbhost)
        dbcluster.validate()

        session.flush()
        session.expire(dbhost, ['_cluster'])

        host_plenary = Plenary.get_plenary(dbhost, logger=logger)
        cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
        with CompileKey.merge([host_plenary.get_key(),
                              cluster_plenary.get_key()]):
            try:
                cluster_plenary.write(locked=True)
                try:
                    host_plenary.write(locked=True)
                except IncompleteError:
                    host_plenary.remove(locked=True)
            except:
                cluster_plenary.restore_stash()
                host_plenary.restore_stash()
                raise
Example 19
    def render(self, session, service, instance, archetype, personality,
               networkip, **kwargs):

        dbservice = Service.get_unique(session, service, compel=True)
        dblocation = get_location(session, **kwargs)
        dbinstance = get_service_instance(session, dbservice, instance)

        if networkip:
            dbnet_env = NetworkEnvironment.get_unique_or_default(session)
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
        else:
            dbnetwork = None

        if archetype is None and personality:
            # Can't get here with the standard aq client.
            raise ArgumentError("Specifying --personality requires you to "
                                "also specify --archetype.")

        kwargs = {}
        if archetype and personality:
            dbpersona = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)

            map_class = PersonalityServiceMap
            query = session.query(map_class).filter_by(personality=dbpersona)

            kwargs["personality"] = dbpersona
        else:
            map_class = ServiceMap
            query = session.query(map_class)

        dbmap = query.filter_by(location=dblocation,
                                service_instance=dbinstance,
                                network=dbnetwork).first()

        if not dbmap:
            dbmap = map_class(service_instance=dbinstance,
                              location=dblocation,
                              network=dbnetwork, **kwargs)

        session.add(dbmap)
        session.flush()

        return
Example 20
    def render(self, session, logger, target, grn, eon_id, hostname, list, personality,
               archetype, **arguments):
        dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                           config=self.config)

        plenaries = PlenaryCollection(logger=logger)

        if hostname:
            objs = [hostname_to_host(session, hostname)]
            config_key = "host_grn_targets"
        elif list:
            check_hostlist_size(self.command, self.config, list)
            objs = hostlist_to_hosts(session, list)
            config_key = "host_grn_targets"
        elif personality:
            objs = [Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)]
            config_key = "personality_grn_targets"

        for obj in objs:
            section = "archetype_" + obj.archetype.name

            if self.config.has_option(section, config_key):
                valid_targets = [s.strip() for s in
                                 self.config.get(section, config_key).split(",")]
            else:
                raise ArgumentError("{0} has no valid GRN targets configured."
                                    .format(obj.archetype))

            if target not in valid_targets:
                raise ArgumentError("Invalid target %s for archetype %s, please "
                                    "choose from: %s." %
                                    (target, obj.archetype.name,
                                     ", ".join(valid_targets)))

            plenaries.append(Plenary.get_plenary(obj))
            self._update_dbobj(obj, target, dbgrn)

        session.flush()

        plenaries.write()

        return
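
The valid GRN targets are read from a comma-separated option in the broker configuration, keyed by the object's archetype. A small, self-contained ConfigParser sketch of the same lookup (section name and values are illustrative):

from configparser import ConfigParser

config = ConfigParser()
config.read_string("""
[archetype_aquilon]
host_grn_targets = esp, hlmplus
""")

section, config_key = "archetype_aquilon", "host_grn_targets"
if config.has_option(section, config_key):
    valid_targets = [s.strip() for s in config.get(section, config_key).split(",")]
else:
    valid_targets = []
print(valid_targets)  # ['esp', 'hlmplus']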
Example 21
    def render(self, session, logger, domain, sandbox, archetype, personality,
               pancinclude, pancexclude, pancdebug, cleandeps,
               **arguments):
        dbdomain = None
        dbauthor = None
        if domain or sandbox:
            (dbdomain, dbauthor) = get_branch_and_author(session, logger,
                                                         domain=domain,
                                                         sandbox=sandbox,
                                                         compel=True)

        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)
        if pancdebug:
            pancinclude = r'.*'
            pancexclude = r'components/spma/functions'

        q = session.query(Host)
        q = q.filter_by(personality=dbpersonality)
        if dbdomain:
            q = q.filter_by(branch=dbdomain)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        host_list = q.all()

        if not host_list:
            return

        # If the domain was not specified, set it to the domain of the first host
        dbdomain, dbauthor = validate_branch_author(host_list)

        plenaries = PlenaryCollection(logger=logger)
        for host in host_list:
            plenaries.append(Plenary.get_plenary(host))

        dom = TemplateDomain(dbdomain, dbauthor, logger=logger)
        with plenaries.get_key():
            dom.compile(session, only=plenaries.object_templates,
                        panc_debug_include=pancinclude,
                        panc_debug_exclude=pancexclude,
                        cleandeps=cleandeps, locked=True)
        return
Example 22
    def render(self, session, archetype, personality, cluster,
               **kwargs):
        dbpers = Personality.get_unique(session, name=personality,
                                        archetype=archetype, compel=True)
        dbclus = Cluster.get_unique(session, cluster, compel=True)
        if len(dbclus.allowed_personalities) > 1:
            for host in dbclus.hosts:
                if host.personality == dbpers:
                    raise ArgumentError("The cluster member %s has a "
                                        "personality of %s which is "
                                        "incompatible with this constraint." %
                                        (host.fqdn, host.personality))

        if dbpers in dbclus.allowed_personalities:
            dbclus.allowed_personalities.remove(dbpers)
            dbclus.validate()

        session.flush()

        return
Example 23
def get_parameter_holder(session,
                         archetype=None,
                         personality=None,
                         auto_include=False):
    db_param_holder = None
    dbpersonality = None
    if isinstance(personality, Personality):
        dbpersonality = personality
    else:
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=archetype,
                                               compel=True)
    db_param_holder = dbpersonality.paramholder

    if db_param_holder is None and auto_include:
        db_param_holder = PersonalityParameter(personality=dbpersonality)
        session.add(db_param_holder)

    return db_param_holder
Example 24
    def render(self, session, logger, personality, archetype,
               vmhost_capacity_function, vmhost_overcommit_memory,
               cluster_required, config_override, host_environment, grn,
               eon_id, leave_existing, **arguments):
        dbpersona = Personality.get_unique(session,
                                           name=personality,
                                           archetype=archetype,
                                           compel=True)

        if vmhost_capacity_function is not None or \
                vmhost_overcommit_memory is not None:
            if not "esx" in dbpersona.cluster_infos:
                dbpersona.cluster_infos["esx"] = PersonalityESXClusterInfo()

        if vmhost_capacity_function:
            # Basic sanity tests to see if the function works
            try:
                global_vars = {'__builtins__': restricted_builtins}
                local_vars = {'memory': 10}
                capacity = eval(vmhost_capacity_function, global_vars,
                                local_vars)
            except Exception, err:
                raise ArgumentError("Failed to evaluate the function: %s" %
                                    err)
            if not isinstance(capacity, dict):
                raise ArgumentError("The function should return a dictonary.")
            for name, value in capacity.items():
                if not isinstance(name, str) or (not isinstance(value, int) and
                                                 not isinstance(value, float)):
                    raise ArgumentError(
                        "The function should return a dictionary "
                        "with all keys being strings, and all "
                        "values being numbers.")

            # TODO: Should this be mandatory? It is for now.
            if "memory" not in capacity:
                raise ArgumentError("The memory constraint is missing from "
                                    "the returned dictionary.")

            dbpersona.cluster_infos[
                "esx"].vmhost_capacity_function = vmhost_capacity_function
Example 25
    def render(self, session, service, instance, archetype, personality,
               networkip, **arguments):
        dbservice = Service.get_unique(session, service, compel=True)
        dbinstance = ServiceInstance.get_unique(session,
                                                service=dbservice,
                                                name=instance,
                                                compel=True)
        dblocation = get_location(session, **arguments)

        if networkip:
            dbnet_env = NetworkEnvironment.get_unique_or_default(session)
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
        else:
            dbnetwork = None

        if personality:
            if not archetype:
                # Can't get here with the standard aq client.
                raise ArgumentError("Specifying --personality requires you to "
                                    "also specify --archetype.")
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = session.query(PersonalityServiceMap)
            q = q.filter_by(personality=dbpersonality)
        else:
            q = session.query(ServiceMap)

        q = q.filter_by(location=dblocation,
                        service_instance=dbinstance,
                        network=dbnetwork)
        dbmap = q.first()

        if dbmap:
            session.delete(dbmap)
        session.flush()
        return
Example 26
    def render(self, session, logger, metacluster, personality, max_members,
               fix_location, high_availability, comments, **arguments):
        dbmetacluster = MetaCluster.get_unique(session, metacluster,
                                               compel=True)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmetacluster))

        if personality:
            archetype = dbmetacluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbmetacluster.personality = dbpersonality

        if max_members is not None:
            dbmetacluster.max_clusters = max_members

        if comments is not None:
            dbmetacluster.comments = comments

        if high_availability is not None:
            dbmetacluster.high_availability = high_availability

        # TODO update_cluster_location would update VMs. Metaclusters
        # will contain VMs in Vulcan2 model.
        update_cluster_location(session, logger, dbmetacluster, fix_location,
                                plenaries, **arguments)

        session.flush()
        dbmetacluster.validate()

        plenaries.write(locked=False)

        return
Example 27
 def render(self, session, logger, personality, archetype, domain, sandbox,
            **arguments):
     (dbbranch, dbauthor) = get_branch_and_author(session,
                                                  logger,
                                                  domain=domain,
                                                  sandbox=sandbox)
     if archetype and personality:
         dbpersonality = Personality.get_unique(session,
                                                name=personality,
                                                archetype=archetype,
                                                compel=True)
         if not dbbranch:
             return dbpersonality
         thresholds = self.get_threshold(dbpersonality, dbbranch, dbauthor)
         return ThresholdedPersonality(dbpersonality, thresholds)
     q = session.query(Personality)
     if archetype:
         dbarchetype = Archetype.get_unique(session, archetype, compel=True)
         q = q.filter_by(archetype=dbarchetype)
     if personality:
         q = q.filter_by(name=personality)
     q = q.join(Archetype)
     q = q.order_by(Archetype.name, Personality.name)
     q = q.options(contains_eager('archetype'), subqueryload('services'),
                   subqueryload('_grns'), subqueryload('features'),
                   joinedload('features.feature'),
                   joinedload('cluster_infos'))
     results = PersonalityList()
     if not dbbranch:
         results.extend(q.all())
         return results
     for dbpersonality in q.all():
         # In theory the results here could be inconsistent if the
         # domain is being written to.  In practice... it's not worth
         # taking out the compile lock to ensure consistency.
         thresholds = self.get_threshold(dbpersonality, dbbranch, dbauthor)
         results.append(ThresholdedPersonality(dbpersonality, thresholds))
     return results
Example 28
 def render(self, session, logger,
            personality, archetype, domain, sandbox, **arguments):
     (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                  domain=domain,
                                                  sandbox=sandbox)
     if archetype and personality:
         dbpersonality = Personality.get_unique(session, name=personality,
                                                archetype=archetype,
                                                compel=True)
         if not dbbranch:
             return dbpersonality
         thresholds = self.get_threshold(dbpersonality, dbbranch, dbauthor)
         return ThresholdedPersonality(dbpersonality, thresholds)
     q = session.query(Personality)
     if archetype:
         dbarchetype = Archetype.get_unique(session, archetype, compel=True)
         q = q.filter_by(archetype=dbarchetype)
     if personality:
         q = q.filter_by(name=personality)
     q = q.join(Archetype)
     q = q.order_by(Archetype.name, Personality.name)
     q = q.options(contains_eager('archetype'),
                   subqueryload('services'),
                   subqueryload('_grns'),
                   subqueryload('features'),
                   joinedload('features.feature'),
                   joinedload('cluster_infos'))
     results = PersonalityList()
     if not dbbranch:
         results.extend(q.all())
         return results
     for dbpersonality in q.all():
         # In theory the results here could be inconsistent if the
         # domain is being written to.  In practice... it's not worth
         # taking out the compile lock to ensure consistency.
         thresholds = self.get_threshold(dbpersonality, dbbranch, dbauthor)
         results.append(ThresholdedPersonality(dbpersonality, thresholds))
     return results
Example 29
    def render(self, session, service, instance, archetype, personality,
               networkip, **arguments):
        dbservice = Service.get_unique(session, service, compel=True)
        dbinstance = ServiceInstance.get_unique(session, service=dbservice,
                                                name=instance, compel=True)
        dblocation = get_location(session, **arguments)

        if networkip:
            dbnet_env = NetworkEnvironment.get_unique_or_default(session)
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
        else:
            dbnetwork = None

        if personality:
            if not archetype:
                # Can't get here with the standard aq client.
                raise ArgumentError("Specifying --personality requires you to "
                                    "also specify --archetype.")
            dbarchetype = Archetype.get_unique(session, archetype,
                                               compel=True)
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = session.query(PersonalityServiceMap)
            q = q.filter_by(personality=dbpersonality)
        else:
            q = session.query(ServiceMap)

        q = q.filter_by(location=dblocation, service_instance=dbinstance,
                        network=dbnetwork)
        dbmap = q.first()

        if dbmap:
            session.delete(dbmap)
        session.flush()
        return
Example 30
    def render(self, session, logger, target, grn, eon_id, hostname, list, personality,
               archetype, **arguments):
        dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                           config=self.config)

        target_type = "personality" if personality else "host"

        if hostname:
            objs = [hostname_to_host(session, hostname)]
        elif list:
            check_hostlist_size(self.command, self.config, list)
            objs = hostlist_to_hosts(session, list)
        elif personality:
            objs = [Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)]

        for obj in objs:
            # INFO: Fails for archetypes other than 'aquilon', 'vmhost'
            valid_targets = self.config.get("archetype_" + obj.archetype.name,
                                            target_type + "_grn_targets")

            if target not in [s.strip() for s in valid_targets.split(",")]:
                raise ArgumentError("Invalid %s target %s for archetype %s, please "
                                    "choose from %s" % (target_type, target,
                                                        obj.archetype.name,
                                                        valid_targets))

            self._update_dbobj(obj, target, dbgrn)

        session.flush()

        if personality:
            plenary = PlenaryPersonality(objs[0], logger=logger)
            plenary.write()

        return
Example 31
    def render(self, session, logger, target, hostname, list, personality,
               archetype, **arguments):

        target_type = "personality" if personality else "host"

        if hostname:
            objs = [hostname_to_host(session, hostname)]
        elif list:
            check_hostlist_size(self.command, self.config, list)
            objs = hostlist_to_hosts(session, list)
        elif personality:
            objs = [Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)]

        plenaries = PlenaryCollection(logger=logger)
        for obj in objs:
            # INFO: Fails for archetypes other than 'aquilon', 'vmhost'
            valid_targets = self.config.get("archetype_" + obj.archetype.name,
                                            target_type + "_grn_targets")

            if target not in [s.strip() for s in valid_targets.split(",")]:
                raise ArgumentError("Invalid %s target %s for archetype %s, please "
                                    "choose from %s" % (target_type, target,
                                                        obj.archetype.name,
                                                        valid_targets))

            for grn_rec in obj._grns[:]:
                if target == grn_rec.target:
                    obj._grns.remove(grn_rec)
            plenaries.append(Plenary.get_plenary(obj))

        session.flush()

        plenaries.write()

        return
Example 32
    def render(self, session, logger, hostname, osname, osversion, archetype,
               personality, buildstatus, keepbindings, grn, eon_id,
               **arguments):
        dbhost = hostname_to_host(session, hostname)

        # Currently, for the Host to be created it *must* be associated with
        # a Machine already.  If that ever changes, need to check here and
        # bail if dbhost.machine does not exist.

        if archetype and archetype != dbhost.archetype.name:
            if not personality:
                raise ArgumentError("Changing archetype also requires "
                                    "specifying --personality.")
        if personality:
            if archetype:
                dbarchetype = Archetype.get_unique(session,
                                                   archetype,
                                                   compel=True)
                if dbarchetype.cluster_type is not None:
                    raise ArgumentError("Archetype %s is a cluster archetype" %
                                        dbarchetype.name)
            else:
                dbarchetype = dbhost.archetype

            if not osname and not osversion and \
               dbhost.operating_system.archetype != dbarchetype:
                raise ArgumentError("{0} belongs to {1:l}, not {2:l}.  Please "
                                    "specify --osname/--osversion.".format(
                                        dbhost.operating_system,
                                        dbhost.operating_system.archetype,
                                        dbarchetype))

            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbarchetype,
                                                   compel=True)
            if dbhost.cluster and dbhost.cluster.allowed_personalities and \
               dbpersonality not in dbhost.cluster.allowed_personalities:
                allowed = [
                    "%s/%s" % (p.archetype.name, p.name)
                    for p in dbhost.cluster.allowed_personalities
                ]
                raise ArgumentError("The {0:l} is not allowed by {1}.  "
                                    "Specify one of {2}.".format(
                                        dbpersonality, dbhost.cluster,
                                        allowed))

            dbhost.personality = dbpersonality

        if not osname:
            osname = dbhost.operating_system.name
        if osname and osversion:
            dbos = OperatingSystem.get_unique(session,
                                              name=osname,
                                              version=osversion,
                                              archetype=dbhost.archetype,
                                              compel=True)
            # Hmm... no cluster constraint here...
            dbhost.operating_system = dbos
        elif osname != dbhost.operating_system.name:
            raise ArgumentError("Please specify a version to use for OS %s." %
                                osname)

        if buildstatus:
            dbstatus = HostLifecycle.get_unique(session,
                                                buildstatus,
                                                compel=True)
            dbhost.status.transition(dbhost, dbstatus)

        if grn or eon_id:
            dbgrn = lookup_grn(session,
                               grn,
                               eon_id,
                               logger=logger,
                               config=self.config)
            dbhost.owner_grn = dbgrn

        session.flush()

        if dbhost.archetype.is_compileable:
            self.compile(session, dbhost, logger, keepbindings)

        return
Example 33
    def render(self, session, logger, cluster, personality,
               max_members, fix_location, down_hosts_threshold,
               maint_threshold, comments,
               # ESX specific options
               switch, memory_capacity, clear_overrides, vm_to_host_ratio,
               **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        self.check_cluster_type(dbcluster, forbid=MetaCluster)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if vm_to_host_ratio:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            dbcluster.vm_count = vm_count
            dbcluster.host_count = host_count

        if switch is not None:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbnetdev = NetworkDevice.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbnetdev))
            else:
                dbnetdev = None
            dbcluster.network_device = dbnetdev

        if memory_capacity is not None:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            dbcluster.memory_capacity = memory_capacity

        if clear_overrides is not None:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            dbcluster.memory_capacity = None

        update_cluster_location(session, logger, dbcluster, fix_location,
                                plenaries, **arguments)

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality

        if max_members is not None:
            # Allow removing the restriction
            if max_members < 0:
                max_members = None
            dbcluster.max_hosts = max_members

        if comments is not None:
            dbcluster.comments = comments

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)

        session.flush()
        dbcluster.validate()

        plenaries.write(locked=False)

        return
Example 34
    def render(self, session, logger, cluster, archetype, personality, domain,
               sandbox, max_members, down_hosts_threshold, maint_threshold,
               buildstatus, comments, vm_to_host_ratio, switch, metacluster,
               **arguments):

        validate_basic("cluster", cluster)
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=archetype,
                                               compel=True)
        if not dbpersonality.is_cluster:
            raise ArgumentError("%s is not a cluster personality." %
                                personality)

        ctype = dbpersonality.archetype.cluster_type
        section = "archetype_" + dbpersonality.archetype.name

        if not buildstatus:
            buildstatus = "build"
        dbstatus = ClusterLifecycle.get_unique(session,
                                               buildstatus,
                                               compel=True)

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Adding clusters to {0:l} is not allowed.".format(dbbranch))

        dbloc = get_location(session, **arguments)
        if not dbloc:
            raise ArgumentError("Adding a cluster requires a location "
                                "constraint.")
        if not dbloc.campus:
            raise ArgumentError("{0} is not within a campus.".format(dbloc))

        if max_members is None:
            if self.config.has_option(section, "max_members_default"):
                max_members = self.config.getint(section,
                                                 "max_members_default")

        Cluster.get_unique(session, cluster, preclude=True)
        # Not finding the cluster type is an internal consistency issue, so make
        # that show up in the logs by using AquilonError
        clus_type = Cluster.polymorphic_subclass(ctype,
                                                 "Unknown cluster type",
                                                 error=AquilonError)

        (down_hosts_pct, dht) = Cluster.parse_threshold(down_hosts_threshold)

        kw = {
            'name': cluster,
            'location_constraint': dbloc,
            'personality': dbpersonality,
            'max_hosts': max_members,
            'branch': dbbranch,
            'sandbox_author': dbauthor,
            'down_hosts_threshold': dht,
            'down_hosts_percent': down_hosts_pct,
            'status': dbstatus,
            'comments': comments
        }

        if ctype == 'esx':
            if vm_to_host_ratio is None:
                if self.config.has_option(section, "vm_to_host_ratio"):
                    vm_to_host_ratio = self.config.get(section,
                                                       "vm_to_host_ratio")
                else:
                    vm_to_host_ratio = "1:1"
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            kw["vm_count"] = vm_count
            kw["host_count"] = host_count

        if switch and hasattr(clus_type, 'switch'):
            kw['switch'] = Switch.get_unique(session, switch, compel=True)

        if maint_threshold is not None:
            (down_hosts_pct, dht) = Cluster.parse_threshold(maint_threshold)
            kw['down_maint_threshold'] = dht
            kw['down_maint_percent'] = down_hosts_pct

        dbcluster = clus_type(**kw)

        plenaries = PlenaryCollection(logger=logger)

        if metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   metacluster,
                                                   compel=True)

            dbmetacluster.validate_membership(dbcluster)
            dbmetacluster.members.append(dbcluster)

            plenaries.append(Plenary.get_plenary(dbmetacluster))

        session.add(dbcluster)
        session.flush()
        session.refresh(dbcluster)

        plenaries.append(Plenary.get_plenary(dbcluster))

        key = plenaries.get_write_key()

        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
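
The final write follows the broker's compile-lock pattern: acquire the key, write the plenary templates, restore the stashed copies on failure, and always release the lock. A generic, hypothetical sketch of that pattern (the object names stand in for the real broker classes):

def write_plenaries_locked(lock_queue, key, plenaries):
    # Acquire the compile lock before touching any templates on disk.
    lock_queue.acquire(key)
    try:
        plenaries.write(locked=True)
    except Exception:
        # Roll back any partially written templates before re-raising.
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)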
Example 35
    def render(
            self,
            session,
            logger,
            # search_cluster
            archetype,
            cluster_type,
            personality,
            domain,
            sandbox,
            branch,
            buildstatus,
            allowed_archetype,
            allowed_personality,
            down_hosts_threshold,
            down_maint_threshold,
            max_members,
            member_archetype,
            member_hostname,
            member_personality,
            capacity_override,
            cluster,
            esx_guest,
            instance,
            esx_metacluster,
            service,
            share,
            esx_share,
            esx_switch,
            esx_virtual_machine,
            fullinfo,
            style,
            **arguments):

        if esx_share:
            self.deprecated_option("esx_share",
                                   "Please use --share instead.",
                                   logger=logger,
                                   **arguments)
            share = esx_share

        if cluster_type == 'esx':
            cls = EsxCluster
        else:
            cls = Cluster

        # Don't load full objects if we only want to show their name
        if fullinfo or style != 'raw':
            q = session.query(cls)
        else:
            q = session.query(cls.name)

        # The ORM automatically de-duplicates the result if we query full
        # objects, but not when we query just the names. Tell the DB to do so.
        q = q.distinct()

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = ClusterLifecycle.get_unique(session,
                                                        buildstatus,
                                                        compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if cluster_type:
            q = q.filter_by(cluster_type=cluster_type)

        # Go through the arguments and make special dicts for each
        # specific set of location arguments that are stripped of the
        # given prefix.
        location_args = {'cluster_': {}, 'member_': {}}
        for prefix in location_args.keys():
            for (k, v) in arguments.items():
                if k.startswith(prefix):
                    # arguments['cluster_building'] = 'dd'
                    # becomes
                    # location_args['cluster_']['building'] = 'dd'
                    location_args[prefix][k.replace(prefix, '')] = v

        dblocation = get_location(session, **location_args['cluster_'])
        if dblocation:
            if location_args['cluster_']['exact_location']:
                q = q.filter_by(location_constraint=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Cluster.location_constraint_id.in_(childids))
        dblocation = get_location(session, **location_args['member_'])
        if dblocation:
            q = q.join('_hosts', 'host', 'machine')
            if location_args['member_']['exact_location']:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))
            q = q.reset_joinpoint()

        # esx stuff
        if cluster:
            q = q.filter_by(name=cluster)
        if esx_metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   esx_metacluster,
                                                   compel=True)
            q = q.join('_metacluster')
            q = q.filter_by(metacluster=dbmetacluster)
            q = q.reset_joinpoint()
        if esx_virtual_machine:
            dbvm = Machine.get_unique(session,
                                      esx_virtual_machine,
                                      compel=True)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine)
            q = q.filter_by(machine=dbvm)
            q = q.reset_joinpoint()
        if esx_guest:
            dbguest = hostname_to_host(session, esx_guest)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine, Machine)
            q = q.filter_by(host=dbguest)
            q = q.reset_joinpoint()
        if capacity_override:
            q = q.filter(EsxCluster.memory_capacity != None)
        if esx_switch:
            dbswitch = Switch.get_unique(session, esx_switch, compel=True)
            q = q.filter_by(switch=dbswitch)

        if service:
            dbservice = Service.get_unique(session, name=service, compel=True)
            if instance:
                dbsi = ServiceInstance.get_unique(session,
                                                  name=instance,
                                                  service=dbservice,
                                                  compel=True)
                q = q.filter(Cluster.service_bindings.contains(dbsi))
            else:
                q = q.join('service_bindings')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('service_bindings')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if share:
            # Perform sanity check on the share name
            q2 = session.query(Share)
            q2 = q2.filter_by(name=share)
            if not q2.first():
                raise NotFoundException("Share %s not found." % share)

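            # A share may be attached to the cluster's resource holder directly
            # (S1) or sit inside a resource group (RG -> BR -> S2); match either.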
            CR = aliased(ClusterResource)
            S1 = aliased(Share)
            S2 = aliased(Share)
            RG = aliased(ResourceGroup)
            BR = aliased(BundleResource)
            q = q.join(CR)
            q = q.outerjoin((S1, S1.holder_id == CR.id))
            q = q.outerjoin((RG, RG.holder_id == CR.id),
                            (BR, BR.resourcegroup_id == RG.id),
                            (S2, S2.holder_id == BR.id))
            q = q.filter(or_(S1.name == share, S2.name == share))
            q = q.reset_joinpoint()

        if max_members:
            q = q.filter_by(max_hosts=max_members)

        if down_hosts_threshold:
            (pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
            q = q.filter_by(down_hosts_percent=pct)
            q = q.filter_by(down_hosts_threshold=dht)

        if down_maint_threshold:
            (pct, dmt) = Cluster.parse_threshold(down_maint_threshold)
            q = q.filter_by(down_maint_percent=pct)
            q = q.filter_by(down_maint_threshold=dmt)

        if allowed_archetype:
            # Added to the searches as appropriate below.
            dbaa = Archetype.get_unique(session,
                                        allowed_archetype,
                                        compel=True)
        if allowed_personality and allowed_archetype:
            dbap = Personality.get_unique(session,
                                          archetype=dbaa,
                                          name=allowed_personality,
                                          compel=True)
            q = q.filter(Cluster.allowed_personalities.contains(dbap))
        elif allowed_personality:
            q = q.join('allowed_personalities')
            q = q.filter_by(name=allowed_personality)
            q = q.reset_joinpoint()
        elif allowed_archetype:
            q = q.join('allowed_personalities')
            q = q.filter_by(archetype=dbaa)
            q = q.reset_joinpoint()

        if member_hostname:
            dbhost = hostname_to_host(session, member_hostname)
            q = q.join('_hosts')
            q = q.filter_by(host=dbhost)
            q = q.reset_joinpoint()

        if member_archetype:
            # Added to the searches as appropriate below.
            dbma = Archetype.get_unique(session, member_archetype, compel=True)
        if member_personality and member_archetype:
            q = q.join('_hosts', 'host')
            dbmp = Personality.get_unique(session,
                                          archetype=dbma,
                                          name=member_personality,
                                          compel=True)
            q = q.filter_by(personality=dbmp)
            q = q.reset_joinpoint()
        elif member_personality:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(name=member_personality)
            q = q.reset_joinpoint()
        elif member_archetype:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(archetype=dbma)
            q = q.reset_joinpoint()

        if cluster_type == 'esx':
            q = q.order_by(EsxCluster.name)
        else:
            q = q.order_by(Cluster.name)

        if fullinfo:
            return q.all()
        return SimpleClusterList(q.all())
Esempio n. 36
0
    def render(self, session, logger, metacluster, archetype, personality,
               domain, sandbox, max_members, buildstatus, comments,
               **arguments):

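        # Reject metacluster names that cannot be used as a key in the
        # generated templates.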
        validate_nlist_key("metacluster", metacluster)

        # this should be reverted when virtbuild supports these options
        if not archetype:
            archetype = "metacluster"

        if not personality:
            personality = "metacluster"

        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)
        if not dbpersonality.is_cluster:
            raise ArgumentError("%s is not a cluster personality." %
                                personality)

        if not buildstatus:
            buildstatus = "build"
        dbstatus = ClusterLifecycle.get_instance(session, buildstatus)

        # this should be reverted when virtbuild supports these options
        if not domain and not sandbox:
            domain = self.config.get("archetype_metacluster", "host_domain")

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=False)

        dbloc = get_location(session, **arguments)

        # this should be reverted when virtbuild supports this option
        if not dbloc:
            dbloc = Location.get_unique(session,
                                        name=self.config.get("archetype_metacluster",
                                                             "location_name"),
                                        location_type=self.config.get("archetype_metacluster",
                                                                      "location_type"))
        elif not dbloc.campus:
            raise ArgumentError("{0} is not within a campus.".format(dbloc))

        if max_members is None:
            max_members = self.config.getint("archetype_metacluster",
                                             "max_members_default")

        if metacluster.strip().lower() == 'global':
            raise ArgumentError("Metacluster name global is reserved.")

        MetaCluster.get_unique(session, metacluster, preclude=True)

        dbcluster = MetaCluster(name=metacluster, location_constraint=dbloc,
                                personality=dbpersonality,
                                max_clusters=max_members,
                                branch=dbbranch, sandbox_author=dbauthor,
                                status=dbstatus, comments=comments)

        session.add(dbcluster)
        session.flush()

        plenary = Plenary.get_plenary(dbcluster, logger=logger)
        plenary.write()

        return
Esempio n. 37
0
    def render(self, session, logger, feature, archetype, personality, model,
               vendor, interface, justification, user, **arguments):

        # Binding a feature to a named interface makes sense in the scope of a
        # personality, but not for a whole archetype.
        if interface and not personality:
            raise ArgumentError("Binding to a named interface needs "
                                "a personality.")

        q = session.query(Personality)
        dbarchetype = None

        feature_type = "host"

        justification_required = True

        # Warning: order matters here!
        params = {}
        if personality:
            justification_required = False
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            params["personality"] = dbpersonality
            if interface:
                params["interface_name"] = interface
                feature_type = "interface"
            dbarchetype = dbpersonality.archetype
            q = q.filter_by(archetype=dbarchetype)
            q = q.filter_by(name=personality)
        elif archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            params["archetype"] = dbarchetype
            q = q.filter_by(archetype=dbarchetype)
        else:
            # It's highly unlikely that a feature template would work for
            # _any_ archetype, so disallow this case for now. As I can't rule
            # out that such a case may have some uses in the future, the
            # restriction is here and not in the model.
            raise ArgumentError("Please specify either an archetype or "
                                "a personality when binding a feature.")

        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)

            if dbmodel.machine_type == "nic":
                feature_type = "interface"
            else:
                feature_type = "hardware"

            params["model"] = dbmodel

        if dbarchetype and not dbarchetype.is_compileable:
            raise UnimplementedError("Binding features to non-compilable "
                                     "archetypes is not implemented.")

        if not feature_type:  # pragma: no cover
            raise InternalError("Feature type is not known.")

        dbfeature = Feature.get_unique(session, name=feature,
                                       feature_type=feature_type, compel=True)

        cnt = q.count()
        # TODO: should the limit be configurable?
        if justification_required and cnt > 0:
            if not justification:
                raise AuthorizationException(
                      "Changing feature bindings for more "
                      "than just a personality requires --justification.")
            validate_justification(user, justification)

        self.do_link(session, logger, dbfeature, params)
        session.flush()

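        # Rebuild the plenary templates of every affected personality, logging
        # progress for large sets and rolling back written templates on failure.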
        idx = 0
        written = 0
        successful = []
        failed = []

        with CompileKey(logger=logger):
            personalities = q.all()

            for personality in personalities:
                idx += 1
                if idx % 1000 == 0:  # pragma: no cover
                    logger.client_info("Processing personality %d of %d..." %
                                       (idx, cnt))

                if not personality.archetype.is_compileable:  # pragma: no cover
                    continue

                try:
                    plenary_personality = PlenaryPersonality(personality)
                    written += plenary_personality.write(locked=True)
                    successful.append(plenary_personality)
                except IncompleteError:
                    pass
                except Exception, err:  # pragma: no cover
                    failed.append("{0} failed: {1}".format(personality, err))

            if failed:  # pragma: no cover
                for plenary in successful:
                    plenary.restore_stash()
                raise PartialError([], failed)
Esempio n. 38
0
    def render(self, session, logger, feature, archetype, personality, model,
               vendor, interface, justification, user, **arguments):

        # Binding a feature to a named interface makes sense in the scope of a
        # personality, but not for a whole archetype.
        if interface and not personality:
            raise ArgumentError("Binding to a named interface needs "
                                "a personality.")

        q = session.query(Personality)
        dbarchetype = None

        feature_type = "host"

        justification_required = True

        # Warning: order matters here!
        params = {}
        if personality:
            justification_required = False
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            params["personality"] = dbpersonality
            if interface:
                params["interface_name"] = interface
                feature_type = "interface"
            dbarchetype = dbpersonality.archetype
            q = q.filter_by(archetype=dbarchetype)
            q = q.filter_by(name=personality)
        elif archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            params["archetype"] = dbarchetype
            q = q.filter_by(archetype=dbarchetype)
        else:
            # It's highly unlikely that a feature template would work for
            # _any_ archetype, so disallow this case for now. As I can't rule
            # out that such a case may have some uses in the future, the
            # restriction is here and not in the model.
            raise ArgumentError("Please specify either an archetype or "
                                "a personality when binding a feature.")

        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)

            if dbmodel.model_type.isNic():
                feature_type = "interface"
            else:
                feature_type = "hardware"

            params["model"] = dbmodel

        if dbarchetype and not dbarchetype.is_compileable:
            raise UnimplementedError("Binding features to non-compilable "
                                     "archetypes is not implemented.")

        if not feature_type:  # pragma: no cover
            raise InternalError("Feature type is not known.")

        dbfeature = Feature.get_unique(session, name=feature,
                                       feature_type=feature_type, compel=True)

        cnt = q.count()
        # TODO: should the limit be configurable?
        if justification_required and cnt > 0:
            if not justification:
                raise AuthorizationException("Changing feature bindings for "
                                             "more than just a personality "
                                             "requires --justification.")
            validate_justification(user, justification)

        self.do_link(session, logger, dbfeature, params)
        session.flush()

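        # Regenerate the plenary templates of every personality affected by the
        # new feature binding.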
        plenaries = PlenaryCollection(logger=logger)
        for dbpersonality in q:
            plenaries.append(Plenary.get_plenary(dbpersonality))

        written = plenaries.write()
        logger.client_info("Flushed %d/%d templates." %
                           (written, len(plenaries.plenaries)))
        return
Esempio n. 39
0
    def render(self, session, logger, hostname, cluster, personality,
               **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.status.name == 'decommissioned':
            raise ArgumentError("Cannot add hosts to decommissioned clusters.")

        # We only support changing personality within the same
        # archetype. The archetype decides things like which OS, how
        # it builds (dhcp, etc), whether it's compilable, and
        # switching all of that by side-effect seems wrong
        # somehow. And besides, it would make the user-interface and
        # implementation for this command ugly in order to support
        # changing all of those options.
        personality_change = False
        if personality is not None:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbhost.personality != dbpersonality:
                dbhost.personality = dbpersonality
                personality_change = True

        # Clusters with an empty allowed_personalities list are unrestricted
        # (the default) and accept hosts of any personality.
        if (len(dbcluster.allowed_personalities) > 0
                and dbhost.personality not in dbcluster.allowed_personalities):
            raise ArgumentError(
                "The personality %s for %s is not allowed "
                "by the cluster. Specify --personality "
                "and provide one of %s" %
                (dbhost.personality, dbhost.fqdn, ", ".join(
                    [x.name for x in dbcluster.allowed_personalities])))

        # Now that we've changed the personality, we can check
        # if this is a valid membership change
        dbcluster.validate_membership(dbhost)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if dbhost.cluster and dbhost.cluster != dbcluster:
            logger.client_info("Removing {0:l} from {1:l}.".format(
                dbhost, dbhost.cluster))
            old_cluster = dbhost.cluster
            old_cluster.hosts.remove(dbhost)
            remove_service_addresses(old_cluster, dbhost)
            old_cluster.validate()
            session.expire(dbhost, ['_cluster'])
            plenaries.append(Plenary.get_plenary(old_cluster))

        # Apply the service addresses to the new member
        for res in walk_resources(dbcluster):
            if not isinstance(res, ServiceAddress):
                continue
            apply_service_address(dbhost, res.interfaces, res)

        if dbhost.cluster:
            if personality_change:
                raise ArgumentError(
                    "{0:l} already in {1:l}, use "
                    "aq reconfigure to change personality.".format(
                        dbhost, dbhost.cluster))
            # the cluster has not changed, therefore there's nothing
            # to do here.
            return

        # Calculate the node index: build a map of all possible values, remove
        # the used ones, and pick the smallest remaining one
        node_index_map = set(xrange(len(dbcluster._hosts) + 1))
        for link in dbcluster._hosts:
            # The cluster may have been bigger in the past, so node indexes may
            # be larger than the current cluster size
            try:
                node_index_map.remove(link.node_index)
            except KeyError:
                pass

        dbcluster.hosts.append((dbhost, min(node_index_map)))
        dbcluster.validate()

        # Adjust the host's build status to match the cluster's: demote a
        # 'ready' host joining a non-ready cluster, promote an 'almostready'
        # host joining a ready cluster.
        if dbhost.status.name == 'ready':
            if dbcluster.status.name != 'ready':
                dbalmost = HostLifecycle.get_unique(session,
                                                    'almostready',
                                                    compel=True)
                dbhost.status.transition(dbhost, dbalmost)
                plenaries.append(Plenary.get_plenary(dbhost))
        elif dbhost.status.name == 'almostready':
            if dbcluster.status.name == 'ready':
                dbready = HostLifecycle.get_unique(session,
                                                   'ready',
                                                   compel=True)
                dbhost.status.transition(dbhost, dbready)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # Enforce that service instances are set correctly for the
        # new cluster association.
        chooser = Chooser(dbhost, logger=logger)
        chooser.set_required()
        chooser.flush_changes()
        # the chooser will include the host plenary
        key = CompileKey.merge(
            [chooser.get_write_key(),
             plenaries.get_write_key()])

        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)
            plenaries.write(locked=True)
        except:
            chooser.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Esempio n. 40
0
    def render(self, session, logger,
               # search_cluster
               archetype, cluster_type, personality,
               domain, sandbox, branch, buildstatus,
               allowed_archetype, allowed_personality,
               down_hosts_threshold, down_maint_threshold, max_members,
               member_archetype, member_hostname, member_personality,
               capacity_override, cluster, esx_guest, instance,
               esx_metacluster, service, share, esx_share,
               esx_switch, esx_virtual_machine,
               fullinfo, style, **arguments):

        if esx_share:
            self.deprecated_option("esx_share", "Please use --share instead.",
                                   logger=logger, **arguments)
            share = esx_share

        if cluster_type == 'esx':
            cls = EsxCluster
        else:
            cls = Cluster

        # Don't load full objects if we only want to show their name
        if fullinfo or style != 'raw':
            q = session.query(cls)
        else:
            q = session.query(cls.name)

        # The ORM automatically de-duplicates the result if we query full
        # objects, but not when we query just the names. Tell the DB to do so.
        q = q.distinct()

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                                        compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if cluster_type:
            q = q.filter_by(cluster_type=cluster_type)

        # Go through the arguments and make special dicts for each
        # specific set of location arguments that are stripped of the
        # given prefix.
        location_args = {'cluster_': {}, 'member_': {}}
        for prefix in location_args.keys():
            for (k, v) in arguments.items():
                if k.startswith(prefix):
                    # arguments['cluster_building'] = 'dd'
                    # becomes
                    # location_args['cluster_']['building'] = 'dd'
                    location_args[prefix][k.replace(prefix, '')] = v

        dblocation = get_location(session, **location_args['cluster_'])
        if dblocation:
            if location_args['cluster_']['exact_location']:
                q = q.filter_by(location_constraint=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Cluster.location_constraint_id.in_(childids))
        dblocation = get_location(session, **location_args['member_'])
        if dblocation:
            q = q.join('_hosts', 'host', 'machine')
            if location_args['member_']['exact_location']:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))
            q = q.reset_joinpoint()

        # esx stuff
        if cluster:
            q = q.filter_by(name=cluster)
        if esx_metacluster:
            dbmetacluster = MetaCluster.get_unique(session, esx_metacluster,
                                                   compel=True)
            q = q.join('_metacluster')
            q = q.filter_by(metacluster=dbmetacluster)
            q = q.reset_joinpoint()
        if esx_virtual_machine:
            dbvm = Machine.get_unique(session, esx_virtual_machine, compel=True)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine)
            q = q.filter_by(machine=dbvm)
            q = q.reset_joinpoint()
        if esx_guest:
            dbguest = hostname_to_host(session, esx_guest)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine, Machine)
            q = q.filter_by(host=dbguest)
            q = q.reset_joinpoint()
        if capacity_override:
            q = q.filter(EsxCluster.memory_capacity != None)
        if esx_switch:
            dbswitch = Switch.get_unique(session, esx_switch, compel=True)
            q = q.filter_by(switch=dbswitch)

        if service:
            dbservice = Service.get_unique(session, name=service, compel=True)
            if instance:
                dbsi = ServiceInstance.get_unique(session, name=instance,
                                                  service=dbservice,
                                                  compel=True)
                q = q.filter(Cluster.service_bindings.contains(dbsi))
            else:
                q = q.join('service_bindings')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('service_bindings')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if share:
            # Perform sanity check on the share name
            q2 = session.query(Share)
            q2 = q2.filter_by(name=share)
            if not q2.first():
                raise NotFoundException("Share %s not found." % share)

            CR = aliased(ClusterResource)
            S1 = aliased(Share)
            S2 = aliased(Share)
            RG = aliased(ResourceGroup)
            BR = aliased(BundleResource)
            q = q.join(CR)
            q = q.outerjoin((S1, S1.holder_id == CR.id))
            q = q.outerjoin((RG, RG.holder_id == CR.id),
                            (BR, BR.resourcegroup_id == RG.id),
                            (S2, S2.holder_id == BR.id))
            q = q.filter(or_(S1.name == share, S2.name == share))
            q = q.reset_joinpoint()

        if max_members:
            q = q.filter_by(max_hosts=max_members)

        if down_hosts_threshold:
            (pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
            q = q.filter_by(down_hosts_percent=pct)
            q = q.filter_by(down_hosts_threshold=dht)

        if down_maint_threshold:
            (pct, dmt) = Cluster.parse_threshold(down_maint_threshold)
            q = q.filter_by(down_maint_percent=pct)
            q = q.filter_by(down_maint_threshold=dmt)

        if allowed_archetype:
            # Added to the searches as appropriate below.
            dbaa = Archetype.get_unique(session, allowed_archetype,
                                        compel=True)
        if allowed_personality and allowed_archetype:
            dbap = Personality.get_unique(session, archetype=dbaa,
                                          name=allowed_personality,
                                          compel=True)
            q = q.filter(Cluster.allowed_personalities.contains(dbap))
        elif allowed_personality:
            q = q.join('allowed_personalities')
            q = q.filter_by(name=allowed_personality)
            q = q.reset_joinpoint()
        elif allowed_archetype:
            q = q.join('allowed_personalities')
            q = q.filter_by(archetype=dbaa)
            q = q.reset_joinpoint()

        if member_hostname:
            dbhost = hostname_to_host(session, member_hostname)
            q = q.join('_hosts')
            q = q.filter_by(host=dbhost)
            q = q.reset_joinpoint()

        if member_archetype:
            # Added to the searches as appropriate below.
            dbma = Archetype.get_unique(session, member_archetype, compel=True)
        if member_personality and member_archetype:
            q = q.join('_hosts', 'host')
            dbmp = Personality.get_unique(session, archetype=dbma,
                                          name=member_personality, compel=True)
            q = q.filter_by(personality=dbmp)
            q = q.reset_joinpoint()
        elif member_personality:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(name=member_personality)
            q = q.reset_joinpoint()
        elif member_archetype:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(archetype=dbma)
            q = q.reset_joinpoint()

        if cluster_type == 'esx':
            q = q.order_by(EsxCluster.name)
        else:
            q = q.order_by(Cluster.name)

        if fullinfo:
            return q.all()
        return SimpleClusterList(q.all())
Esempio n. 41
0
    def render(self, session, service, instance, archetype, personality,
               networkip, include_parents, **arguments):
        dbservice = service and Service.get_unique(
            session, service, compel=True) or None
        dblocation = get_location(session, **arguments)
        queries = []
        # The current logic basically shoots for exact match when given
        # (like exact personality maps only or exact archetype maps
        # only), or "any" if an exact spec isn't given.
        if archetype and personality:
            dbpersona = Personality.get_unique(session,
                                               name=personality,
                                               archetype=archetype,
                                               compel=True)
            q = session.query(PersonalityServiceMap)
            q = q.filter_by(personality=dbpersona)
            queries.append(q)
        elif personality:
            # Alternately, this could throw an error and ask for archetype.
            q = session.query(PersonalityServiceMap)
            q = q.join('personality').filter_by(name=personality)
            q = q.reset_joinpoint()
            queries.append(q)
        elif archetype:
            # Alternately, this could throw an error and ask for personality.
            q = session.query(PersonalityServiceMap)
            q = q.join('personality', 'archetype').filter_by(name=archetype)
            q = q.reset_joinpoint()
            queries.append(q)
        else:
            queries.append(session.query(ServiceMap))
            queries.append(session.query(PersonalityServiceMap))
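        # The remaining filters are applied to each of the queries collected
        # above.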
        if dbservice:
            for i in range(len(queries)):
                queries[i] = queries[i].join('service_instance')
                queries[i] = queries[i].filter_by(service=dbservice)
                queries[i] = queries[i].reset_joinpoint()
        if instance:
            for i in range(len(queries)):
                queries[i] = queries[i].join('service_instance')
                queries[i] = queries[i].filter_by(name=instance)
                queries[i] = queries[i].reset_joinpoint()
        # Nothing fancy for now - just show any relevant explicit bindings.
        if dblocation:
            for i in range(len(queries)):
                if include_parents:
                    base_cls = queries[i].column_descriptions[0]["expr"]
                    col = base_cls.location_id
                    queries[i] = queries[i].filter(
                        col.in_(dblocation.parent_ids()))
                else:
                    queries[i] = queries[i].filter_by(location=dblocation)

        if networkip:
            dbnet_env = NetworkEnvironment.get_unique_or_default(session)
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            for i in range(len(queries)):
                queries[i] = queries[i].filter_by(network=dbnetwork)

        results = ServiceMapList()
        for q in queries:
            results.extend(q.all())
        if service and instance and dblocation:
            # This should be an exact match.  (Personality doesn't
            # matter... either it was given and it should be an
            # exact match for PersonalityServiceMap or it wasn't
            # and this should be an exact match for ServiceMap.)
            if not results:
                raise NotFoundException("No matching map found.")
        return results
Esempio n. 42
0
    def render(self, session, logger, hostname, machine, archetype,
               buildstatus, personality, osname, osversion, service, instance,
               model, machine_type, vendor, serial, cluster, guest_on_cluster,
               guest_on_share, member_cluster_share, domain, sandbox, branch,
               sandbox_owner, dns_domain, shortname, mac, ip, networkip,
               network_environment, exact_location, server_of_service,
               server_of_instance, grn, eon_id, fullinfo, **arguments):
        dbnet_env = NetworkEnvironment.get_unique_or_default(
            session, network_environment)

        q = session.query(Host)

        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            q = q.filter_by(machine=dbmachine)

        # Add the machine definition and the primary name. Use aliases to make
        # sure the end result will be ordered by primary name.
        PriDns = aliased(DnsRecord)
        PriFqdn = aliased(Fqdn)
        PriDomain = aliased(DnsDomain)
        q = q.join(Machine, (PriDns, PriDns.id == Machine.primary_name_id),
                   (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
                   (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
        q = q.order_by(PriFqdn.name, PriDomain.name)
        q = q.options(
            contains_eager('machine'),
            contains_eager('machine.primary_name', alias=PriDns),
            contains_eager('machine.primary_name.fqdn', alias=PriFqdn),
            contains_eager('machine.primary_name.fqdn.dns_domain',
                           alias=PriDomain))
        q = q.reset_joinpoint()

        # Hardware-specific filters
        dblocation = get_location(session, **arguments)
        if dblocation:
            if exact_location:
                q = q.filter(Machine.location == dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))

        if model or vendor or machine_type:
            subq = Model.get_matching_query(session,
                                            name=model,
                                            vendor=vendor,
                                            machine_type=machine_type,
                                            compel=True)
            q = q.filter(Machine.model_id.in_(subq))

        if serial:
            self.deprecated_option(
                "serial",
                "Please use search machine --serial instead.",
                logger=logger,
                **arguments)
            q = q.filter(Machine.serial_no == serial)

        # DNS IP address related filters
        if mac or ip or networkip or hostname or dns_domain or shortname:
            # Inner joins are cheaper than outer joins, so make some effort to
            # use inner joins when possible
            if mac or ip or networkip:
                q = q.join(Interface)
            else:
                q = q.outerjoin(Interface)
            if ip or networkip:
                q = q.join(AddressAssignment, Network, from_joinpoint=True)
            else:
                q = q.outerjoin(AddressAssignment,
                                Network,
                                from_joinpoint=True)

            if mac:
                self.deprecated_option("mac", "Please use search machine "
                                       "--mac instead.",
                                       logger=logger,
                                       **arguments)
                q = q.filter(Interface.mac == mac)
            if ip:
                q = q.filter(AddressAssignment.ip == ip)
                q = q.filter(Network.network_environment == dbnet_env)
            if networkip:
                dbnetwork = get_network_byip(session, networkip, dbnet_env)
                q = q.filter(AddressAssignment.network == dbnetwork)

            dbdns_domain = None
            if hostname:
                (shortname, dbdns_domain) = parse_fqdn(session, hostname)
            if dns_domain:
                dbdns_domain = DnsDomain.get_unique(session,
                                                    dns_domain,
                                                    compel=True)

            if shortname or dbdns_domain:
                ARecAlias = aliased(ARecord)
                ARecFqdn = aliased(Fqdn)

                q = q.outerjoin(
                    (ARecAlias,
                     and_(ARecAlias.ip == AddressAssignment.ip,
                          ARecAlias.network_id
                          == AddressAssignment.network_id)),
                    (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
                if shortname:
                    q = q.filter(
                        or_(ARecFqdn.name == shortname,
                            PriFqdn.name == shortname))
                if dbdns_domain:
                    q = q.filter(
                        or_(ARecFqdn.dns_domain == dbdns_domain,
                            PriFqdn.dns_domain == dbdns_domain))
            q = q.reset_joinpoint()

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if sandbox_owner:
            dbauthor = get_user_principal(session, sandbox_owner)

        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = HostLifecycle.get_unique(session,
                                                     buildstatus,
                                                     compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if osname and osversion and archetype:
            # archetype was already resolved above
            dbos = OperatingSystem.get_unique(session,
                                              name=osname,
                                              version=osversion,
                                              archetype=dbarchetype,
                                              compel=True)
            q = q.filter_by(operating_system=dbos)
        elif osname or osversion:
            q = q.join('operating_system')
            if osname:
                q = q.filter_by(name=osname)
            if osversion:
                q = q.filter_by(version=osversion)
            q = q.reset_joinpoint()

        if service:
            dbservice = Service.get_unique(session, service, compel=True)
            if instance:
                dbsi = get_service_instance(session, dbservice, instance)
                q = q.filter(Host.services_used.contains(dbsi))
            else:
                q = q.join('services_used')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('services_used')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if server_of_service:
            dbserver_service = Service.get_unique(session,
                                                  server_of_service,
                                                  compel=True)
            if server_of_instance:
                dbssi = get_service_instance(session, dbserver_service,
                                             server_of_instance)
                q = q.join('_services_provided')
                q = q.filter_by(service_instance=dbssi)
                q = q.reset_joinpoint()
            else:
                q = q.join('_services_provided', 'service_instance')
                q = q.filter_by(service=dbserver_service)
                q = q.reset_joinpoint()
        elif server_of_instance:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(name=server_of_instance)
            q = q.reset_joinpoint()

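        # Cluster membership filters: a metacluster matches through its member
        # clusters, a plain cluster matches directly.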
        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if isinstance(dbcluster, MetaCluster):
                q = q.join('_cluster', 'cluster', '_metacluster')
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_cluster:
            # TODO: this does not handle metaclusters according to Wes
            dbcluster = Cluster.get_unique(session,
                                           guest_on_cluster,
                                           compel=True)
            q = q.join('machine', VirtualMachine, ClusterResource)
            q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_share:
            #v2
            v2shares = session.query(
                Share.id).filter_by(name=guest_on_share).all()
            if not v2shares:
                raise NotFoundException(
                    "No shares found with name {0}.".format(guest_on_share))

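            # Match hosts whose machine has a disk backed by the named share.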
            NasAlias = aliased(VirtualDisk)
            q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if member_cluster_share:
            #v2
            v2shares = session.query(
                Share.id).filter_by(name=member_cluster_share).all()
            if not v2shares:
                raise NotFoundException(
                    "No shares found with name {0}.".format(member_cluster_share))

            NasAlias = aliased(VirtualDisk)

            q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                       'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

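            # Match hosts owned by the GRN, mapped to it directly, or whose
            # personality is owned by or mapped to the GRN.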
            persq = session.query(Personality.id)
            persq = persq.outerjoin(PersonalityGrnMap)
            persq = persq.filter(
                or_(Personality.owner_eon_id == dbgrn.eon_id,
                    PersonalityGrnMap.eon_id == dbgrn.eon_id))
            q = q.outerjoin(HostGrnMap)
            q = q.filter(
                or_(Host.owner_eon_id == dbgrn.eon_id,
                    HostGrnMap.eon_id == dbgrn.eon_id,
                    Host.personality_id.in_(persq.subquery())))
            q = q.reset_joinpoint()

        if fullinfo:
            return q.all()
        return SimpleHostList(q.all())
Esempio n. 43
0
    def refresh_windows_hosts(self, session, logger, containers):
        conn = sqlite3.connect(self.config.get("broker", "windows_host_info"))
        # Enable dictionary-style access to the rows.
        conn.row_factory = sqlite3.Row

        windows_hosts = {}
        interfaces = {}
        cur = conn.cursor()
        # There are more fields in the dataset like machine and
        # aqhostname that might be useful for error messages but these
        # are sufficient.
        cur.execute("select ether, windowshostname from machines")
        for row in cur:
            host = row["windowshostname"]
            if host:
                host = host.strip().lower()
            else:
                continue
            mac = row["ether"]
            if mac:
                mac = mac.strip().lower()
            windows_hosts[host] = mac
            interfaces[mac] = host

        success = []
        failed = []

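        # First pass: remove auto-created host entries whose hostname/MAC pair
        # no longer matches the Windows inventory, unless they have dependencies.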
        q = session.query(Host)
        q = q.filter_by(comments='Created by refresh_windows_host')
        for dbhost in q.all():
            mac_addresses = [iface.mac for iface in dbhost.machine.interfaces]
            if dbhost.fqdn in windows_hosts and \
               windows_hosts[dbhost.fqdn] in mac_addresses:
                # All is well
                continue
            deps = get_host_dependencies(session, dbhost)
            if deps:
                msg = "Skipping removal of host %s with dependencies: %s" % \
                        (dbhost.fqdn, ", ".join(deps))
                failed.append(msg)
                logger.info(msg)
                continue
            dbmachine = dbhost.machine
            success.append("Removed host entry for %s (%s)" %
                           (dbmachine.label, dbmachine.fqdn))
            if dbmachine.vm_container:
                containers.add(dbmachine.vm_container)
            session.delete(dbhost)
            dbdns_rec = dbmachine.primary_name
            dbmachine.primary_name = None
            delete_dns_record(dbdns_rec)
        session.flush()
        # The Host() creations below fail when autoflush is enabled.
        session.autoflush = False

        dbdomain = Domain.get_unique(session,
                                     self.config.get("archetype_windows",
                                                     "host_domain"),
                                     compel=InternalError)
        dbarchetype = Archetype.get_unique(session, "windows",
                                           compel=InternalError)
        dbpersonality = Personality.get_unique(session, archetype=dbarchetype,
                                               name="generic",
                                               compel=InternalError)
        dbstatus = HostLifecycle.get_unique(session, "ready",
                                            compel=InternalError)
        dbos = OperatingSystem.get_unique(session, name="windows",
                                          version="generic",
                                          archetype=dbarchetype,
                                          compel=InternalError)
        for (host, mac) in windows_hosts.items():
            try:
                (short, dbdns_domain) = parse_fqdn(session, host)
            except AquilonError, err:
                msg = "Skipping host %s: %s" % (host, err)
                failed.append(msg)
                logger.info(msg)
                continue
            existing = DnsRecord.get_unique(session, name=short,
                                            dns_domain=dbdns_domain)
            if existing:
                if not existing.hardware_entity:
                    msg = "Skipping host %s: It is not a primary name." % host
                    failed.append(msg)
                    logger.info(msg)
                    continue
                # If these are invalid there should have been a deletion
                # attempt above.
                if not existing.hardware_entity.interfaces:
                    msg = "Skipping host %s: Host already exists but has " \
                            "no interface attached." % host
                    failed.append(msg)
                    logger.info(msg)
                elif existing.hardware_entity.interfaces[0].mac != mac:
                    msg = "Skipping host %s: Host already exists but with " \
                            "MAC address %s and not %s." % \
                            (host, existing.hardware_entity.interfaces[0].mac,
                             mac)
                    failed.append(msg)
                    logger.info(msg)
                continue
            dbinterface = session.query(Interface).filter_by(mac=mac).first()
            if not dbinterface:
                msg = "Skipping host %s: MAC address %s is not present in " \
                        "AQDB." % (host, mac)
                failed.append(msg)
                logger.info(msg)
                continue
            q = session.query(Machine)
            q = q.filter_by(id=dbinterface.hardware_entity.id)
            dbmachine = q.first()
            if not dbmachine:
                msg = "Skipping host %s: The AQDB interface with MAC address " \
                        "%s is tied to hardware %s instead of a virtual " \
                        "machine." % \
                        (host, mac, dbinterface.hardware_entity.label)
                failed.append(msg)
                logger.info(msg)
                continue
            if dbinterface.assignments:
                msg = "Skipping host %s: The AQDB interface with MAC address " \
                        "%s is already tied to %s." % \
                        (host, mac, dbinterface.assignments[0].fqdns[0])
                failed.append(msg)
                logger.info(msg)
                continue
            if dbmachine.host:
                msg = "Skipping host %s: The AQDB interface with MAC address " \
                        "%s is already tied to %s." % \
                        (host, mac, dbmachine.fqdn)
                failed.append(msg)
                logger.info(msg)
                continue
            dbhost = Host(machine=dbmachine, branch=dbdomain,
                          status=dbstatus, owner_grn=dbpersonality.owner_grn,
                          personality=dbpersonality, operating_system=dbos,
                          comments="Created by refresh_windows_host")
            session.add(dbhost)

            if self.config.has_option("archetype_windows", "default_grn_target"):
                dbhost.grns.append((dbhost, dbgrn,
                                    self.config.get("archetype_",
                                                    "default_grn_target")))

            dbfqdn = Fqdn.get_or_create(session, name=short,
                                        dns_domain=dbdns_domain, preclude=True)
            dbdns_rec = ReservedName(fqdn=dbfqdn)
            session.add(dbdns_rec)
            dbmachine.primary_name = dbdns_rec
            success.append("Added host entry for %s (%s)." %
                           (dbmachine.label, dbdns_rec.fqdn))
            if dbmachine.vm_container:
                containers.add(dbmachine.vm_container)
            session.flush()
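parse_fqdn is defined outside this excerpt; the real helper also takes the session and resolves the DNS domain against the database. Purely as a hypothetical, simplified stand-in for the split it performs:

def parse_fqdn_sketch(fqdn):
    # Hypothetical simplification: split a fully qualified name into the
    # short host name and the remaining DNS domain string.
    if "." not in fqdn:
        raise ValueError("%s is not a fully qualified name." % fqdn)
    short, dns_domain = fqdn.split(".", 1)
    return (short, dns_domain)

# parse_fqdn_sketch("vmhost1.example.com") -> ("vmhost1", "example.com")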
Example n. 44
0
STATUS = Status.get_unique(sess, 'ready')
assert isinstance(STATUS, Status), 'No ready status @ %s' % func_name()

DOMAIN = Domain.get_unique(sess, 'ny-prod')
assert isinstance(DOMAIN, Domain), 'no ny-prod domain @ %s' % func_name()

ARCH = Archetype.get_unique(sess, 'aquilon')
assert isinstance(ARCH, Archetype), 'No archetype @ %s' % func_name()

OS = OperatingSystem.get_unique(sess,
                                name='linux',
                                version='5.0.1-x86_64',
                                archetype=ARCH)
assert isinstance(OS, OperatingSystem), 'No os @ %s' % func_name()

PRSNLTY = Personality.get_unique(sess, name='generic', archetype=ARCH)
assert isinstance(PRSNLTY, Personality), 'no personality @ %s' % func_name()

NETWORK = sess.query(Network).filter(Network.cidr < 31).first()
assert isinstance(NETWORK, Network), 'no network in %s' % func_name()

DNS_DOMAIN = DnsDomain.get_unique(sess, DNAME)
assert isinstance(DNS_DOMAIN, DnsDomain), 'no dns domain @ %s' % func_name()

BRANCH = sess.query(Branch).first()
if not BRANCH:
    BRANCH = Branch(branch_type='domain',
                    name='ny-prod',
                    is_sync_valid=1,
                    compiler='/ms/dist/elfms/PROJ/panc/prod/lib/panc.jar',
                    autosync=1,
Example n. 45
0
    def render(self, session, logger, hostname, cluster,
               personality, **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.status.name == 'decommissioned':
            raise ArgumentError("Cannot add hosts to decommissioned clusters.")

        # We only support changing personality within the same
        # archetype. The archetype decides things like which OS, how
        # it builds (dhcp, etc), whether it's compilable, and
        # switching all of that by side-effect seems wrong
        # somehow. And besides, it would make the user-interface and
        # implementation for this command ugly in order to support
        # changing all of those options.
        personality_change = False
        if personality is not None:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbhost.personality != dbpersonality:
                dbhost.personality = dbpersonality
                personality_change = True

        # Allow for non-restricted clusters (the default?)
        if (len(dbcluster.allowed_personalities) > 0 and
            dbhost.personality not in dbcluster.allowed_personalities):
            raise ArgumentError("The personality %s for %s is not allowed "
                                "by the cluster. Specify --personality "
                                "and provide one of %s" %
                                (dbhost.personality, dbhost.fqdn,
                                 ", ".join([x.name for x in
                                            dbcluster.allowed_personalities])))

        # Now that we've changed the personality, we can check
        # if this is a valid membership change
        dbcluster.validate_membership(dbhost)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if dbhost.cluster and dbhost.cluster != dbcluster:
            logger.client_info("Removing {0:l} from {1:l}.".format(dbhost,
                                                                   dbhost.cluster))
            old_cluster = dbhost.cluster
            old_cluster.hosts.remove(dbhost)
            remove_service_addresses(old_cluster, dbhost)
            old_cluster.validate()
            session.expire(dbhost, ['_cluster'])
            plenaries.append(Plenary.get_plenary(old_cluster))

        # Apply the service addresses to the new member
        for res in walk_resources(dbcluster):
            if not isinstance(res, ServiceAddress):
                continue
            apply_service_address(dbhost, res.interfaces, res, logger)

        if dbhost.cluster:
            if personality_change:
                raise ArgumentError("{0:l} already in {1:l}, use "
                                    "aq reconfigure to change personality."
                                    .format(dbhost, dbhost.cluster))
            # the cluster has not changed, therefore there's nothing
            # to do here.
            return

        # Calculate the node index: build a map of all possible values, remove
        # the used ones, and pick the smallest remaining one
        node_index_map = set(xrange(len(dbcluster._hosts) + 1))
        for link in dbcluster._hosts:
            # The cluster may have been bigger in the past, so node indexes may
            # be larger than the current cluster size
            try:
                node_index_map.remove(link.node_index)
            except KeyError:
                pass

        dbcluster.hosts.append((dbhost, min(node_index_map)))
        dbcluster.validate()

        # Adjust the host's build status to match the new cluster: demote a
        # ready host joining a non-ready cluster, and promote an almostready
        # host joining a ready cluster.
        if dbhost.status.name == 'ready':
            if dbcluster.status.name != 'ready':
                dbalmost = HostAlmostready.get_instance(session)
                dbhost.status.transition(dbhost, dbalmost)
                plenaries.append(Plenary.get_plenary(dbhost))
        elif dbhost.status.name == 'almostready':
            if dbcluster.status.name == 'ready':
                dbready = HostReady.get_instance(session)
                dbhost.status.transition(dbhost, dbready)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # Enforce that service instances are set correctly for the
        # new cluster association.
        chooser = Chooser(dbhost, logger=logger)
        chooser.set_required()
        chooser.flush_changes()
        # the chooser will include the host plenary
        with CompileKey.merge([chooser.get_key(), plenaries.get_key()]):
            plenaries.stash()
            try:
                chooser.write_plenary_templates(locked=True)
                plenaries.write(locked=True)
            except:
                chooser.restore_stash()
                plenaries.restore_stash()
                raise

        return
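The node-index bookkeeping above (build a candidate set one larger than the current membership, drop the indexes already in use, take the smallest survivor) can be exercised on its own. A minimal sketch with plain integers rather than the ORM link objects:

def next_node_index(used_indexes):
    # One more candidate than there are members guarantees at least one
    # free slot, even when the existing indexes are packed from zero.
    candidates = set(range(len(used_indexes) + 1))
    # Indexes left over from a formerly larger cluster fall outside the
    # candidate set and are simply ignored, mirroring the KeyError case.
    candidates -= set(used_indexes)
    return min(candidates)

# next_node_index([]) -> 0; next_node_index([0, 1, 3]) -> 2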
Example n. 46
0
    def render(
            self,
            session,
            logger,
            cluster,
            personality,
            max_members,
            fix_location,
            down_hosts_threshold,
            maint_threshold,
            comments,
            # ESX specific options
            switch,
            memory_capacity,
            clear_overrides,
            vm_to_host_ratio,
            **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.cluster_type == 'meta':
            raise ArgumentError("%s should not be a metacluster." %
                                format(dbcluster))

        cluster_updated = False
        remove_plenaries = PlenaryCollection(logger=logger)
        plenaries = PlenaryCollection(logger=logger)

        (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                             vm_to_host_ratio)
        if down_hosts_threshold is not None:
            (perc, dht) = Cluster.parse_threshold(down_hosts_threshold)
            dbcluster.down_hosts_threshold = dht
            dbcluster.down_hosts_percent = perc
            cluster_updated = True

        if dbcluster.cluster_type == "esx":
            if vm_count is not None or down_hosts_threshold is not None:
                if vm_count is None:
                    vm_count = dbcluster.vm_count
                    host_count = dbcluster.host_count

                dht = dbcluster.down_hosts_threshold
                perc = dbcluster.down_hosts_percent

                dbcluster.validate(vm_part=vm_count,
                                   host_part=host_count,
                                   down_hosts_threshold=dht,
                                   down_hosts_percent=perc)

                dbcluster.vm_count = vm_count
                dbcluster.host_count = host_count
                cluster_updated = True

        if switch is not None:
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbswitch = Switch.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbswitch))
            else:
                dbswitch = None
            dbcluster.switch = dbswitch
            cluster_updated = True

        if memory_capacity is not None:
            dbcluster.memory_capacity = memory_capacity
            dbcluster.validate()
            cluster_updated = True

        if clear_overrides is not None:
            dbcluster.memory_capacity = None
            dbcluster.validate()
            cluster_updated = True

        location_updated = update_cluster_location(session, logger, dbcluster,
                                                   fix_location, plenaries,
                                                   remove_plenaries,
                                                   **arguments)

        if location_updated:
            cluster_updated = True

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality
            cluster_updated = True

        if max_members is not None:
            current_members = len(dbcluster.hosts)
            if max_members < current_members:
                raise ArgumentError(
                    "%s has %d hosts bound, which exceeds "
                    "the requested limit %d." %
                    (format(dbcluster), current_members, max_members))
            dbcluster.max_hosts = max_members
            cluster_updated = True

        if comments is not None:
            dbcluster.comments = comments
            cluster_updated = True

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)
            cluster_updated = True

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        key = CompileKey.merge(
            [plenaries.get_write_key(),
             remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
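Cluster.parse_threshold is not shown in this excerpt. As an assumption about the percent-or-absolute convention suggested by the paired down_hosts_percent/down_hosts_threshold attributes, a hypothetical stand-in might look like:

def parse_threshold_sketch(threshold):
    # Hypothetical: "25%" means a percentage of the member count,
    # a bare "2" means an absolute number of down hosts.
    threshold = str(threshold).strip()
    if threshold.endswith("%"):
        return (True, int(threshold[:-1]))
    return (False, int(threshold))

# parse_threshold_sketch("25%") -> (True, 25)
# parse_threshold_sketch("2")   -> (False, 2)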
Example n. 47
0
    def render(self, session, service, instance, archetype, personality,
               networkip, include_parents, **arguments):
        dbservice = service and Service.get_unique(session, service,
                                                   compel=True) or None
        dblocation = get_location(session, **arguments)
        queries = []
        # The current logic basically shoots for exact match when given
        # (like exact personality maps only or exact archetype maps
        # only), or "any" if an exact spec isn't given.
        if archetype and personality:
            dbpersona = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)
            q = session.query(PersonalityServiceMap)
            q = q.filter_by(personality=dbpersona)
            queries.append(q)
        elif personality:
            # Alternately, this could throw an error and ask for archetype.
            q = session.query(PersonalityServiceMap)
            q = q.join('personality').filter_by(name=personality)
            q = q.reset_joinpoint()
            queries.append(q)
        elif archetype:
            # Alternately, this could throw an error and ask for personality.
            q = session.query(PersonalityServiceMap)
            q = q.join('personality', 'archetype').filter_by(name=archetype)
            q = q.reset_joinpoint()
            queries.append(q)
        else:
            queries.append(session.query(ServiceMap))
            queries.append(session.query(PersonalityServiceMap))
        if dbservice:
            for i in range(len(queries)):
                queries[i] = queries[i].join('service_instance')
                queries[i] = queries[i].filter_by(service=dbservice)
                queries[i] = queries[i].reset_joinpoint()
        if instance:
            for i in range(len(queries)):
                queries[i] = queries[i].join('service_instance')
                queries[i] = queries[i].filter_by(name=instance)
                queries[i] = queries[i].reset_joinpoint()
        # Nothing fancy for now - just show any relevant explicit bindings.
        if dblocation:
            for i in range(len(queries)):
                if include_parents:
                    base_cls = queries[i].column_descriptions[0]["expr"]
                    col = base_cls.location_id
                    queries[i] = queries[i].filter(col.in_(dblocation.parent_ids()))
                else:
                    queries[i] = queries[i].filter_by(location=dblocation)

        if networkip:
            dbnet_env = NetworkEnvironment.get_unique_or_default(session)
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            for i in range(len(queries)):
                queries[i] = queries[i].filter_by(network=dbnetwork)

        results = ServiceMapList()
        for q in queries:
            results.extend(q.all())
        if service and instance and dblocation:
            # This should be an exact match.  (Personality doesn't
            # matter... either it was given and it should be an
            # exact match for PersonalityServiceMap or it wasn't
            # and this should be an exact match for ServiceMap.)
            if not results:
                raise NotFoundException("No matching map found.")
        return results
Example n. 48
0
                q = session.query(Host)
            q = q.filter_by(personality=dbpersona)
            # XXX: Ideally, filter based on hosts that are/are not in a cluster
            if q.count() > 0:
                raise ArgumentError("The personality {0} is in use and cannot "
                                    "be modified".format(str(dbpersona)))
            dbpersona.cluster_required = cluster_required

        if host_environment is not None:
            legacy_env = HostEnvironment.get_unique(session,
                                                    'legacy',
                                                    compel=True)
            if dbpersona.host_environment == legacy_env:
                HostEnvironment.polymorphic_subclass(
                    host_environment, "Unknown environment name")
                Personality.validate_env_in_name(personality, host_environment)
                dbpersona.host_environment = HostEnvironment.get_unique(
                    session, host_environment, compel=True)
            else:
                raise ArgumentError(
                    "The personality '{0}' already has env set to '{1}'"
                    " and cannot be updated".format(str(dbpersona),
                                                    host_environment))

        plenaries = PlenaryCollection(logger=logger)

        if grn or eon_id:
            dbgrn = lookup_grn(session,
                               grn,
                               eon_id,
                               logger=logger,
Example n. 49
0
    def render(self,
               session,
               logger,
               hostname,
               machine,
               archetype,
               domain,
               sandbox,
               osname,
               osversion,
               buildstatus,
               personality,
               comments,
               zebra_interfaces,
               grn,
               eon_id,
               skip_dsdb_check=False,
               **arguments):
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        section = "archetype_" + dbarchetype.name

        # This is for the various add_*_host commands
        if not domain and not sandbox:
            domain = self.config.get(section, "host_domain")

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Adding hosts to {0:l} is not allowed.".format(dbbranch))

        if not buildstatus:
            buildstatus = 'build'
        dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if not personality:
            if self.config.has_option(section, "default_personality"):
                personality = self.config.get(section, "default_personality")
            else:
                personality = 'generic'
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=dbarchetype,
                                               compel=True)

        if not osname:
            if self.config.has_option(section, "default_osname"):
                osname = self.config.get(section, "default_osname")
        if not osversion:
            if self.config.has_option(section, "default_osversion"):
                osversion = self.config.get(section, "default_osversion")

        if not osname or not osversion:
            raise ArgumentError("Can not determine a sensible default OS "
                                "for archetype %s. Please use the "
                                "--osname and --osversion parameters." %
                                (dbarchetype.name))

        dbos = OperatingSystem.get_unique(session,
                                          name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)

        if (dbmachine.model.machine_type == 'aurora_node'
                and dbpersonality.archetype.name != 'aurora'):
            raise ArgumentError("Machines of type aurora_node can only be "
                                "added with archetype aurora.")

        if dbmachine.host:
            raise ArgumentError("{0:c} {0.label} is already allocated to "
                                "{1:l}.".format(dbmachine, dbmachine.host))

        if grn or eon_id:
            dbgrn = lookup_grn(session,
                               grn,
                               eon_id,
                               logger=logger,
                               config=self.config)
        else:
            dbgrn = dbpersonality.owner_grn

        dbhost = Host(machine=dbmachine,
                      branch=dbbranch,
                      owner_grn=dbgrn,
                      sandbox_author=dbauthor,
                      personality=dbpersonality,
                      status=dbstatus,
                      operating_system=dbos,
                      comments=comments)
        session.add(dbhost)

        if self.config.has_option("archetype_" + archetype,
                                  "default_grn_target"):
            dbhost.grns.append((dbhost, dbgrn,
                                self.config.get("archetype_" + archetype,
                                                "default_grn_target")))

        if zebra_interfaces:
            # --autoip does not make sense for Zebra (at least not the way it's
            # implemented currently)
            dbinterface = None
        else:
            dbinterface = get_boot_interface(dbmachine)

        # This method is allowed to return None. This can only happen
        # (currently) using add_aurora_host, add_windows_host, or possibly by
        # bypassing the aq client and posting a request directly.
        audit_results = []
        ip = generate_ip(session,
                         logger,
                         dbinterface,
                         audit_results=audit_results,
                         **arguments)

        dbdns_rec, newly_created = grab_address(session,
                                                hostname,
                                                ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        dbmachine.primary_name = dbdns_rec

        # Fix up auxiliary addresses to point to the primary name by default
        if ip:
            dns_env = dbdns_rec.fqdn.dns_environment

            for addr in dbmachine.all_addresses():
                if addr.interface.interface_type == "management":
                    continue
                if addr.service_address_id:  # pragma: no cover
                    continue
                for rec in addr.dns_records:
                    if rec.fqdn.dns_environment == dns_env:
                        rec.reverse_ptr = dbdns_rec.fqdn

        if zebra_interfaces:
            if not ip:
                raise ArgumentError(
                    "Zebra configuration requires an IP address.")
            dbsrv_addr = self.assign_zebra_address(session, dbmachine,
                                                   dbdns_rec, zebra_interfaces)
        else:
            if ip:
                if not dbinterface:
                    raise ArgumentError(
                        "You have specified an IP address for the "
                        "host, but {0:l} does not have a bootable "
                        "interface.".format(dbmachine))
                assign_address(dbinterface, ip, dbdns_rec.network)
            dbsrv_addr = None

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        if dbsrv_addr:
            plenaries.append(Plenary.get_plenary(dbsrv_addr))

        key = plenaries.get_write_key()
        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)

            # XXX: This (and some of the code above) is horrible.  There
            # should be a generic/configurable hook here that could kick
            # in based on archetype and/or domain.
            dsdb_runner = DSDBRunner(logger=logger)
            if dbhost.archetype.name == 'aurora':
                # For aurora, check that DSDB has a record of the host.
                if not skip_dsdb_check:
                    try:
                        dsdb_runner.show_host(hostname)
                    except ProcessException, e:
                        raise ArgumentError("Could not find host in DSDB: %s" %
                                            e)
            elif not dbmachine.primary_ip:
                logger.info("No IP for %s, not adding to DSDB." %
                            dbmachine.fqdn)
Example n. 50
0
    def reconfigure_list(self, session, logger, dbhosts, archetype,
                         personality, buildstatus, osname, osversion,
                         **arguments):
        failed = []
        # Check all the parameters up front.
        # Some of these could be more intelligent about defaults
        # (either by checking for unique entries or relying on the list)
        # - starting simple.
        if archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            if dbarchetype.cluster_type is not None:
                raise ArgumentError("Archetype %s is a cluster archetype" %
                                    dbarchetype.name)
            # TODO: Once OS is a first class object this block needs
            # to check that either OS is also being reset or that the
            # OS is valid for the new archetype.
        else:
            dbarchetype = None
        if personality:
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=dbarchetype,
                                                   compel=True)
        if osname and not osversion:
            raise ArgumentError("Please specify --osversion for OS %s." %
                                osname)
        if osversion:
            if not osname:
                raise ArgumentError("Please specify --osname to use with "
                                    "OS version %s." % osversion)
            # Linux model names are the same under aurora and aquilon, so
            # allowing to omit --archetype would not be useful
            if not archetype:
                raise ArgumentError("Please specify --archetype for OS "
                                    "%s, version %s." % (osname, osversion))
            dbos = OperatingSystem.get_unique(session, name=osname,
                                              version=osversion,
                                              archetype=dbarchetype,
                                              compel=True)
        else:
            dbos = None

        if buildstatus:
            dbstatus = HostLifecycle.get_unique(session, buildstatus,
                                                compel=True)

        # Take a shortcut if there's nothing to do, but only after all the other
        # parameters have been checked
        if not dbhosts:
            return

        personalities = {}
        branches = {}
        authors = {}
        # Do any final cross-list or dependency checks before entering
        # the Chooser loop.
        for dbhost in dbhosts:
            if dbhost.branch in branches:
                branches[dbhost.branch].append(dbhost)
            else:
                branches[dbhost.branch] = [dbhost]
            if dbhost.sandbox_author in authors:
                authors[dbhost.sandbox_author].append(dbhost)
            else:
                authors[dbhost.sandbox_author] = [dbhost]

            if dbos and not dbarchetype and dbhost.archetype != dbos.archetype:
                failed.append("{0}: Cannot change operating system because it "
                              "needs {1:l} instead of "
                              "{2:l}.".format(dbhost.fqdn, dbhost.archetype,
                                              dbos.archetype))
            if dbarchetype and not dbos and \
               dbhost.operating_system.archetype != dbarchetype:
                failed.append("{0}: Cannot change archetype because {1:l} needs "
                              "{2:l}.".format(dbhost.fqdn, dbhost.operating_system,
                                              dbhost.operating_system.archetype))
            if (personality and dbhost.cluster and
                len(dbhost.cluster.allowed_personalities) > 0 and
                dbpersonality not in dbhost.cluster.allowed_personalities):
                allowed = ["%s/%s" % (p.archetype.name, p.name) for p in
                           dbhost.cluster.allowed_personalities]
                failed.append("{0}: The {1:l} is not allowed by {2}.  "
                              "Specify one of {3}.".format(
                                  dbhost.fqdn, dbpersonality,
                                  dbhost.cluster, allowed))
            if personality:
                personalities[dbhost.fqdn] = dbpersonality
            elif archetype:
                personalities[dbhost.fqdn] = Personality.get_unique(session,
                        name=dbhost.personality.name, archetype=dbarchetype)
                if not personalities[dbhost.fqdn]:
                    failed.append("%s: No personality %s found for archetype "
                                  "%s." %
                                  (dbhost.fqdn, dbhost.personality.name,
                                   dbarchetype.name))

        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))
        if len(branches) > 1:
            keys = branches.keys()
            keys.sort(key=lambda branch: len(branches[branch]))
            stats = ["{0:d} hosts in {1:l}".format(len(branches[branch]), branch)
                     for branch in keys]
            raise ArgumentError("All hosts must be in the same domain or "
                                "sandbox:\n%s" % "\n".join(stats))
        dbbranch = branches.keys()[0]
        if len(authors) > 1:
            keys = authors.keys()
            keys.sort(key=lambda author: len(authors[author]))
            stats = ["%s hosts with sandbox author %s" %
                     (len(authors[author]), author.name) for author in keys]
            raise ArgumentError("All hosts must be managed by the same "
                                "sandbox author:\n%s" % "\n".join(stats))
        dbauthor = authors.keys()[0]

        failed = []
        choosers = []
        for dbhost in dbhosts:
            if dbhost.fqdn in personalities:
                dbhost.personality = personalities[dbhost.fqdn]
                session.add(dbhost)
            if osversion:
                dbhost.operating_system = dbos
                session.add(dbhost)
            if buildstatus:
                dbhost.status.transition(dbhost, dbstatus)
                session.add(dbhost)
        session.flush()

        logger.client_info("Verifying service bindings.")
        for dbhost in dbhosts:
            if dbhost.archetype.is_compileable:
                if arguments.get("keepbindings", None):
                    chooser = Chooser(dbhost, logger=logger,
                                      required_only=False)
                else:
                    chooser = Chooser(dbhost, logger=logger,
                                      required_only=True)
                choosers.append(chooser)
                try:
                    chooser.set_required()
                except ArgumentError, e:
                    failed.append(str(e))
Example n. 51
0
    def render(self, session, logger, personality, archetype,
               grn, eon_id, host_environment, comments,
               cluster_required, copy_from, config_override, **arguments):
        if not VALID_PERSONALITY_RE.match(personality):
            raise ArgumentError("Personality name '%s' is not valid." %
                                personality)
        if not (grn or eon_id):
            raise ArgumentError("GRN or EON ID is required for adding a "
                                "personality.")

        dbarchetype = Archetype.get_unique(session, archetype, compel=True)

        if not host_environment:
            try:
                host_environment = self.config.get("archetype_" + archetype,
                                                   "default_environment")
            except (NoSectionError, NoOptionError):
                raise ArgumentError("Default environment is not configured "
                                    "for {0:l}, please specify "
                                    "--host_environment.".format(dbarchetype))

        HostEnvironment.polymorphic_subclass(host_environment,
                                             "Unknown environment name")
        Personality.validate_env_in_name(personality, host_environment)
        host_env = HostEnvironment.get_unique(session, host_environment, compel=True)

        Personality.get_unique(session, archetype=dbarchetype, name=personality,
                               preclude=True)

        dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                           config=self.config)

        dbpersona = Personality(name=personality, archetype=dbarchetype,
                                cluster_required=bool(cluster_required),
                                host_environment=host_env, owner_grn=dbgrn,
                                comments=comments,
                                config_override=config_override)
        session.add(dbpersona)

        if self.config.has_option("archetype_" + archetype, "default_grn_target"):
            dbpersona.grns.append((dbpersona, dbgrn,
                                   self.config.get("archetype_" + archetype,
                                                   "default_grn_target")))

        if copy_from:
            ## copy config data
            dbfrom_persona = Personality.get_unique(session,
                                                    archetype=dbarchetype,
                                                    name=copy_from,
                                                    compel=True)

            src_parameters = get_parameters(session,
                                            personality=dbfrom_persona)
            db_param_holder = PersonalityParameter(personality=dbpersona)

            for param in src_parameters:
                dbparameter = Parameter(value=param.value,
                                        comments=param.comments,
                                        holder=db_param_holder)
                session.add(dbparameter)

            for link in dbfrom_persona.features:
                params = {}
                params["personality"] = dbpersona
                if link.model:
                    params["model"] = link.model
                if link.interface_name:
                    params["interface_name"] = link.interface_name

                add_link(session, logger, link.feature, params)

            ## service maps
            q = session.query(PersonalityServiceMap).filter_by(personality=dbfrom_persona)

            for sm in q.all() :
                dbmap = PersonalityServiceMap(service_instance=sm.service_instance,
                                              location=sm.location,
                                              network=sm.network,
                                              personality=dbpersona)
                session.add(dbmap)

            ## required services
            dbpersona.services.extend(dbfrom_persona.services)

        session.flush()

        plenary = PlenaryPersonality(dbpersona, logger=logger)
        plenary.write()
        return
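VALID_PERSONALITY_RE and Personality.validate_env_in_name are defined outside this excerpt. Purely as a hedged illustration of the kind of checks they imply (a restricted character set for the name, and any environment token embedded in the name having to agree with --host_environment), not the actual Aquilon implementation:

import re

# Hypothetical pattern; the real VALID_PERSONALITY_RE may be stricter or looser.
VALID_PERSONALITY_RE_SKETCH = re.compile(r'^[A-Za-z0-9_.+-]+$')

def validate_env_in_name_sketch(name, host_environment):
    # Assumed convention: if the name ends in a recognizable environment
    # token, that token must match the declared host environment.
    known_envs = ("dev", "qa", "uat", "prod", "infra", "legacy")
    suffix = name.rsplit("-", 1)[-1]
    if suffix in known_envs and suffix != host_environment:
        raise ValueError("Environment '%s' embedded in personality name %s "
                         "does not match --host_environment %s." %
                         (suffix, name, host_environment))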
Example n. 52
0
    def render(self, session, logger,
               metacluster, archetype, personality,
               domain, sandbox,
               max_members,
               buildstatus, comments,
               **arguments):

        validate_basic("metacluster", metacluster)

        # this should be reverted when virtbuild supports these options
        if not archetype:
            archetype = "metacluster"

        if not personality:
            personality = "metacluster"

        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)
        if not dbpersonality.is_cluster:
            raise ArgumentError("%s is not a cluster personality." %
                                personality)

        ctype = "meta"  # dbpersonality.archetype.cluster_type

        if not buildstatus:
            buildstatus = "build"
        dbstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                               compel=True)

        # this should be reverted when virtbuild supports these options
        if not domain and not sandbox:
            domain = self.config.get("archetype_metacluster", "host_domain")

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=False)

        dbloc = get_location(session, **arguments)

        # this should be reverted when virtbuild supports this option
        if not dbloc:
            dbloc = Location.get_unique(session,
                                        name=self.config.get("archetype_metacluster",
                                                             "location_name"),
                                        location_type=self.config.get("archetype_metacluster",
                                                                      "location_type"))
        elif not dbloc.campus:
            raise ArgumentError("{0} is not within a campus.".format(dbloc))

        if max_members is None:
            max_members = self.config.getint("archetype_metacluster",
                                             "max_members_default")

        if metacluster.strip().lower() == 'global':
            raise ArgumentError("Metacluster name global is reserved.")

        MetaCluster.get_unique(session, metacluster, preclude=True)
        clus_type = MetaCluster  # Cluster.__mapper__.polymorphic_map[ctype].class_

        kw = {}

        dbcluster = MetaCluster(name=metacluster, location_constraint=dbloc,
                                personality=dbpersonality,
                                max_clusters=max_members,
                                branch=dbbranch, sandbox_author=dbauthor,
                                status=dbstatus, comments=comments)

        session.add(dbcluster)
        session.flush()

        plenary = PlenaryMetaCluster(dbcluster, logger=logger)
        plenary.write()

        return
Example n. 53
0
    def render(self, session, logger, cluster, archetype, personality, domain,
               sandbox, max_members, down_hosts_threshold, maint_threshold,
               buildstatus, comments, vm_to_host_ratio, switch, metacluster,
               **arguments):

        validate_nlist_key("cluster", cluster)
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)
        if not dbpersonality.is_cluster:
            raise ArgumentError("%s is not a cluster personality." %
                                personality)

        ctype = dbpersonality.archetype.cluster_type
        section = "archetype_" + dbpersonality.archetype.name

        if not buildstatus:
            buildstatus = "build"
        dbstatus = ClusterLifecycle.get_instance(session, buildstatus)

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Adding clusters to {0:l} is not allowed."
                                .format(dbbranch))

        dbloc = get_location(session, **arguments)
        if not dbloc:
            raise ArgumentError("Adding a cluster requires a location "
                                "constraint.")
        if not dbloc.campus:
            raise ArgumentError("{0} is not within a campus.".format(dbloc))

        if max_members is None:
            if self.config.has_option(section, "max_members_default"):
                max_members = self.config.getint(section, "max_members_default")

        Cluster.get_unique(session, cluster, preclude=True)
        # Not finding the cluster type is an internal consistency issue, so make
        # that show up in the logs by using AquilonError
        clus_type = Cluster.polymorphic_subclass(ctype, "Unknown cluster type",
                                                 error=AquilonError)

        (down_hosts_pct, dht) = Cluster.parse_threshold(down_hosts_threshold)

        kw = {'name': cluster,
              'location_constraint': dbloc,
              'personality': dbpersonality,
              'max_hosts': max_members,
              'branch': dbbranch,
              'sandbox_author': dbauthor,
              'down_hosts_threshold': dht,
              'down_hosts_percent': down_hosts_pct,
              'status': dbstatus,
              'comments': comments}

        if ctype == 'esx':
            if vm_to_host_ratio is None:
                if self.config.has_option(section, "vm_to_host_ratio"):
                    vm_to_host_ratio = self.config.get(section,
                                                       "vm_to_host_ratio")
                else:
                    vm_to_host_ratio = "1:1"
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            kw["vm_count"] = vm_count
            kw["host_count"] = host_count

        if switch and hasattr(clus_type, 'network_device'):
            kw['network_device'] = NetworkDevice.get_unique(session,
                                                            switch,
                                                            compel=True)

        if maint_threshold is not None:
            (down_hosts_pct, dht) = Cluster.parse_threshold(maint_threshold)
            kw['down_maint_threshold'] = dht
            kw['down_maint_percent'] = down_hosts_pct

        dbcluster = clus_type(**kw)

        plenaries = PlenaryCollection(logger=logger)

        if metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   metacluster,
                                                   compel=True)

            dbmetacluster.members.append(dbcluster)

            plenaries.append(Plenary.get_plenary(dbmetacluster))

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        plenaries.write()

        return
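force_ratio is also outside this excerpt; given the "1:1" default used for vm_to_host_ratio above, a minimal hypothetical parser for the N:M form could be:

def force_ratio_sketch(label, value):
    # Hypothetical helper: pass None through, otherwise require "N:M".
    if value is None:
        return (None, None)
    try:
        vm_part, host_part = str(value).split(":")
        return (int(vm_part), int(host_part))
    except ValueError:
        raise ValueError("The %s value '%s' is not in the N:M format." %
                         (label, value))

# force_ratio_sketch("vm_to_host_ratio", "16:1") -> (16, 1)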
Example n. 54
0
    def render(self, session, logger, feature, archetype, personality, model,
               vendor, interface, justification, user, **arguments):

        # Binding a feature to a named interface makes sense in the scope of a
        # personality, but not for a whole archetype.
        if interface and not personality:
            raise ArgumentError("Binding to a named interface needs "
                                "a personality.")

        q = session.query(Personality)
        dbarchetype = None

        feature_type = "host"

        justification_required = True

        # Warning: order matters here!
        params = {}
        if personality:
            justification_required = False
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            params["personality"] = dbpersonality
            if interface:
                params["interface_name"] = interface
                feature_type = "interface"
            dbarchetype = dbpersonality.archetype
            q = q.filter_by(archetype=dbarchetype)
            q = q.filter_by(name=personality)
        elif archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            params["archetype"] = dbarchetype
            q = q.filter_by(archetype=dbarchetype)
        else:
            # It's highly unlikely that a feature template would work for
            # _any_ archetype, so disallow this case for now. As I can't
            # rule out that such a case will not have some uses in the
            # future, the restriction is here and not in the model.
            raise ArgumentError("Please specify either an archetype or "
                                "a personality when binding a feature.")

        if model:
            dbmodel = Model.get_unique(session,
                                       name=model,
                                       vendor=vendor,
                                       compel=True)

            if dbmodel.machine_type == "nic":
                feature_type = "interface"
            else:
                feature_type = "hardware"

            params["model"] = dbmodel

        if dbarchetype and not dbarchetype.is_compileable:
            raise UnimplementedError("Binding features to non-compilable "
                                     "archetypes is not implemented.")

        if not feature_type:  # pragma: no cover
            raise InternalError("Feature type is not known.")

        dbfeature = Feature.get_unique(session,
                                       name=feature,
                                       feature_type=feature_type,
                                       compel=True)

        cnt = q.count()
        # TODO: should the limit be configurable?
        if justification_required and cnt > 0:
            if not justification:
                raise AuthorizationException(
                    "Changing feature bindings for more "
                    "than just a personality requires --justification.")
            validate_justification(user, justification)

        self.do_link(session, logger, dbfeature, params)
        session.flush()

        idx = 0
        written = 0
        successful = []
        failed = []

        with CompileKey(logger=logger):
            personalities = q.all()

            for personality in personalities:
                idx += 1
                if idx % 1000 == 0:  # pragma: no cover
                    logger.client_info("Processing personality %d of %d..." %
                                       (idx, cnt))

                if not personality.archetype.is_compileable:  # pragma: no cover
                    continue

                try:
                    plenary_personality = PlenaryPersonality(personality)
                    written += plenary_personality.write(locked=True)
                    successful.append(plenary_personality)
                except IncompleteError:
                    pass
                except Exception, err:  # pragma: no cover
                    failed.append("{0} failed: {1}".format(personality, err))

            if failed:  # pragma: no cover
                for plenary in successful:
                    plenary.restore_stash()
                raise PartialError([], failed)
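The loop above follows a write-everything-or-restore-everything pattern: each personality plenary is written under the compile lock, and on any failure the already-written ones are rolled back before a PartialError is raised. A generic sketch of that pattern, detached from the Plenary API and any object exposing write() and restore_stash():

def write_all_or_restore(plenaries):
    # Generic illustration only: try to write every template; if any fail,
    # restore the ones that did succeed before reporting the failures.
    written, failures = [], []
    for plenary in plenaries:
        try:
            plenary.write(locked=True)
            written.append(plenary)
        except Exception, err:
            failures.append("%s failed: %s" % (plenary, err))
    if failures:
        for plenary in written:
            plenary.restore_stash()
        raise RuntimeError("\n".join(failures))
    return len(written)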