def render(self, session, logger, cluster, buildstatus, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbstatus = ClusterLifecycle.get_unique(
            session, buildstatus, compel=True)

        if not dbcluster.status.transition(dbcluster, dbstatus):
            return

        if not dbcluster.personality.archetype.is_compileable:
            return

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        for dbhost in dbcluster.hosts:
            plenaries.append(Plenary.get_plenary(dbhost))

        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)

            plenaries.write(locked=True)
            td = TemplateDomain(
                dbcluster.branch, dbcluster.sandbox_author, logger=logger)
            td.compile(session, plenaries.object_templates, locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
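The snippet above follows a shape that recurs throughout these examples: take the compile lock, write the plenary templates, run the compile, and on any failure restore the stashed files before re-raising so the database transaction can be rolled back. A minimal stand-alone sketch of that error-handling shape follows; FakePlenaries and threading.Lock are stand-ins for Aquilon's PlenaryCollection and lock_queue/CompileKey machinery, so only the control flow is meant to be representative.

import threading

compile_lock = threading.Lock()

class FakePlenaries(object):
    """Tiny stand-in that only models stash/write/restore_stash."""
    def __init__(self):
        self.on_disk = "old profiles"
        self.stashed = None

    def stash(self):
        self.stashed = self.on_disk

    def write(self, locked=False):
        self.on_disk = "new profiles"

    def restore_stash(self):
        self.on_disk = self.stashed

def write_and_compile(plenaries, compile_fn):
    compile_lock.acquire()
    try:
        plenaries.stash()
        plenaries.write(locked=True)
        compile_fn()                    # may raise, e.g. on a pan failure
    except Exception:
        plenaries.restore_stash()       # put the previous profiles back
        raise
    finally:
        compile_lock.release()          # the lock is always released

def failing_compile():
    raise RuntimeError("compile failed")

plenaries = FakePlenaries()
try:
    write_and_compile(plenaries, failing_compile)
except RuntimeError:
    pass
assert plenaries.on_disk == "old profiles"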
Example #2
    def render(self, session, logger, cluster, keepbindings, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if not dbcluster.personality.archetype.is_compileable:
            raise ArgumentError(
                "{0} is not a compilable archetype " "({1!s}).".format(dbcluster, dbcluster.personality.archetype)
            )

        chooser = Chooser(dbcluster, logger=logger, required_only=not keepbindings)
        chooser.set_required()
        chooser.flush_changes()
        td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author, logger=logger)
        # Force a domain lock as pan might overwrite any of the profiles...
        with chooser.get_key():
            try:
                chooser.write_plenary_templates(locked=True)

                td.compile(session, only=chooser.plenaries.object_templates, locked=True)
            except:
                chooser.restore_stash()

                # Okay, cleaned up templates, make sure the caller knows
                # we've aborted so that DB can be appropriately rollback'd.
                raise

        return
Example #3
    def render(self, session, archetype, cluster_type, compilable,
               description, comments, **kwargs):
        validate_nlist_key('--archetype', archetype)

        def subclasses(cls):
            for subcls in cls.__subclasses__():
                for subsubcls in subclasses(subcls):
                    yield subsubcls
                yield subcls

        reserved_names = set([cls.prefix for cls in subclasses(Plenary)])
        # There are also some top-level directories in the template repository
        reserved_names.update(["hardware", "pan", "t"])

        if archetype in reserved_names:
            raise ArgumentError("Archetype name %s is reserved." % archetype)

        Archetype.get_unique(session, archetype, preclude=True)

        if description is None:
            description = archetype
        if cluster_type:
            cls = Cluster.polymorphic_subclass(cluster_type,
                                               "Unknown cluster type")
            # Normalization
            cluster_type = inspect(cls).polymorphic_identity

        dbarch = Archetype(name=archetype, cluster_type=cluster_type,
                           outputdesc=description, comments=comments,
                           is_compileable=bool(compilable))

        session.add(dbarch)
        session.flush()

        return
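The nested subclasses() generator above does a depth-first walk over every direct and indirect subclass, which is how the reserved plenary prefixes are collected. Below is a stand-alone illustration of the same generator; the classes A, B and C are hypothetical, only the generator itself is taken from the example.

def subclasses(cls):
    # Yield every direct and indirect subclass of cls, children first.
    for subcls in cls.__subclasses__():
        for subsubcls in subclasses(subcls):
            yield subsubcls
        yield subcls

class A(object):
    pass

class B(A):
    pass

class C(B):
    pass

# B's own subtree (C) is yielded before B itself, so the order is C, B.
assert list(subclasses(A)) == [C, B]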
Example #4
def test_cluster_service_binding_assoc_proxy():
    """ tests the association proxy on cluster to service works """
    ec = Cluster.get_unique(sess, CLUSTER_NAME)
    assert ec
    print 'length of %s.service_bindings is %s' % (ec.name,
                                                   len(ec.service_bindings))
    assert len(ec.service_bindings) == 1
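service_bindings on the cluster is an SQLAlchemy association proxy, which is why the test can treat it as a plain collection of service instances. A minimal, self-contained sketch of how such a proxy is typically declared follows; the table and attribute names are illustrative, not the actual Aquilon schema.

from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker

Base = declarative_base()

class ServiceInstance(Base):
    __tablename__ = 'service_instance'
    id = Column(Integer, primary_key=True)
    name = Column(String(64))

class ClusterServiceBinding(Base):
    __tablename__ = 'cluster_service_binding'
    id = Column(Integer, primary_key=True)
    cluster_id = Column(Integer, ForeignKey('clstr.id'))
    service_instance_id = Column(Integer, ForeignKey('service_instance.id'))
    service_instance = relationship(ServiceInstance)

class Clstr(Base):
    __tablename__ = 'clstr'
    id = Column(Integer, primary_key=True)
    name = Column(String(64))
    _bindings = relationship(ClusterServiceBinding)
    # Callers see ServiceInstance objects directly; the binding rows that
    # join the two tables stay hidden, as in the test above.
    service_bindings = association_proxy('_bindings', 'service_instance')

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
sess = sessionmaker(bind=engine)()

ec = Clstr(name='grid1')
ec._bindings.append(
    ClusterServiceBinding(service_instance=ServiceInstance(name='dns/prod')))
sess.add(ec)
sess.flush()
assert len(ec.service_bindings) == 1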
Example #5
    def render(self, session, logger, cluster, pancinclude, pancexclude, pancdebug, cleandeps, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if pancdebug:
            pancinclude = r".*"
            pancexclude = r"components/spma/functions"
        dom = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author, logger=logger)

        plenaries = PlenaryCollection(logger=logger)

        def add_cluster_plenaries(cluster):
            plenaries.append(Plenary.get_plenary(cluster))
            for host in cluster.hosts:
                plenaries.append(Plenary.get_plenary(host))

        add_cluster_plenaries(dbcluster)
        if isinstance(dbcluster, MetaCluster):
            for cluster in dbcluster.members:
                add_cluster_plenaries(cluster)

        with plenaries.get_key():
            dom.compile(
                session,
                only=plenaries.object_templates,
                panc_debug_include=pancinclude,
                panc_debug_exclude=pancexclude,
                cleandeps=cleandeps,
                locked=True,
            )
        return
Example #6
    def render(self, session, hostname, machine, cpuname, cpuvendor, cpuspeed,
               cpucount, memory, cluster, share, fullinfo, style, **arguments):
        if fullinfo or style != 'raw':
            q = search_hardware_entity_query(session, Machine, **arguments)
        else:
            q = search_hardware_entity_query(session, Machine.label, **arguments)
        if machine:
            q = q.filter_by(label=machine)
        if hostname:
            dns_rec = DnsRecord.get_unique(session, fqdn=hostname, compel=True)
            q = q.filter(Machine.primary_name_id == dns_rec.id)
        if cpuname or cpuvendor or cpuspeed is not None:
            subq = Cpu.get_matching_query(session, name=cpuname,
                                          vendor=cpuvendor, speed=cpuspeed,
                                          compel=True)
            q = q.filter(Machine.cpu_id.in_(subq))
        if cpucount is not None:
            q = q.filter_by(cpu_quantity=cpucount)
        if memory is not None:
            q = q.filter_by(memory=memory)
        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if isinstance(dbcluster, MetaCluster):
                q = q.join('vm_container', ClusterResource, Cluster)
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.join('vm_container', ClusterResource)
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if share:
            v2shares = session.query(Share.id).filter_by(name=share)
            if not v2shares.count():
                raise NotFoundException("No shares found with name {0}."
                                        .format(share))

            NasAlias = aliased(VirtualNasDisk)
            q = q.join('disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(NasAlias.share_id.in_(v2shares.subquery()))
            q = q.reset_joinpoint()

        if fullinfo or style != "raw":
            q = q.options(joinedload('location'),
                          subqueryload('interfaces'),
                          lazyload('interfaces.hardware_entity'),
                          joinedload('interfaces.assignments'),
                          joinedload('interfaces.assignments.dns_records'),
                          joinedload('chassis_slot'),
                          subqueryload('chassis_slot.chassis'),
                          subqueryload('disks'),
                          subqueryload('host'),
                          lazyload('host.hardware_entity'),
                          subqueryload('host.services_used'),
                          subqueryload('host._cluster'),
                          lazyload('host._cluster.host'))
            return q.all()
        return StringAttributeList(q.all(), "label")
Example #7
def lookup_target(session, plenaries, hostname, ip, cluster, resourcegroup,
                  service_address, alias):
    """
    Check the parameters of the server providing a given service

    Look for potential conflicts, and return a dict that is suitable to be
    passed to either the constructor of ServiceInstanceServer, or to the
    find_server() function.
    """

    params = {}

    if cluster and hostname:
        raise ArgumentError("Only one of --cluster and --hostname may be "
                            "specified.")

    if alias:
        dbdns_env = DnsEnvironment.get_unique_or_default(session)
        dbdns_rec = Alias.get_unique(session, fqdn=alias,
                                     dns_environment=dbdns_env, compel=True)
        params["alias"] = dbdns_rec

    if hostname:
        params["host"] = hostname_to_host(session, hostname)
        plenaries.append(Plenary.get_plenary(params["host"]))
    if cluster:
        params["cluster"] = Cluster.get_unique(session, cluster, compel=True)
        plenaries.append(Plenary.get_plenary(params["cluster"]))

    if service_address:
        # TODO: calling get_resource_holder() means doing redundant DB lookups
        # TODO: it would be nice to also accept an FQDN for the service address,
        # to be consistent with the usage of the --service_address option in
        # add_service_address/del_service_address
        holder = get_resource_holder(session, hostname=hostname,
                                     cluster=cluster,
                                     resgroup=resourcegroup, compel=True)

        dbsrv_addr = ServiceAddress.get_unique(session,
                                               name=service_address,
                                               holder=holder, compel=True)
        params["service_address"] = dbsrv_addr
    elif ip:
        for addr in params["host"].hardware_entity.all_addresses():
            if ip != addr.ip:
                continue

            if addr.service_address:
                params["service_address"] = addr.service_address
            else:
                params["address_assignment"] = addr
            break

    return params
Example #8
def test_cluster_bound_svc():
    """ test the creation of a cluster bound service """
    si = add_service_instance(sess, SVC_NAME, INST_NAME)
    assert si, 'no service instance in %s' % func_name()

    ec = Cluster.get_unique(sess, CLUSTER_NAME)
    cs = ClusterServiceBinding(cluster=ec, service_instance=si)
    create(sess, cs)

    assert cs, 'no cluster bound service created by %s' % func_name()
    print cs
Example #9
def test_cluster_bound_svc():
    """ test the creation of a cluster bound service """
    si = add_service_instance(sess, SVC_NAME, INST_NAME)
    assert si, "no service instance in %s" % func_name()

    ec = Cluster.get_unique(sess, CLUSTER_NAME)
    cs = ClusterServiceBinding(cluster=ec, service_instance=si)
    create(sess, cs)

    assert cs, "no cluster bound service created by %s" % func_name()
    print cs
Example #10
    def render(self, session, logger, hostname, cluster, personality,
               **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbhost = hostname_to_host(session, hostname)
        if not dbhost.cluster:
            raise ArgumentError(
                "{0} is not bound to a cluster.".format(dbhost))
        if dbhost.cluster != dbcluster:
            raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
                dbhost, dbhost.cluster, dbcluster))

        if personality:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbpersonality.cluster_required:
                raise ArgumentError("Cannot switch host to personality %s "
                                    "because that personality requires a "
                                    "cluster" % personality)
            dbhost.personality = dbpersonality
        elif dbhost.personality.cluster_required:
            raise ArgumentError("Host personality %s requires a cluster, "
                                "use --personality to change personality "
                                "when leaving the cluster." %
                                dbhost.personality.name)

        dbcluster.hosts.remove(dbhost)
        remove_service_addresses(dbcluster, dbhost)
        dbcluster.validate()

        session.flush()
        session.expire(dbhost, ['_cluster'])

        # Will need to write a cluster plenary and either write or
        # remove a host plenary.  Grab the domain key since the two
        # must be in the same domain.
        host_plenary = Plenary.get_plenary(dbhost, logger=logger)
        cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)
            cluster_plenary.write(locked=True)
            try:
                host_plenary.write(locked=True)
            except IncompleteError:
                host_plenary.cleanup(domain=dbhost.branch.name, locked=True)
        except:
            cluster_plenary.restore_stash()
            host_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #11
    def render(self, session, archetype, cluster_type, compilable, description, **kwargs):
        valid = re.compile("^[a-zA-Z0-9_-]+$")
        if not valid.match(archetype):
            raise ArgumentError("Archetype name '%s' is not valid." % archetype)
        if archetype in ["hardware", "machine", "pan", "t", "service", "servicedata", "clusters"]:
            raise ArgumentError("Archetype name %s is reserved." % archetype)

        Archetype.get_unique(session, archetype, preclude=True)

        if description is None:
            description = archetype
        if cluster_type:
            Cluster.polymorphic_subclass(cluster_type, "Unknown cluster type")
        dbarch = Archetype(
            name=archetype, cluster_type=cluster_type, outputdesc=description, is_compileable=bool(compilable)
        )

        session.add(dbarch)
        session.flush()

        return
Example #12
    def render(self, session, hostname, machine, cpuname, cpuvendor, cpuspeed, 
               cpucount, memory, cluster, share, fullinfo, style, **arguments):
        if fullinfo or style != 'raw':
            q = search_hardware_entity_query(session, Machine, **arguments)
        else:
            q = search_hardware_entity_query(session, Machine.label, **arguments)
        if machine:
            q = q.filter_by(label=machine)
        if hostname:
            dns_rec = DnsRecord.get_unique(session, fqdn=hostname, compel=True)
            q = q.filter(Machine.primary_name_id==dns_rec.id)
        if cpuname or cpuvendor or cpuspeed is not None:
            subq = Cpu.get_matching_query(session, name=cpuname,
                                          vendor=cpuvendor, speed=cpuspeed,
                                          compel=True)
            q = q.filter(Machine.cpu_id.in_(subq))
        if cpucount is not None:
            q = q.filter_by(cpu_quantity=cpucount)
        if memory is not None:
            q = q.filter_by(memory=memory)
        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if isinstance(dbcluster, MetaCluster):
                q = q.join('vm_container', ClusterResource, Cluster)
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.join('vm_container', ClusterResource)
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if share:
            #v2
            v2shares = session.query(Share.id).filter_by(name=share).all()
            if v2shares:
                NasAlias = aliased(VirtualDisk)
                q = q.join('disks', (NasAlias, NasAlias.id == Disk.id))
                q = q.filter(
                    NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
                q = q.reset_joinpoint()

        if fullinfo:
            q = q.options(joinedload('location'),
                          subqueryload('interfaces'),
                          joinedload('interfaces.assignments'),
                          joinedload('interfaces.assignments.dns_records'),
                          joinedload('chassis_slot'),
                          subqueryload('chassis_slot.chassis'),
                          subqueryload('disks'),
                          subqueryload('host'),
                          subqueryload('host.services_used'),
                          subqueryload('host._cluster'))
            return q.all()
        return SimpleMachineList(q.all())
Example #13
    def render(self, session, logger, hostname, cluster,
               personality, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbhost = hostname_to_host(session, hostname)
        if not dbhost.cluster:
            raise ArgumentError("{0} is not bound to a cluster.".format(dbhost))
        if dbhost.cluster != dbcluster:
            raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
                                dbhost, dbhost.cluster, dbcluster))

        if personality:
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbpersonality.cluster_required:
                raise ArgumentError("Cannot switch host to personality %s "
                                    "because that personality requires a "
                                    "cluster" % personality)
            dbhost.personality = dbpersonality
        elif dbhost.personality.cluster_required:
            raise ArgumentError("Host personality %s requires a cluster, "
                                "use --personality to change personality "
                                "when leaving the cluster." %
                                dbhost.personality.name)

        dbcluster.hosts.remove(dbhost)
        remove_service_addresses(dbcluster, dbhost)
        dbcluster.validate()

        session.flush()
        session.expire(dbhost, ['_cluster'])

        # Will need to write a cluster plenary and either write or
        # remove a host plenary.  Grab the domain key since the two
        # must be in the same domain.
        host_plenary = Plenary.get_plenary(dbhost, logger=logger)
        cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)
            cluster_plenary.write(locked=True)
            try:
                host_plenary.write(locked=True)
            except IncompleteError:
                host_plenary.cleanup(domain=dbhost.branch.name, locked=True)
        except:
            cluster_plenary.restore_stash()
            host_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #14
    def render(self, session, archetype, cluster_type, compilable,
               description, **kwargs):
        valid = re.compile('^[a-zA-Z0-9_-]+$')
        if not valid.match(archetype):
            raise ArgumentError("Archetype name '%s' is not valid." % archetype)
        if archetype in ["hardware", "machine", "pan", "t",
                         "service", "servicedata", "clusters"]:
            raise ArgumentError("Archetype name %s is reserved." % archetype)

        Archetype.get_unique(session, archetype, preclude=True)

        if description is None:
            description = archetype
        if cluster_type:
            Cluster.polymorphic_subclass(cluster_type, "Unknown cluster type")
        dbarch = Archetype(name=archetype,
                           cluster_type=cluster_type,
                           outputdesc=description,
                           is_compileable=bool(compilable))

        session.add(dbarch)
        session.flush()

        return
Example #15
    def render(self, session, logger, cluster, service, instance, force=False,
               **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbservice = Service.get_unique(session, service, compel=True)
        chooser = Chooser(dbcluster, logger=logger, required_only=False)
        if instance:
            dbinstance = get_service_instance(session, dbservice, instance)
            chooser.set_single(dbservice, dbinstance, force=force)
        else:
            chooser.set_single(dbservice, force=force)

        chooser.flush_changes()
        chooser.write_plenary_templates()

        return
Example #16
    def render(self, session, logger, cluster,
               pancinclude, pancexclude, pancdebug, cleandeps,
               **arguments):
        dbclus = Cluster.get_unique(session, cluster, compel=True)
        if pancdebug:
            pancinclude = r'.*'
            pancexclude = r'components/spma/functions'
        dom = TemplateDomain(dbclus.branch, dbclus.sandbox_author,
                             logger=logger)
        profile_list = add_cluster_data(dbclus)

        dom.compile(session, only=profile_list,
                    panc_debug_include=pancinclude,
                    panc_debug_exclude=pancexclude,
                    cleandeps=cleandeps)
        return
Example #17
    def render(self, session, logger, hostname, cluster,
               personality, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbhost = hostname_to_host(session, hostname)
        if not dbhost.cluster:
            raise ArgumentError("{0} is not bound to a cluster.".format(dbhost))
        if dbhost.cluster != dbcluster:
            raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
                                dbhost, dbhost.cluster, dbcluster))

        if personality:
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbpersonality.cluster_required:
                raise ArgumentError("Cannot switch host to personality %s "
                                    "because that personality requires a "
                                    "cluster" % personality)
            dbhost.personality = dbpersonality
        elif dbhost.personality.cluster_required:
            raise ArgumentError("Host personality %s requires a cluster, "
                                "use --personality to change personality "
                                "when leaving the cluster." %
                                dbhost.personality.name)

        dbcluster.hosts.remove(dbhost)
        remove_service_addresses(dbcluster, dbhost)
        dbcluster.validate()

        session.flush()
        session.expire(dbhost, ['_cluster'])

        host_plenary = Plenary.get_plenary(dbhost, logger=logger)
        cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
        with CompileKey.merge([host_plenary.get_key(),
                              cluster_plenary.get_key()]):
            try:
                cluster_plenary.write(locked=True)
                try:
                    host_plenary.write(locked=True)
                except IncompleteError:
                    host_plenary.remove(locked=True)
            except:
                cluster_plenary.restore_stash()
                host_plenary.restore_stash()
                raise
Example #18
    def render(self, session, logger, cluster, pancinclude, pancexclude,
               pancdebug, cleandeps, **arguments):
        dbclus = Cluster.get_unique(session, cluster, compel=True)
        if pancdebug:
            pancinclude = r'.*'
            pancexclude = r'components/spma/functions'
        dom = TemplateDomain(dbclus.branch,
                             dbclus.sandbox_author,
                             logger=logger)
        profile_list = add_cluster_data(dbclus)

        dom.compile(session,
                    only=profile_list,
                    panc_debug_include=pancinclude,
                    panc_debug_exclude=pancexclude,
                    cleandeps=cleandeps)
        return
Example #19
def get_resource_holder(session,
                        hostname=None,
                        cluster=None,
                        resgroup=None,
                        compel=True):
    who = None
    if hostname is not None:
        dbhost = hostname_to_host(session, hostname)
        who = dbhost.resholder
        if who is None:
            if compel:
                raise NotFoundException("{0} has no resources.".format(dbhost))
            dbhost.resholder = HostResource(host=dbhost)
            session.add(dbhost.resholder)
            session.flush()
            who = dbhost.resholder

    if cluster is not None:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        who = dbcluster.resholder
        if who is None:
            if compel:
                raise NotFoundException(
                    "{0} has no resources.".format(dbcluster))
            dbcluster.resholder = ClusterResource(cluster=dbcluster)
            session.add(dbcluster.resholder)
            session.flush()
            who = dbcluster.resholder

    if resgroup is not None:
        dbrg = ResourceGroup.get_unique(session,
                                        name=resgroup,
                                        holder=who,
                                        compel=True)
        who = dbrg.resholder
        if who is None:
            if compel:
                raise NotFoundException("{0} has no resources.".format(dbrg))
            dbrg.resholder = BundleResource(resourcegroup=dbrg)
            session.add(dbrg.resholder)
            session.flush()
            who = dbrg.resholder

    return who
Example #20
    def render(self, session, logger, cluster, service, instance, **arguments):

        dbservice = Service.get_unique(session, service, compel=True)
        dbinstance = get_service_instance(session, dbservice, instance)
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if dbinstance not in dbcluster.service_bindings:
            raise NotFoundException("{0} is not bound to {1:l}."
                                    .format(dbinstance, dbcluster))
        if dbservice in dbcluster.required_services:
            raise ArgumentError("Cannot remove cluster service instance "
                                "binding for %s cluster aligned service %s." %
                                (dbcluster.cluster_type, dbservice.name))
        dbcluster.service_bindings.remove(dbinstance)

        session.flush()

        plenary = Plenary.get_plenary(dbcluster, logger=logger)
        plenary.write()
        return
Example #21
    def render(self, session, logger, cluster, keepbindings, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if not dbcluster.personality.archetype.is_compileable:
            raise ArgumentError("{0} is not a compilable archetype "
                                "({1!s}).".format(
                                    dbcluster,
                                    dbcluster.personality.archetype))

        chooser = Chooser(dbcluster,
                          logger=logger,
                          required_only=not keepbindings)
        chooser.set_required()
        chooser.flush_changes()
        # Force a domain lock as pan might overwrite any of the profiles...
        key = CompileKey.merge([
            chooser.get_write_key(),
            CompileKey(domain=dbcluster.branch.name, logger=logger)
        ])
        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)

            profile_list = add_cluster_data(dbcluster)
            profile_list.extend(chooser.changed_server_fqdns())

            td = TemplateDomain(dbcluster.branch,
                                dbcluster.sandbox_author,
                                logger=logger)
            td.compile(session, only=profile_list, locked=True)

        except:
            chooser.restore_stash()

            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that DB can be appropriately rollback'd.

            raise

        finally:
            lock_queue.release(key)

        return
Example #22
    def render(self, session, archetype, personality, cluster,
               **kwargs):
        dbpers = Personality.get_unique(session, name=personality,
                                        archetype=archetype, compel=True)
        dbclus = Cluster.get_unique(session, cluster, compel=True)
        if len(dbclus.allowed_personalities) > 1:
            for host in dbclus.hosts:
                if host.personality == dbpers:
                    raise ArgumentError("The cluster member %s has a "
                                        "personality of %s which is "
                                        "incompatible with this constraint." %
                                        (host.fqdn, host.personality))

        if dbpers in dbclus.allowed_personalities:
            dbclus.allowed_personalities.remove(dbpers)
            dbclus.validate()

        session.flush()

        return
Example #23
    def render(self, session, logger, cluster, data, generate, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbresource = get_resource(session, dbcluster, **arguments)
        if dbresource:
            plenary_info = Plenary.get_plenary(dbresource, logger=logger)
        else:
            if isinstance(dbcluster, MetaCluster):
                if data:
                    plenary_info = PlenaryMetaClusterData(dbcluster, logger=logger)
                else:
                    plenary_info = PlenaryMetaClusterObject(dbcluster, logger=logger)
            else:
                if data:
                    plenary_info = PlenaryClusterData(dbcluster, logger=logger)
                else:
                    plenary_info = PlenaryClusterObject(dbcluster, logger=logger)

        if generate:
            return plenary_info._generate_content()
        else:
            return plenary_info.read()
Example #24
    def render(self, session, logger, metacluster, cluster, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbmetacluster = MetaCluster.get_unique(session, metacluster,
                                               compel=True)
        old_metacluster = None
        if dbcluster.metacluster and dbcluster.metacluster != dbmetacluster:
            if dbcluster.virtual_machines:
                raise ArgumentError("Cannot move cluster to a new metacluster "
                                    "while virtual machines are attached.")
            old_metacluster = dbcluster.metacluster
            old_metacluster.members.remove(dbcluster)
            session.expire(dbcluster, ['_metacluster'])
        if not dbcluster.metacluster:
            dbmetacluster.members.append(dbcluster)

        session.flush()

        plenary = Plenary.get_plenary(dbcluster, logger=logger)
        plenary.write()

        return
Example #25
    def render(self,
               session,
               logger,
               cluster,
               service,
               instance,
               force=False,
               **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbservice = Service.get_unique(session, service, compel=True)
        chooser = Chooser(dbcluster, logger=logger, required_only=False)
        if instance:
            dbinstance = get_service_instance(session, dbservice, instance)
            chooser.set_single(dbservice, dbinstance, force=force)
        else:
            chooser.set_single(dbservice, force=force)

        chooser.flush_changes()
        chooser.write_plenary_templates()

        return
Example #26
    def render(self, session, archetype, compilable, cluster_type,
               description, comments, **kwargs):
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)

        if compilable is not None:
            dbarchetype.is_compileable = compilable

        if description is not None:
            dbarchetype.outputdesc = description

        if comments is not None:
            dbarchetype.comments = comments

        if cluster_type:
            # Verify & normalize the value
            cls = Cluster.polymorphic_subclass(cluster_type,
                                               "Unknown cluster type")
            cluster_type = inspect(cls).polymorphic_identity

        if cluster_type is not None and \
            dbarchetype.cluster_type != cluster_type:

            if dbarchetype.cluster_type is None:
                q = session.query(Host.hardware_entity_id)
            else:
                q = session.query(Cluster.id)
            q = q.join('personality').filter_by(archetype=dbarchetype)
            if q.count() > 0:
                raise ArgumentError("{0} is currently in use, the cluster "
                                    "type cannot be changed."
                                    .format(dbarchetype))

            if cluster_type == "":
                dbarchetype.cluster_type = None
            else:
                dbarchetype.cluster_type = cluster_type

        return
Example #27
    def render(self, session, logger, cluster, keepbindings, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if not dbcluster.personality.archetype.is_compileable:
            raise ArgumentError("{0} is not a compilable archetype "
                                "({1!s}).".format(dbcluster,
                                                  dbcluster.personality.archetype))

        chooser = Chooser(dbcluster, logger=logger,
                          required_only=not keepbindings)
        chooser.set_required()
        chooser.flush_changes()
        # Force a domain lock as pan might overwrite any of the profiles...
        key = CompileKey.merge([chooser.get_write_key(),
                                CompileKey(domain=dbcluster.branch.name,
                                           logger=logger)])
        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)

            profile_list = add_cluster_data(dbcluster)
            profile_list.extend(chooser.changed_server_fqdns())

            td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                                logger=logger)
            td.compile(session, only=profile_list, locked=True)

        except:
            chooser.restore_stash()

            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that DB can be appropriately rollback'd.

            raise

        finally:
            lock_queue.release(key)

        return
Example #28
def get_resource_holder(session, hostname=None, cluster=None, resgroup=None,
                        compel=True):
    who = None
    if hostname is not None:
        dbhost = hostname_to_host(session, hostname)
        who = dbhost.resholder
        if who is None:
            if compel:
                raise NotFoundException("{0} has no resources.".format(dbhost))
            dbhost.resholder = HostResource(host=dbhost)
            session.add(dbhost.resholder)
            session.flush()
            who = dbhost.resholder

    if cluster is not None:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        who = dbcluster.resholder
        if who is None:
            if compel:
                raise NotFoundException("{0} has no resources.".format(dbcluster))
            dbcluster.resholder = ClusterResource(cluster=dbcluster)
            session.add(dbcluster.resholder)
            session.flush()
            who = dbcluster.resholder

    if resgroup is not None:
        dbrg = ResourceGroup.get_unique(session, name=resgroup, holder=who,
                                        compel=True)
        who = dbrg.resholder
        if who is None:
            if compel:
                raise NotFoundException("{0} has no resources.".format(dbrg))
            dbrg.resholder = BundleResource(resourcegroup=dbrg)
            session.add(dbrg.resholder)
            session.flush()
            who = dbrg.resholder

    return who
Example #29
    def render(self, session, logger, metacluster, cluster, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbmetacluster = MetaCluster.get_unique(session,
                                               metacluster,
                                               compel=True)
        old_metacluster = None
        if dbcluster.metacluster and dbcluster.metacluster != dbmetacluster:
            if dbcluster.machines:
                raise ArgumentError("Cannot move cluster to a new metacluster "
                                    "while virtual machines are attached.")
            old_metacluster = dbcluster.metacluster
            old_metacluster.members.remove(dbcluster)
            session.expire(dbcluster, ['_metacluster'])
        if not dbcluster.metacluster:
            dbmetacluster.validate_membership(dbcluster)
            dbmetacluster.members.append(dbcluster)

        session.flush()

        plenary = PlenaryCluster(dbcluster, logger=logger)
        plenary.write()

        return
Example #30
    def render(self, session, logger, cluster, buildstatus, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbstatus = ClusterLifecycle.get_unique(session,
                                               buildstatus,
                                               compel=True)

        if not dbcluster.status.transition(dbcluster, dbstatus):
            return

        if not dbcluster.personality.archetype.is_compileable:
            return

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        for dbhost in dbcluster.hosts:
            plenaries.append(Plenary.get_plenary(dbhost))

        # Force a host lock as pan might overwrite the profile...
        key = CompileKey(domain=dbcluster.branch.name, logger=logger)
        try:
            lock_queue.acquire(key)

            plenaries.write(locked=True)
            td = TemplateDomain(dbcluster.branch,
                                dbcluster.sandbox_author,
                                logger=logger)
            td.compile(session, plenaries.object_templates, locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
        return
Example #31
    def render(self, session, logger, cluster, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        del_cluster(session, logger, dbcluster, self.config)
Example #32
    def render(self, session, logger, hostname, cluster,
               personality, **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.status.name == 'decommissioned':
            raise ArgumentError("Cannot add hosts to decommissioned clusters.")

        # We only support changing personality within the same
        # archetype. The archetype decides things like which OS, how
        # it builds (dhcp, etc), whether it's compilable, and
        # switching all of that by side-effect seems wrong
        # somehow. And besides, it would make the user-interface and
        # implementation for this command ugly in order to support
        # changing all of those options.
        personality_change = False
        if personality is not None:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbhost.personality != dbpersonality:
                dbhost.personality = dbpersonality
                personality_change = True

        # Allow for non-restricted clusters (the default?)
        if (len(dbcluster.allowed_personalities) > 0 and
            dbhost.personality not in dbcluster.allowed_personalities):
            raise ArgumentError("The personality %s for %s is not allowed "
                                "by the cluster. Specify --personality "
                                "and provide one of %s" %
                                (dbhost.personality, dbhost.fqdn,
                                 ", ".join([x.name for x in
                                            dbcluster.allowed_personalities])))

        # Now that we've changed the personality, we can check
        # if this is a valid membership change
        dbcluster.validate_membership(dbhost)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if dbhost.cluster and dbhost.cluster != dbcluster:
            logger.client_info("Removing {0:l} from {1:l}.".format(dbhost,
                                                                   dbhost.cluster))
            old_cluster = dbhost.cluster
            old_cluster.hosts.remove(dbhost)
            remove_service_addresses(old_cluster, dbhost)
            old_cluster.validate()
            session.expire(dbhost, ['_cluster'])
            plenaries.append(Plenary.get_plenary(old_cluster))

        # Apply the service addresses to the new member
        for res in walk_resources(dbcluster):
            if not isinstance(res, ServiceAddress):
                continue
            apply_service_address(dbhost, res.interfaces, res, logger)

        if dbhost.cluster:
            if personality_change:
                raise ArgumentError("{0:l} already in {1:l}, use "
                                    "aq reconfigure to change personality."
                                    .format(dbhost, dbhost.cluster))
            # the cluster has not changed, therefore there's nothing
            # to do here.
            return

        # Calculate the node index: build a map of all possible values, remove
        # the used ones, and pick the smallest remaining one
        node_index_map = set(xrange(len(dbcluster._hosts) + 1))
        for link in dbcluster._hosts:
            # The cluster may have been bigger in the past, so node indexes may
            # be larger than the current cluster size
            try:
                node_index_map.remove(link.node_index)
            except KeyError:
                pass

        dbcluster.hosts.append((dbhost, min(node_index_map)))
        dbcluster.validate()

        # demote a host when switching clusters
        # promote a host when switching clusters
        if dbhost.status.name == 'ready':
            if dbcluster.status.name != 'ready':
                dbalmost = HostAlmostready.get_instance(session)
                dbhost.status.transition(dbhost, dbalmost)
                plenaries.append(Plenary.get_plenary(dbhost))
        elif dbhost.status.name == 'almostready':
            if dbcluster.status.name == 'ready':
                dbready = HostReady.get_instance(session)
                dbhost.status.transition(dbhost, dbready)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # Enforce that service instances are set correctly for the
        # new cluster association.
        chooser = Chooser(dbhost, logger=logger)
        chooser.set_required()
        chooser.flush_changes()
        # the chooser will include the host plenary
        with CompileKey.merge([chooser.get_key(), plenaries.get_key()]):
            plenaries.stash()
            try:
                chooser.write_plenary_templates(locked=True)
                plenaries.write(locked=True)
            except:
                chooser.restore_stash()
                plenaries.restore_stash()
                raise

        return
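The node-index selection in the example above can be shown in isolation: build the set of candidate indexes 0..N, discard the ones already in use (stale indexes larger than the current cluster size are simply ignored), and take the smallest remaining value. A small self-contained sketch of the same technique:

def next_node_index(used_indexes):
    # With N indexes in use, at least one value in 0..N must be free,
    # so the candidate set below is always non-empty.
    candidates = set(range(len(used_indexes) + 1))
    for idx in used_indexes:
        candidates.discard(idx)     # discard() ignores unknown values
    return min(candidates)

assert next_node_index([]) == 0
assert next_node_index([0, 1, 3]) == 2    # index 2 was freed earlier
assert next_node_index([0, 1, 2]) == 3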
Example #33
    def render(self, session, logger, membersof, archetype, personality,
               buildstatus, osname, osversion, **arguments):
        dbcluster = Cluster.get_unique(session, membersof, compel=True)
        self.reconfigure_list(session, logger, dbcluster.hosts,
                              archetype, personality,
                              buildstatus, osname, osversion, **arguments)
Example #34
    def render(self, session, logger, cluster, archetype, personality, domain,
               sandbox, max_members, down_hosts_threshold, maint_threshold,
               buildstatus, comments, vm_to_host_ratio, switch, metacluster,
               **arguments):

        validate_basic("cluster", cluster)
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=archetype,
                                               compel=True)
        if not dbpersonality.is_cluster:
            raise ArgumentError("%s is not a cluster personality." %
                                personality)

        ctype = dbpersonality.archetype.cluster_type
        section = "archetype_" + dbpersonality.archetype.name

        if not buildstatus:
            buildstatus = "build"
        dbstatus = ClusterLifecycle.get_unique(session,
                                               buildstatus,
                                               compel=True)

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Adding clusters to {0:l} is not allowed.".format(dbbranch))

        dbloc = get_location(session, **arguments)
        if not dbloc:
            raise ArgumentError("Adding a cluster requires a location "
                                "constraint.")
        if not dbloc.campus:
            raise ArgumentError("{0} is not within a campus.".format(dbloc))

        if max_members is None:
            if self.config.has_option(section, "max_members_default"):
                max_members = self.config.getint(section,
                                                 "max_members_default")

        Cluster.get_unique(session, cluster, preclude=True)
        # Not finding the cluster type is an internal consistency issue, so make
        # that show up in the logs by using AquilonError
        clus_type = Cluster.polymorphic_subclass(ctype,
                                                 "Unknown cluster type",
                                                 error=AquilonError)

        (down_hosts_pct, dht) = Cluster.parse_threshold(down_hosts_threshold)

        kw = {
            'name': cluster,
            'location_constraint': dbloc,
            'personality': dbpersonality,
            'max_hosts': max_members,
            'branch': dbbranch,
            'sandbox_author': dbauthor,
            'down_hosts_threshold': dht,
            'down_hosts_percent': down_hosts_pct,
            'status': dbstatus,
            'comments': comments
        }

        if ctype == 'esx':
            if vm_to_host_ratio is None:
                if self.config.has_option(section, "vm_to_host_ratio"):
                    vm_to_host_ratio = self.config.get(section,
                                                       "vm_to_host_ratio")
                else:
                    vm_to_host_ratio = "1:1"
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            kw["vm_count"] = vm_count
            kw["host_count"] = host_count

        if switch and hasattr(clus_type, 'switch'):
            kw['switch'] = Switch.get_unique(session, switch, compel=True)

        if maint_threshold is not None:
            (down_hosts_pct, dht) = Cluster.parse_threshold(maint_threshold)
            kw['down_maint_threshold'] = dht
            kw['down_maint_percent'] = down_hosts_pct

        dbcluster = clus_type(**kw)

        plenaries = PlenaryCollection(logger=logger)

        if metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   metacluster,
                                                   compel=True)

            dbmetacluster.validate_membership(dbcluster)
            dbmetacluster.members.append(dbcluster)

            plenaries.append(Plenary.get_plenary(dbmetacluster))

        session.add(dbcluster)
        session.flush()
        session.refresh(dbcluster)

        plenaries.append(Plenary.get_plenary(dbcluster))

        key = plenaries.get_write_key()

        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #35
    def render(self, session, logger, cluster, personality,
               max_members, fix_location, down_hosts_threshold,
               maint_threshold, comments,
               # ESX specific options
               switch, memory_capacity, clear_overrides, vm_to_host_ratio,
               **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.cluster_type == 'meta':
            raise ArgumentError("%s should not be a metacluster."
                                % format(dbcluster))

        cluster_updated = False
        remove_plenaries = PlenaryCollection(logger=logger)
        plenaries = PlenaryCollection(logger=logger)

        (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                             vm_to_host_ratio)
        if down_hosts_threshold is not None:
            (perc, dht) = Cluster.parse_threshold(down_hosts_threshold)
            dbcluster.down_hosts_threshold = dht
            dbcluster.down_hosts_percent = perc
            cluster_updated = True

        if dbcluster.cluster_type == "esx":
            if vm_count is not None or down_hosts_threshold is not None:
                if vm_count is None:
                    vm_count = dbcluster.vm_count
                    host_count = dbcluster.host_count

                dht = dbcluster.down_hosts_threshold
                perc = dbcluster.down_hosts_percent

                dbcluster.validate(vm_part=vm_count, host_part=host_count,
                                   down_hosts_threshold=dht,
                                   down_hosts_percent=perc)

                dbcluster.vm_count = vm_count
                dbcluster.host_count = host_count
                cluster_updated = True

        if switch is not None:
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbswitch = Switch.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbswitch))
            else:
                dbswitch = None
            dbcluster.switch = dbswitch
            cluster_updated = True

        if memory_capacity is not None:
            dbcluster.memory_capacity = memory_capacity
            dbcluster.validate()
            cluster_updated = True

        if clear_overrides is not None:
            dbcluster.memory_capacity = None
            dbcluster.validate()
            cluster_updated = True

        location_updated = update_cluster_location(session, logger,
                                          dbcluster, fix_location,
                                          plenaries, remove_plenaries,
                                          **arguments)

        if location_updated:
            cluster_updated = True

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality
            cluster_updated = True

        if max_members is not None:
            current_members = len(dbcluster.hosts)
            if max_members < current_members:
                raise ArgumentError("%s has %d hosts bound, which exceeds "
                                    "the requested limit %d." %
                                    (format(dbcluster), current_members,
                                     max_members))
            dbcluster.max_hosts = max_members
            cluster_updated = True

        if comments is not None:
            dbcluster.comments = comments
            cluster_updated = True

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)
            cluster_updated = True

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        key = CompileKey.merge([plenaries.get_write_key(),
                                remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #36
    def render(
            self,
            session,
            logger,
            cluster,
            personality,
            max_members,
            fix_location,
            down_hosts_threshold,
            maint_threshold,
            comments,
            # ESX specific options
            switch,
            memory_capacity,
            clear_overrides,
            vm_to_host_ratio,
            **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.cluster_type == 'meta':
            raise ArgumentError("%s should not be a metacluster." %
                                format(dbcluster))

        cluster_updated = False
        remove_plenaries = PlenaryCollection(logger=logger)
        plenaries = PlenaryCollection(logger=logger)

        (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                             vm_to_host_ratio)
        if down_hosts_threshold is not None:
            (perc, dht) = Cluster.parse_threshold(down_hosts_threshold)
            dbcluster.down_hosts_threshold = dht
            dbcluster.down_hosts_percent = perc
            cluster_updated = True

        if dbcluster.cluster_type == "esx":
            if vm_count is not None or down_hosts_threshold is not None:
                if vm_count is None:
                    vm_count = dbcluster.vm_count
                    host_count = dbcluster.host_count

                dht = dbcluster.down_hosts_threshold
                perc = dbcluster.down_hosts_percent

                dbcluster.validate(vm_part=vm_count,
                                   host_part=host_count,
                                   down_hosts_threshold=dht,
                                   down_hosts_percent=perc)

                dbcluster.vm_count = vm_count
                dbcluster.host_count = host_count
                cluster_updated = True

        if switch is not None:
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbswitch = Switch.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbswitch))
            else:
                dbswitch = None
            dbcluster.switch = dbswitch
            cluster_updated = True

        if memory_capacity is not None:
            dbcluster.memory_capacity = memory_capacity
            dbcluster.validate()
            cluster_updated = True

        if clear_overrides is not None:
            dbcluster.memory_capacity = None
            dbcluster.validate()
            cluster_updated = True

        location_updated = update_cluster_location(session, logger, dbcluster,
                                                   fix_location, plenaries,
                                                   remove_plenaries,
                                                   **arguments)

        if location_updated:
            cluster_updated = True

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality
            cluster_updated = True

        if max_members is not None:
            current_members = len(dbcluster.hosts)
            if max_members < current_members:
                raise ArgumentError(
                    "%s has %d hosts bound, which exceeds "
                    "the requested limit %d." %
                    (format(dbcluster), current_members, max_members))
            dbcluster.max_hosts = max_members
            cluster_updated = True

        if comments is not None:
            dbcluster.comments = comments
            cluster_updated = True

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)
            cluster_updated = True

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        key = CompileKey.merge(
            [plenaries.get_write_key(),
             remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
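The render() above derives vm_count and host_count from the --vm_to_host_ratio option through force_ratio(). A minimal sketch of that conversion, assuming the conventional "V:H" string form (e.g. "16:1"); the real helper presumably raises ArgumentError and may accept other spellings.

def force_ratio(label, value):
    # Illustrative sketch only: "16:1" -> (16, 1); None passes through
    # as (None, None), matching how the result is checked above.
    if value is None:
        return (None, None)
    try:
        vm_part, host_part = str(value).split(':')
        return (int(vm_part), int(host_part))
    except ValueError:
        raise ValueError("The --%s value should look like 16:1." % label)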
Example #37
    def render(self, session, network, network_environment, ip, type, side,
               machine, fqdn, cluster, pg, has_dynamic_ranges, exact_location,
               fullinfo, style, **arguments):
        """Return a network matching the parameters.

        Some of the search terms can only return a unique network.  For
        those (like ip and fqdn) we proceed with the query anyway.  This
        allows for quick scripted tests like "is the network for X.X.X.X
        a tor_net2?".

        """
        dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                             network_environment)
        q = session.query(Network)
        q = q.filter_by(network_environment=dbnet_env)
        if network:
            # Note: the network name is not unique (not even in QIP)
            q = q.filter_by(name=network)
        if ip:
            dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
            q = q.filter_by(id=dbnetwork.id)
        if type:
            q = q.filter_by(network_type=type)
        if side:
            q = q.filter_by(side=side)
        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            vlans = []
            if dbmachine.cluster and dbmachine.cluster.network_device:
                # If this is a VM on a cluster, consult the VLANs.  There
                # could be functionality here for real hardware to consult
                # interface port groups... there's no real use case yet.
                vlans = [VlanInfo.get_vlan_id(session, i.port_group)
                         for i in dbmachine.interfaces if i.port_group]
                if vlans:
                    q = q.join('observed_vlans')
                    q = q.filter_by(network_device=dbmachine.cluster.network_device)
                    q = q.filter(ObservedVlan.vlan_id.in_(vlans))
                    q = q.reset_joinpoint()
            if not vlans:
                networks = [addr.network.id for addr in
                            dbmachine.all_addresses()]
                if not networks:
                    msg = "Machine %s has no interfaces " % dbmachine.label
                    if dbmachine.cluster:
                        msg += "with a portgroup or "
                    msg += "assigned to a network."
                    raise ArgumentError(msg)
                q = q.filter(Network.id.in_(networks))
        if fqdn:
            (short, dbdns_domain) = parse_fqdn(session, fqdn)
            dnsq = session.query(ARecord.ip)
            dnsq = dnsq.join(ARecord.fqdn)
            dnsq = dnsq.filter_by(name=short)
            dnsq = dnsq.filter_by(dns_domain=dbdns_domain)
            networks = [get_net_id_from_ip(session, addr.ip, dbnet_env).id
                        for addr in dnsq.all()]
            q = q.filter(Network.id.in_(networks))
        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if dbcluster.network_device:
                q = q.join('observed_vlans')
                q = q.filter_by(network_device=dbcluster.network_device)
                q = q.reset_joinpoint()
            else:
                net_ids = [h.hardware_entity.primary_name.network.id for h in
                           dbcluster.hosts if getattr(h.hardware_entity.primary_name,
                                                      "network")]
                q = q.filter(Network.id.in_(net_ids))
        if pg:
            vlan = VlanInfo.get_vlan_id(session, pg, compel=ArgumentError)
            q = q.join('observed_vlans')
            q = q.filter_by(vlan_id=vlan)
            q = q.reset_joinpoint()
        dblocation = get_location(session, **arguments)
        if dblocation:
            if exact_location:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Network.location_id.in_(childids))
        if has_dynamic_ranges:
            q = q.filter(exists([DynamicStub.dns_record_id],
                                from_obj=DynamicStub.__table__.join(ARecord.__table__))
                         .where(Network.id == DynamicStub.network_id))
        q = q.order_by(Network.ip)
        if fullinfo or style != 'raw':
            q = q.options(undefer('comments'))
            return q.all()
        return StringAttributeList(q.all(),
                                   lambda n: "%s/%s" % (n.ip, n.cidr))
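When not returning full objects, the search above wraps the result in StringAttributeList with an "ip/cidr" renderer. The class itself is not shown in this listing; as a purely illustrative guess at its shape (the broker's real class may differ), it only needs to keep the rows and apply the renderer one line per object.

class StringAttributeList(list):
    # Illustrative stand-in only, not the broker's implementation.  It
    # holds the rows and renders one attribute per object, so the "raw"
    # output of the search becomes one "ip/cidr" line per network.
    def __init__(self, items, render):
        super(StringAttributeList, self).__init__(items)
        self.render = render

    def __str__(self):
        return "\n".join(self.render(item) for item in self)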
Example #38
    def render(self, session, logger, cluster, **arguments):
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        del_cluster(session, logger, dbcluster, self.config)
Example #39
    def render(self, session, network, network_environment, ip, type, side,
               machine, fqdn, cluster, pg, has_dynamic_ranges, fullinfo,
               **arguments):
        """Return a network matching the parameters.

        Some of the search terms can only return a unique network.  For
        those (like ip and fqdn) we proceed with the query anyway.  This
        allows for quick scripted tests like "is the network for X.X.X.X
        a tor_net2?".

        """
        dbnet_env = NetworkEnvironment.get_unique_or_default(
            session, network_environment)
        q = session.query(Network)
        q = q.filter_by(network_environment=dbnet_env)
        if network:
            # Note: the network name is not unique (not even in QIP)
            q = q.filter_by(name=network)
        if ip:
            dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
            q = q.filter_by(id=dbnetwork.id)
        if type:
            q = q.filter_by(network_type=type)
        if side:
            q = q.filter_by(side=side)
        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            vlans = []
            if dbmachine.cluster and dbmachine.cluster.switch:
                # If this is a VM on a cluster, consult the VLANs.  There
                # could be functionality here for real hardware to consult
                # interface port groups... there's no real use case yet.
                vlans = [
                    VlanInfo.get_vlan_id(session, i.port_group)
                    for i in dbmachine.interfaces if i.port_group
                ]
                if vlans:
                    q = q.join('observed_vlans')
                    q = q.filter_by(switch=dbmachine.cluster.switch)
                    q = q.filter(ObservedVlan.vlan_id.in_(vlans))
                    q = q.reset_joinpoint()
            if not vlans:
                networks = [
                    addr.network.id for addr in dbmachine.all_addresses()
                ]
                if not networks:
                    msg = "Machine %s has no interfaces " % dbmachine.label
                    if dbmachine.cluster:
                        msg += "with a portgroup or "
                    msg += "assigned to a network."
                    raise ArgumentError(msg)
                q = q.filter(Network.id.in_(networks))
        if fqdn:
            (short, dbdns_domain) = parse_fqdn(session, fqdn)
            dnsq = session.query(ARecord.ip)
            dnsq = dnsq.join(ARecord.fqdn)
            dnsq = dnsq.filter_by(name=short)
            dnsq = dnsq.filter_by(dns_domain=dbdns_domain)
            networks = [
                get_net_id_from_ip(session, addr.ip, dbnet_env).id
                for addr in dnsq.all()
            ]
            q = q.filter(Network.id.in_(networks))
        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if dbcluster.switch:
                q = q.join('observed_vlans')
                q = q.filter_by(switch=dbcluster.switch)
                q = q.reset_joinpoint()
            else:
                net_ids = [
                    h.machine.primary_name.network.id for h in dbcluster.hosts
                    if getattr(h.machine.primary_name, "network")
                ]
                q = q.filter(Network.id.in_(net_ids))
        if pg:
            vlan = VlanInfo.get_vlan_id(session, pg, compel=ArgumentError)
            q = q.join('observed_vlans')
            q = q.filter_by(vlan_id=vlan)
            q = q.reset_joinpoint()
        dblocation = get_location(session, **arguments)
        if dblocation:
            if arguments.get('exact_location'):
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Network.location_id.in_(childids))
        if has_dynamic_ranges:
            q = q.filter(
                exists([DynamicStub.dns_record_id],
                       from_obj=DynamicStub.__table__.join(
                           ARecord.__table__)).where(
                               Network.id == DynamicStub.network_id))
        q = q.order_by(Network.ip)
        if fullinfo:
            q = q.options(undefer('comments'))
            return q.all()
        return ShortNetworkList(q.all())
Example #40
    def render(self, session, logger, hostname, machine, archetype,
               buildstatus, personality, osname, osversion, service, instance,
               model, machine_type, vendor, serial, cluster,
               guest_on_cluster, guest_on_share, member_cluster_share,
               domain, sandbox, branch, sandbox_owner,
               dns_domain, shortname, mac, ip, networkip, network_environment,
               exact_location, server_of_service, server_of_instance, grn,
               eon_id, fullinfo, **arguments):
        dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                             network_environment)

        q = session.query(Host)

        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            q = q.filter_by(machine=dbmachine)

        # Add the machine definition and the primary name. Use aliases to make
        # sure the end result will be ordered by primary name.
        PriDns = aliased(DnsRecord)
        PriFqdn = aliased(Fqdn)
        PriDomain = aliased(DnsDomain)
        q = q.join(Machine,
                   (PriDns, PriDns.id == Machine.primary_name_id),
                   (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
                   (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
        q = q.order_by(PriFqdn.name, PriDomain.name)
        q = q.options(contains_eager('machine'),
                      contains_eager('machine.primary_name', alias=PriDns),
                      contains_eager('machine.primary_name.fqdn', alias=PriFqdn),
                      contains_eager('machine.primary_name.fqdn.dns_domain',
                                     alias=PriDomain))
        q = q.reset_joinpoint()

        # Hardware-specific filters
        dblocation = get_location(session, **arguments)
        if dblocation:
            if exact_location:
                q = q.filter(Machine.location == dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))

        if model or vendor or machine_type:
            subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                            machine_type=machine_type,
                                            compel=True)
            q = q.filter(Machine.model_id.in_(subq))

        if serial:
            self.deprecated_option("serial", "Please use search machine --serial instead.",
                logger=logger, **arguments)
            q = q.filter(Machine.serial_no == serial)

        # DNS IP address related filters
        if mac or ip or networkip or hostname or dns_domain or shortname:
            # Inner joins are cheaper than outer joins, so make some effort to
            # use inner joins when possible
            if mac or ip or networkip:
                q = q.join(Interface)
            else:
                q = q.outerjoin(Interface)
            if ip or networkip:
                q = q.join(AddressAssignment, Network, from_joinpoint=True)
            else:
                q = q.outerjoin(AddressAssignment, Network, from_joinpoint=True)

            if mac:
                self.deprecated_option("mac", "Please use search machine "
                                       "--mac instead.", logger=logger,
                                       **arguments)
                q = q.filter(Interface.mac == mac)
            if ip:
                q = q.filter(AddressAssignment.ip == ip)
                q = q.filter(Network.network_environment == dbnet_env)
            if networkip:
                dbnetwork = get_network_byip(session, networkip, dbnet_env)
                q = q.filter(AddressAssignment.network == dbnetwork)

            dbdns_domain = None
            if hostname:
                (shortname, dbdns_domain) = parse_fqdn(session, hostname)
            if dns_domain:
                dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)

            if shortname or dbdns_domain:
                ARecAlias = aliased(ARecord)
                ARecFqdn = aliased(Fqdn)

                q = q.outerjoin((ARecAlias,
                                 and_(ARecAlias.ip == AddressAssignment.ip,
                                      ARecAlias.network_id == AddressAssignment.network_id)),
                                (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
                if shortname:
                    q = q.filter(or_(ARecFqdn.name == shortname,
                                     PriFqdn.name == shortname))
                if dbdns_domain:
                    q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                     PriFqdn.dns_domain == dbdns_domain))
            q = q.reset_joinpoint()

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if sandbox_owner:
            dbauthor = get_user_principal(session, sandbox_owner)

        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                     compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if osname and osversion and archetype:
            # archetype was already resolved above
            dbos = OperatingSystem.get_unique(session, name=osname,
                                              version=osversion,
                                              archetype=dbarchetype,
                                              compel=True)
            q = q.filter_by(operating_system=dbos)
        elif osname or osversion:
            q = q.join('operating_system')
            if osname:
                q = q.filter_by(name=osname)
            if osversion:
                q = q.filter_by(version=osversion)
            q = q.reset_joinpoint()

        if service:
            dbservice = Service.get_unique(session, service, compel=True)
            if instance:
                dbsi = get_service_instance(session, dbservice, instance)
                q = q.filter(Host.services_used.contains(dbsi))
            else:
                q = q.join('services_used')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('services_used')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if server_of_service:
            dbserver_service = Service.get_unique(session, server_of_service,
                                                  compel=True)
            if server_of_instance:
                dbssi = get_service_instance(session, dbserver_service,
                                             server_of_instance)
                q = q.join('_services_provided')
                q = q.filter_by(service_instance=dbssi)
                q = q.reset_joinpoint()
            else:
                q = q.join('_services_provided', 'service_instance')
                q = q.filter_by(service=dbserver_service)
                q = q.reset_joinpoint()
        elif server_of_instance:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(name=server_of_instance)
            q = q.reset_joinpoint()

        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if isinstance(dbcluster, MetaCluster):
                q = q.join('_cluster', 'cluster', '_metacluster')
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_cluster:
            # TODO: this does not handle metaclusters according to Wes
            dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                           compel=True)
            q = q.join('machine', VirtualMachine, ClusterResource)
            q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_share:
            #v2
            v2shares = session.query(Share.id).filter_by(name=guest_on_share).all()
            if not v2shares:
                raise NotFoundException("No shares found with name {0}."
                                        .format(guest_on_share))

            NasAlias = aliased(VirtualDisk)
            q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(
                NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if member_cluster_share:
            #v2
            v2shares = session.query(Share.id).filter_by(name=member_cluster_share).all()
            if not v2shares:
                raise NotFoundException("No shares found with name {0}."
                                        .format(member_cluster_share))

            NasAlias = aliased(VirtualDisk)

            q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                       'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(
                NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

            persq = session.query(Personality.id)
            persq = persq.outerjoin(PersonalityGrnMap)
            persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                     PersonalityGrnMap.eon_id == dbgrn.eon_id))
            q = q.outerjoin(HostGrnMap)
            q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                             HostGrnMap.eon_id == dbgrn.eon_id,
                             Host.personality_id.in_(persq.subquery())))
            q = q.reset_joinpoint()

        if fullinfo:
            return q.all()
        return SimpleHostList(q.all())
Example #41
    def render(self, session, logger, machine, model, vendor, serial, chassis,
               slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
               comments, **arguments):
        dblocation = get_location(session,
                                  query_options=[subqueryload('parents'),
                                                 joinedload('parents.dns_maps')],
                                  **arguments)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if slot is None:
                raise ArgumentError("The --chassis option requires a --slot.")
            if dblocation and dblocation != dbchassis.location:
                raise ArgumentError("{0} conflicts with chassis location "
                                    "{1}.".format(dblocation, dbchassis.location))
            dblocation = dbchassis.location
        elif slot is not None:
            raise ArgumentError("The --slot option requires a --chassis.")

        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)

        if dbmodel.machine_type not in ['blade', 'rackmount', 'workstation',
                                        'aurora_node', 'virtual_machine']:
            raise ArgumentError("The add_machine command cannot add machines "
                                "of type %(type)s.  Try 'add %(type)s'." %
                    {"type": dbmodel.machine_type})

        if cluster:
            if dbmodel.machine_type != 'virtual_machine':
                raise ArgumentError("Only virtual machines can have a cluster "
                                    "attribute.")
            dbcluster = Cluster.get_unique(session, cluster,
                                           compel=ArgumentError)
            # This test could be either archetype or cluster_type
            if dbcluster.personality.archetype.name != 'esx_cluster':
                raise ArgumentError("Can only add virtual machines to "
                                    "clusters with archetype esx_cluster.")
            # TODO implement the same to vmhosts.
            if dbcluster.status.name == 'decommissioned':
                raise ArgumentError("Cannot add virtual machines to "
                                    "decommissioned clusters.")
            if dblocation and dbcluster.location_constraint != dblocation:
                raise ArgumentError("Cannot override cluster location {0} "
                                    "with location {1}.".format(
                                        dbcluster.location_constraint,
                                        dblocation))
            dblocation = dbcluster.location_constraint
        elif dbmodel.machine_type == 'virtual_machine':
            raise ArgumentError("Virtual machines must be assigned to a "
                                "cluster.")

        Machine.get_unique(session, machine, preclude=True)
        dbmachine = create_machine(session, machine, dblocation, dbmodel,
                                   cpuname, cpuvendor, cpuspeed, cpucount,
                                   memory, serial, comments)

        if chassis:
            # FIXME: Are virtual machines allowed to be in a chassis?
            dbslot = session.query(ChassisSlot).filter_by(
                chassis=dbchassis, slot_number=slot).first()
            if not dbslot:
                dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
            dbslot.machine = dbmachine
            session.add(dbslot)

        if cluster:
            if not dbcluster.resholder:
                dbcluster.resholder = ClusterResource(cluster=dbcluster)
            dbvm = VirtualMachine(machine=dbmachine, name=dbmachine.label,
                                  holder=dbcluster.resholder)
            dbcluster.validate()

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if cluster:
            plenaries.append(Plenary.get_plenary(dbcluster))
            plenaries.append(Plenary.get_plenary(dbvm))

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.write()
        return
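The chassis handling above is a classic get-or-create: look the slot up, create it if missing, then point it at the new machine. A generic helper in that spirit is sketched below; the helper name and its use here are illustrative, not part of the broker API.

def get_or_create(session, model, **filters):
    # Illustrative only: fetch a row matching the filters or create it.
    instance = session.query(model).filter_by(**filters).first()
    if not instance:
        instance = model(**filters)
        session.add(instance)
    return instance

# dbslot = get_or_create(session, ChassisSlot, chassis=dbchassis,
#                        slot_number=slot)
# dbslot.machine = dbmachine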
Example #42
    def render(
            self,
            session,
            logger,
            # search_cluster
            archetype,
            cluster_type,
            personality,
            domain,
            sandbox,
            branch,
            buildstatus,
            allowed_archetype,
            allowed_personality,
            down_hosts_threshold,
            down_maint_threshold,
            max_members,
            member_archetype,
            member_hostname,
            member_personality,
            capacity_override,
            cluster,
            esx_guest,
            instance,
            esx_metacluster,
            service,
            share,
            esx_share,
            esx_switch,
            esx_virtual_machine,
            fullinfo,
            style,
            **arguments):

        if esx_share:
            self.deprecated_option("esx_share",
                                   "Please use --share instead.",
                                   logger=logger,
                                   **arguments)
            share = esx_share

        if cluster_type == 'esx':
            cls = EsxCluster
        else:
            cls = Cluster

        # Don't load full objects if we only want to show their name
        if fullinfo or style != 'raw':
            q = session.query(cls)
        else:
            q = session.query(cls.name)

        # The ORM automatically de-duplicates the result if we query full
        # objects, but not when we query just the names. Tell the DB to do so.
        q = q.distinct()

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = ClusterLifecycle.get_unique(session,
                                                        buildstatus,
                                                        compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if cluster_type:
            q = q.filter_by(cluster_type=cluster_type)

        # Go through the arguments and make special dicts for each
        # specific set of location arguments that are stripped of the
        # given prefix.
        location_args = {'cluster_': {}, 'member_': {}}
        for prefix in location_args.keys():
            for (k, v) in arguments.items():
                if k.startswith(prefix):
                    # arguments['cluster_building'] = 'dd'
                    # becomes
                    # location_args['cluster_']['building'] = 'dd'
                    location_args[prefix][k.replace(prefix, '')] = v

        dblocation = get_location(session, **location_args['cluster_'])
        if dblocation:
            if location_args['cluster_']['exact_location']:
                q = q.filter_by(location_constraint=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Cluster.location_constraint_id.in_(childids))
        dblocation = get_location(session, **location_args['member_'])
        if dblocation:
            q = q.join('_hosts', 'host', 'machine')
            if location_args['member_']['exact_location']:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))
            q = q.reset_joinpoint()

        # esx stuff
        if cluster:
            q = q.filter_by(name=cluster)
        if esx_metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   esx_metacluster,
                                                   compel=True)
            q = q.join('_metacluster')
            q = q.filter_by(metacluster=dbmetacluster)
            q = q.reset_joinpoint()
        if esx_virtual_machine:
            dbvm = Machine.get_unique(session,
                                      esx_virtual_machine,
                                      compel=True)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine)
            q = q.filter_by(machine=dbvm)
            q = q.reset_joinpoint()
        if esx_guest:
            dbguest = hostname_to_host(session, esx_guest)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine, Machine)
            q = q.filter_by(host=dbguest)
            q = q.reset_joinpoint()
        if capacity_override:
            q = q.filter(EsxCluster.memory_capacity != None)
        if esx_switch:
            dbswitch = Switch.get_unique(session, esx_switch, compel=True)
            q = q.filter_by(switch=dbswitch)

        if service:
            dbservice = Service.get_unique(session, name=service, compel=True)
            if instance:
                dbsi = ServiceInstance.get_unique(session,
                                                  name=instance,
                                                  service=dbservice,
                                                  compel=True)
                q = q.filter(Cluster.service_bindings.contains(dbsi))
            else:
                q = q.join('service_bindings')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('service_bindings')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if share:
            # Perform sanity check on the share name
            q2 = session.query(Share)
            q2 = q2.filter_by(name=share)
            if not q2.first():
                raise NotFoundException("Share %s not found." % share)

            CR = aliased(ClusterResource)
            S1 = aliased(Share)
            S2 = aliased(Share)
            RG = aliased(ResourceGroup)
            BR = aliased(BundleResource)
            q = q.join(CR)
            q = q.outerjoin((S1, S1.holder_id == CR.id))
            q = q.outerjoin((RG, RG.holder_id == CR.id),
                            (BR, BR.resourcegroup_id == RG.id),
                            (S2, S2.holder_id == BR.id))
            q = q.filter(or_(S1.name == share, S2.name == share))
            q = q.reset_joinpoint()

        if max_members:
            q = q.filter_by(max_hosts=max_members)

        if down_hosts_threshold:
            (pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
            q = q.filter_by(down_hosts_percent=pct)
            q = q.filter_by(down_hosts_threshold=dht)

        if down_maint_threshold:
            (pct, dmt) = Cluster.parse_threshold(down_maint_threshold)
            q = q.filter_by(down_maint_percent=pct)
            q = q.filter_by(down_maint_threshold=dmt)

        if allowed_archetype:
            # Added to the searches as appropriate below.
            dbaa = Archetype.get_unique(session,
                                        allowed_archetype,
                                        compel=True)
        if allowed_personality and allowed_archetype:
            dbap = Personality.get_unique(session,
                                          archetype=dbaa,
                                          name=allowed_personality,
                                          compel=True)
            q = q.filter(Cluster.allowed_personalities.contains(dbap))
        elif allowed_personality:
            q = q.join('allowed_personalities')
            q = q.filter_by(name=allowed_personality)
            q = q.reset_joinpoint()
        elif allowed_archetype:
            q = q.join('allowed_personalities')
            q = q.filter_by(archetype=dbaa)
            q = q.reset_joinpoint()

        if member_hostname:
            dbhost = hostname_to_host(session, member_hostname)
            q = q.join('_hosts')
            q = q.filter_by(host=dbhost)
            q = q.reset_joinpoint()

        if member_archetype:
            # Added to the searches as appropriate below.
            dbma = Archetype.get_unique(session, member_archetype, compel=True)
        if member_personality and member_archetype:
            q = q.join('_hosts', 'host')
            dbmp = Personality.get_unique(session,
                                          archetype=dbma,
                                          name=member_personality,
                                          compel=True)
            q = q.filter_by(personality=dbmp)
            q = q.reset_joinpoint()
        elif member_personality:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(name=member_personality)
            q = q.reset_joinpoint()
        elif member_archetype:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(archetype=dbma)
            q = q.reset_joinpoint()

        if cluster_type == 'esx':
            q = q.order_by(EsxCluster.name)
        else:
            q = q.order_by(Cluster.name)

        if fullinfo:
            return q.all()
        return SimpleClusterList(q.all())
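The prefix-stripping loop near the top of this search (turning cluster_building into location_args['cluster_']['building']) can be read as a small standalone helper. The sketch below is an illustrative restatement only; note that it slices off the prefix rather than using str.replace(), which avoids mangling a key in which the prefix string happens to appear again later in the name.

def split_location_args(arguments, prefixes=('cluster_', 'member_')):
    # Illustrative restatement of the loop above.
    location_args = dict((prefix, {}) for prefix in prefixes)
    for prefix in prefixes:
        for key, value in arguments.items():
            if key.startswith(prefix):
                location_args[prefix][key[len(prefix):]] = value
    return location_args

# split_location_args({'cluster_building': 'dd', 'member_rack': 'r1'})
# -> {'cluster_': {'building': 'dd'}, 'member_': {'rack': 'r1'}}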
Example #43
    def render(self, session, logger, domain, sandbox, cluster, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Managing clusters to {0:l} is not allowed.".format(dbbranch))

        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbsource = dbcluster.branch
        dbsource_author = dbcluster.sandbox_author
        old_branch = dbcluster.branch.name

        if not force:
            validate_branch_commits(dbsource, dbsource_author, dbbranch,
                                    dbauthor, logger, self.config)

        if dbcluster.metacluster:
            raise ArgumentError(
                "{0.name} is member of metacluster {1.name}, "
                "it must be managed at metacluster level.".format(
                    dbcluster, dbcluster.metacluster))

        old_branch = dbcluster.branch.name
        plenaries = PlenaryCollection(logger=logger)

        # manage at metacluster level
        if dbcluster.cluster_type == 'meta':
            clusters = dbcluster.members

            dbcluster.branch = dbbranch
            dbcluster.sandbox_author = dbauthor
            session.add(dbcluster)
            plenaries.append(Plenary.get_plenary(dbcluster))
        else:
            clusters = [dbcluster]

        for cluster in clusters:
            # manage at cluster level
            # Need to set the new branch *before* creating the plenary objects.
            cluster.branch = dbbranch
            cluster.sandbox_author = dbauthor
            session.add(cluster)
            plenaries.append(Plenary.get_plenary(cluster))
            for dbhost in cluster.hosts:
                dbhost.branch = dbbranch
                dbhost.sandbox_author = dbauthor
                session.add(dbhost)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # We're crossing domains, need to lock everything.
        key = CompileKey(logger=logger)
        try:
            lock_queue.acquire(key)
            plenaries.stash()
            plenaries.cleanup(old_branch, locked=True)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #44
    def render(self, session, logger, cluster, personality,
               max_members, fix_location, down_hosts_threshold,
               maint_threshold, comments,
               # ESX specific options
               switch, memory_capacity, clear_overrides, vm_to_host_ratio,
               **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        self.check_cluster_type(dbcluster, forbid=MetaCluster)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if vm_to_host_ratio:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            dbcluster.vm_count = vm_count
            dbcluster.host_count = host_count

        if switch is not None:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbnetdev = NetworkDevice.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbnetdev))
            else:
                dbnetdev = None
            dbcluster.network_device = dbnetdev

        if memory_capacity is not None:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            dbcluster.memory_capacity = memory_capacity

        if clear_overrides is not None:
            self.check_cluster_type(dbcluster, require=EsxCluster)
            dbcluster.memory_capacity = None

        update_cluster_location(session, logger, dbcluster, fix_location,
                                plenaries, **arguments)

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality

        if max_members is not None:
            # Allow removing the restriction
            if max_members < 0:
                max_members = None
            dbcluster.max_hosts = max_members

        if comments is not None:
            dbcluster.comments = comments

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)

        session.flush()
        dbcluster.validate()

        plenaries.write(locked=False)

        return
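This newer variant of update_cluster replaces the explicit cluster_type checks with a check_cluster_type() helper (forbid=MetaCluster, require=EsxCluster). Its implementation is not shown in this listing; a plausible shape, offered purely as a sketch, would be:

def check_cluster_type(self, dbcluster, require=None, forbid=None):
    # Illustrative sketch only; assumes the broker's ArgumentError and
    # may word its errors differently from the real helper.
    if require and not isinstance(dbcluster, require):
        raise ArgumentError("%s should be a(n) %s." %
                            (dbcluster, require.__name__))
    if forbid and isinstance(dbcluster, forbid):
        raise ArgumentError("%s should not be a(n) %s." %
                            (dbcluster, forbid.__name__))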
Example #45
    def render(self, session, logger,
               # search_cluster
               archetype, cluster_type, personality,
               domain, sandbox, branch, buildstatus,
               allowed_archetype, allowed_personality,
               down_hosts_threshold, down_maint_threshold, max_members,
               member_archetype, member_hostname, member_personality,
               capacity_override, cluster, esx_guest, instance,
               esx_metacluster, service, share, esx_share,
               esx_switch, esx_virtual_machine,
               fullinfo, style, **arguments):

        if esx_share:
            self.deprecated_option("esx_share", "Please use --share instead.",
                                   logger=logger, **arguments)
            share = esx_share

        if cluster_type == 'esx':
            cls = EsxCluster
        else:
            cls = Cluster

        # Don't load full objects if we only want to show their name
        if fullinfo or style != 'raw':
            q = session.query(cls)
        else:
            q = session.query(cls.name)

        # The ORM automatically de-duplicates the result if we query full
        # objects, but not when we query just the names. Tell the DB to do so.
        q = q.distinct()

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                                        compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if cluster_type:
            q = q.filter_by(cluster_type=cluster_type)

        # Go through the arguments and make special dicts for each
        # specific set of location arguments that are stripped of the
        # given prefix.
        location_args = {'cluster_': {}, 'member_': {}}
        for prefix in location_args.keys():
            for (k, v) in arguments.items():
                if k.startswith(prefix):
                    # arguments['cluster_building'] = 'dd'
                    # becomes
                    # location_args['cluster_']['building'] = 'dd'
                    location_args[prefix][k.replace(prefix, '')] = v

        dblocation = get_location(session, **location_args['cluster_'])
        if dblocation:
            if location_args['cluster_']['exact_location']:
                q = q.filter_by(location_constraint=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Cluster.location_constraint_id.in_(childids))
        dblocation = get_location(session, **location_args['member_'])
        if dblocation:
            q = q.join('_hosts', 'host', 'machine')
            if location_args['member_']['exact_location']:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))
            q = q.reset_joinpoint()

        # esx stuff
        if cluster:
            q = q.filter_by(name=cluster)
        if esx_metacluster:
            dbmetacluster = MetaCluster.get_unique(session, esx_metacluster,
                                                   compel=True)
            q = q.join('_metacluster')
            q = q.filter_by(metacluster=dbmetacluster)
            q = q.reset_joinpoint()
        if esx_virtual_machine:
            dbvm = Machine.get_unique(session, esx_virtual_machine, compel=True)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine)
            q = q.filter_by(machine=dbvm)
            q = q.reset_joinpoint()
        if esx_guest:
            dbguest = hostname_to_host(session, esx_guest)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine, Machine)
            q = q.filter_by(host=dbguest)
            q = q.reset_joinpoint()
        if capacity_override:
            q = q.filter(EsxCluster.memory_capacity != None)
        if esx_switch:
            dbswitch = Switch.get_unique(session, esx_switch, compel=True)
            q = q.filter_by(switch=dbswitch)

        if service:
            dbservice = Service.get_unique(session, name=service, compel=True)
            if instance:
                dbsi = ServiceInstance.get_unique(session, name=instance,
                                                  service=dbservice,
                                                  compel=True)
                q = q.filter(Cluster.service_bindings.contains(dbsi))
            else:
                q = q.join('service_bindings')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('service_bindings')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if share:
            # Perform sanity check on the share name
            q2 = session.query(Share)
            q2 = q2.filter_by(name=share)
            if not q2.first():
                raise NotFoundException("Share %s not found." % share)

            CR = aliased(ClusterResource)
            S1 = aliased(Share)
            S2 = aliased(Share)
            RG = aliased(ResourceGroup)
            BR = aliased(BundleResource)
            q = q.join(CR)
            q = q.outerjoin((S1, S1.holder_id == CR.id))
            q = q.outerjoin((RG, RG.holder_id == CR.id),
                            (BR, BR.resourcegroup_id == RG.id),
                            (S2, S2.holder_id == BR.id))
            q = q.filter(or_(S1.name == share, S2.name == share))
            q = q.reset_joinpoint()

        if max_members:
            q = q.filter_by(max_hosts=max_members)

        if down_hosts_threshold:
            (pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
            q = q.filter_by(down_hosts_percent=pct)
            q = q.filter_by(down_hosts_threshold=dht)

        if down_maint_threshold:
            (pct, dmt) = Cluster.parse_threshold(down_maint_threshold)
            q = q.filter_by(down_maint_percent=pct)
            q = q.filter_by(down_maint_threshold=dmt)

        if allowed_archetype:
            # Added to the searches as appropriate below.
            dbaa = Archetype.get_unique(session, allowed_archetype,
                                        compel=True)
        if allowed_personality and allowed_archetype:
            dbap = Personality.get_unique(session, archetype=dbaa,
                                          name=allowed_personality,
                                          compel=True)
            q = q.filter(Cluster.allowed_personalities.contains(dbap))
        elif allowed_personality:
            q = q.join('allowed_personalities')
            q = q.filter_by(name=allowed_personality)
            q = q.reset_joinpoint()
        elif allowed_archetype:
            q = q.join('allowed_personalities')
            q = q.filter_by(archetype=dbaa)
            q = q.reset_joinpoint()

        if member_hostname:
            dbhost = hostname_to_host(session, member_hostname)
            q = q.join('_hosts')
            q = q.filter_by(host=dbhost)
            q = q.reset_joinpoint()

        if member_archetype:
            # Added to the searches as appropriate below.
            dbma = Archetype.get_unique(session, member_archetype, compel=True)
        if member_personality and member_archetype:
            q = q.join('_hosts', 'host')
            dbmp = Personality.get_unique(session, archetype=dbma,
                                          name=member_personality, compel=True)
            q = q.filter_by(personality=dbmp)
            q = q.reset_joinpoint()
        elif member_personality:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(name=member_personality)
            q = q.reset_joinpoint()
        elif member_archetype:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(archetype=dbma)
            q = q.reset_joinpoint()

        if cluster_type == 'esx':
            q = q.order_by(EsxCluster.name)
        else:
            q = q.order_by(Cluster.name)

        if fullinfo:
            return q.all()
        return SimpleClusterList(q.all())
Example #46
    def render(self, session, logger, domain, sandbox, cluster, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Managing clusters to {0:l} is not allowed."
                                .format(dbbranch))

        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbsource = dbcluster.branch
        dbsource_author = dbcluster.sandbox_author

        if not force:
            validate_branch_commits(dbsource, dbsource_author,
                                    dbbranch, dbauthor, logger, self.config)

        if dbcluster.metacluster:
            raise ArgumentError("{0.name} is member of metacluster {1.name}, "
                                "it must be managed at metacluster level.".
                                format(dbcluster, dbcluster.metacluster))

        plenaries = PlenaryCollection(logger=logger)

        # manage at metacluster level
        if isinstance(dbcluster, MetaCluster):
            plenaries.append(Plenary.get_plenary(dbcluster))
            clusters = dbcluster.members

            dbcluster.branch = dbbranch
            dbcluster.sandbox_author = dbauthor
        else:
            clusters = [dbcluster]

        for cluster in clusters:
            plenaries.append(Plenary.get_plenary(cluster))

            cluster.branch = dbbranch
            cluster.sandbox_author = dbauthor

            for dbhost in cluster.hosts:
                plenaries.append(Plenary.get_plenary(dbhost))

                dbhost.branch = dbbranch
                dbhost.sandbox_author = dbauthor

        session.flush()

        # We're crossing domains, need to lock everything.
        with CompileKey.merge([CompileKey(domain=dbsource.name, logger=logger),
                               CompileKey(domain=dbbranch.name, logger=logger)]):
            plenaries.stash()
            try:
                plenaries.write(locked=True)
            except:
                plenaries.restore_stash()
                raise

        return
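Note the difference in locking style: this variant takes the compile lock with a "with CompileKey.merge([...])" block, while the older variants pair lock_queue.acquire() and lock_queue.release() in try/finally. If only acquire/release semantics are needed, the two forms are equivalent; the sketch below wraps the explicit form in a context manager for illustration (it is not the broker's CompileKey implementation, and it assumes the lock_queue object used throughout these examples).

from contextlib import contextmanager

@contextmanager
def compile_lock(key):
    # Equivalent to the explicit try/finally around lock_queue used in
    # the lock_queue-based examples; illustrative only.
    lock_queue.acquire(key)
    try:
        yield key
    finally:
        lock_queue.release(key)

# with compile_lock(CompileKey(logger=logger)):
#     plenaries.stash()
#     plenaries.write(locked=True)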
Example #47
    def render(self, session, logger, domain, sandbox, cluster, force,
               **arguments):
        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Managing clusters to {0:l} is not allowed."
                                .format(dbbranch))

        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        dbsource = dbcluster.branch
        dbsource_author = dbcluster.sandbox_author
        old_branch = dbcluster.branch.name

        if not force:
            validate_branch_commits(dbsource, dbsource_author,
                                    dbbranch, dbauthor, logger, self.config)

        if dbcluster.metacluster:
            raise ArgumentError("{0.name} is member of metacluster {1.name}, "
                                "it must be managed at metacluster level.".
                                format(dbcluster, dbcluster.metacluster))

        plenaries = PlenaryCollection(logger=logger)

        # manage at metacluster level
        if dbcluster.cluster_type == 'meta':
            clusters = dbcluster.members

            dbcluster.branch = dbbranch
            dbcluster.sandbox_author = dbauthor
            session.add(dbcluster)
            plenaries.append(Plenary.get_plenary(dbcluster))
        else:
            clusters = [dbcluster]

        for cluster in clusters:
            # manage at cluster level
            # Need to set the new branch *before* creating the plenary objects.
            cluster.branch = dbbranch
            cluster.sandbox_author = dbauthor
            session.add(cluster)
            plenaries.append(Plenary.get_plenary(cluster))
            for dbhost in cluster.hosts:
                dbhost.branch = dbbranch
                dbhost.sandbox_author = dbauthor
                session.add(dbhost)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # We're crossing domains, need to lock everything.
        key = CompileKey(logger=logger)
        try:
            lock_queue.acquire(key)
            plenaries.stash()
            plenaries.cleanup(old_branch, locked=True)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #48
    def render(self, session, logger, cluster, archetype, personality, domain,
               sandbox, max_members, down_hosts_threshold, maint_threshold,
               buildstatus, comments, vm_to_host_ratio, switch, metacluster,
               **arguments):

        validate_nlist_key("cluster", cluster)
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)
        if not dbpersonality.is_cluster:
            raise ArgumentError("%s is not a cluster personality." %
                                personality)

        ctype = dbpersonality.archetype.cluster_type
        section = "archetype_" + dbpersonality.archetype.name

        if not buildstatus:
            buildstatus = "build"
        dbstatus = ClusterLifecycle.get_instance(session, buildstatus)

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Adding clusters to {0:l} is not allowed."
                                .format(dbbranch))

        dbloc = get_location(session, **arguments)
        if not dbloc:
            raise ArgumentError("Adding a cluster requires a location "
                                "constraint.")
        if not dbloc.campus:
            raise ArgumentError("{0} is not within a campus.".format(dbloc))

        if max_members is None:
            if self.config.has_option(section, "max_members_default"):
                max_members = self.config.getint(section, "max_members_default")

        Cluster.get_unique(session, cluster, preclude=True)
        # Not finding the cluster type is an internal consistency issue, so make
        # that show up in the logs by using AquilonError
        clus_type = Cluster.polymorphic_subclass(ctype, "Unknown cluster type",
                                                 error=AquilonError)

        (down_hosts_pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
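        # Assumption for illustration: parse_threshold() accepts either an
        # absolute count (e.g. "2") or a percentage (e.g. "50%") and returns
        # (is_percent, value), so "50%" would presumably yield (True, 50).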

        kw = {'name': cluster,
              'location_constraint': dbloc,
              'personality': dbpersonality,
              'max_hosts': max_members,
              'branch': dbbranch,
              'sandbox_author': dbauthor,
              'down_hosts_threshold': dht,
              'down_hosts_percent': down_hosts_pct,
              'status': dbstatus,
              'comments': comments}

        if ctype == 'esx':
            if vm_to_host_ratio is None:
                if self.config.has_option(section, "vm_to_host_ratio"):
                    vm_to_host_ratio = self.config.get(section,
                                                       "vm_to_host_ratio")
                else:
                    vm_to_host_ratio = "1:1"
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
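            # Assumption for illustration: force_ratio() splits a "<vm>:<host>"
            # string, so "16:1" would presumably yield vm_count=16 and
            # host_count=1, and raise ArgumentError for a malformed ratio.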
            kw["vm_count"] = vm_count
            kw["host_count"] = host_count

        if switch and hasattr(clus_type, 'network_device'):
            kw['network_device'] = NetworkDevice.get_unique(session,
                                                            switch,
                                                            compel=True)

        if maint_threshold is not None:
            (down_hosts_pct, dht) = Cluster.parse_threshold(maint_threshold)
            kw['down_maint_threshold'] = dht
            kw['down_maint_percent'] = down_hosts_pct

        dbcluster = clus_type(**kw)

        plenaries = PlenaryCollection(logger=logger)

        if metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   metacluster,
                                                   compel=True)

            dbmetacluster.members.append(dbcluster)

            plenaries.append(Plenary.get_plenary(dbmetacluster))

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        plenaries.write()

        return
Example #49
    def render(self, session, logger, hostname, cluster, personality,
               **arguments):
        dbhost = hostname_to_host(session, hostname)
        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.status.name == 'decommissioned':
            raise ArgumentError("Cannot add hosts to decommissioned clusters.")

        # We only support changing personality within the same
        # archetype. The archetype decides things like which OS, how
        # it builds (dhcp, etc), whether it's compilable, and
        # switching all of that by side-effect seems wrong
        # somehow. And besides, it would make the user-interface and
        # implementation for this command ugly in order to support
        # changing all of those options.
        personality_change = False
        if personality is not None:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbhost.archetype,
                                                   compel=True)
            if dbhost.personality != dbpersonality:
                dbhost.personality = dbpersonality
                personality_change = True

        # Allow for non-restricted clusters (the default?)
        if (len(dbcluster.allowed_personalities) > 0
                and dbhost.personality not in dbcluster.allowed_personalities):
            raise ArgumentError(
                "The personality %s for %s is not allowed "
                "by the cluster. Specify --personality "
                "and provide one of %s" %
                (dbhost.personality, dbhost.fqdn, ", ".join(
                    [x.name for x in dbcluster.allowed_personalities])))

        # Now that we've changed the personality, we can check
        # if this is a valid membership change
        dbcluster.validate_membership(dbhost)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbcluster))

        if dbhost.cluster and dbhost.cluster != dbcluster:
            logger.client_info("Removing {0:l} from {1:l}.".format(
                dbhost, dbhost.cluster))
            old_cluster = dbhost.cluster
            old_cluster.hosts.remove(dbhost)
            remove_service_addresses(old_cluster, dbhost)
            old_cluster.validate()
            session.expire(dbhost, ['_cluster'])
            plenaries.append(Plenary.get_plenary(old_cluster))

        # Apply the service addresses to the new member
        for res in walk_resources(dbcluster):
            if not isinstance(res, ServiceAddress):
                continue
            apply_service_address(dbhost, res.interfaces, res)

        if dbhost.cluster:
            if personality_change:
                raise ArgumentError(
                    "{0:l} already in {1:l}, use "
                    "aq reconfigure to change personality.".format(
                        dbhost, dbhost.cluster))
            # the cluster has not changed, therefore there's nothing
            # to do here.
            return

        # Calculate the node index: build a map of all possible values, remove
        # the used ones, and pick the smallest remaining one
        node_index_map = set(xrange(len(dbcluster._hosts) + 1))
        for link in dbcluster._hosts:
            # The cluster may have been bigger in the past, so node indexes may
            # be larger than the current cluster size
            try:
                node_index_map.remove(link.node_index)
            except KeyError:
                pass
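        # Worked example (hypothetical values): with three members holding
        # node indexes {0, 2, 3}, the candidate set is {0, 1, 2, 3}; removing
        # the used values leaves {1}, so the joining host gets index 1.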

        dbcluster.hosts.append((dbhost, min(node_index_map)))
        dbcluster.validate()

        # Adjust the host's lifecycle state to match the new cluster: demote a
        # ready host joining a not-yet-ready cluster, promote an almostready
        # host joining a ready cluster.
        if dbhost.status.name == 'ready':
            if dbcluster.status.name != 'ready':
                dbalmost = HostLifecycle.get_unique(session,
                                                    'almostready',
                                                    compel=True)
                dbhost.status.transition(dbhost, dbalmost)
                plenaries.append(Plenary.get_plenary(dbhost))
        elif dbhost.status.name == 'almostready':
            if dbcluster.status.name == 'ready':
                dbready = HostLifecycle.get_unique(session,
                                                   'ready',
                                                   compel=True)
                dbhost.status.transition(dbhost, dbready)
                plenaries.append(Plenary.get_plenary(dbhost))

        session.flush()

        # Enforce that service instances are set correctly for the
        # new cluster association.
        chooser = Chooser(dbhost, logger=logger)
        chooser.set_required()
        chooser.flush_changes()
        # the chooser will include the host plenary
        key = CompileKey.merge(
            [chooser.get_write_key(),
             plenaries.get_write_key()])
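        # Merging the two write keys gives one lock that covers every template
        # the chooser and the plenary collection are about to touch.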

        try:
            lock_queue.acquire(key)
            chooser.write_plenary_templates(locked=True)
            plenaries.write(locked=True)
        except:
            chooser.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #50
    def render(self, session, logger, machine, model, vendor, serial, chassis,
               slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
               comments, **arguments):
        dblocation = get_location(session,
                                  query_options=[
                                      subqueryload('parents'),
                                      joinedload('parents.dns_maps')
                                  ],
                                  **arguments)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if slot is None:
                raise ArgumentError("The --chassis option requires a --slot.")
            if dblocation and dblocation != dbchassis.location:
                raise ArgumentError("{0} conflicts with chassis location "
                                    "{1}.".format(dblocation,
                                                  dbchassis.location))
            dblocation = dbchassis.location
        elif slot is not None:
            raise ArgumentError("The --slot option requires a --chassis.")

        dbmodel = Model.get_unique(session,
                                   name=model,
                                   vendor=vendor,
                                   compel=True)

        if dbmodel.machine_type not in [
                'blade', 'rackmount', 'workstation', 'aurora_node',
                'virtual_machine'
        ]:
            raise ArgumentError("The add_machine command cannot add machines "
                                "of type %(type)s.  Try 'add %(type)s'." %
                                {"type": dbmodel.machine_type})

        if cluster:
            if dbmodel.machine_type != 'virtual_machine':
                raise ArgumentError("Only virtual machines can have a cluster "
                                    "attribute.")
            dbcluster = Cluster.get_unique(session,
                                           cluster,
                                           compel=ArgumentError)
            # This test could be either archetype or cluster_type
            if dbcluster.personality.archetype.name != 'esx_cluster':
                raise ArgumentError("Can only add virtual machines to "
                                    "clusters with archetype esx_cluster.")
            # TODO: implement the same check for vmhosts.
            if dbcluster.status.name == 'decommissioned':
                raise ArgumentError("Cannot add virtual machines to "
                                    "decommissioned clusters.")
            if dblocation and dbcluster.location_constraint != dblocation:
                raise ArgumentError("Cannot override cluster location {0} "
                                    "with location {1}.".format(
                                        dbcluster.location_constraint,
                                        dblocation))
            dblocation = dbcluster.location_constraint
        elif dbmodel.machine_type == 'virtual_machine':
            raise ArgumentError("Virtual machines must be assigned to a "
                                "cluster.")

        Machine.get_unique(session, machine, preclude=True)
        dbmachine = create_machine(session, machine, dblocation, dbmodel,
                                   cpuname, cpuvendor, cpuspeed, cpucount,
                                   memory, serial, comments)

        if chassis:
            # FIXME: Are virtual machines allowed to be in a chassis?
            dbslot = session.query(ChassisSlot).filter_by(
                chassis=dbchassis, slot_number=slot).first()
            if not dbslot:
                dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
            dbslot.machine = dbmachine
            session.add(dbslot)

        if cluster:
            if not dbcluster.resholder:
                dbcluster.resholder = ClusterResource(cluster=dbcluster)
            dbvm = VirtualMachine(machine=dbmachine,
                                  name=dbmachine.label,
                                  holder=dbcluster.resholder)
            dbcluster.validate()

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if cluster:
            plenaries.append(Plenary.get_plenary(dbcluster))
            plenaries.append(Plenary.get_plenary(dbvm))

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.write()
        return
Example #51
    def render(self, session, logger, hostname, machine, archetype,
               buildstatus, personality, osname, osversion, service, instance,
               model, machine_type, vendor, serial, cluster, guest_on_cluster,
               guest_on_share, member_cluster_share, domain, sandbox, branch,
               sandbox_owner, dns_domain, shortname, mac, ip, networkip,
               network_environment, exact_location, server_of_service,
               server_of_instance, grn, eon_id, fullinfo, **arguments):
        dbnet_env = NetworkEnvironment.get_unique_or_default(
            session, network_environment)

        q = session.query(Host)

        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            q = q.filter_by(machine=dbmachine)

        # Add the machine definition and the primary name. Use aliases to make
        # sure the end result will be ordered by primary name.
        PriDns = aliased(DnsRecord)
        PriFqdn = aliased(Fqdn)
        PriDomain = aliased(DnsDomain)
        q = q.join(Machine, (PriDns, PriDns.id == Machine.primary_name_id),
                   (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
                   (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
        q = q.order_by(PriFqdn.name, PriDomain.name)
        q = q.options(
            contains_eager('machine'),
            contains_eager('machine.primary_name', alias=PriDns),
            contains_eager('machine.primary_name.fqdn', alias=PriFqdn),
            contains_eager('machine.primary_name.fqdn.dns_domain',
                           alias=PriDomain))
        q = q.reset_joinpoint()

        # Hardware-specific filters
        dblocation = get_location(session, **arguments)
        if dblocation:
            if exact_location:
                q = q.filter(Machine.location == dblocation)
            else:
                childids = dblocation.offspring_ids()
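                # offspring_ids() presumably covers the location itself and all
                # of its descendants, so this matches machines anywhere under
                # the given location rather than only exact matches.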
                q = q.filter(Machine.location_id.in_(childids))

        if model or vendor or machine_type:
            subq = Model.get_matching_query(session,
                                            name=model,
                                            vendor=vendor,
                                            machine_type=machine_type,
                                            compel=True)
            q = q.filter(Machine.model_id.in_(subq))

        if serial:
            self.deprecated_option(
                "serial",
                "Please use search machine --serial instead.",
                logger=logger,
                **arguments)
            q = q.filter(Machine.serial_no == serial)

        # DNS IP address related filters
        if mac or ip or networkip or hostname or dns_domain or shortname:
            # Inner joins are cheaper than outer joins, so make some effort to
            # use inner joins when possible
            if mac or ip or networkip:
                q = q.join(Interface)
            else:
                q = q.outerjoin(Interface)
            if ip or networkip:
                q = q.join(AddressAssignment, Network, from_joinpoint=True)
            else:
                q = q.outerjoin(AddressAssignment,
                                Network,
                                from_joinpoint=True)

            if mac:
                self.deprecated_option("mac", "Please use search machine "
                                       "--mac instead.",
                                       logger=logger,
                                       **arguments)
                q = q.filter(Interface.mac == mac)
            if ip:
                q = q.filter(AddressAssignment.ip == ip)
                q = q.filter(Network.network_environment == dbnet_env)
            if networkip:
                dbnetwork = get_network_byip(session, networkip, dbnet_env)
                q = q.filter(AddressAssignment.network == dbnetwork)

            dbdns_domain = None
            if hostname:
                (shortname, dbdns_domain) = parse_fqdn(session, hostname)
            if dns_domain:
                dbdns_domain = DnsDomain.get_unique(session,
                                                    dns_domain,
                                                    compel=True)

            if shortname or dbdns_domain:
                ARecAlias = aliased(ARecord)
                ARecFqdn = aliased(Fqdn)
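                # The outer join below lets a host match on either its primary
                # name (PriFqdn) or any other address record (ARecFqdn) bound
                # to one of its addresses.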

                q = q.outerjoin(
                    (ARecAlias,
                     and_(ARecAlias.ip == AddressAssignment.ip,
                          ARecAlias.network_id
                          == AddressAssignment.network_id)),
                    (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
                if shortname:
                    q = q.filter(
                        or_(ARecFqdn.name == shortname,
                            PriFqdn.name == shortname))
                if dbdns_domain:
                    q = q.filter(
                        or_(ARecFqdn.dns_domain == dbdns_domain,
                            PriFqdn.dns_domain == dbdns_domain))
            q = q.reset_joinpoint()

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if sandbox_owner:
            dbauthor = get_user_principal(session, sandbox_owner)

        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = HostLifecycle.get_unique(session,
                                                     buildstatus,
                                                     compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if osname and osversion and archetype:
            # archetype was already resolved above
            dbos = OperatingSystem.get_unique(session,
                                              name=osname,
                                              version=osversion,
                                              archetype=dbarchetype,
                                              compel=True)
            q = q.filter_by(operating_system=dbos)
        elif osname or osversion:
            q = q.join('operating_system')
            if osname:
                q = q.filter_by(name=osname)
            if osversion:
                q = q.filter_by(version=osversion)
            q = q.reset_joinpoint()

        if service:
            dbservice = Service.get_unique(session, service, compel=True)
            if instance:
                dbsi = get_service_instance(session, dbservice, instance)
                q = q.filter(Host.services_used.contains(dbsi))
            else:
                q = q.join('services_used')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('services_used')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if server_of_service:
            dbserver_service = Service.get_unique(session,
                                                  server_of_service,
                                                  compel=True)
            if server_of_instance:
                dbssi = get_service_instance(session, dbserver_service,
                                             server_of_instance)
                q = q.join('_services_provided')
                q = q.filter_by(service_instance=dbssi)
                q = q.reset_joinpoint()
            else:
                q = q.join('_services_provided', 'service_instance')
                q = q.filter_by(service=dbserver_service)
                q = q.reset_joinpoint()
        elif server_of_instance:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(name=server_of_instance)
            q = q.reset_joinpoint()

        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if isinstance(dbcluster, MetaCluster):
                q = q.join('_cluster', 'cluster', '_metacluster')
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_cluster:
            # TODO: this does not handle metaclusters according to Wes
            dbcluster = Cluster.get_unique(session,
                                           guest_on_cluster,
                                           compel=True)
            q = q.join('machine', VirtualMachine, ClusterResource)
            q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_share:
            #v2
            v2shares = session.query(
                Share.id).filter_by(name=guest_on_share).all()
            if not v2shares:
                raise NotFoundException(
                    "No shares found with name {0}.".format(guest_on_share))

            NasAlias = aliased(VirtualDisk)
            q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if member_cluster_share:
            #v2
            v2shares = session.query(
                Share.id).filter_by(name=member_cluster_share).all()
            if not v2shares:
                raise NotFoundException(
                    "No shares found with name {0}.".format(guest_on_share))

            NasAlias = aliased(VirtualDisk)

            q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                       'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

            persq = session.query(Personality.id)
            persq = persq.outerjoin(PersonalityGrnMap)
            persq = persq.filter(
                or_(Personality.owner_eon_id == dbgrn.eon_id,
                    PersonalityGrnMap.eon_id == dbgrn.eon_id))
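            # A host matches if it owns the GRN directly, is mapped to it via
            # HostGrnMap, or belongs to a personality that owns or maps it.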
            q = q.outerjoin(HostGrnMap)
            q = q.filter(
                or_(Host.owner_eon_id == dbgrn.eon_id,
                    HostGrnMap.eon_id == dbgrn.eon_id,
                    Host.personality_id.in_(persq.subquery())))
            q = q.reset_joinpoint()

        if fullinfo:
            return q.all()
        return SimpleHostList(q.all())