Example #1
    def render(self, session, logger, service, instance, comments,
               **arguments):
        dbservice = session.query(Service).filter_by(name=service).first()
        if dbservice and instance is None:
            raise ArgumentError("Service %s already exists." % dbservice.name)
        if not dbservice:
            # "add_service --service foo --comments blah" should add the comments
            # to Service,
            # "add_service --service foo --instance bar --comments blah" should
            # add the comments to ServiceInstance
            if instance:
                srvcomments = None
            else:
                srvcomments = comments
            dbservice = Service(name=service, comments=srvcomments)
            session.add(dbservice)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbservice))

        if instance:
            ServiceInstance.get_unique(session, service=dbservice,
                                       name=instance, preclude=True)
            dbsi = ServiceInstance(service=dbservice, name=instance,
                                   comments=comments)
            session.add(dbsi)
            plenaries.append(Plenary.get_plenary(dbsi))

        session.flush()
        plenaries.write()
        return
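A minimal sketch of the comment-routing rule described in the comment above, kept separate from the broker code; the helper name below is made up for illustration only.

def _service_comments(instance, comments):
    # Comments attach to the new Service only when --instance was not given;
    # with --instance they are meant for the new ServiceInstance instead.
    return None if instance else comments

assert _service_comments(None, "blah") == "blah"
assert _service_comments("bar", "blah") is None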
Example #2
    def render(self, session, logger, service, instance, comments,
               **arguments):
        dbservice = Service.get_unique(session, service, compel=True)
        ServiceInstance.get_unique(session, service=dbservice, name=instance,
                                   preclude=True)

        dbsi = ServiceInstance(service=dbservice, name=instance,
                               comments=comments)
        session.add(dbsi)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbsi))

        session.flush()
        plenaries.write()
        return
Example #3
    def render(self, session, logger, service, instance, position, hostname,
               cluster, ip, resourcegroup, service_address, alias, **arguments):
        dbservice = Service.get_unique(session, service, compel=True)

        if instance:
            dbsi = ServiceInstance.get_unique(session, service=dbservice,
                                              name=instance, compel=True)
            dbinstances = [dbsi]
        else:
            # --position for multiple service instances sounds dangerous, so
            # disallow it until a real usecase emerges
            if position:
                raise ArgumentError("The --position option can only be "
                                    "specified for one service instance.")

            q = session.query(ServiceInstance)
            q = q.filter_by(service=dbservice)
            dbinstances = q.all()

        plenaries = PlenaryCollection(logger=logger)

        if position is not None:
            params = None
        else:
            params = lookup_target(session, plenaries, hostname, ip, cluster,
                                   resourcegroup, service_address, alias)

        for dbinstance in dbinstances:
            if position is not None:
                if position < 0 or position >= len(dbinstance.servers):
                    raise ArgumentError("Invalid server position.")
                dbsrv = dbinstance.servers[position]
                if dbsrv.host:
                    plenaries.append(Plenary.get_plenary(dbsrv.host))
                if dbsrv.cluster:
                    plenaries.append(Plenary.get_plenary(dbsrv.cluster))
            else:
                dbsrv = find_server(dbinstance, params)
                if not dbsrv:
                    if instance:
                        raise NotFoundException("No such server binding.")
                    continue

            plenaries.append(Plenary.get_plenary(dbinstance))

            if dbsrv.host:
                session.expire(dbsrv.host, ['services_provided'])
            if dbsrv.cluster:
                session.expire(dbsrv.cluster, ['services_provided'])
            dbinstance.servers.remove(dbsrv)

            if dbinstance.client_count > 0 and not dbinstance.servers:
                logger.warning("Warning: {0} was left without servers, "
                               "but it still has clients.".format(dbinstance))

        session.flush()

        plenaries.write()

        return
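The --position handling above indexes dbinstance.servers with zero-based positions; a small, self-contained sketch of that bounds check, using stand-in values:

servers = ["srv-a", "srv-b", "srv-c"]   # stand-ins for dbinstance.servers
position = 1
if position < 0 or position >= len(servers):
    raise ValueError("Invalid server position.")
dbsrv = servers[position]               # selects "srv-b"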
Example #4
def add_service_instance(sess, service_name, name):
    si = sess.query(ServiceInstance).filter_by(name=name).first()
    if not si:
        print 'Creating %s instance %s ' % (service_name, name)

        svc = sess.query(Service).filter_by(name=service_name).one()
        assert svc, 'No %s service in %s' % (service_name, func_name())

        si = ServiceInstance(name=name, service=svc)
        create(sess, si)
        assert si, 'no service instance created by %s' % func_name()
    return si
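A hypothetical usage of the helper above, assuming an open SQLAlchemy session named sess and a Service row that already exists; the service and instance names are illustrative only.

si = add_service_instance(sess, 'dns', 'ny-prod')   # 'dns' and 'ny-prod' are made-up names
assert si.name == 'ny-prod'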
Example #5
    def render(self, session, logger, service, instance, max_clients, default,
               comments, **arguments):
        dbservice = Service.get_unique(session, name=service, compel=True)
        dbsi = ServiceInstance.get_unique(session, service=dbservice,
                                          name=instance, compel=True)
        if default:
            dbsi.max_clients = None
        elif max_clients is not None:
            dbsi.max_clients = max_clients

        if comments is not None:
            dbsi.comments = comments

        session.flush()

        plenary = Plenary.get_plenary(dbsi, logger=logger)
        plenary.write()

        return
Example #6
    def render(self, session, logger, service, instance, max_clients, default,
               **arguments):
        dbservice = Service.get_unique(session, name=service, compel=True)
        dbsi = ServiceInstance.get_unique(session, service=dbservice,
                                          name=instance, compel=True)
        if default:
            dbsi.max_clients = None
        elif max_clients is not None:
            dbsi.max_clients = max_clients
        else:
            raise ArgumentError("Missing --max_clients or --default argument "
                                "to update service %s instance %s." %
                                (dbservice.name, dbsi.name))
        session.add(dbsi)
        session.flush()

        plenary = Plenary.get_plenary(dbsi, logger=logger)
        plenary.write()

        return
Example #7
    def render(self, session, logger, cluster, service, instance, **arguments):

        dbservice = Service.get_unique(session, service, compel=True)
        dbinstance = ServiceInstance.get_unique(session, service=dbservice,
                                                name=instance, compel=True)
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if dbinstance not in dbcluster.service_bindings:
            raise NotFoundException("{0} is not bound to {1:l}."
                                    .format(dbinstance, dbcluster))
        if dbservice in dbcluster.required_services:
            raise ArgumentError("Cannot remove cluster service instance "
                                "binding for %s cluster aligned service %s." %
                                (dbcluster.cluster_type, dbservice.name))
        dbcluster.service_bindings.remove(dbinstance)

        session.flush()

        plenary = Plenary.get_plenary(dbcluster, logger=logger)
        plenary.write()
        return
Example #8
    def render(self, session, service, instance, archetype, personality,
               networkip, **arguments):
        dbservice = Service.get_unique(session, service, compel=True)
        dbinstance = ServiceInstance.get_unique(session,
                                                service=dbservice,
                                                name=instance,
                                                compel=True)
        dblocation = get_location(session, **arguments)

        if networkip:
            dbnet_env = NetworkEnvironment.get_unique_or_default(session)
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
        else:
            dbnetwork = None

        if personality:
            if not archetype:
                # Can't get here with the standard aq client.
                raise ArgumentError("Specifying --personality requires you to "
                                    "also specify --archetype.")
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = session.query(PersonalityServiceMap)
            q = q.filter_by(personality=dbpersonality)
        else:
            q = session.query(ServiceMap)

        q = q.filter_by(location=dblocation,
                        service_instance=dbinstance,
                        network=dbnetwork)
        dbmap = q.first()

        if dbmap:
            session.delete(dbmap)
        session.flush()
        return
Example #9
    def render(self, session, logger, service, instance, **arguments):
        dbservice = Service.get_unique(session, service, compel=True)
        dbsi = ServiceInstance.get_unique(session, service=dbservice,
                                          name=instance, compel=True)
        if dbsi.client_count > 0:
            raise ArgumentError("Service %s, instance %s still has clients and "
                                "cannot be deleted." %
                                (dbservice.name, dbsi.name))
        if dbsi.servers:
            msg = ", ".join([srv.fqdn for srv in dbsi.servers])
            raise ArgumentError("Service %s, instance %s is still being "
                                "provided by servers: %s." %
                                (dbservice.name, dbsi.name, msg))

        # Depend on cascading to remove any mappings
        session.delete(dbsi)
        session.flush()

        plenary_info = Plenary.get_plenary(dbsi, logger=logger)
        plenary_info.remove()

        return
Example #10
    def render(self, session, logger, service, instance, default, server,
               generate, **kwargs):
        dbservice = Service.get_unique(session, service, compel=True)
        dbsi = ServiceInstance.get_unique(session, service=dbservice,
                                          name=instance, compel=True)
        if default:
            if server:
                cls = PlenaryServiceInstanceServerDefault
            else:
                cls = PlenaryServiceInstanceClientDefault
        else:
            if server:
                cls = PlenaryServiceInstanceServer
            else:
                cls = PlenaryServiceInstanceToplevel

        plenary_info = cls.get_plenary(dbsi, logger=logger)

        if generate:
            return plenary_info._generate_content()
        else:
            return plenary_info.read()
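The nested if/else above picks one of four plenary classes; the same selection can be read as a lookup table. A sketch only, reusing the class names from the example (it is not part of the original command):

PLENARY_BY_FLAGS = {
    (True, True): PlenaryServiceInstanceServerDefault,    # --default --server
    (True, False): PlenaryServiceInstanceClientDefault,   # --default only
    (False, True): PlenaryServiceInstanceServer,          # --server only
    (False, False): PlenaryServiceInstanceToplevel,       # neither flag
}
cls = PLENARY_BY_FLAGS[(bool(default), bool(server))]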
Example #11
def determine_helper_hostname(session, logger, config, dbswitch):
    """Try to figure out a useful helper from the mappings.
    """
    helper_name = config.get("broker", "poll_helper_service")
    if not helper_name:  # pragma: no cover
        return
    helper_service = Service.get_unique(session, helper_name,
                                        compel=InternalError)
    mapped_instances = ServiceInstance.get_mapped_instance_cache(
        dbpersonality=None, dblocation=dbswitch.location,
        dbservices=[helper_service])
    for dbsi in mapped_instances.get(helper_service, []):
        if dbsi.server_hosts:
            # Poor man's load balancing...
            jump = choice(dbsi.server_hosts).fqdn
            logger.client_info("Using jump host {0} from {1:l} to run CheckNet "
                               "for {2:l}.".format(jump, dbsi, dbswitch))
            return jump

    logger.client_info("No jump host for %s, calling CheckNet from %s." %
                       (dbswitch, config.get("broker", "hostname")))
    return None
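The "poor man's load balancing" above presumably relies on random.choice to spread the jump-host selection across the mapped servers; a self-contained sketch of the same idea with made-up host names:

from random import choice

server_fqdns = ['nyaqd1.example.com', 'nyaqd2.example.com']  # stand-ins for dbsi.server_hosts
jump = choice(server_fqdns)   # any one of the listed servers may be returned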
Example #12
    def render(self, session, logger, service, instance, max_clients, default,
               **arguments):
        dbservice = Service.get_unique(session, name=service, compel=True)
        dbsi = ServiceInstance.get_unique(session,
                                          service=dbservice,
                                          name=instance,
                                          compel=True)
        if default:
            dbsi.max_clients = None
        elif max_clients is not None:
            dbsi.max_clients = max_clients
        else:
            raise ArgumentError("Missing --max_clients or --default argument "
                                "to update service %s instance %s." %
                                (dbservice.name, dbsi.name))
        session.add(dbsi)
        session.flush()

        plenary = Plenary.get_plenary(dbsi, logger=logger)
        plenary.write()

        return
Example #13
    def render(self, session, service, instance, archetype, personality,
               networkip, **arguments):
        dbservice = Service.get_unique(session, service, compel=True)
        dbinstance = ServiceInstance.get_unique(session, service=dbservice,
                                                name=instance, compel=True)
        dblocation = get_location(session, **arguments)

        if networkip:
            dbnet_env = NetworkEnvironment.get_unique_or_default(session)
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
        else:
            dbnetwork = None

        if personality:
            if not archetype:
                # Can't get here with the standard aq client.
                raise ArgumentError("Specifying --personality requires you to "
                                    "also specify --archetype.")
            dbarchetype = Archetype.get_unique(session, archetype,
                                               compel=True)
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = session.query(PersonalityServiceMap)
            q = q.filter_by(personality=dbpersonality)
        else:
            q = session.query(ServiceMap)

        q = q.filter_by(location=dblocation, service_instance=dbinstance,
                        network=dbnetwork)
        dbmap = q.first()

        if dbmap:
            session.delete(dbmap)
        session.flush()
        return
Example #14
    def cache_service_maps(self, dbservices):
        self.service_maps = ServiceInstance.get_mapped_instance_cache(
            self.personality, self.location, dbservices, self.network)
Example #15
    def render(
            self,
            session,
            logger,
            # search_cluster
            archetype,
            cluster_type,
            personality,
            domain,
            sandbox,
            branch,
            buildstatus,
            allowed_archetype,
            allowed_personality,
            down_hosts_threshold,
            down_maint_threshold,
            max_members,
            member_archetype,
            member_hostname,
            member_personality,
            capacity_override,
            cluster,
            esx_guest,
            instance,
            esx_metacluster,
            service,
            share,
            esx_share,
            esx_switch,
            esx_virtual_machine,
            fullinfo,
            style,
            **arguments):

        if esx_share:
            self.deprecated_option("esx_share",
                                   "Please use --share instead.",
                                   logger=logger,
                                   **arguments)
            share = esx_share

        if cluster_type == 'esx':
            cls = EsxCluster
        else:
            cls = Cluster

        # Don't load full objects if we only want to show their name
        if fullinfo or style != 'raw':
            q = session.query(cls)
        else:
            q = session.query(cls.name)

        # The ORM automatically de-duplicates the result if we query full
        # objects, but not when we query just the names. Tell the DB to do so.
        q = q.distinct()

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = ClusterLifecycle.get_unique(session,
                                                        buildstatus,
                                                        compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if cluster_type:
            q = q.filter_by(cluster_type=cluster_type)

        # Go through the arguments and make special dicts for each
        # specific set of location arguments that are stripped of the
        # given prefix.
        location_args = {'cluster_': {}, 'member_': {}}
        for prefix in location_args.keys():
            for (k, v) in arguments.items():
                if k.startswith(prefix):
                    # arguments['cluster_building'] = 'dd'
                    # becomes
                    # location_args['cluster_']['building'] = 'dd'
                    location_args[prefix][k.replace(prefix, '')] = v

        dblocation = get_location(session, **location_args['cluster_'])
        if dblocation:
            if location_args['cluster_']['exact_location']:
                q = q.filter_by(location_constraint=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Cluster.location_constraint_id.in_(childids))
        dblocation = get_location(session, **location_args['member_'])
        if dblocation:
            q = q.join('_hosts', 'host', 'machine')
            if location_args['member_']['exact_location']:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))
            q = q.reset_joinpoint()

        # esx stuff
        if cluster:
            q = q.filter_by(name=cluster)
        if esx_metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   esx_metacluster,
                                                   compel=True)
            q = q.join('_metacluster')
            q = q.filter_by(metacluster=dbmetacluster)
            q = q.reset_joinpoint()
        if esx_virtual_machine:
            dbvm = Machine.get_unique(session,
                                      esx_virtual_machine,
                                      compel=True)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine)
            q = q.filter_by(machine=dbvm)
            q = q.reset_joinpoint()
        if esx_guest:
            dbguest = hostname_to_host(session, esx_guest)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine, Machine)
            q = q.filter_by(host=dbguest)
            q = q.reset_joinpoint()
        if capacity_override:
            q = q.filter(EsxCluster.memory_capacity != None)
        if esx_switch:
            dbswitch = Switch.get_unique(session, esx_switch, compel=True)
            q = q.filter_by(switch=dbswitch)

        if service:
            dbservice = Service.get_unique(session, name=service, compel=True)
            if instance:
                dbsi = ServiceInstance.get_unique(session,
                                                  name=instance,
                                                  service=dbservice,
                                                  compel=True)
                q = q.filter(Cluster.service_bindings.contains(dbsi))
            else:
                q = q.join('service_bindings')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('service_bindings')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if share:
            # Perform sanity check on the share name
            q2 = session.query(Share)
            q2 = q2.filter_by(name=share)
            if not q2.first():
                raise NotFoundException("Share %s not found." % share)

            CR = aliased(ClusterResource)
            S1 = aliased(Share)
            S2 = aliased(Share)
            RG = aliased(ResourceGroup)
            BR = aliased(BundleResource)
            q = q.join(CR)
            q = q.outerjoin((S1, S1.holder_id == CR.id))
            q = q.outerjoin((RG, RG.holder_id == CR.id),
                            (BR, BR.resourcegroup_id == RG.id),
                            (S2, S2.holder_id == BR.id))
            q = q.filter(or_(S1.name == share, S2.name == share))
            q = q.reset_joinpoint()

        if max_members:
            q = q.filter_by(max_hosts=max_members)

        if down_hosts_threshold:
            (pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
            q = q.filter_by(down_hosts_percent=pct)
            q = q.filter_by(down_hosts_threshold=dht)

        if down_maint_threshold:
            (pct, dmt) = Cluster.parse_threshold(down_maint_threshold)
            q = q.filter_by(down_maint_percent=pct)
            q = q.filter_by(down_maint_threshold=dmt)

        if allowed_archetype:
            # Added to the searches as appropriate below.
            dbaa = Archetype.get_unique(session,
                                        allowed_archetype,
                                        compel=True)
        if allowed_personality and allowed_archetype:
            dbap = Personality.get_unique(session,
                                          archetype=dbaa,
                                          name=allowed_personality,
                                          compel=True)
            q = q.filter(Cluster.allowed_personalities.contains(dbap))
        elif allowed_personality:
            q = q.join('allowed_personalities')
            q = q.filter_by(name=allowed_personality)
            q = q.reset_joinpoint()
        elif allowed_archetype:
            q = q.join('allowed_personalities')
            q = q.filter_by(archetype=dbaa)
            q = q.reset_joinpoint()

        if member_hostname:
            dbhost = hostname_to_host(session, member_hostname)
            q = q.join('_hosts')
            q = q.filter_by(host=dbhost)
            q = q.reset_joinpoint()

        if member_archetype:
            # Added to the searches as appropriate below.
            dbma = Archetype.get_unique(session, member_archetype, compel=True)
        if member_personality and member_archetype:
            q = q.join('_hosts', 'host')
            dbmp = Personality.get_unique(session,
                                          archetype=dbma,
                                          name=member_personality,
                                          compel=True)
            q = q.filter_by(personality=dbmp)
            q = q.reset_joinpoint()
        elif member_personality:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(name=member_personality)
            q = q.reset_joinpoint()
        elif member_archetype:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(archetype=dbma)
            q = q.reset_joinpoint()

        if cluster_type == 'esx':
            q = q.order_by(EsxCluster.name)
        else:
            q = q.order_by(Cluster.name)

        if fullinfo:
            return q.all()
        return SimpleClusterList(q.all())
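A standalone sketch of the prefix-stripping that the location_args comment above describes, with made-up argument names; slicing is used here instead of str.replace so that only the leading prefix is removed:

arguments = {'cluster_building': 'dd', 'member_rack': 'np13', 'style': 'raw'}
location_args = {'cluster_': {}, 'member_': {}}
for prefix in location_args:
    for k, v in arguments.items():
        if k.startswith(prefix):
            location_args[prefix][k[len(prefix):]] = v
# location_args == {'cluster_': {'building': 'dd'}, 'member_': {'rack': 'np13'}}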
Example #17
    def render(self, session, logger,
               # search_cluster
               archetype, cluster_type, personality,
               domain, sandbox, branch, buildstatus,
               allowed_archetype, allowed_personality,
               down_hosts_threshold, down_maint_threshold, max_members,
               member_archetype, member_hostname, member_personality,
               capacity_override, cluster, esx_guest, instance,
               esx_metacluster, service, share, esx_share,
               esx_switch, esx_virtual_machine,
               fullinfo, style, **arguments):

        if esx_share:
            self.deprecated_option("esx_share", "Please use --share instead.",
                                   logger=logger, **arguments)
            share = esx_share

        if cluster_type == 'esx':
            cls = EsxCluster
        else:
            cls = Cluster

        # Don't load full objects if we only want to show their name
        if fullinfo or style != 'raw':
            q = session.query(cls)
        else:
            q = session.query(cls.name)

        # The ORM automatically de-duplicates the result if we query full
        # objects, but not when we query just the names. Tell the DB to do so.
        q = q.distinct()

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                                        compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if cluster_type:
            q = q.filter_by(cluster_type=cluster_type)

        # Go through the arguments and make special dicts for each
        # specific set of location arguments that are stripped of the
        # given prefix.
        location_args = {'cluster_': {}, 'member_': {}}
        for prefix in location_args.keys():
            for (k, v) in arguments.items():
                if k.startswith(prefix):
                    # arguments['cluster_building'] = 'dd'
                    # becomes
                    # location_args['cluster_']['building'] = 'dd'
                    location_args[prefix][k.replace(prefix, '')] = v

        dblocation = get_location(session, **location_args['cluster_'])
        if dblocation:
            if location_args['cluster_']['exact_location']:
                q = q.filter_by(location_constraint=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Cluster.location_constraint_id.in_(childids))
        dblocation = get_location(session, **location_args['member_'])
        if dblocation:
            q = q.join('_hosts', 'host', 'machine')
            if location_args['member_']['exact_location']:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))
            q = q.reset_joinpoint()

        # esx stuff
        if cluster:
            q = q.filter_by(name=cluster)
        if esx_metacluster:
            dbmetacluster = MetaCluster.get_unique(session, esx_metacluster,
                                                   compel=True)
            q = q.join('_metacluster')
            q = q.filter_by(metacluster=dbmetacluster)
            q = q.reset_joinpoint()
        if esx_virtual_machine:
            dbvm = Machine.get_unique(session, esx_virtual_machine, compel=True)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine)
            q = q.filter_by(machine=dbvm)
            q = q.reset_joinpoint()
        if esx_guest:
            dbguest = hostname_to_host(session, esx_guest)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine, Machine)
            q = q.filter_by(host=dbguest)
            q = q.reset_joinpoint()
        if capacity_override:
            q = q.filter(EsxCluster.memory_capacity != None)
        if esx_switch:
            dbswitch = Switch.get_unique(session, esx_switch, compel=True)
            q = q.filter_by(switch=dbswitch)

        if service:
            dbservice = Service.get_unique(session, name=service, compel=True)
            if instance:
                dbsi = ServiceInstance.get_unique(session, name=instance,
                                                  service=dbservice,
                                                  compel=True)
                q = q.filter(Cluster.service_bindings.contains(dbsi))
            else:
                q = q.join('service_bindings')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('service_bindings')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if share:
            # Perform sanity check on the share name
            q2 = session.query(Share)
            q2 = q2.filter_by(name=share)
            if not q2.first():
                raise NotFoundException("Share %s not found." % share)

            CR = aliased(ClusterResource)
            S1 = aliased(Share)
            S2 = aliased(Share)
            RG = aliased(ResourceGroup)
            BR = aliased(BundleResource)
            q = q.join(CR)
            q = q.outerjoin((S1, S1.holder_id == CR.id))
            q = q.outerjoin((RG, RG.holder_id == CR.id),
                            (BR, BR.resourcegroup_id == RG.id),
                            (S2, S2.holder_id == BR.id))
            q = q.filter(or_(S1.name == share, S2.name == share))
            q = q.reset_joinpoint()

        if max_members:
            q = q.filter_by(max_hosts=max_members)

        if down_hosts_threshold:
            (pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
            q = q.filter_by(down_hosts_percent=pct)
            q = q.filter_by(down_hosts_threshold=dht)

        if down_maint_threshold:
            (pct, dmt) = Cluster.parse_threshold(down_maint_threshold)
            q = q.filter_by(down_maint_percent=pct)
            q = q.filter_by(down_maint_threshold=dmt)

        if allowed_archetype:
            # Added to the searches as appropriate below.
            dbaa = Archetype.get_unique(session, allowed_archetype,
                                        compel=True)
        if allowed_personality and allowed_archetype:
            dbap = Personality.get_unique(session, archetype=dbaa,
                                          name=allowed_personality,
                                          compel=True)
            q = q.filter(Cluster.allowed_personalities.contains(dbap))
        elif allowed_personality:
            q = q.join('allowed_personalities')
            q = q.filter_by(name=allowed_personality)
            q = q.reset_joinpoint()
        elif allowed_archetype:
            q = q.join('allowed_personalities')
            q = q.filter_by(archetype=dbaa)
            q = q.reset_joinpoint()

        if member_hostname:
            dbhost = hostname_to_host(session, member_hostname)
            q = q.join('_hosts')
            q = q.filter_by(host=dbhost)
            q = q.reset_joinpoint()

        if member_archetype:
            # Added to the searches as appropriate below.
            dbma = Archetype.get_unique(session, member_archetype, compel=True)
        if member_personality and member_archetype:
            q = q.join('_hosts', 'host')
            dbmp = Personality.get_unique(session, archetype=dbma,
                                          name=member_personality, compel=True)
            q = q.filter_by(personality=dbmp)
            q = q.reset_joinpoint()
        elif member_personality:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(name=member_personality)
            q = q.reset_joinpoint()
        elif member_archetype:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(archetype=dbma)
            q = q.reset_joinpoint()

        if cluster_type == 'esx':
            q = q.order_by(EsxCluster.name)
        else:
            q = q.order_by(Cluster.name)

        if fullinfo:
            return q.all()
        return SimpleClusterList(q.all())
Example #18
    def render(self, session, logger, hostname, machine, archetype,
               buildstatus, personality, host_environment, osname, osversion,
               service, instance, model, machine_type, vendor, serial, cluster,
               guest_on_cluster, guest_on_share, member_cluster_share,
               domain, sandbox, branch, sandbox_owner,
               dns_domain, shortname, mac, ip, networkip, network_environment,
               exact_location, server_of_service, server_of_instance, grn,
               eon_id, fullinfo, style, **arguments):
        dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                             network_environment)

        q = session.query(Host)

        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            q = q.filter_by(hardware_entity=dbmachine)

        # Add the machine definition and the primary name. Use aliases to make
        # sure the end result will be ordered by primary name.
        PriDns = aliased(DnsRecord)
        PriFqdn = aliased(Fqdn)
        PriDomain = aliased(DnsDomain)
        q = q.join(HardwareEntity,
                   (PriDns, PriDns.id == Machine.primary_name_id),
                   (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
                   (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
        q = q.order_by(PriFqdn.name, PriDomain.name)
        q = q.options(contains_eager('hardware_entity'),
                      contains_eager('hardware_entity.primary_name', alias=PriDns),
                      contains_eager('hardware_entity.primary_name.fqdn', alias=PriFqdn),
                      contains_eager('hardware_entity.primary_name.fqdn.dns_domain',
                                     alias=PriDomain))
        q = q.reset_joinpoint()

        # Hardware-specific filters
        dblocation = get_location(session, **arguments)
        if dblocation:
            if exact_location:
                q = q.filter(HardwareEntity.location == dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(HardwareEntity.location_id.in_(childids))

        if model or vendor or machine_type:
            subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                            model_type=machine_type,
                                            compel=True)
            q = q.filter(HardwareEntity.model_id.in_(subq))

        if serial:
            self.deprecated_option("serial",
                                   "Please use search machine --serial instead.",
                                   logger=logger, **arguments)
            q = q.filter(HardwareEntity.serial_no == serial)

        # DNS IP address related filters
        if mac or ip or networkip or hostname or dns_domain or shortname:
            # Inner joins are cheaper than outer joins, so make some effort to
            # use inner joins when possible
            if mac or ip or networkip:
                q = q.join(Interface)
            else:
                q = q.outerjoin(Interface)
            if ip or networkip:
                q = q.join(AddressAssignment, Network, from_joinpoint=True)
            else:
                q = q.outerjoin(AddressAssignment, Network, from_joinpoint=True)

            if mac:
                self.deprecated_option("mac", "Please use search machine "
                                       "--mac instead.", logger=logger,
                                       **arguments)
                q = q.filter(Interface.mac == mac)
            if ip:
                q = q.filter(AddressAssignment.ip == ip)
                q = q.filter(Network.network_environment == dbnet_env)
            if networkip:
                dbnetwork = get_network_byip(session, networkip, dbnet_env)
                q = q.filter(AddressAssignment.network == dbnetwork)

            dbdns_domain = None
            if hostname:
                (shortname, dbdns_domain) = parse_fqdn(session, hostname)
            if dns_domain:
                dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)

            if shortname or dbdns_domain:
                ARecAlias = aliased(ARecord)
                ARecFqdn = aliased(Fqdn)

                q = q.outerjoin((ARecAlias,
                                 and_(ARecAlias.ip == AddressAssignment.ip,
                                      ARecAlias.network_id == AddressAssignment.network_id)),
                                (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
                if shortname:
                    q = q.filter(or_(ARecFqdn.name == shortname,
                                     PriFqdn.name == shortname))
                if dbdns_domain:
                    q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                     PriFqdn.dns_domain == dbdns_domain))
            q = q.reset_joinpoint()

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if sandbox_owner:
            dbauthor = get_user_principal(session, sandbox_owner)

        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        # Just do the lookup here, filtering will happen later
        if archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        else:
            dbarchetype = None

        if archetype or personality or host_environment:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias)

            if archetype:
                q = q.filter_by(archetype=dbarchetype)
            if personality:
                subq = Personality.get_matching_query(session, name=personality,
                                                      archetype=dbarchetype,
                                                      compel=True)
                q = q.filter(PersAlias.id.in_(subq))
            if host_environment:
                dbhost_env = HostEnvironment.get_instance(session,
                                                          host_environment)
                q = q.filter_by(host_environment=dbhost_env)

            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = HostLifecycle.get_instance(session, buildstatus)
            q = q.filter_by(status=dbbuildstatus)

        if osname or osversion:
            subq = OperatingSystem.get_matching_query(session, name=osname,
                                                      version=osversion,
                                                      archetype=dbarchetype,
                                                      compel=True)
            q = q.filter(Host.operating_system_id.in_(subq))

        if service:
            dbservice = Service.get_unique(session, service, compel=True)
            if instance:
                dbsi = ServiceInstance.get_unique(session, service=dbservice,
                                                  name=instance, compel=True)
                q = q.filter(Host.services_used.contains(dbsi))
            else:
                q = q.join('services_used')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('services_used')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if server_of_service:
            dbserver_service = Service.get_unique(session, server_of_service,
                                                  compel=True)
            if server_of_instance:
                dbsi = ServiceInstance.get_unique(session,
                                                  service=dbserver_service,
                                                  name=server_of_instance,
                                                  compel=True)
                q = q.join('services_provided')
                q = q.filter_by(service_instance=dbsi)
                q = q.reset_joinpoint()
            else:
                q = q.join('services_provided', 'service_instance')
                q = q.filter_by(service=dbserver_service)
                q = q.reset_joinpoint()
        elif server_of_instance:
            q = q.join('services_provided', 'service_instance')
            q = q.filter_by(name=server_of_instance)
            q = q.reset_joinpoint()

        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if isinstance(dbcluster, MetaCluster):
                q = q.join('_cluster', 'cluster', '_metacluster')
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()

        if guest_on_cluster:
            # TODO: this does not handle metaclusters according to Wes
            dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                           compel=True)
            q = q.join(Host.hardware_entity.of_type(Machine),
                       VirtualMachine, ClusterResource)
            q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()

        if guest_on_share:
            v2shares = session.query(Share.id).filter_by(name=guest_on_share)
            if not v2shares.count():
                raise NotFoundException("No shares found with name {0}."
                                        .format(guest_on_share))

            NasAlias = aliased(VirtualNasDisk)
            q = q.join(Host.hardware_entity.of_type(Machine),
                       Disk, (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(NasAlias.share_id.in_(v2shares.subquery()))
            q = q.reset_joinpoint()

        if member_cluster_share:
            v2shares = session.query(Share.id).filter_by(name=member_cluster_share)
            if not v2shares.count():
                raise NotFoundException("No shares found with name {0}."
                                        .format(member_cluster_share))

            NasAlias = aliased(VirtualNasDisk)
            q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                       'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(NasAlias.share_id.in_(v2shares.subquery()))
            q = q.reset_joinpoint()

        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

            persq = session.query(Personality.id)
            persq = persq.outerjoin(PersonalityGrnMap)
            persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                     PersonalityGrnMap.eon_id == dbgrn.eon_id))
            q = q.outerjoin(HostGrnMap)
            q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                             HostGrnMap.eon_id == dbgrn.eon_id,
                             Host.personality_id.in_(persq.subquery())))
            q = q.reset_joinpoint()

        if fullinfo or style != "raw":
            return q.all()
        return StringAttributeList(q.all(), "fqdn")