Example #1
 def render(self, session, logger, switch, type, clear, vlan, **arguments):
     Switch.check_type(type)
     dbswitch = Switch.get_unique(session, switch, compel=True)
     if type is not None and dbswitch.switch_type != type:
         raise ArgumentError("{0} is not a {1} switch.".format(dbswitch,
                                                               type))
     return self.poll(session, logger, [dbswitch], clear, vlan)
Example #2
    def render(self, session, logger, switch, label, model, rack, type, ip,
               interface, mac, vendor, serial, comments, **arguments):
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   machine_type='switch', compel=True)

        dblocation = get_location(session, rack=rack)

        dbdns_rec, newly_created = grab_address(session, switch, ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        if not label:
            label = dbdns_rec.fqdn.name
            try:
                Switch.check_label(label)
            except ArgumentError:
                raise ArgumentError("Could not deduce a valid hardware label "
                                    "from the switch name.  Please specify "
                                    "--label.")

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        dbswitch = Switch(label=label, switch_type=type, location=dblocation,
                          model=dbmodel, serial_no=serial, comments=comments)
        session.add(dbswitch)
        dbswitch.primary_name = dbdns_rec

        # FIXME: get default name from the model
        iftype = "oa"
        if not interface:
            interface = "xge"
            ifcomments = "Created automatically by add_switch"
        else:
            ifcomments = None
            if interface.lower().startswith("lo"):
                iftype = "loopback"

        dbinterface = get_or_create_interface(session, dbswitch,
                                              name=interface, mac=mac,
                                              interface_type=iftype,
                                              comments=ifcomments)
        dbnetwork = get_net_id_from_ip(session, ip)
        # TODO: should we call check_ip_restrictions() here?
        assign_address(dbinterface, ip, dbnetwork)

        session.flush()

        plenary = PlenarySwitch(dbswitch, logger=logger)
        with plenary.get_write_key() as key:
            plenary.stash()
            try:
                plenary.write(locked=True)
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbswitch, None)
                dsdb_runner.commit_or_rollback("Could not add switch to DSDB")
            except:
                plenary.restore_stash()
                raise

        return
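
Example #2 shows the write pattern that most of the commands below repeat: flush the session, stash the current on-disk plenary template, write the new one, push the change to DSDB, and restore the stash if anything fails so the template on disk stays consistent with the database. A minimal self-contained sketch of that stash/write/restore idea; FakePlenary and push_to_dsdb are illustrative stand-ins, not Aquilon APIs:

class FakePlenary(object):
    """Stand-in for PlenarySwitch; tracks the last written content."""

    def __init__(self):
        self.content = "old template"
        self._stash = None

    def stash(self):
        self._stash = self.content        # remember the current state

    def write(self):
        self.content = "new template"     # pretend to render and write

    def restore_stash(self):
        self.content = self._stash        # roll the file back


def write_and_commit(plenary, push_to_dsdb):
    plenary.stash()
    try:
        plenary.write()
        push_to_dsdb()                    # external side effect, may fail
    except Exception:
        plenary.restore_stash()           # keep disk and DB consistent
        raise
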
Example #3
 def render(self, session, logger, rack, type, clear, vlan, **arguments):
     dblocation = get_location(session, rack=rack)
     Switch.check_type(type)
     q = session.query(Switch)
     q = q.filter_by(location=dblocation)
     if type:
         q = q.filter_by(switch_type=type)
     switches = q.all()
     if not switches:
         raise NotFoundException("No switch found.")
     return self.poll(session, logger, switches, clear, vlan)
Example #4
 def render(self, session, logger, rack, type, clear, vlan, **arguments):
     dblocation = get_location(session, rack=rack)
     Switch.check_type(type)
     q = session.query(Switch)
     q = q.filter_by(location=dblocation)
     if type:
         q = q.filter_by(switch_type=type)
     switches = q.all()
     if not switches:
         raise NotFoundException("No switch found.")
     return self.poll(session, logger, switches, clear, vlan)
Example #5
    def render(self, session, logger, interface, switch, mac, comments,
               rename_to, **arguments):
        for arg in self.invalid_parameters:
            if arguments.get(arg) is not None:
                raise UnimplementedError(
                    "update_interface --switch cannot use "
                    "the --%s option." % arg)

        dbswitch = Switch.get_unique(session, switch, compel=True)
        dbinterface = Interface.get_unique(session,
                                           hardware_entity=dbswitch,
                                           name=interface,
                                           compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbswitch)

        if comments:
            dbinterface.comments = comments
        if mac:
            dbinterface.mac = mac
        if rename_to:
            rename_interface(session, dbinterface, rename_to)

        session.flush()

        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbswitch, oldinfo)
        dsdb_runner.commit_or_rollback("Could not update switch in DSDB")

        return
Example #6
    def render(self, session, logger, interface, switch, mac, comments,
               rename_to, **arguments):
        for arg in self.invalid_parameters:
            if arguments.get(arg) is not None:
                raise UnimplementedError("update_interface --switch cannot use "
                                         "the --%s option." % arg)

        dbswitch = Switch.get_unique(session, switch, compel=True)
        dbinterface = Interface.get_unique(session, hardware_entity=dbswitch,
                                           name=interface, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbswitch)

        if comments:
            dbinterface.comments = comments
        if mac:
            dbinterface.mac = mac
        if rename_to:
            rename_interface(session, dbinterface, rename_to)

        session.flush()

        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbswitch, oldinfo)
        dsdb_runner.commit_or_rollback("Could not update switch in DSDB")

        return
Example #7
    def render(self, session, logger, interface, switch, mac, type, comments,
               **arguments):
        if type and type not in self.valid_interface_types:
            raise ArgumentError("Interface type %s is not allowed for "
                                "switches." % type)

        if not type:
            if interface.lower().startswith("lo"):
                type = "loopback"
            else:
                type = "oa"

        for arg in self.invalid_parameters:
            if arguments.get(arg) is not None:
                raise ArgumentError("Cannot use argument --%s when adding an "
                                    "interface to a switch." % arg)

        dbswitch = Switch.get_unique(session, switch, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbswitch)

        dbinterface = get_or_create_interface(session, dbswitch,
                                              name=interface, mac=mac,
                                              interface_type=type,
                                              comments=comments, preclude=True)

        session.flush()

        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbswitch, oldinfo)
        dsdb_runner.commit_or_rollback("Could not update switch in DSDB")

        return
Example #8
    def render(self, generate, session, logger, switch, **kwargs):
        dbswitch = Switch.get_unique(session, switch, compel=True)
        plenary_info = PlenarySwitch(dbswitch, logger=logger)

        if generate:
            return plenary_info._generate_content()
        else:
            return plenary_info.read()
Example #9
    def render(self, session, logger, switch, **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        # Check and complain if the switch has any other addresses than its
        # primary address
        addrs = []
        for addr in dbswitch.all_addresses():
            if addr.ip == dbswitch.primary_ip:
                continue
            addrs.append(str(addr.ip))
        if addrs:
            raise ArgumentError("{0} still provides the following addresses, "
                                "delete them first: {1}.".format
                                (dbswitch, ", ".join(addrs)))

        dbdns_rec = dbswitch.primary_name
        ip = dbswitch.primary_ip
        old_fqdn = str(dbswitch.primary_name.fqdn)
        old_comments = dbswitch.comments
        session.delete(dbswitch)
        if dbdns_rec:
            delete_dns_record(dbdns_rec)

        session.flush()

        # Any switch ports hanging off this switch should be deleted with
        # the cascade delete of the switch.

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        # clusters connected to this switch
        plenaries = PlenaryCollection(logger=logger)

        for dbcluster in dbswitch.esx_clusters:
            plenaries.append(Plenary.get_plenary(dbcluster))

        key = CompileKey.merge([switch_plenary.get_remove_key(),
                                plenaries.get_write_key()])

        try:
            lock_queue.acquire(key)
            switch_plenary.stash()
            plenaries.write(locked=True)
            switch_plenary.remove(locked=True)

            if ip:
                dsdb_runner = DSDBRunner(logger=logger)
                # FIXME: restore interface name/MAC on rollback
                dsdb_runner.delete_host_details(old_fqdn, ip, comments=old_comments)
                dsdb_runner.commit_or_rollback("Could not remove switch from DSDB")
            return

        except:
            plenaries.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)
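
The delete path serialises template operations with an explicit lock: merge the remove key of the switch plenary with the write keys of the affected cluster plenaries, acquire, do the work, restore the stashes on any error, and always release in a finally block. CompileKey and lock_queue are Aquilon-specific; a bare sketch of the same acquire / try / except / finally shape with a threading.Lock stand-in:

import threading

lock_queue = threading.Lock()   # stand-in for Aquilon's lock_queue


def remove_with_rollback(remove, rollback):
    lock_queue.acquire()
    try:
        remove()
    except Exception:
        rollback()              # put the stashed templates back
        raise
    finally:
        lock_queue.release()    # never leave the lock held
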
Example #10
    def render(self, session, logger, switch, port, mac,
               **arguments):
        q = session.query(ObservedMac)
        if switch:
            dbswitch = Switch.get_unique(session, switch, compel=True)
            q = q.filter_by(switch=dbswitch)
        if arguments.get("port_number", None):
            self.deprecated_option("port_number", "Please use --port instead.",
                                   logger=logger, **arguments)
            port = str(arguments["port_number"])

        if port is not None:
            q = q.filter_by(port=port)
        if mac:
            q = q.filter_by(mac_address=mac)
        return q.order_by(ObservedMac.mac_address).all()
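
Examples #10 and #11 fold the deprecated --port_number option into the newer --port argument before filtering. deprecated_option itself is an Aquilon helper; the sketch below only mirrors the control flow, with resolve_port as a hypothetical name and a plain callable standing in for the logger warning:

def resolve_port(port, arguments, warn):
    # If the old option was given, warn and let it override --port.
    if arguments.get("port_number") is not None:
        warn("The --port_number option is deprecated, please use --port.")
        port = str(arguments["port_number"])
    return port


assert resolve_port(None, {"port_number": 17}, print) == "17"
assert resolve_port("3", {}, print) == "3"
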
Example #11
    def render(self, session, logger, switch, port, mac, **arguments):
        q = session.query(ObservedMac)
        if switch:
            dbswitch = Switch.get_unique(session, switch, compel=True)
            q = q.filter_by(switch=dbswitch)
        if arguments.get("port_number", None):
            self.deprecated_option("port_number",
                                   "Please use --port instead.",
                                   logger=logger,
                                   **arguments)
            port = str(arguments["port_number"])

        if port is not None:
            q = q.filter_by(port=port)
        if mac:
            q = q.filter_by(mac_address=mac)
        return q.order_by(ObservedMac.mac_address).all()
Example #12
 def render(self, session, logger, switch, discover, **arguments):
     if discover:
         options = []
     else:
         options = [subqueryload('observed_vlans'),
                    undefer('observed_vlans.creation_date'),
                    joinedload('observed_vlans.network'),
                    subqueryload('observed_macs'),
                    undefer('observed_macs.creation_date')]
     dbswitch = Switch.get_unique(session, switch, compel=True,
                                  query_options=options)
     if discover:
         results = discover_switch(session, logger, self.config, dbswitch,
                                   True)
         return StringList(results)
     else:
         return dbswitch
Example #13
    def render(self, session, switch, type, vlan, fullinfo, **arguments):
        q = search_hardware_entity_query(session,
                                         hardware_type=Switch,
                                         **arguments)
        if type:
            q = q.filter_by(switch_type=type)
        if switch:
            dbswitch = Switch.get_unique(session, switch, compel=True)
            q = q.filter_by(id=dbswitch.id)

        if vlan:
            q = q.join("observed_vlans", "vlan").filter_by(vlan_id=vlan)
            q = q.reset_joinpoint()

        # Prefer the primary name for ordering
        q = q.outerjoin(DnsRecord, (Fqdn, DnsRecord.fqdn_id == Fqdn.id),
                        DnsDomain)
        q = q.options(contains_eager('primary_name'),
                      contains_eager('primary_name.fqdn'),
                      contains_eager('primary_name.fqdn.dns_domain'))
        q = q.reset_joinpoint()
        q = q.order_by(Fqdn.name, DnsDomain.name, Switch.label)

        if fullinfo:
            q = q.options(
                joinedload('location'),
                subqueryload('interfaces'),
                joinedload('interfaces.assignments'),
                joinedload('interfaces.assignments.dns_records'),
                joinedload('interfaces.assignments.network'),
                subqueryload('observed_macs'),
                undefer('observed_macs.creation_date'),
                subqueryload('observed_vlans'),
                undefer('observed_vlans.creation_date'),
                joinedload('observed_vlans.network'),
                subqueryload('model'),
                # Switches don't have machine specs, but the formatter
                # checks for their existence anyway
                joinedload('model.machine_specs'))
            return q.all()
        return SimpleSwitchList(q.all())
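
The string-based options above ('observed_vlans', 'interfaces.assignments', ...) are the older SQLAlchemy spelling of relationship loader options: joinedload pulls a relationship in with the main SELECT via a JOIN, subqueryload issues one extra SELECT for the whole result set, and undefer forces a deferred column to load up front. A small self-contained sketch of the same idea with simplified stand-in models (not the Aquilon schema), assuming SQLAlchemy 1.4 or newer:

from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import (declarative_base, joinedload, relationship,
                            sessionmaker, subqueryload)

Base = declarative_base()


class Switch(Base):
    __tablename__ = "switch"
    id = Column(Integer, primary_key=True)
    label = Column(String)
    interfaces = relationship("Interface", back_populates="switch")


class Interface(Base):
    __tablename__ = "interface"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    switch_id = Column(Integer, ForeignKey("switch.id"))
    switch = relationship("Switch", back_populates="interfaces")


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# One JOINed query for switches plus their interfaces...
q = session.query(Switch).options(joinedload(Switch.interfaces))
# ...or one extra SELECT for all interfaces of the matched switches:
# q = session.query(Switch).options(subqueryload(Switch.interfaces))
switches = q.all()
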
Example #14
    def render(self, session, switch, type, vlan, fullinfo, **arguments):
        q = search_hardware_entity_query(session, hardware_type=Switch,
                                         **arguments)
        if type:
            q = q.filter_by(switch_type=type)
        if switch:
            dbswitch = Switch.get_unique(session, switch, compel=True)
            q = q.filter_by(id=dbswitch.id)

        if vlan:
            q = q.join("observed_vlans", "vlan").filter_by(vlan_id=vlan)
            q = q.reset_joinpoint()

        # Prefer the primary name for ordering
        q = q.outerjoin(DnsRecord, (Fqdn, DnsRecord.fqdn_id == Fqdn.id),
                        DnsDomain)
        q = q.options(contains_eager('primary_name'),
                      contains_eager('primary_name.fqdn'),
                      contains_eager('primary_name.fqdn.dns_domain'))
        q = q.reset_joinpoint()
        q = q.order_by(Fqdn.name, DnsDomain.name, Switch.label)

        if fullinfo:
            q = q.options(joinedload('location'),
                          subqueryload('interfaces'),
                          joinedload('interfaces.assignments'),
                          joinedload('interfaces.assignments.dns_records'),
                          joinedload('interfaces.assignments.network'),
                          subqueryload('observed_macs'),
                          undefer('observed_macs.creation_date'),
                          subqueryload('observed_vlans'),
                          undefer('observed_vlans.creation_date'),
                          joinedload('observed_vlans.network'),
                          subqueryload('model'),
                          # Switches don't have machine specs, but the formatter
                          # checks for their existence anyway
                          joinedload('model.machine_specs'))
            return q.all()
        return SimpleSwitchList(q.all())
Example #15
    def render(self, session, logger, cluster, archetype, personality, domain,
               sandbox, max_members, down_hosts_threshold, maint_threshold,
               buildstatus, comments, vm_to_host_ratio, switch, metacluster,
               **arguments):

        validate_basic("cluster", cluster)
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=archetype, compel=True)
        if not dbpersonality.is_cluster:
            raise ArgumentError("%s is not a cluster personality." %
                                personality)

        ctype = dbpersonality.archetype.cluster_type
        section = "archetype_" + dbpersonality.archetype.name

        if not buildstatus:
            buildstatus = "build"
        dbstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                               compel=True)

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError("Adding clusters to {0:l} is not allowed."
                                .format(dbbranch))

        dbloc = get_location(session, **arguments)
        if not dbloc:
            raise ArgumentError("Adding a cluster requires a location "
                                "constraint.")
        if not dbloc.campus:
            raise ArgumentError("{0} is not within a campus.".format(dbloc))

        if max_members is None:
            if self.config.has_option(section, "max_members_default"):
                max_members = self.config.getint(section, "max_members_default")

        Cluster.get_unique(session, cluster, preclude=True)
        # Not finding the cluster type is an internal consistency issue, so make
        # that show up in the logs by using AquilonError
        clus_type = Cluster.polymorphic_subclass(ctype, "Unknown cluster type",
                                                 error=AquilonError)

        (down_hosts_pct, dht) = Cluster.parse_threshold(down_hosts_threshold)

        kw = {'name': cluster,
              'location_constraint': dbloc,
              'personality': dbpersonality,
              'max_hosts': max_members,
              'branch': dbbranch,
              'sandbox_author': dbauthor,
              'down_hosts_threshold': dht,
              'down_hosts_percent': down_hosts_pct,
              'status': dbstatus,
              'comments': comments}

        if ctype == 'esx':
            if vm_to_host_ratio is None:
                if self.config.has_option(section, "vm_to_host_ratio"):
                    vm_to_host_ratio = self.config.get(section,
                                                       "vm_to_host_ratio")
                else:
                    vm_to_host_ratio = "1:1"
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            kw["vm_count"] = vm_count
            kw["host_count"] = host_count

        if switch and hasattr(clus_type, 'switch'):
            kw['switch'] = Switch.get_unique(session, switch, compel=True)

        if maint_threshold is not None:
            (down_hosts_pct, dht) = Cluster.parse_threshold(maint_threshold)
            kw['down_maint_threshold'] = dht
            kw['down_maint_percent'] = down_hosts_pct

        dbcluster = clus_type(**kw)

        plenaries = PlenaryCollection(logger=logger)

        if metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   metacluster,
                                                   compel=True)

            dbmetacluster.validate_membership(dbcluster)
            dbmetacluster.members.append(dbcluster)

            plenaries.append(Plenary.get_plenary(dbmetacluster))

        session.add(dbcluster)
        session.flush()
        session.refresh(dbcluster)

        plenaries.append(Plenary.get_plenary(dbcluster))

        key = plenaries.get_write_key()

        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
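
Two small helpers carry most of the argument parsing in the cluster commands: Cluster.parse_threshold turns a --down_hosts_threshold value into a (percent-flag, number) pair, and force_ratio splits --vm_to_host_ratio. Their exact semantics live in Aquilon; the sketch below is a hypothetical re-implementation under the assumption that a threshold is either a plain count ("2") or a percentage ("2%") and a ratio looks like "16:1":

def parse_threshold(threshold):
    # Hypothetical: return (is_percent, value); the real rules are in
    # Cluster.parse_threshold.
    if threshold is None:
        return (None, None)
    text = str(threshold).strip()
    if text.endswith("%"):
        return (True, int(text[:-1]))
    return (False, int(text))


def force_ratio(label, ratio):
    # Hypothetical: split "<vms>:<hosts>" into two integers, or pass None
    # through untouched, naming the offending option on bad input.
    if ratio is None:
        return (None, None)
    try:
        vms, hosts = ratio.split(":")
        return (int(vms), int(hosts))
    except ValueError:
        raise ValueError("Expected <n>:<m> for --%s, got %r" % (label, ratio))


assert parse_threshold("2") == (False, 2)
assert parse_threshold("50%") == (True, 50)
assert force_ratio("vm_to_host_ratio", "16:1") == (16, 1)
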
Example #16
    def render(self, session, logger, cluster, personality,
               max_members, fix_location, down_hosts_threshold,
               maint_threshold, comments,
               # ESX specific options
               switch, memory_capacity, clear_overrides, vm_to_host_ratio,
               **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.cluster_type == 'meta':
            raise ArgumentError("%s should not be a metacluster."
                                % format(dbcluster))

        cluster_updated = False
        remove_plenaries = PlenaryCollection(logger=logger)
        plenaries = PlenaryCollection(logger=logger)

        (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                             vm_to_host_ratio)
        if down_hosts_threshold is not None:
            (perc, dht) = Cluster.parse_threshold(down_hosts_threshold)
            dbcluster.down_hosts_threshold = dht
            dbcluster.down_hosts_percent = perc
            cluster_updated = True

        if dbcluster.cluster_type == "esx":
            if vm_count is not None or down_hosts_threshold is not None:
                if vm_count is None:
                    vm_count = dbcluster.vm_count
                    host_count = dbcluster.host_count

                dht = dbcluster.down_hosts_threshold
                perc = dbcluster.down_hosts_percent

                dbcluster.validate(vm_part=vm_count, host_part=host_count,
                                   down_hosts_threshold=dht,
                                   down_hosts_percent=perc)

                dbcluster.vm_count = vm_count
                dbcluster.host_count = host_count
                cluster_updated = True

        if switch is not None:
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbswitch = Switch.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbswitch))
            else:
                dbswitch = None
            dbcluster.switch = dbswitch
            cluster_updated = True

        if memory_capacity is not None:
            dbcluster.memory_capacity = memory_capacity
            dbcluster.validate()
            cluster_updated = True

        if clear_overrides is not None:
            dbcluster.memory_capacity = None
            dbcluster.validate()
            cluster_updated = True

        location_updated = update_cluster_location(session, logger, dbcluster,
                                                   fix_location, plenaries,
                                                   remove_plenaries,
                                                   **arguments)

        if location_updated:
            cluster_updated = True

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session, name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality
            cluster_updated = True

        if max_members is not None:
            current_members = len(dbcluster.hosts)
            if max_members < current_members:
                raise ArgumentError("%s has %d hosts bound, which exceeds "
                                    "the requested limit %d." %
                                    (format(dbcluster), current_members,
                                     max_members))
            dbcluster.max_hosts = max_members
            cluster_updated = True

        if comments is not None:
            dbcluster.comments = comments
            cluster_updated = True

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)
            cluster_updated = True

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        key = CompileKey.merge([plenaries.get_write_key(),
                                remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #17
    def render(self, session, logger, cluster, archetype, personality, domain,
               sandbox, max_members, down_hosts_threshold, maint_threshold,
               buildstatus, comments, vm_to_host_ratio, switch, metacluster,
               **arguments):

        validate_basic("cluster", cluster)
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=archetype,
                                               compel=True)
        if not dbpersonality.is_cluster:
            raise ArgumentError("%s is not a cluster personality." %
                                personality)

        ctype = dbpersonality.archetype.cluster_type
        section = "archetype_" + dbpersonality.archetype.name

        if not buildstatus:
            buildstatus = "build"
        dbstatus = ClusterLifecycle.get_unique(session,
                                               buildstatus,
                                               compel=True)

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

        if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
            raise ArgumentError(
                "Adding clusters to {0:l} is not allowed.".format(dbbranch))

        dbloc = get_location(session, **arguments)
        if not dbloc:
            raise ArgumentError("Adding a cluster requires a location "
                                "constraint.")
        if not dbloc.campus:
            raise ArgumentError("{0} is not within a campus.".format(dbloc))

        if max_members is None:
            if self.config.has_option(section, "max_members_default"):
                max_members = self.config.getint(section,
                                                 "max_members_default")

        Cluster.get_unique(session, cluster, preclude=True)
        # Not finding the cluster type is an internal consistency issue, so make
        # that show up in the logs by using AquilonError
        clus_type = Cluster.polymorphic_subclass(ctype,
                                                 "Unknown cluster type",
                                                 error=AquilonError)

        (down_hosts_pct, dht) = Cluster.parse_threshold(down_hosts_threshold)

        kw = {
            'name': cluster,
            'location_constraint': dbloc,
            'personality': dbpersonality,
            'max_hosts': max_members,
            'branch': dbbranch,
            'sandbox_author': dbauthor,
            'down_hosts_threshold': dht,
            'down_hosts_percent': down_hosts_pct,
            'status': dbstatus,
            'comments': comments
        }

        if ctype == 'esx':
            if vm_to_host_ratio is None:
                if self.config.has_option(section, "vm_to_host_ratio"):
                    vm_to_host_ratio = self.config.get(section,
                                                       "vm_to_host_ratio")
                else:
                    vm_to_host_ratio = "1:1"
            (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                                 vm_to_host_ratio)
            kw["vm_count"] = vm_count
            kw["host_count"] = host_count

        if switch and hasattr(clus_type, 'switch'):
            kw['switch'] = Switch.get_unique(session, switch, compel=True)

        if maint_threshold is not None:
            (down_hosts_pct, dht) = Cluster.parse_threshold(maint_threshold)
            kw['down_maint_threshold'] = dht
            kw['down_maint_percent'] = down_hosts_pct

        dbcluster = clus_type(**kw)

        plenaries = PlenaryCollection(logger=logger)

        if metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   metacluster,
                                                   compel=True)

            dbmetacluster.validate_membership(dbcluster)
            dbmetacluster.members.append(dbcluster)

            plenaries.append(Plenary.get_plenary(dbmetacluster))

        session.add(dbcluster)
        session.flush()
        session.refresh(dbcluster)

        plenaries.append(Plenary.get_plenary(dbcluster))

        key = plenaries.get_write_key()

        try:
            lock_queue.acquire(key)
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)
Example #18
    def render(self, session, logger, switch, model, rack, type, ip, vendor,
               serial, rename_to, discovered_macs, clear, discover, comments, **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbswitch)

        if discover:
            discover_switch(session, logger, self.config, dbswitch, False)

        if vendor and not model:
            model = dbswitch.model.name
        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       machine_type='switch', compel=True)
            dbswitch.model = dbmodel

        dblocation = get_location(session, rack=rack)
        if dblocation:
            dbswitch.location = dblocation

        if serial is not None:
            dbswitch.serial_no = serial

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        if type:
            Switch.check_type(type)
            dbswitch.switch_type = type

        if ip:
            update_primary_ip(session, dbswitch, ip)

        if comments is not None:
            dbswitch.comments = comments

        remove_plenary = None
        if rename_to:
            # Handling alias renaming would not be difficult in AQDB, but the
            # DSDB synchronization would be painful, so don't do that for now.
            # In theory we should check all configured IP addresses for aliases,
            # but this is the most common case
            if dbswitch.primary_name and dbswitch.primary_name.fqdn.aliases:
                raise ArgumentError("The switch has aliases and it cannot be "
                                    "renamed. Please remove all aliases first.")
            remove_plenary = Plenary.get_plenary(dbswitch, logger=logger)
            rename_hardware(session, dbswitch, rename_to)

        if clear:
            session.query(ObservedMac).filter_by(switch=dbswitch).delete()

        if discovered_macs:
            now = datetime.now()
            for (macaddr, port) in discovered_macs:
                update_or_create_observed_mac(session, dbswitch, port, macaddr, now)

        session.flush()

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        key = switch_plenary.get_write_key()
        if remove_plenary:
            key = CompileKey.merge([key, remove_plenary.get_remove_key()])
        try:
            lock_queue.acquire(key)
            if remove_plenary:
                remove_plenary.stash()
                remove_plenary.remove(locked=True)
            switch_plenary.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbswitch, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update switch in DSDB")
        except:
            if remove_plenary:
                remove_plenary.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #19
    def render(self, session, logger,
               # search_cluster
               archetype, cluster_type, personality,
               domain, sandbox, branch, buildstatus,
               allowed_archetype, allowed_personality,
               down_hosts_threshold, down_maint_threshold, max_members,
               member_archetype, member_hostname, member_personality,
               capacity_override, cluster, esx_guest, instance,
               esx_metacluster, service, share, esx_share,
               esx_switch, esx_virtual_machine,
               fullinfo, style, **arguments):

        if esx_share:
            self.deprecated_option("esx_share", "Please use --share instead.",
                                   logger=logger, **arguments)
            share = esx_share

        if cluster_type == 'esx':
            cls = EsxCluster
        else:
            cls = Cluster

        # Don't load full objects if we only want to show their name
        if fullinfo or style != 'raw':
            q = session.query(cls)
        else:
            q = session.query(cls.name)

        # The ORM automatically de-duplicates the result if we query full
        # objects, but not when we query just the names. Tell the DB to do so.
        q = q.distinct()

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                                        compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if cluster_type:
            q = q.filter_by(cluster_type=cluster_type)

        # Go through the arguments and make special dicts for each
        # specific set of location arguments that are stripped of the
        # given prefix.
        location_args = {'cluster_': {}, 'member_': {}}
        for prefix in location_args.keys():
            for (k, v) in arguments.items():
                if k.startswith(prefix):
                    # arguments['cluster_building'] = 'dd'
                    # becomes
                    # location_args['cluster_']['building'] = 'dd'
                    location_args[prefix][k.replace(prefix, '')] = v

        dblocation = get_location(session, **location_args['cluster_'])
        if dblocation:
            if location_args['cluster_']['exact_location']:
                q = q.filter_by(location_constraint=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Cluster.location_constraint_id.in_(childids))
        dblocation = get_location(session, **location_args['member_'])
        if dblocation:
            q = q.join('_hosts', 'host', 'machine')
            if location_args['member_']['exact_location']:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))
            q = q.reset_joinpoint()

        # esx stuff
        if cluster:
            q = q.filter_by(name=cluster)
        if esx_metacluster:
            dbmetacluster = MetaCluster.get_unique(session, esx_metacluster,
                                                   compel=True)
            q = q.join('_metacluster')
            q = q.filter_by(metacluster=dbmetacluster)
            q = q.reset_joinpoint()
        if esx_virtual_machine:
            dbvm = Machine.get_unique(session, esx_virtual_machine, compel=True)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine)
            q = q.filter_by(machine=dbvm)
            q = q.reset_joinpoint()
        if esx_guest:
            dbguest = hostname_to_host(session, esx_guest)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine, Machine)
            q = q.filter_by(host=dbguest)
            q = q.reset_joinpoint()
        if capacity_override:
            q = q.filter(EsxCluster.memory_capacity != None)
        if esx_switch:
            dbswitch = Switch.get_unique(session, esx_switch, compel=True)
            q = q.filter_by(switch=dbswitch)

        if service:
            dbservice = Service.get_unique(session, name=service, compel=True)
            if instance:
                dbsi = ServiceInstance.get_unique(session, name=instance,
                                                  service=dbservice,
                                                  compel=True)
                q = q.filter(Cluster.service_bindings.contains(dbsi))
            else:
                q = q.join('service_bindings')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('service_bindings')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if share:
            # Perform sanity check on the share name
            q2 = session.query(Share)
            q2 = q2.filter_by(name=share)
            if not q2.first():
                raise NotFoundException("Share %s not found." % share)

            CR = aliased(ClusterResource)
            S1 = aliased(Share)
            S2 = aliased(Share)
            RG = aliased(ResourceGroup)
            BR = aliased(BundleResource)
            q = q.join(CR)
            q = q.outerjoin((S1, S1.holder_id == CR.id))
            q = q.outerjoin((RG, RG.holder_id == CR.id),
                            (BR, BR.resourcegroup_id == RG.id),
                            (S2, S2.holder_id == BR.id))
            q = q.filter(or_(S1.name == share, S2.name == share))
            q = q.reset_joinpoint()

        if max_members:
            q = q.filter_by(max_hosts=max_members)

        if down_hosts_threshold:
            (pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
            q = q.filter_by(down_hosts_percent=pct)
            q = q.filter_by(down_hosts_threshold=dht)

        if down_maint_threshold:
            (pct, dmt) = Cluster.parse_threshold(down_maint_threshold)
            q = q.filter_by(down_maint_percent=pct)
            q = q.filter_by(down_maint_threshold=dmt)

        if allowed_archetype:
            # Added to the searches as appropriate below.
            dbaa = Archetype.get_unique(session, allowed_archetype,
                                        compel=True)
        if allowed_personality and allowed_archetype:
            dbap = Personality.get_unique(session, archetype=dbaa,
                                          name=allowed_personality,
                                          compel=True)
            q = q.filter(Cluster.allowed_personalities.contains(dbap))
        elif allowed_personality:
            q = q.join('allowed_personalities')
            q = q.filter_by(name=allowed_personality)
            q = q.reset_joinpoint()
        elif allowed_archetype:
            q = q.join('allowed_personalities')
            q = q.filter_by(archetype=dbaa)
            q = q.reset_joinpoint()

        if member_hostname:
            dbhost = hostname_to_host(session, member_hostname)
            q = q.join('_hosts')
            q = q.filter_by(host=dbhost)
            q = q.reset_joinpoint()

        if member_archetype:
            # Added to the searches as appropriate below.
            dbma = Archetype.get_unique(session, member_archetype, compel=True)
        if member_personality and member_archetype:
            q = q.join('_hosts', 'host')
            dbmp = Personality.get_unique(session, archetype=dbma,
                                          name=member_personality, compel=True)
            q = q.filter_by(personality=dbmp)
            q = q.reset_joinpoint()
        elif member_personality:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(name=member_personality)
            q = q.reset_joinpoint()
        elif member_archetype:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(archetype=dbma)
            q = q.reset_joinpoint()

        if cluster_type == 'esx':
            q = q.order_by(EsxCluster.name)
        else:
            q = q.order_by(Cluster.name)

        if fullinfo:
            return q.all()
        return SimpleClusterList(q.all())
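
The location handling above sorts keyword arguments into per-prefix dictionaries (cluster_building='dd' becomes location_args['cluster_']['building'] = 'dd') so the same get_location helper can serve both the cluster and the member constraints. A stand-alone sketch of that transformation (it slices the prefix off instead of using str.replace, which would also drop the prefix if it recurred later in the name):

def split_location_args(**arguments):
    location_args = {'cluster_': {}, 'member_': {}}
    for prefix in location_args:
        for key, value in arguments.items():
            if key.startswith(prefix):
                location_args[prefix][key[len(prefix):]] = value
    return location_args


args = split_location_args(cluster_building='dd', member_rack='np13')
assert args == {'cluster_': {'building': 'dd'}, 'member_': {'rack': 'np13'}}
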
Example #20
    def render(self, session, logger, switch, model, rack, type, ip, vendor,
               serial, rename_to, discovered_macs, clear, discover, comments,
               **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbswitch)

        if discover:
            discover_switch(session, logger, self.config, dbswitch, False)

        if vendor and not model:
            model = dbswitch.model.name
        if model:
            dbmodel = Model.get_unique(session,
                                       name=model,
                                       vendor=vendor,
                                       machine_type='switch',
                                       compel=True)
            dbswitch.model = dbmodel

        dblocation = get_location(session, rack=rack)
        if dblocation:
            dbswitch.location = dblocation

        if serial is not None:
            dbswitch.serial_no = serial

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        if type:
            Switch.check_type(type)
            dbswitch.switch_type = type

        if ip:
            update_primary_ip(session, dbswitch, ip)

        if comments is not None:
            dbswitch.comments = comments

        remove_plenary = None
        if rename_to:
            # Handling alias renaming would not be difficult in AQDB, but the
            # DSDB synchronization would be painful, so don't do that for now.
            # In theory we should check all configured IP addresses for aliases,
            # but this is the most common case
            if dbswitch.primary_name and dbswitch.primary_name.fqdn.aliases:
                raise ArgumentError(
                    "The switch has aliases and it cannot be "
                    "renamed. Please remove all aliases first.")
            remove_plenary = Plenary.get_plenary(dbswitch, logger=logger)
            rename_hardware(session, dbswitch, rename_to)

        if clear:
            session.query(ObservedMac).filter_by(switch=dbswitch).delete()

        if discovered_macs:
            now = datetime.now()
            for (macaddr, port) in discovered_macs:
                update_or_create_observed_mac(session, dbswitch, port, macaddr,
                                              now)

        session.flush()

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        key = switch_plenary.get_write_key()
        if remove_plenary:
            key = CompileKey.merge([key, remove_plenary.get_remove_key()])
        try:
            lock_queue.acquire(key)
            if remove_plenary:
                remove_plenary.stash()
                remove_plenary.remove(locked=True)
            switch_plenary.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbswitch, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update switch in DSDB")
        except:
            if remove_plenary:
                remove_plenary.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #21
    def render(self, session, logger, interface, machine, switch, chassis, mac,
               user, **arguments):

        if not (machine or switch or chassis or mac):
            raise ArgumentError("Please specify at least one of --chassis, "
                                "--machine, --switch or --mac.")

        if machine:
            dbhw_ent = Machine.get_unique(session, machine, compel=True)
        elif switch:
            dbhw_ent = Switch.get_unique(session, switch, compel=True)
        elif chassis:
            dbhw_ent = Chassis.get_unique(session, chassis, compel=True)
        else:
            dbhw_ent = None

        dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                           name=interface, mac=mac, compel=True)
        if not dbhw_ent:
            dbhw_ent = dbinterface.hardware_entity

        if dbinterface.vlans:
            vlans = ", ".join([iface.name for iface in
                               dbinterface.vlans.values()])
            raise ArgumentError("{0} is the parent of the following VLAN "
                                "interfaces, delete them first: "
                                "{1}.".format(dbinterface, vlans))

        if dbinterface.slaves:
            slaves = ", ".join([iface.name for iface in dbinterface.slaves])
            raise ArgumentError("{0} is the master of the following slave "
                                "interfaces, delete them first: "
                                "{1}.".format(dbinterface, slaves))

        try:
            for addr in dbinterface.assignments:
                if addr.ip != dbhw_ent.primary_ip:
                    continue

                # Special handling: if this interface was created automatically,
                # and there is exactly one other interface with no IP address,
                # then re-assign the primary address to that interface
                if not dbinterface.mac and dbinterface.comments is not None and \
                   dbinterface.comments.startswith("Created automatically") and \
                   len(dbhw_ent.interfaces) == 2:
                    if dbinterface == dbhw_ent.interfaces[0]:
                        other = dbhw_ent.interfaces[1]
                    else:
                        other = dbhw_ent.interfaces[0]

                    if len(other.assignments) == 0:
                        assign_address(other, dbhw_ent.primary_ip,
                                       dbhw_ent.primary_name.network)
                        dbinterface.addresses.remove(dbhw_ent.primary_ip)
                        raise _Goto

                # If this is a machine, it is possible to delete the host to get rid
                # of the primary name
                if dbhw_ent.hardware_type == "machine":
                    msg = "  You should delete the host first."
                else:
                    msg = ""

                raise ArgumentError("{0} holds the primary address of the {1:cl}, "
                                    "therefore it cannot be deleted."
                                    "{2}".format(dbinterface, dbhw_ent, msg))
        except _Goto:
            pass

        addrs = ", ".join(["%s: %s" % (addr.logical_name, addr.ip) for addr in
                           dbinterface.assignments])
        if addrs:
            raise ArgumentError("{0} still has the following addresses "
                                "configured, delete them first: "
                                "{1}.".format(dbinterface, addrs))

        dbhw_ent.interfaces.remove(dbinterface)
        session.flush()

        if dbhw_ent.hardware_type == 'machine':
            plenary_info = PlenaryMachineInfo(dbhw_ent, logger=logger)
            plenary_info.write()
        return
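
The try/except _Goto construct in Example #21 jumps out of the for loop over the interface's assignments (and past the ArgumentError at the end of the loop body) from deep inside the special-case branch; Python has no goto, so a private exception class does the job. A minimal illustration of the same idiom:

class _Goto(Exception):
    """Used only for flow control, never reported to the user."""
    pass


def has_value(rows, wanted):
    try:
        for row in rows:
            for value in row:
                if value == wanted:
                    raise _Goto          # jump out of both loops at once
    except _Goto:
        return True
    return False


assert has_value([[1, 2], [3, 4]], 4) is True
assert has_value([[1, 2], [3, 4]], 9) is False
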
Example #22
    def render(
            self,
            session,
            logger,
            cluster,
            personality,
            max_members,
            fix_location,
            down_hosts_threshold,
            maint_threshold,
            comments,
            # ESX specific options
            switch,
            memory_capacity,
            clear_overrides,
            vm_to_host_ratio,
            **arguments):

        dbcluster = Cluster.get_unique(session, cluster, compel=True)

        if dbcluster.cluster_type == 'meta':
            raise ArgumentError("%s should not be a metacluster." %
                                format(dbcluster))

        cluster_updated = False
        remove_plenaries = PlenaryCollection(logger=logger)
        plenaries = PlenaryCollection(logger=logger)

        (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                             vm_to_host_ratio)
        if down_hosts_threshold is not None:
            (perc, dht) = Cluster.parse_threshold(down_hosts_threshold)
            dbcluster.down_hosts_threshold = dht
            dbcluster.down_hosts_percent = perc
            cluster_updated = True

        if dbcluster.cluster_type == "esx":
            if vm_count is not None or down_hosts_threshold is not None:
                if vm_count is None:
                    vm_count = dbcluster.vm_count
                    host_count = dbcluster.host_count

                dht = dbcluster.down_hosts_threshold
                perc = dbcluster.down_hosts_percent

                dbcluster.validate(vm_part=vm_count,
                                   host_part=host_count,
                                   down_hosts_threshold=dht,
                                   down_hosts_percent=perc)

                dbcluster.vm_count = vm_count
                dbcluster.host_count = host_count
                cluster_updated = True

        if switch is not None:
            if switch:
                # FIXME: Verify that any hosts are on the same network
                dbswitch = Switch.get_unique(session, switch, compel=True)
                plenaries.append(Plenary.get_plenary(dbswitch))
            else:
                dbswitch = None
            dbcluster.switch = dbswitch
            cluster_updated = True

        if memory_capacity is not None:
            dbcluster.memory_capacity = memory_capacity
            dbcluster.validate()
            cluster_updated = True

        if clear_overrides is not None:
            dbcluster.memory_capacity = None
            dbcluster.validate()
            cluster_updated = True

        location_updated = update_cluster_location(session, logger, dbcluster,
                                                   fix_location, plenaries,
                                                   remove_plenaries,
                                                   **arguments)

        if location_updated:
            cluster_updated = True

        if personality:
            archetype = dbcluster.personality.archetype.name
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            if not dbpersonality.is_cluster:
                raise ArgumentError("Personality {0} is not a cluster " +
                                    "personality".format(dbpersonality))
            dbcluster.personality = dbpersonality
            cluster_updated = True

        if max_members is not None:
            current_members = len(dbcluster.hosts)
            if max_members < current_members:
                raise ArgumentError(
                    "%s has %d hosts bound, which exceeds "
                    "the requested limit %d." %
                    (format(dbcluster), current_members, max_members))
            dbcluster.max_hosts = max_members
            cluster_updated = True

        if comments is not None:
            dbcluster.comments = comments
            cluster_updated = True

        if down_hosts_threshold is not None:
            (dbcluster.down_hosts_percent,
             dbcluster.down_hosts_threshold) = \
                Cluster.parse_threshold(down_hosts_threshold)
            cluster_updated = True

        if maint_threshold is not None:
            (dbcluster.down_maint_percent,
             dbcluster.down_maint_threshold) = \
                Cluster.parse_threshold(maint_threshold)
            cluster_updated = True

        if not cluster_updated:
            return

        session.add(dbcluster)
        session.flush()

        plenaries.append(Plenary.get_plenary(dbcluster))
        key = CompileKey.merge(
            [plenaries.get_write_key(),
             remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)
        except:
            remove_plenaries.restore_stash()
            plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #23
    def render(
            self,
            session,
            logger,
            # search_cluster
            archetype,
            cluster_type,
            personality,
            domain,
            sandbox,
            branch,
            buildstatus,
            allowed_archetype,
            allowed_personality,
            down_hosts_threshold,
            down_maint_threshold,
            max_members,
            member_archetype,
            member_hostname,
            member_personality,
            capacity_override,
            cluster,
            esx_guest,
            instance,
            esx_metacluster,
            service,
            share,
            esx_share,
            esx_switch,
            esx_virtual_machine,
            fullinfo,
            style,
            **arguments):

        if esx_share:
            self.deprecated_option("esx_share",
                                   "Please use --share instead.",
                                   logger=logger,
                                   **arguments)
            share = esx_share

        if cluster_type == 'esx':
            cls = EsxCluster
        else:
            cls = Cluster

        # Don't load full objects if we only want to show their name
        if fullinfo or style != 'raw':
            q = session.query(cls)
        else:
            q = session.query(cls.name)

        # The ORM automatically de-duplicates the result if we query full
        # objects, but not when we query just the names. Tell the DB to do so.
        q = q.distinct()

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = ClusterLifecycle.get_unique(session,
                                                        buildstatus,
                                                        compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if cluster_type:
            q = q.filter_by(cluster_type=cluster_type)

        # Go through the arguments and make special dicts for each
        # specific set of location arguments that are stripped of the
        # given prefix.
        location_args = {'cluster_': {}, 'member_': {}}
        for prefix in location_args.keys():
            for (k, v) in arguments.items():
                if k.startswith(prefix):
                    # arguments['cluster_building'] = 'dd'
                    # becomes
                    # location_args['cluster_']['building'] = 'dd'
                    location_args[prefix][k.replace(prefix, '')] = v

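        # cluster_* arguments constrain the cluster's own location constraint;
        # member_* arguments constrain the location of the member machines.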
        dblocation = get_location(session, **location_args['cluster_'])
        if dblocation:
            if location_args['cluster_']['exact_location']:
                q = q.filter_by(location_constraint=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Cluster.location_constraint_id.in_(childids))
        dblocation = get_location(session, **location_args['member_'])
        if dblocation:
            q = q.join('_hosts', 'host', 'machine')
            if location_args['member_']['exact_location']:
                q = q.filter_by(location=dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))
            q = q.reset_joinpoint()

        # ESX-specific filters
        if cluster:
            q = q.filter_by(name=cluster)
        if esx_metacluster:
            dbmetacluster = MetaCluster.get_unique(session,
                                                   esx_metacluster,
                                                   compel=True)
            q = q.join('_metacluster')
            q = q.filter_by(metacluster=dbmetacluster)
            q = q.reset_joinpoint()
        if esx_virtual_machine:
            dbvm = Machine.get_unique(session,
                                      esx_virtual_machine,
                                      compel=True)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine)
            q = q.filter_by(machine=dbvm)
            q = q.reset_joinpoint()
        if esx_guest:
            dbguest = hostname_to_host(session, esx_guest)
            # TODO: support VMs inside resource groups?
            q = q.join(ClusterResource, VirtualMachine, Machine)
            q = q.filter_by(host=dbguest)
            q = q.reset_joinpoint()
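        # --capacity_override selects clusters that have an explicit memory
        # capacity set.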
        if capacity_override:
            q = q.filter(EsxCluster.memory_capacity != None)
        if esx_switch:
            dbswitch = Switch.get_unique(session, esx_switch, compel=True)
            q = q.filter_by(switch=dbswitch)

        if service:
            dbservice = Service.get_unique(session, name=service, compel=True)
            if instance:
                dbsi = ServiceInstance.get_unique(session,
                                                  name=instance,
                                                  service=dbservice,
                                                  compel=True)
                q = q.filter(Cluster.service_bindings.contains(dbsi))
            else:
                q = q.join('service_bindings')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('service_bindings')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if share:
            # Perform sanity check on the share name
            q2 = session.query(Share)
            q2 = q2.filter_by(name=share)
            if not q2.first():
                raise NotFoundException("Share %s not found." % share)

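            # A share may be held directly by the cluster's resource holder
            # (S1) or indirectly through a resource group (S2); match either.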
            CR = aliased(ClusterResource)
            S1 = aliased(Share)
            S2 = aliased(Share)
            RG = aliased(ResourceGroup)
            BR = aliased(BundleResource)
            q = q.join(CR)
            q = q.outerjoin((S1, S1.holder_id == CR.id))
            q = q.outerjoin((RG, RG.holder_id == CR.id),
                            (BR, BR.resourcegroup_id == RG.id),
                            (S2, S2.holder_id == BR.id))
            q = q.filter(or_(S1.name == share, S2.name == share))
            q = q.reset_joinpoint()

        if max_members:
            q = q.filter_by(max_hosts=max_members)

        if down_hosts_threshold:
            (pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
            q = q.filter_by(down_hosts_percent=pct)
            q = q.filter_by(down_hosts_threshold=dht)

        if down_maint_threshold:
            (pct, dmt) = Cluster.parse_threshold(down_maint_threshold)
            q = q.filter_by(down_maint_percent=pct)
            q = q.filter_by(down_maint_threshold=dmt)

        if allowed_archetype:
            # Added to the searches as appropriate below.
            dbaa = Archetype.get_unique(session,
                                        allowed_archetype,
                                        compel=True)
        if allowed_personality and allowed_archetype:
            dbap = Personality.get_unique(session,
                                          archetype=dbaa,
                                          name=allowed_personality,
                                          compel=True)
            q = q.filter(Cluster.allowed_personalities.contains(dbap))
        elif allowed_personality:
            q = q.join('allowed_personalities')
            q = q.filter_by(name=allowed_personality)
            q = q.reset_joinpoint()
        elif allowed_archetype:
            q = q.join('allowed_personalities')
            q = q.filter_by(archetype=dbaa)
            q = q.reset_joinpoint()

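        # Member filters join through the cluster membership to the hosts
        # themselves.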
        if member_hostname:
            dbhost = hostname_to_host(session, member_hostname)
            q = q.join('_hosts')
            q = q.filter_by(host=dbhost)
            q = q.reset_joinpoint()

        if member_archetype:
            # Added to the searches as appropriate below.
            dbma = Archetype.get_unique(session, member_archetype, compel=True)
        if member_personality and member_archetype:
            q = q.join('_hosts', 'host')
            dbmp = Personality.get_unique(session,
                                          archetype=dbma,
                                          name=member_personality,
                                          compel=True)
            q = q.filter_by(personality=dbmp)
            q = q.reset_joinpoint()
        elif member_personality:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(name=member_personality)
            q = q.reset_joinpoint()
        elif member_archetype:
            q = q.join('_hosts', 'host', 'personality')
            q = q.filter_by(archetype=dbma)
            q = q.reset_joinpoint()

        if cluster_type == 'esx':
            q = q.order_by(EsxCluster.name)
        else:
            q = q.order_by(Cluster.name)

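        # With --fullinfo return the ORM objects directly; otherwise wrap the
        # results for name-only display.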
        if fullinfo:
            return q.all()
        return SimpleClusterList(q.all())