def render(self, session, logger, service, need_client_list, comments,
           **arguments):
    """Create a new service, refusing to overwrite an existing one."""
    # preclude=True raises if a service with this name already exists.
    Service.get_unique(session, service, preclude=True)

    dbsvc = Service(name=service, comments=comments,
                    need_client_list=need_client_list)
    session.add(dbsvc)

    plenary_coll = PlenaryCollection(logger=logger)
    plenary_coll.append(Plenary.get_plenary(dbsvc))

    session.flush()
    plenary_coll.write()

    return
def render(self, session, logger, hostname, service, instance, **arguments):
    """Bind a host as a server of the given service instance."""
    dbhost = hostname_to_host(session, hostname)
    dbsvc = Service.get_unique(session, service, compel=True)
    dbsi = get_service_instance(session, dbsvc, instance)

    if dbhost in dbsi.server_hosts:
        # FIXME: This should just be a warning. There is currently no way
        # of returning output that would "do the right thing" on the
        # client but still show status 200 (OK). The right thing would
        # generally be writing to stderr for a CLI (either raw or csv),
        # and some sort of generic error page for a web client.
        raise ArgumentError("Server %s is already bound to service %s "
                            "instance %s." % (hostname, service, instance))

    # The ordering_list will manage the position for us
    dbsi.server_hosts.append(dbhost)
    session.flush()

    plenary = Plenary.get_plenary(dbsi, logger=logger)
    plenary.write()

    # XXX: Need to recompile...
    return
def render(self, session, logger, hostname, service, instance, **arguments):
    """Unbind a host from serving a service instance.

    If no instance is named, the host is removed from every instance of
    the service it currently serves.  Rewrites the host plenary and the
    plenaries of every affected instance.
    """
    dbhost = hostname_to_host(session, hostname)
    dbservice = Service.get_unique(session, service, compel=True)
    msg = "Service %s" % service
    if instance:
        # Only the named instance is affected.
        dbinstances = [get_service_instance(session, dbservice, instance)]
        msg = "Service %s, instance %s" % (service, instance)
    else:
        # All instances of this service that the host serves.
        q = session.query(ServiceInstance)
        q = q.filter_by(service=dbservice)
        q = q.join('servers')
        q = q.filter_by(host=dbhost)
        dbinstances = q.all()
    for dbinstance in dbinstances:
        if dbhost in dbinstance.server_hosts:
            # Warn (but do not fail) when removing the last server of an
            # instance that still has clients bound to it.
            if (dbinstance.client_count > 0 and
                    len(dbinstance.server_hosts) <= 1):
                logger.warning("WARNING: Server %s, is the last server "
                               "bound to %s which still has clients" %
                               (hostname, msg))
            dbinstance.server_hosts.remove(dbhost)
            # NOTE(review): sibling commands expire 'services_provided';
            # the leading underscore here looks intentional (relationship
            # attribute) — confirm against the Host model.
            session.expire(dbhost, ['_services_provided'])
    session.flush()
    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbhost))
    for dbinstance in dbinstances:
        plenaries.append(Plenary.get_plenary(dbinstance))
    plenaries.write()
    return
def render(self, session, logger, service, instance, comments, **arguments):
    """Add a service, a service instance, or both at once.

    "add_service --service foo --comments blah" attaches the comments to
    the Service; "add_service --service foo --instance bar --comments
    blah" attaches them to the new ServiceInstance instead.
    """
    dbservice = session.query(Service).filter_by(name=service).first()
    if dbservice and instance is None:
        raise ArgumentError("Service %s already exists." % dbservice.name)

    if not dbservice:
        # Comments follow the most specific object being created.
        srvcomments = None if instance else comments
        dbservice = Service(name=service, comments=srvcomments)
        session.add(dbservice)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbservice))

    if instance:
        # Fail early if the instance already exists.
        ServiceInstance.get_unique(session, service=dbservice,
                                   name=instance, preclude=True)
        dbsi = ServiceInstance(service=dbservice, name=instance,
                               comments=comments)
        session.add(dbsi)
        plenaries.append(Plenary.get_plenary(dbsi))

    session.flush()
    plenaries.write()

    return
def render(self, session, logger, service, **arguments):
    """Delete a service once nothing requires or instantiates it."""
    dbservice = Service.get_unique(session, service, compel=True)

    if dbservice.archetypes:
        names = ", ".join(archetype.name
                          for archetype in dbservice.archetypes)
        raise ArgumentError("Service %s is still required by the following "
                            "archetypes: %s." % (dbservice.name, names))
    if dbservice.personalities:
        names = ", ".join("%s (%s)" % (pers.name, pers.archetype.name)
                          for pers in dbservice.personalities)
        raise ArgumentError("Service %s is still required by the following "
                            "personalities: %s." % (dbservice.name, names))
    if dbservice.instances:
        raise ArgumentError("Service %s still has instances defined and "
                            "cannot be deleted." % dbservice.name)

    session.delete(dbservice)
    session.flush()

    # Remove the on-disk plenary after the DB row is gone.
    plenary_info = PlenaryService(dbservice, logger=logger)
    plenary_info.remove()

    return
def add_service(sess, name):
    """Get-or-create helper: return the named Service, creating it if
    missing.  Reusable add service code for other tests."""
    existing = sess.query(Service).filter_by(name=name).first()
    if existing:
        return existing
    svc = Service(name=name)
    create(sess, svc)
    return svc
def render(self, session, logger, service, instance, position, hostname,
           cluster, ip, resourcegroup, service_address, alias, **arguments):
    """Remove a server binding from one or all instances of a service.

    The binding to remove is selected either by --position (index into
    the instance's ordered server list; single instance only) or by the
    target parameters (hostname/ip/cluster/...) resolved via
    lookup_target().
    """
    dbservice = Service.get_unique(session, service, compel=True)
    if instance:
        dbsi = ServiceInstance.get_unique(session, service=dbservice,
                                          name=instance, compel=True)
        dbinstances = [dbsi]
    else:
        # --position for multiple service instances sounds dangerous, so
        # disallow it until a real usecase emerges
        if position:
            raise ArgumentError("The --position option can only be "
                                "specified for one service instance.")
        q = session.query(ServiceInstance)
        q = q.filter_by(service=dbservice)
        dbinstances = q.all()
    plenaries = PlenaryCollection(logger=logger)
    if position is not None:
        params = None
    else:
        # lookup_target may also append target plenaries to the collection.
        params = lookup_target(session, plenaries, hostname, ip, cluster,
                               resourcegroup, service_address, alias)
    for dbinstance in dbinstances:
        if position is not None:
            if position < 0 or position >= len(dbinstance.servers):
                raise ArgumentError("Invalid server position.")
            dbsrv = dbinstance.servers[position]
            if dbsrv.host:
                plenaries.append(Plenary.get_plenary(dbsrv.host))
            if dbsrv.cluster:
                plenaries.append(Plenary.get_plenary(dbsrv.cluster))
        else:
            dbsrv = find_server(dbinstance, params)
            if not dbsrv:
                # Missing binding is only an error when a specific
                # instance was requested; otherwise just skip it.
                if instance:
                    raise NotFoundException("No such server binding.")
                continue
        plenaries.append(Plenary.get_plenary(dbinstance))
        # Invalidate cached relationship state on the affected objects.
        if dbsrv.host:
            session.expire(dbsrv.host, ['services_provided'])
        if dbsrv.cluster:
            session.expire(dbsrv.cluster, ['services_provided'])
        dbinstance.servers.remove(dbsrv)
        if dbinstance.client_count > 0 and not dbinstance.servers:
            logger.warning("Warning: {0} was left without servers, "
                           "but it still has clients.".format(dbinstance))
    session.flush()
    plenaries.write()
    return
def render(self, session, logger, service, **arguments):
    """Delete a service after verifying nothing still depends on it."""
    dbsvc = Service.get_unique(session, service, compel=True)

    if dbsvc.archetypes:
        names = ", ".join(arch.name for arch in dbsvc.archetypes)
        raise ArgumentError("Service %s is still required by the following "
                            "archetypes: %s." % (dbsvc.name, names))
    if dbsvc.personalities:
        names = ", ".join("%s (%s)" % (pers.name, pers.archetype.name)
                          for pers in dbsvc.personalities)
        raise ArgumentError("Service %s is still required by the following "
                            "personalities: %s." % (dbsvc.name, names))
    if dbsvc.instances:
        raise ArgumentError("Service %s still has instances defined and "
                            "cannot be deleted." % dbsvc.name)

    session.delete(dbsvc)
    session.flush()

    # Remove the on-disk plenary once the row is gone.
    Plenary.get_plenary(dbsvc, logger=logger).remove()

    return
def render(self, session, service, server, client, **arguments):
    """Show a service, one instance, or instances filtered by host role."""
    instance = arguments.get("instance", None)
    dbserver = hostname_to_host(session, server) if server else None
    dbclient = hostname_to_host(session, client) if client else None
    dbservice = Service.get_unique(session, service, compel=True)

    if dbserver:
        # Instances of this service that the given host serves.
        q = (session.query(ServiceInstance)
             .filter_by(service=dbservice)
             .join('servers')
             .filter_by(host=dbserver)
             .order_by(ServiceInstance.name))
        return ServiceInstanceList(q.all())

    if dbclient:
        # Instances of this service that the given host uses as a client.
        instances = [si for si in dbclient.services_used
                     if si.service == dbservice]
        if instance:
            instances = [si for si in instances if si.name == instance]
        return ServiceInstanceList(instances)

    if instance:
        return get_service_instance(session, dbservice, instance)
    return dbservice
def render(self, session, service, archetype, personality, **arguments):
    """Mark a service as required by the given personality."""
    dbpers = Personality.get_unique(session, name=personality,
                                    archetype=archetype, compel=True)
    dbsvc = Service.get_unique(session, service, compel=True)
    if dbpers in dbsvc.personalities:
        raise ArgumentError("Service %s is already required by personality "
                            "%s, archetype %s." % (service, personality,
                                                   archetype))
    dbsvc.personalities.append(dbpers)
    return
def render(self, session, logger, service, instance, position, hostname,
           cluster, ip, resourcegroup, service_address, alias, **arguments):
    """Bind a server to a service instance.

    Valid binding targets:
      - a host, in which case the primary IP address will be used
      - an auxiliary IP address of a host
      - a service address of a host
      - a service address of a cluster

    Raises ArgumentError on invalid option combinations or if the
    binding already exists.
    """
    # Check for invalid combinations.
    if ip:
        if cluster or not hostname:
            # Bug fix: the concatenated literals were missing a separating
            # space, producing "requires --hostnameto be specified."
            raise ArgumentError("Using the --ip option requires --hostname "
                                "to be specified.")
    if cluster and not service_address:
        raise ArgumentError("Binding a cluster requires --service_address "
                            "to be specified.")

    dbservice = Service.get_unique(session, service, compel=True)
    dbinstance = get_service_instance(session, dbservice, instance)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbinstance))

    params = lookup_target(session, plenaries, hostname, ip, cluster,
                           resourcegroup, service_address, alias)

    # TODO: someday we should verify that the target really points to the
    # host/cluster specified by the other options
    if "alias" in params and ("host" in params or "cluster" in params):
        logger.client_info("Warning: when using --alias, it is your "
                           "responsibility to ensure it really points to "
                           "the host/cluster you've specified - the broker "
                           "does not verify that.")

    with session.no_autoflush:
        dbsrv = find_server(dbinstance, params)
        if dbsrv:
            raise ArgumentError("The server binding already exists.")

        dbsrv = ServiceInstanceServer(**params)

        # The ordering_list will manage the position for us
        if position is not None:
            dbinstance.servers.insert(position, dbsrv)
        else:
            dbinstance.servers.append(dbsrv)

        # Invalidate cached relationship state on the new server's owner.
        if dbsrv.host:
            session.expire(dbsrv.host, ['services_provided'])
        if dbsrv.cluster:
            session.expire(dbsrv.cluster, ['services_provided'])

    session.flush()
    plenaries.write()

    return
def render(self, session, logger, list, **arguments): check_hostlist_size(self.command, self.config, list) # The default is now --configure, but that does not play nice with # --status. Turn --configure off if --status is present if arguments.get("status", False): arguments["configure"] = None user = self.config.get("broker", "installfe_user") command = self.config.get("broker", "installfe") args = [command] args.append("--cfgfile") args.append("/dev/null") args.append("--sshdir") args.append(self.config.get("broker", "installfe_sshdir")) args.append("--logfile") logdir = self.config.get("broker", "logdir") args.append("%s/aii-installfe.log" % logdir) servers = dict() groups = dict() failed = [] for host in list: try: dbhost = hostname_to_host(session, host) if arguments.get( "install", None) and (dbhost.status.name == "ready" or dbhost.status.name == "almostready"): failed.append("%s: You should change the build status " "before switching the PXE link to install." % host) # Find what "bootserver" instance we're bound to dbservice = Service.get_unique(session, "bootserver", compel=True) si = get_host_bound_service(dbhost, dbservice) if not si: failed.append("%s: Host has no bootserver." % host) else: if si.name in groups: groups[si.name].append(dbhost) else: # for that instance, find what servers are bound to it. servers[si.name] = [ host.fqdn for host in si.server_hosts ] groups[si.name] = [dbhost] except NotFoundException, nfe: failed.append("%s: %s" % (host, nfe)) except ArgumentError, ae: failed.append("%s: %s" % (host, ae))
def render(self, session, service, archetype, personality, **arguments):
    """Require a service for the given personality."""
    dbpersonality = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)
    dbservice = Service.get_unique(session, service, compel=True)
    already_bound = dbpersonality in dbservice.personalities
    if already_bound:
        raise ArgumentError("Service %s is already required by personality "
                            "%s, archetype %s." % (service, personality,
                                                   archetype))
    dbservice.personalities.append(dbpersonality)
    return
def render(self, session, service, archetype, personality, **arguments):
    """Drop a personality-level service requirement."""
    dbpers = Personality.get_unique(session, name=personality,
                                    archetype=archetype, compel=True)
    dbsvc = Service.get_unique(session, service, compel=True)
    try:
        dbsvc.personalities.remove(dbpers)
    except ValueError:
        # remove() raises ValueError when the mapping is absent.
        raise NotFoundException("Service %s required for archetype "
                                "%s, personality %s not found." %
                                (service, archetype, personality))
    session.flush()
    return
def render(self, session, service, archetype, justification, user,
           **arguments):
    """Make a service required by an archetype (needs --justification)."""
    if not justification:
        raise AuthorizationException("Changing the required services of "
                                     "an archetype requires "
                                     "--justification.")
    validate_justification(user, justification)

    dbarch = Archetype.get_unique(session, archetype, compel=True)
    dbsvc = Service.get_unique(session, name=service, compel=True)

    if dbarch in dbsvc.archetypes:
        raise ArgumentError("Service %s is already required by archetype "
                            "%s" % (service, archetype))
    dbsvc.archetypes.append(dbarch)
    return
def render(self, session, logger, service, server, default, generate,
           **kwargs):
    """Emit a service plenary (toplevel, or the client/server default)."""
    dbservice = Service.get_unique(session, service, compel=True)
    if default:
        # Defaults split by role: server include vs client include.
        cls = PlenaryServiceServerDefault if server \
            else PlenaryServiceClientDefault
        plenary_info = cls(dbservice, logger=logger)
    else:
        plenary_info = PlenaryServiceToplevel(dbservice, logger=logger)
    if generate:
        return plenary_info._generate_content()
    return plenary_info.read()
def render(self, session, logger, list, **arguments): check_hostlist_size(self.command, self.config, list) # The default is now --configure, but that does not play nice with # --status. Turn --configure off if --status is present if arguments.get("status", False): arguments["configure"] = None user = self.config.get("broker", "installfe_user") command = self.config.get("broker", "installfe") args = [command] args.append("--cfgfile") args.append("/dev/null") args.append("--sshdir") args.append(self.config.get("broker", "installfe_sshdir")) args.append("--logfile") logdir = self.config.get("broker", "logdir") args.append("%s/aii-installfe.log" % logdir) servers = dict() groups = dict() failed = [] for host in list: try: dbhost = hostname_to_host(session, host) if arguments.get("install", None) and (dbhost.status.name == "ready" or dbhost.status.name == "almostready"): failed.append("%s: You should change the build status " "before switching the PXE link to install." % host) # Find what "bootserver" instance we're bound to dbservice = Service.get_unique(session, "bootserver", compel=True) si = get_host_bound_service(dbhost, dbservice) if not si: failed.append("%s: Host has no bootserver." % host) else: if si.name in groups: groups[si.name].append(dbhost) else: # for that instance, find what servers are bound to it. servers[si.name] = [host.fqdn for host in si.server_hosts] groups[si.name] = [dbhost] except NotFoundException, nfe: failed.append("%s: %s" % (host, nfe)) except ArgumentError, ae: failed.append("%s: %s" % (host, ae))
def render(self, session, logger, hostname, service, instance, force=False,
           **arguments):
    """Rebind a host's client binding for a service via the Chooser."""
    dbhost = hostname_to_host(session, hostname)
    dbsvc = Service.get_unique(session, service, compel=True)
    chooser = Chooser(dbhost, logger=logger, required_only=False)
    if instance:
        # Pin the chooser to a specific instance.
        dbsi = get_service_instance(session, dbsvc, instance)
        chooser.set_single(dbsvc, dbsi, force=force)
    else:
        chooser.set_single(dbsvc, force=force)
    chooser.flush_changes()
    chooser.write_plenary_templates()
    return
def render(self, session, logger, service, instance, comments, **arguments):
    """Add a new instance to an existing service."""
    dbsvc = Service.get_unique(session, service, compel=True)
    # preclude=True raises if the instance already exists.
    ServiceInstance.get_unique(session, service=dbsvc, name=instance,
                               preclude=True)
    dbsi = ServiceInstance(service=dbsvc, name=instance, comments=comments)
    session.add(dbsi)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbsi))

    session.flush()
    plenaries.write()

    return
def render(self, session, service, archetype, justification, user,
           **arguments):
    """Drop an archetype-level service requirement (needs --justification)."""
    if not justification:
        raise AuthorizationException("Changing the required services of "
                                     "an archetype requires "
                                     "--justification.")
    validate_justification(user, justification)

    dbarch = Archetype.get_unique(session, archetype, compel=True)
    dbsvc = Service.get_unique(session, service, compel=True)
    try:
        dbsvc.archetypes.remove(dbarch)
    except ValueError:
        # remove() raises ValueError when the mapping is absent.
        raise NotFoundException("Service %s required for archetype %s "
                                "not found." % (service, archetype))
    session.flush()
    return
def render(self, session, service, instance, archetype, personality,
           networkip, **kwargs):
    """Map a service instance to a location, optionally scoped to a
    personality and/or a network."""
    dbservice = Service.get_unique(session, service, compel=True)
    dblocation = get_location(session, **kwargs)
    dbinstance = get_service_instance(session, dbservice, instance)

    if networkip:
        dbnet_env = NetworkEnvironment.get_unique_or_default(session)
        dbnetwork = get_network_byip(session, networkip, dbnet_env)
    else:
        dbnetwork = None

    if archetype is None and personality:
        # Can't get here with the standard aq client.
        raise ArgumentError("Specifying --personality requires you to "
                            "also specify --archetype.")

    # Use a fresh dict for constructor keywords instead of shadowing the
    # **kwargs parameter.
    extra_args = {}
    if archetype and personality:
        dbpersona = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)
        map_class = PersonalityServiceMap
        query = session.query(map_class).filter_by(personality=dbpersona)
        extra_args["personality"] = dbpersona
    else:
        map_class = ServiceMap
        query = session.query(map_class)

    dbmap = query.filter_by(location=dblocation,
                            service_instance=dbinstance,
                            network=dbnetwork).first()
    if not dbmap:
        dbmap = map_class(service_instance=dbinstance, location=dblocation,
                          network=dbnetwork, **extra_args)
        session.add(dbmap)

    session.flush()
    return
def render(self, session, logger, service, instance, max_clients, default,
           comments, **arguments):
    """Update max_clients and/or comments on a service instance."""
    dbsvc = Service.get_unique(session, name=service, compel=True)
    dbsi = ServiceInstance.get_unique(session, service=dbsvc,
                                      name=instance, compel=True)
    if default:
        # --default clears the instance override.
        dbsi.max_clients = None
    elif max_clients is not None:
        dbsi.max_clients = max_clients
    if comments is not None:
        dbsi.comments = comments

    session.flush()
    Plenary.get_plenary(dbsi, logger=logger).write()
    return
def render(self, session, logger, cluster, service, instance, **arguments):
    """Unbind a service instance from a cluster (aligned services are
    protected)."""
    dbsvc = Service.get_unique(session, service, compel=True)
    dbsi = get_service_instance(session, dbsvc, instance)
    dbcluster = Cluster.get_unique(session, cluster, compel=True)

    if dbsi not in dbcluster.service_bindings:
        raise NotFoundException("{0} is not bound to {1:l}."
                                .format(dbsi, dbcluster))
    if dbsvc in dbcluster.required_services:
        raise ArgumentError("Cannot remove cluster service instance "
                            "binding for %s cluster aligned service %s." %
                            (dbcluster.cluster_type, dbsvc.name))
    dbcluster.service_bindings.remove(dbsi)

    session.flush()
    Plenary.get_plenary(dbcluster, logger=logger).write()
    return
def render(self, session, logger, hostname, **arguments):
    """Switch the PXE link of a single host by invoking aii-installfe
    against the host's bootserver servers.
    """
    # The default is now --configure, but that does not play nice with
    # --status. Turn --configure off if --status is present
    if arguments.get("status", False):
        arguments["configure"] = None
    dbhost = hostname_to_host(session, hostname)
    if arguments.get("install", None) and (dbhost.status.name == "ready" or
                                           dbhost.status.name ==
                                           "almostready"):
        raise ArgumentError("You should change the build status before "
                            "switching the PXE link to install.")
    # Find what "bootserver" instance we're bound to
    dbservice = Service.get_unique(session, "bootserver", compel=True)
    si = get_host_bound_service(dbhost, dbservice)
    if not si:
        raise ArgumentError("{0} has no bootserver.".format(dbhost))
    # for that instance, find what servers are bound to it.
    servers = [host.fqdn for host in si.server_hosts]
    command = self.config.get("broker", "installfe")
    args = [command]
    # Translate broker options (e.g. --install/--status) to installfe
    # flags; each selected flag targets this host's FQDN.
    for (option, mapped) in self._option_map.items():
        if arguments[option]:
            args.append(mapped)
            args.append(dbhost.fqdn)
    # If no option flag was appended, args still ends with the command.
    if args[-1] == command:
        raise ArgumentError("Missing required target parameter.")
    args.append("--cfgfile")
    args.append("/dev/null")
    args.append("--servers")
    user = self.config.get("broker", "installfe_user")
    args.append(" ".join(["%s@%s" % (user, s) for s in servers]))
    args.append("--sshdir")
    args.append(self.config.get("broker", "installfe_sshdir"))
    args.append("--logfile")
    logdir = self.config.get("broker", "logdir")
    args.append("%s/aii-installfe.log" % logdir)
    run_command(args, logger=logger, loglevel=CLIENT_INFO)
def render(self, session, personality, archetype, grn, eon_id,
           host_environment, config_override, required_service, fullinfo,
           **arguments):
    """Search personalities by the given filters.

    Returns full Personality objects when fullinfo is set, otherwise a
    SimplePersonalityList.
    """
    q = session.query(Personality)
    if archetype:
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        q = q.filter_by(archetype=dbarchetype)
    if personality:
        q = q.filter_by(name=personality)
    if config_override:
        q = q.filter_by(config_override=True)
    if host_environment:
        dbhost_env = HostEnvironment.get_instance(session, host_environment)
        q = q.filter_by(host_environment=dbhost_env)
    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)
        # Match either the owner GRN or any mapped GRN.
        q = q.outerjoin(PersonalityGrnMap)
        q = q.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                         PersonalityGrnMap.eon_id == dbgrn.eon_id))
        q = q.reset_joinpoint()
    if required_service:
        dbsrv = Service.get_unique(session, required_service, compel=True)
        q = q.filter(Personality.services.contains(dbsrv))
    # Order by archetype then personality name; the join is reused for
    # eager loading of the archetype relationship.
    q = q.join(Archetype)
    q = q.order_by(Archetype.name, Personality.name)
    q = q.options(contains_eager('archetype'))
    if fullinfo:
        # Pre-load related collections to avoid N+1 queries in formatting.
        q = q.options(subqueryload('services'),
                      subqueryload('_grns'),
                      subqueryload('features'),
                      joinedload('features.feature'),
                      joinedload('cluster_infos'))
        return q.all()
    else:
        return SimplePersonalityList(q.all())
def render(self, session, logger, service, instance, max_clients, default,
           **arguments):
    """Set or clear max_clients on a service instance."""
    dbsvc = Service.get_unique(session, name=service, compel=True)
    dbsi = ServiceInstance.get_unique(session, service=dbsvc,
                                      name=instance, compel=True)
    if default:
        # Fall back to the service-level default.
        dbsi.max_clients = None
    elif max_clients is not None:
        dbsi.max_clients = max_clients
    else:
        raise ArgumentError("Missing --max_clients or --default argument "
                            "to update service %s instance %s." %
                            (dbsvc.name, dbsi.name))

    session.add(dbsi)
    session.flush()

    Plenary.get_plenary(dbsi, logger=logger).write()
    return
def render(self, session, logger, service, max_clients, default, **arguments):
    """Set or clear the default max_clients of a service.

    Rewrites the service plenary and every instance plenary, since
    instances may inherit the service-level default.

    Raises ArgumentError when neither --max_clients nor --default is
    given.
    """
    dbservice = Service.get_unique(session, name=service, compel=True)
    if default:
        dbservice.max_clients = None
    elif max_clients is not None:
        dbservice.max_clients = max_clients
    else:
        raise ArgumentError("Missing --max_clients or --default argument "
                            "to update service %s." % dbservice.name)
    session.add(dbservice)
    session.flush()

    # Fix: pass the request logger through to the plenary collection, as
    # every sibling command does, so plenary writes are logged against
    # this request instead of the default logger.
    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbservice))
    for dbinstance in dbservice.instances:
        plenaries.append(Plenary.get_plenary(dbinstance))
    plenaries.write()

    return
def render(self, session, logger, hostname, service, **arguments):
    """Remove a host's client binding for a service (unless required)."""
    dbhost = hostname_to_host(session, hostname)
    # Required services cannot simply be unbound — they would be re-added
    # on the next make; point the user at rebind instead.
    if any(srv.name == service
           for srv in dbhost.archetype.services +
           dbhost.personality.services):
        raise ArgumentError("Cannot unbind a required service. "
                            "Perhaps you want to rebind?")

    dbservice = Service.get_unique(session, service, compel=True)
    si = get_host_bound_service(dbhost, dbservice)
    if si:
        logger.info("Removing client binding")
        dbhost.services_used.remove(si)
        session.flush()
        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(PlenaryHost(dbhost, logger=logger))
        plenaries.append(PlenaryServiceInstanceServer(si, logger=logger))
        plenaries.write()
    return
def render(self, session, logger, service, instance, **arguments):
    """Delete a service instance once it has no clients or servers."""
    dbsvc = Service.get_unique(session, service, compel=True)
    dbsi = get_service_instance(session, dbsvc, instance)

    if dbsi.client_count > 0:
        raise ArgumentError("Service %s, instance %s still has clients and "
                            "cannot be deleted." % (dbsvc.name, dbsi.name))
    if dbsi.server_hosts:
        fqdns = ", ".join(host.fqdn for host in dbsi.server_hosts)
        raise ArgumentError("Service %s, instance %s is still being "
                            "provided by servers: %s." %
                            (dbsvc.name, dbsi.name, fqdns))

    # Depend on cascading to remove any mappings
    session.delete(dbsi)
    session.flush()

    Plenary.get_plenary(dbsi, logger=logger).remove()
    return
def render(self, session, logger, service, instance, **arguments):
    """Delete a service instance; refuse while clients or servers remain."""
    dbservice = Service.get_unique(session, service, compel=True)
    dbinstance = get_service_instance(session, dbservice, instance)

    if dbinstance.client_count > 0:
        raise ArgumentError("Service %s, instance %s still has clients and "
                            "cannot be deleted." %
                            (dbservice.name, dbinstance.name))

    server_names = [host.fqdn for host in dbinstance.server_hosts]
    if server_names:
        raise ArgumentError("Service %s, instance %s is still being "
                            "provided by servers: %s." %
                            (dbservice.name, dbinstance.name,
                             ", ".join(server_names)))

    # Depend on cascading to remove any mappings
    session.delete(dbinstance)
    session.flush()

    plenary = Plenary.get_plenary(dbinstance, logger=logger)
    plenary.remove()
    return
def render(self, session, logger, service, instance, default, server,
           generate, **kwargs):
    """Emit a service-instance plenary in the requested flavor."""
    dbservice = Service.get_unique(session, service, compel=True)
    dbsi = ServiceInstance.get_unique(session, service=dbservice,
                                      name=instance, compel=True)
    # Pick the plenary class from the (default, server) flag pair.
    if default and server:
        cls = PlenaryServiceInstanceServerDefault
    elif default:
        cls = PlenaryServiceInstanceClientDefault
    elif server:
        cls = PlenaryServiceInstanceServer
    else:
        cls = PlenaryServiceInstanceToplevel
    plenary_info = cls.get_plenary(dbsi, logger=logger)
    if generate:
        return plenary_info._generate_content()
    return plenary_info.read()
def render(self, session, service, instance, archetype, personality,
           networkip, **arguments):
    """Remove a service map entry (plain or personality-specific)."""
    dbservice = Service.get_unique(session, service, compel=True)
    dbinstance = ServiceInstance.get_unique(session, service=dbservice,
                                            name=instance, compel=True)
    dblocation = get_location(session, **arguments)

    dbnetwork = None
    if networkip:
        dbnet_env = NetworkEnvironment.get_unique_or_default(session)
        dbnetwork = get_network_byip(session, networkip, dbnet_env)

    if personality:
        if not archetype:
            # Can't get here with the standard aq client.
            raise ArgumentError("Specifying --personality requires you to "
                                "also specify --archetype.")
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        q = session.query(PersonalityServiceMap)
        q = q.filter_by(personality=dbpersonality)
    else:
        q = session.query(ServiceMap)

    dbmap = q.filter_by(location=dblocation, service_instance=dbinstance,
                        network=dbnetwork).first()
    if dbmap:
        session.delete(dbmap)
    session.flush()
    return
def determine_helper_hostname(session, logger, config, dbswitch):
    """Try to figure out a useful helper from the mappings.

    Returns the FQDN of a jump host, or None when no helper is mapped.
    """
    helper_name = config.get("broker", "poll_helper_service")
    if not helper_name:  # pragma: no cover
        return
    helper_service = Service.get_unique(session, helper_name,
                                        compel=InternalError)
    cache = ServiceInstance.get_mapped_instance_cache(
        dbpersonality=None, dblocation=dbswitch.location,
        dbservices=[helper_service])
    for dbsi in cache.get(helper_service, []):
        if not dbsi.server_hosts:
            continue
        # Poor man's load balancing...
        jump = choice(dbsi.server_hosts).fqdn
        logger.client_info("Using jump host {0} from {1:l} to run CheckNet "
                           "for {2:l}.".format(jump, dbsi, dbswitch))
        return jump
    logger.client_info("No jump host for %s, calling CheckNet from %s." %
                       (dbswitch, config.get("broker", "hostname")))
    return None
def render(self, session, service, server, client, **arguments):
    """Show a service or its instances, filtered by server/client host."""
    instance = arguments.get("instance", None)
    dbserver = hostname_to_host(session, server) if server else None
    dbclient = hostname_to_host(session, client) if client else None
    dbservice = Service.get_unique(session, service, compel=True)

    if dbserver:
        query = session.query(ServiceInstance).filter_by(service=dbservice)
        query = query.join('servers').filter_by(host=dbserver)
        query = query.order_by(ServiceInstance.name)
        return ServiceInstanceList(query.all())

    if dbclient:
        matches = []
        for si in dbclient.services_used:
            if si.service != dbservice:
                continue
            if instance and si.name != instance:
                continue
            matches.append(si)
        return ServiceInstanceList(matches)

    if instance:
        return get_service_instance(session, dbservice, instance)
    return dbservice
def render(self, session, logger, service, max_clients, default,
           need_client_list, comments, **arguments):
    """Update service-level attributes and refresh all plenaries."""
    dbsvc = Service.get_unique(session, name=service, compel=True)

    if default:
        # --default clears the service-wide limit.
        dbsvc.max_clients = None
    elif max_clients is not None:
        dbsvc.max_clients = max_clients
    if need_client_list is not None:
        dbsvc.need_client_list = need_client_list
    if comments is not None:
        dbsvc.comments = comments

    session.flush()

    # Instance plenaries may embed service-level settings, so rewrite
    # every instance alongside the service itself.
    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbsvc))
    for dbsi in dbsvc.instances:
        plenaries.append(Plenary.get_plenary(dbsi))
    plenaries.write()

    return
def render(self, session, service, instance, archetype, personality,
           networkip, include_parents, **arguments):
    """Show service map entries matching the given filters.

    Builds one or two queries (plain ServiceMap and/or
    PersonalityServiceMap depending on how specific the request is),
    applies the service/instance/location/network filters to each, and
    returns the combined results.
    """
    dbservice = service and Service.get_unique(session, service,
                                               compel=True) or None
    dblocation = get_location(session, **arguments)
    queries = []
    # The current logic basically shoots for exact match when given
    # (like exact personality maps only or exact archetype maps
    # only), or "any" if an exact spec isn't given.
    if archetype and personality:
        dbpersona = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)
        q = session.query(PersonalityServiceMap)
        q = q.filter_by(personality=dbpersona)
        queries.append(q)
    elif personality:
        # Alternately, this could throw an error and ask for archetype.
        q = session.query(PersonalityServiceMap)
        q = q.join('personality').filter_by(name=personality)
        q = q.reset_joinpoint()
        queries.append(q)
    elif archetype:
        # Alternately, this could throw an error and ask for personality.
        q = session.query(PersonalityServiceMap)
        q = q.join('personality', 'archetype').filter_by(name=archetype)
        q = q.reset_joinpoint()
        queries.append(q)
    else:
        # No archetype/personality filter: search both map tables.
        queries.append(session.query(ServiceMap))
        queries.append(session.query(PersonalityServiceMap))
    if dbservice:
        for i in range(len(queries)):
            queries[i] = queries[i].join('service_instance')
            queries[i] = queries[i].filter_by(service=dbservice)
            queries[i] = queries[i].reset_joinpoint()
    if instance:
        for i in range(len(queries)):
            queries[i] = queries[i].join('service_instance')
            queries[i] = queries[i].filter_by(name=instance)
            queries[i] = queries[i].reset_joinpoint()
    # Nothing fancy for now - just show any relevant explicit bindings.
    if dblocation:
        for i in range(len(queries)):
            if include_parents:
                # Match the location or any of its parents; pull the
                # mapped class off the query to build the IN clause.
                base_cls = queries[i].column_descriptions[0]["expr"]
                col = base_cls.location_id
                queries[i] = queries[i].filter(
                    col.in_(dblocation.parent_ids()))
            else:
                queries[i] = queries[i].filter_by(location=dblocation)
    if networkip:
        dbnet_env = NetworkEnvironment.get_unique_or_default(session)
        dbnetwork = get_network_byip(session, networkip, dbnet_env)
        for i in range(len(queries)):
            queries[i] = queries[i].filter_by(network=dbnetwork)
    results = ServiceMapList()
    for q in queries:
        results.extend(q.all())
    if service and instance and dblocation:
        # This should be an exact match. (Personality doesn't
        # matter... either it was given and it should be an
        # exact match for PersonalityServiceMap or it wasn't
        # and this should be an exact match for ServiceMap.)
        if not results:
            raise NotFoundException("No matching map found.")
    return results
# Send out server/client notifications after a publish.  Relies on names
# from the surrounding scope: config, sock, session, logger,
# modified_index, send_notification, CDB_NOTIF, CCM_NOTIF.
if config.has_option("broker", "bind_address"):
    # Optionally pin the outgoing socket to a configured local address
    # (and port, if set).
    bind_address = socket.gethostbyname(config.get("broker",
                                                   "bind_address"))
    if config.has_option("broker", "cdp_send_port"):  # pragma: no cover
        port = config.get_int("broker", "cdp_send_port")
    else:
        port = 0
    sock.bind((bind_address, port))
if config.has_option("broker", "server_notifications"):
    # Collect the FQDNs of all servers of the configured services.
    servers = set()
    for service in config.get("broker", "server_notifications").split():
        if service.strip():
            try:
                # service may be unknown
                srvinfo = Service.get_unique(session, service, compel=True)
                for instance in srvinfo.instances:
                    servers.update([srv.fqdn
                                    for srv in instance.servers])
            except Exception, e:
                # Best effort: log and keep notifying the rest.
                logger.info("failed to lookup up server module %s: %s",
                            service, e)
    count = send_notification(CDB_NOTIF, servers, sock=sock, logger=logger)
    logger.info("sent %d server notifications", count)
if (config.has_option("broker", "client_notifications") and
        config.getboolean("broker",
                          "client_notifications")):  # pragma: no cover
    count = send_notification(CCM_NOTIF, modified_index.keys(), sock=sock,
                              logger=logger)
    logger.info("sent %d client notifications", count)
def render(self, session, logger,
           # search_cluster
           archetype, cluster_type, personality,
           domain, sandbox, branch, buildstatus,
           allowed_archetype, allowed_personality,
           down_hosts_threshold, down_maint_threshold,
           max_members,
           member_archetype, member_hostname, member_personality,
           capacity_override, cluster, esx_guest, instance,
           esx_metacluster, service, share, esx_share,
           esx_switch, esx_virtual_machine,
           fullinfo, style, **arguments):
    """Search for clusters matching the given (optional) filters.

    Every filter argument is optional; each one that is set narrows the
    query.  Location filters arrive inside **arguments with a
    ``cluster_`` or ``member_`` prefix (e.g. ``cluster_building``).

    Returns a list of Cluster objects when fullinfo is set, otherwise a
    SimpleClusterList of the matching cluster names.
    Raises NotFoundException when a --share name does not exist, and
    whatever the various get_unique(..., compel=True) lookups raise for
    unknown argument values.
    """
    if esx_share:
        self.deprecated_option("esx_share", "Please use --share instead.",
                               logger=logger, **arguments)
        share = esx_share

    if cluster_type == 'esx':
        cls = EsxCluster
    else:
        cls = Cluster

    # Don't load full objects if we only want to show their name
    if fullinfo or style != 'raw':
        q = session.query(cls)
    else:
        q = session.query(cls.name)

    # The ORM automatically de-duplicates the result if we query full
    # objects, but not when we query just the names. Tell the DB to do so.
    q = q.distinct()

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 branch=branch)
    if dbbranch:
        q = q.filter_by(branch=dbbranch)
    if dbauthor:
        q = q.filter_by(sandbox_author=dbauthor)

    if archetype:
        # Added to the searches as appropriate below.
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    if personality and archetype:
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        q = q.filter_by(personality=dbpersonality)
    elif personality:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(name=personality)
        q = q.reset_joinpoint()
    elif archetype:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(archetype=dbarchetype)
        q = q.reset_joinpoint()

    if buildstatus:
        dbbuildstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                                    compel=True)
        q = q.filter_by(status=dbbuildstatus)

    if cluster_type:
        q = q.filter_by(cluster_type=cluster_type)

    # Go through the arguments and make special dicts for each
    # specific set of location arguments that are stripped of the
    # given prefix.
    location_args = {'cluster_': {}, 'member_': {}}
    for prefix in location_args.keys():
        for (k, v) in arguments.items():
            if k.startswith(prefix):
                # arguments['cluster_building'] = 'dd'
                # becomes
                # location_args['cluster_']['building'] = 'dd'
                # Slice rather than str.replace(): replace() would strip
                # every occurrence of the prefix inside the key, not just
                # the leading one.
                location_args[prefix][k[len(prefix):]] = v

    dblocation = get_location(session, **location_args['cluster_'])
    if dblocation:
        if location_args['cluster_']['exact_location']:
            q = q.filter_by(location_constraint=dblocation)
        else:
            childids = dblocation.offspring_ids()
            q = q.filter(Cluster.location_constraint_id.in_(childids))
    dblocation = get_location(session, **location_args['member_'])
    if dblocation:
        q = q.join('_hosts', 'host', 'machine')
        if location_args['member_']['exact_location']:
            q = q.filter_by(location=dblocation)
        else:
            childids = dblocation.offspring_ids()
            q = q.filter(Machine.location_id.in_(childids))
        q = q.reset_joinpoint()

    # esx stuff
    if cluster:
        q = q.filter_by(name=cluster)
    if esx_metacluster:
        dbmetacluster = MetaCluster.get_unique(session, esx_metacluster,
                                               compel=True)
        q = q.join('_metacluster')
        q = q.filter_by(metacluster=dbmetacluster)
        q = q.reset_joinpoint()
    if esx_virtual_machine:
        dbvm = Machine.get_unique(session, esx_virtual_machine, compel=True)
        # TODO: support VMs inside resource groups?
        q = q.join(ClusterResource, VirtualMachine)
        q = q.filter_by(machine=dbvm)
        q = q.reset_joinpoint()
    if esx_guest:
        dbguest = hostname_to_host(session, esx_guest)
        # TODO: support VMs inside resource groups?
        q = q.join(ClusterResource, VirtualMachine, Machine)
        q = q.filter_by(host=dbguest)
        q = q.reset_joinpoint()
    if capacity_override:
        q = q.filter(EsxCluster.memory_capacity != None)
    if esx_switch:
        dbswitch = Switch.get_unique(session, esx_switch, compel=True)
        q = q.filter_by(switch=dbswitch)

    if service:
        dbservice = Service.get_unique(session, name=service, compel=True)
        if instance:
            dbsi = ServiceInstance.get_unique(session, name=instance,
                                              service=dbservice,
                                              compel=True)
            q = q.filter(Cluster.service_bindings.contains(dbsi))
        else:
            q = q.join('service_bindings')
            q = q.filter_by(service=dbservice)
            q = q.reset_joinpoint()
    elif instance:
        # Instance name given without a service: match it in any service.
        q = q.join('service_bindings')
        q = q.filter_by(name=instance)
        q = q.reset_joinpoint()

    if share:
        # Perform sanity check on the share name
        q2 = session.query(Share)
        q2 = q2.filter_by(name=share)
        if not q2.first():
            raise NotFoundException("Share %s not found." % share)

        # A share can be attached to the cluster directly (S1) or through
        # a resource group (RG -> BR -> S2); match either path.
        CR = aliased(ClusterResource)
        S1 = aliased(Share)
        S2 = aliased(Share)
        RG = aliased(ResourceGroup)
        BR = aliased(BundleResource)
        q = q.join(CR)
        q = q.outerjoin((S1, S1.holder_id == CR.id))
        q = q.outerjoin((RG, RG.holder_id == CR.id),
                        (BR, BR.resourcegroup_id == RG.id),
                        (S2, S2.holder_id == BR.id))
        q = q.filter(or_(S1.name == share, S2.name == share))
        q = q.reset_joinpoint()

    if max_members:
        q = q.filter_by(max_hosts=max_members)

    if down_hosts_threshold:
        (pct, dht) = Cluster.parse_threshold(down_hosts_threshold)
        q = q.filter_by(down_hosts_percent=pct)
        q = q.filter_by(down_hosts_threshold=dht)

    if down_maint_threshold:
        (pct, dmt) = Cluster.parse_threshold(down_maint_threshold)
        q = q.filter_by(down_maint_percent=pct)
        q = q.filter_by(down_maint_threshold=dmt)

    if allowed_archetype:
        # Added to the searches as appropriate below.
        dbaa = Archetype.get_unique(session, allowed_archetype,
                                    compel=True)
    if allowed_personality and allowed_archetype:
        dbap = Personality.get_unique(session, archetype=dbaa,
                                      name=allowed_personality,
                                      compel=True)
        q = q.filter(Cluster.allowed_personalities.contains(dbap))
    elif allowed_personality:
        q = q.join('allowed_personalities')
        q = q.filter_by(name=allowed_personality)
        q = q.reset_joinpoint()
    elif allowed_archetype:
        q = q.join('allowed_personalities')
        q = q.filter_by(archetype=dbaa)
        q = q.reset_joinpoint()

    if member_hostname:
        dbhost = hostname_to_host(session, member_hostname)
        q = q.join('_hosts')
        q = q.filter_by(host=dbhost)
        q = q.reset_joinpoint()

    if member_archetype:
        # Added to the searches as appropriate below.
        dbma = Archetype.get_unique(session, member_archetype, compel=True)
    if member_personality and member_archetype:
        q = q.join('_hosts', 'host')
        dbmp = Personality.get_unique(session, archetype=dbma,
                                      name=member_personality, compel=True)
        q = q.filter_by(personality=dbmp)
        q = q.reset_joinpoint()
    elif member_personality:
        q = q.join('_hosts', 'host', 'personality')
        q = q.filter_by(name=member_personality)
        q = q.reset_joinpoint()
    elif member_archetype:
        q = q.join('_hosts', 'host', 'personality')
        q = q.filter_by(archetype=dbma)
        q = q.reset_joinpoint()

    if cluster_type == 'esx':
        q = q.order_by(EsxCluster.name)
    else:
        q = q.order_by(Cluster.name)

    if fullinfo:
        return q.all()
    return SimpleClusterList(q.all())
if config.has_option("broker", "bind_address"): bind_address = socket.gethostbyname( config.get("broker", "bind_address")) if config.has_option("broker", "cdp_send_port"): # pragma: no cover port = config.get_int("broker", "cdp_send_port") else: port = 0 sock.bind((bind_address, port)) if config.has_option("broker", "server_notifications"): service_modules = {} for service in config.get("broker", "server_notifications").split(): if service.strip(): try: # service may be unknown srvinfo = Service.get_unique(session, service, compel=True) for instance in srvinfo.instances: for fqdn in instance.server_fqdns: service_modules[fqdn] = 1 except Exception, e: logger.info("failed to lookup up server module %s: %s" % (service, e)) count = send_notification(CDB_NOTIF, service_modules.keys(), sock=sock, logger=logger) logger.log(CLIENT_INFO, "sent %d server notifications" % count) if (config.has_option("broker", "client_notifications") and config.getboolean("broker", "client_notifications") and clientNotify): # pragma: no cover
def render(self, session, service, instance, archetype, personality,
           networkip, include_parents, **arguments):
    """Show service maps matching the given (optional) filters.

    When archetype/personality are given only PersonalityServiceMap
    entries are considered; otherwise both plain ServiceMap and
    PersonalityServiceMap entries are searched.  Location arguments are
    picked up from **arguments by get_location().

    Returns a ServiceMapList of all matching map entries.
    Raises NotFoundException when service, instance and a location were
    all given (an exact-match request) but nothing matched.
    """
    # Conditional expression instead of the error-prone "X and Y or Z"
    # idiom (behavior unchanged: compel=True always returns or raises).
    dbservice = Service.get_unique(session, service,
                                   compel=True) if service else None
    dblocation = get_location(session, **arguments)
    queries = []
    # The current logic basically shoots for exact match when given
    # (like exact personality maps only or exact archetype maps
    # only), or "any" if an exact spec isn't given.
    if archetype and personality:
        dbpersona = Personality.get_unique(session, name=personality,
                                           archetype=archetype,
                                           compel=True)
        q = session.query(PersonalityServiceMap)
        q = q.filter_by(personality=dbpersona)
        queries.append(q)
    elif personality:
        # Alternately, this could throw an error and ask for archetype.
        q = session.query(PersonalityServiceMap)
        q = q.join('personality').filter_by(name=personality)
        q = q.reset_joinpoint()
        queries.append(q)
    elif archetype:
        # Alternately, this could throw an error and ask for personality.
        q = session.query(PersonalityServiceMap)
        q = q.join('personality', 'archetype').filter_by(name=archetype)
        q = q.reset_joinpoint()
        queries.append(q)
    else:
        queries.append(session.query(ServiceMap))
        queries.append(session.query(PersonalityServiceMap))

    if dbservice:
        queries = [q.join('service_instance')
                    .filter_by(service=dbservice)
                    .reset_joinpoint() for q in queries]
    if instance:
        queries = [q.join('service_instance')
                    .filter_by(name=instance)
                    .reset_joinpoint() for q in queries]
    # Nothing fancy for now - just show any relevant explicit bindings.
    if dblocation:
        if include_parents:
            # The location filter column lives on the concrete mapped
            # class of each query, so look it up per query; the parent id
            # list is loop-invariant and computed once.
            parent_ids = dblocation.parent_ids()
            new_queries = []
            for q in queries:
                base_cls = q.column_descriptions[0]["expr"]
                new_queries.append(
                    q.filter(base_cls.location_id.in_(parent_ids)))
            queries = new_queries
        else:
            queries = [q.filter_by(location=dblocation) for q in queries]

    if networkip:
        dbnet_env = NetworkEnvironment.get_unique_or_default(session)
        dbnetwork = get_network_byip(session, networkip, dbnet_env)
        queries = [q.filter_by(network=dbnetwork) for q in queries]

    results = ServiceMapList()
    for q in queries:
        results.extend(q.all())
    if service and instance and dblocation:
        # This should be an exact match.  (Personality doesn't
        # matter... either it was given and it should be an
        # exact match for PersonalityServiceMap or it wasn't
        # and this should be an exact match for ServiceMap.)
        if not results:
            raise NotFoundException("No matching map found.")
    return results
def render(self, session, logger, hostname, machine, archetype,
           buildstatus, personality, osname, osversion, service, instance,
           model, machine_type, vendor, serial, cluster, guest_on_cluster,
           guest_on_share, member_cluster_share, domain, sandbox, branch,
           sandbox_owner, dns_domain, shortname, mac, ip, networkip,
           network_environment, exact_location, server_of_service,
           server_of_instance, grn, eon_id, fullinfo, **arguments):
    """Search for hosts matching the given (optional) filters.

    Every filter argument is optional; each one that is set narrows the
    query.  Results are ordered by the machine's primary FQDN.

    Returns a list of Host objects when fullinfo is set, otherwise a
    SimpleHostList of matching host names.
    Raises NotFoundException for unknown --guest_on_share /
    --member_cluster_share names, and whatever the get_unique(...,
    compel=True) lookups raise for other unknown argument values.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)

    q = session.query(Host)

    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(machine=dbmachine)

    # Add the machine definition and the primary name. Use aliases to make
    # sure the end result will be ordered by primary name.
    PriDns = aliased(DnsRecord)
    PriFqdn = aliased(Fqdn)
    PriDomain = aliased(DnsDomain)
    q = q.join(Machine,
               (PriDns, PriDns.id == Machine.primary_name_id),
               (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
               (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
    q = q.order_by(PriFqdn.name, PriDomain.name)
    q = q.options(contains_eager('machine'),
                  contains_eager('machine.primary_name', alias=PriDns),
                  contains_eager('machine.primary_name.fqdn',
                                 alias=PriFqdn),
                  contains_eager('machine.primary_name.fqdn.dns_domain',
                                 alias=PriDomain))
    q = q.reset_joinpoint()

    # Hardware-specific filters
    dblocation = get_location(session, **arguments)
    if dblocation:
        if exact_location:
            q = q.filter(Machine.location == dblocation)
        else:
            childids = dblocation.offspring_ids()
            q = q.filter(Machine.location_id.in_(childids))

    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))

    if serial:
        self.deprecated_option(
            "serial", "Please use search machine --serial instead.",
            logger=logger, **arguments)
        q = q.filter(Machine.serial_no == serial)

    # DNS IP address related filters
    if mac or ip or networkip or hostname or dns_domain or shortname:
        # Inner joins are cheaper than outer joins, so make some effort to
        # use inner joins when possible
        if mac or ip or networkip:
            q = q.join(Interface)
        else:
            q = q.outerjoin(Interface)
        if ip or networkip:
            q = q.join(AddressAssignment, Network, from_joinpoint=True)
        else:
            q = q.outerjoin(AddressAssignment, Network,
                            from_joinpoint=True)

        if mac:
            self.deprecated_option("mac", "Please use search machine "
                                   "--mac instead.", logger=logger,
                                   **arguments)
            q = q.filter(Interface.mac == mac)
        if ip:
            q = q.filter(AddressAssignment.ip == ip)
            q = q.filter(Network.network_environment == dbnet_env)
        if networkip:
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            q = q.filter(AddressAssignment.network == dbnetwork)

        dbdns_domain = None
        if hostname:
            (shortname, dbdns_domain) = parse_fqdn(session, hostname)
        if dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                                compel=True)

        if shortname or dbdns_domain:
            # Match either a service address (ARecord on an assigned IP)
            # or the primary name joined in above.
            ARecAlias = aliased(ARecord)
            ARecFqdn = aliased(Fqdn)
            q = q.outerjoin(
                (ARecAlias, and_(ARecAlias.ip == AddressAssignment.ip,
                                 ARecAlias.network_id ==
                                 AddressAssignment.network_id)),
                (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
            if shortname:
                q = q.filter(or_(ARecFqdn.name == shortname,
                                 PriFqdn.name == shortname))
            if dbdns_domain:
                q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                 PriFqdn.dns_domain == dbdns_domain))
        q = q.reset_joinpoint()

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 branch=branch)
    if sandbox_owner:
        dbauthor = get_user_principal(session, sandbox_owner)

    if dbbranch:
        q = q.filter_by(branch=dbbranch)
    if dbauthor:
        q = q.filter_by(sandbox_author=dbauthor)

    if archetype:
        # Added to the searches as appropriate below.
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    if personality and archetype:
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        q = q.filter_by(personality=dbpersonality)
    elif personality:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(name=personality)
        q = q.reset_joinpoint()
    elif archetype:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(archetype=dbarchetype)
        q = q.reset_joinpoint()

    if buildstatus:
        dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                 compel=True)
        q = q.filter_by(status=dbbuildstatus)

    if osname and osversion and archetype:
        # archetype was already resolved above
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)
        q = q.filter_by(operating_system=dbos)
    elif osname or osversion:
        q = q.join('operating_system')
        if osname:
            q = q.filter_by(name=osname)
        if osversion:
            q = q.filter_by(version=osversion)
        q = q.reset_joinpoint()

    if service:
        dbservice = Service.get_unique(session, service, compel=True)
        if instance:
            dbsi = get_service_instance(session, dbservice, instance)
            q = q.filter(Host.services_used.contains(dbsi))
        else:
            q = q.join('services_used')
            q = q.filter_by(service=dbservice)
            q = q.reset_joinpoint()
    elif instance:
        # Instance name given without a service: match it in any service.
        q = q.join('services_used')
        q = q.filter_by(name=instance)
        q = q.reset_joinpoint()

    if server_of_service:
        dbserver_service = Service.get_unique(session, server_of_service,
                                              compel=True)
        if server_of_instance:
            dbssi = get_service_instance(session, dbserver_service,
                                         server_of_instance)
            q = q.join('_services_provided')
            q = q.filter_by(service_instance=dbssi)
            q = q.reset_joinpoint()
        else:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(service=dbserver_service)
            q = q.reset_joinpoint()
    elif server_of_instance:
        q = q.join('_services_provided', 'service_instance')
        q = q.filter_by(name=server_of_instance)
        q = q.reset_joinpoint()

    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if isinstance(dbcluster, MetaCluster):
            q = q.join('_cluster', 'cluster', '_metacluster')
            q = q.filter_by(metacluster=dbcluster)
        else:
            q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_cluster:
        # TODO: this does not handle metaclusters according to Wes
        dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                       compel=True)
        q = q.join('machine', VirtualMachine, ClusterResource)
        q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_share:
        #v2
        v2shares = session.query(Share.id).filter_by(
            name=guest_on_share).all()
        if not v2shares:
            raise NotFoundException(
                "No shares found with name {0}.".format(guest_on_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if member_cluster_share:
        #v2
        v2shares = session.query(Share.id).filter_by(
            name=member_cluster_share).all()
        if not v2shares:
            # Bug fix: the message previously interpolated guest_on_share
            # (often None) instead of the share name actually looked up.
            raise NotFoundException(
                "No shares found with name {0}.".format(
                    member_cluster_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                   'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

        # A host matches if the GRN is attached to the host itself or to
        # its personality (directly owned or mapped).
        persq = session.query(Personality.id)
        persq = persq.outerjoin(PersonalityGrnMap)
        persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                 PersonalityGrnMap.eon_id == dbgrn.eon_id))
        q = q.outerjoin(HostGrnMap)
        q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                         HostGrnMap.eon_id == dbgrn.eon_id,
                         Host.personality_id.in_(persq.subquery())))
        q = q.reset_joinpoint()

    if fullinfo:
        return q.all()
    return SimpleHostList(q.all())