def render(self, session, logger, grn, eon_id, **arguments):
    """Delete a GRN, refusing while hosts or personalities still use it."""
    dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                       config=self.config, usable_only=False)

    # A GRN can be referenced two ways: as the owner of an object, or
    # through the many-to-many GRN map.  Check both for hosts first.
    host_owner = session.query(Host).filter_by(owner_eon_id=dbgrn.eon_id)
    host_mapped = session.query(HostGrnMap).filter_by(eon_id=dbgrn.eon_id)
    if host_owner.first() or host_mapped.first():
        raise ArgumentError("GRN %s is still used by hosts, and "
                            "cannot be deleted." % dbgrn.grn)

    # Same two-way check for personalities.
    pers_owner = session.query(Personality).filter_by(owner_eon_id=dbgrn.eon_id)
    pers_mapped = session.query(PersonalityGrnMap).filter_by(eon_id=dbgrn.eon_id)
    if pers_owner.first() or pers_mapped.first():
        raise ArgumentError("GRN %s is still used by personalities, "
                            "and cannot be deleted." % dbgrn.grn)

    session.delete(dbgrn)
    session.flush()
    return
def render(self, session, logger, grn, eon_id, rename_to, disabled,
           **arguments):
    """Rename a GRN and/or flip its disabled flag."""
    dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                       config=self.config, usable_only=False)

    if rename_to:
        # Refuse to rename onto a name that already exists.
        Grn.get_unique(session, rename_to, preclude=True)
        dbgrn.grn = rename_to

    # "disabled" is tri-state: None means "leave unchanged".
    if disabled is not None:
        dbgrn.disabled = disabled

    session.flush()
    return
def render(self, session, logger, target, grn, eon_id, hostname, list,
           personality, archetype, **arguments):
    """Map a GRN to hosts or a personality under the given target.

    Raises ArgumentError when no object selector was supplied, when the
    archetype has no GRN targets configured, or when the requested
    target is not among the archetype's configured targets.
    """
    dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                       config=self.config)

    plenaries = PlenaryCollection(logger=logger)

    if hostname:
        objs = [hostname_to_host(session, hostname)]
        config_key = "host_grn_targets"
    elif list:
        check_hostlist_size(self.command, self.config, list)
        objs = hostlist_to_hosts(session, list)
        config_key = "host_grn_targets"
    elif personality:
        objs = [Personality.get_unique(session, name=personality,
                                       archetype=archetype, compel=True)]
        config_key = "personality_grn_targets"
    else:
        # Previously this fell through with "objs" unbound, producing an
        # internal NameError instead of a usable error message.
        raise ArgumentError("Please specify --hostname, --list or "
                            "--personality.")

    for obj in objs:
        # GRN targets are configured per archetype.
        section = "archetype_" + obj.archetype.name
        if self.config.has_option(section, config_key):
            valid_targets = [s.strip() for s in
                             self.config.get(section, config_key).split(",")]
        else:
            raise ArgumentError("{0} has no valid GRN targets configured."
                                .format(obj.archetype))

        if target not in valid_targets:
            raise ArgumentError("Invalid target %s for archetype %s, please "
                                "choose from: %s." %
                                (target, obj.archetype.name,
                                 ", ".join(valid_targets)))

        plenaries.append(Plenary.get_plenary(obj))
        self._update_dbobj(obj, target, dbgrn)

    session.flush()

    plenaries.write()

    return
def render(self, session, personality, archetype, grn, eon_id,
           host_environment, config_override, required_service, fullinfo,
           **arguments):
    """Search for personalities matching the given filter criteria."""
    query = session.query(Personality)

    if archetype:
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        query = query.filter_by(archetype=dbarchetype)
    if personality:
        query = query.filter_by(name=personality)
    if config_override:
        query = query.filter_by(config_override=True)
    if host_environment:
        dbhost_env = HostEnvironment.get_instance(session, host_environment)
        query = query.filter_by(host_environment=dbhost_env)

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)
        # Match personalities either owned by the GRN or mapped to it.
        query = query.outerjoin(PersonalityGrnMap)
        query = query.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                 PersonalityGrnMap.eon_id == dbgrn.eon_id))
        query = query.reset_joinpoint()

    if required_service:
        dbsrv = Service.get_unique(session, required_service, compel=True)
        query = query.filter(Personality.services.contains(dbsrv))

    query = query.join(Archetype)
    query = query.order_by(Archetype.name, Personality.name)
    query = query.options(contains_eager('archetype'))

    if not fullinfo:
        return SimplePersonalityList(query.all())

    # Full output formats every related object, so load them eagerly to
    # avoid one query per row.
    query = query.options(subqueryload('services'),
                          subqueryload('_grns'),
                          subqueryload('features'),
                          joinedload('features.feature'),
                          joinedload('cluster_infos'))
    return query.all()
def render(self, session, personality, archetype, grn, eon_id,
           host_environment, config_override, fullinfo, **arguments):
    """Show personalities matching the given filter criteria."""
    query = session.query(Personality)

    if archetype:
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        query = query.filter_by(archetype=dbarchetype)
    if personality:
        query = query.filter_by(name=personality)
    if config_override:
        query = query.filter_by(config_override=True)
    if host_environment:
        host_env = HostEnvironment.get_unique(session, host_environment,
                                              compel=True)
        query = query.filter_by(host_environment=host_env)

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)
        # Match both GRN ownership and GRN mappings.
        query = query.outerjoin(PersonalityGrnMap)
        query = query.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                 PersonalityGrnMap.eon_id == dbgrn.eon_id))

    query = query.join(Archetype)
    query = query.order_by(Archetype.name, Personality.name)
    query = query.options(contains_eager('archetype'))

    if not fullinfo:
        return SimplePersonalityList(query.all())

    # Eager-load relations for the detailed output format.
    query = query.options(subqueryload('services'),
                          subqueryload('_grns'),
                          subqueryload('features'),
                          joinedload('features.feature'),
                          joinedload('cluster_infos'))
    return PersonalityList(query.all())
def render(self, session, logger, target, grn, eon_id, hostname, list,
           personality, archetype, **arguments):
    """Map a GRN to hosts or a personality (legacy target handling).

    Raises ArgumentError when no object selector was supplied, when the
    archetype has no GRN targets configured, or when the requested
    target is not among the archetype's configured targets.
    """
    dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                       config=self.config)

    target_type = "personality" if personality else "host"

    if hostname:
        objs = [hostname_to_host(session, hostname)]
    elif list:
        check_hostlist_size(self.command, self.config, list)
        objs = hostlist_to_hosts(session, list)
    elif personality:
        objs = [Personality.get_unique(session, name=personality,
                                       archetype=archetype, compel=True)]
    else:
        # Previously "objs" stayed unbound here, producing a NameError.
        raise ArgumentError("Please specify --hostname, --list or "
                            "--personality.")

    for obj in objs:
        section = "archetype_" + obj.archetype.name
        option = target_type + "_grn_targets"
        # Archetypes without GRN targets configured used to make the
        # bare config.get() raise NoSectionError/NoOptionError (the old
        # "Fails for archetypes other than 'aquilon', 'vmhost'" note);
        # report the problem cleanly instead.
        if not self.config.has_option(section, option):
            raise ArgumentError("{0} has no valid GRN targets configured."
                                .format(obj.archetype))
        valid_targets = self.config.get(section, option)
        if target not in map(lambda s: s.strip(), valid_targets.split(",")):
            raise ArgumentError("Invalid %s target %s for archetype %s, please "
                                "choose from %s" %
                                (target_type, target, obj.archetype.name,
                                 valid_targets))
        self._update_dbobj(obj, target, dbgrn)

    session.flush()

    if personality:
        # Host plenaries are not touched by this legacy variant; only
        # the personality template needs rewriting.
        plenary = PlenaryPersonality(objs[0], logger=logger)
        plenary.write()

    return
def render(self, session, logger, personality, archetype, grn, eon_id,
           host_environment, comments, cluster_required, copy_from,
           config_override, **arguments):
    """Create a new personality, optionally copying an existing one.

    Raises ArgumentError on an invalid personality name, a missing
    GRN/EON ID, or when no host environment is given and the archetype
    has no configured default.
    """
    if not VALID_PERSONALITY_RE.match(personality):
        raise ArgumentError("Personality name '%s' is not valid." %
                            personality)
    if not (grn or eon_id):
        raise ArgumentError("GRN or EON ID is required for adding a "
                            "personality.")

    dbarchetype = Archetype.get_unique(session, archetype, compel=True)

    if not host_environment:
        # Fall back to the archetype's configured default environment.
        try:
            host_environment = self.config.get("archetype_" + archetype,
                                               "default_environment")
        except (NoSectionError, NoOptionError):
            raise ArgumentError("Default environment is not configured "
                                "for {0:l}, please specify "
                                "--host_environment.".format(dbarchetype))

    # Validate the environment name and that it is consistent with the
    # personality name before resolving the DB object.
    HostEnvironment.polymorphic_subclass(host_environment,
                                         "Unknown environment name")
    Personality.validate_env_in_name(personality, host_environment)
    host_env = HostEnvironment.get_unique(session, host_environment,
                                          compel=True)

    # Make sure the personality does not exist yet.
    Personality.get_unique(session, archetype=dbarchetype, name=personality,
                           preclude=True)

    dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                       config=self.config)

    dbpersona = Personality(name=personality, archetype=dbarchetype,
                            cluster_required=bool(cluster_required),
                            host_environment=host_env, owner_grn=dbgrn,
                            comments=comments,
                            config_override=config_override)
    session.add(dbpersona)

    # Map the owner GRN to the archetype's default target, if one is
    # configured.
    if self.config.has_option("archetype_" + archetype,
                              "default_grn_target"):
        dbpersona.grns.append((dbpersona, dbgrn,
                               self.config.get("archetype_" + archetype,
                                               "default_grn_target")))

    if copy_from:
        ## copy config data
        dbfrom_persona = Personality.get_unique(session,
                                                archetype=dbarchetype,
                                                name=copy_from, compel=True)
        # Duplicate the source personality's parameters under a new
        # parameter holder owned by the new personality.
        src_parameters = get_parameters(session,
                                        personality=dbfrom_persona)
        db_param_holder = PersonalityParameter(personality=dbpersona)
        for param in src_parameters:
            dbparameter = Parameter(value=param.value,
                                    comments=param.comments,
                                    holder=db_param_holder)
            session.add(dbparameter)

        # Re-create feature bindings, preserving any model/interface
        # scoping the source links had.
        for link in dbfrom_persona.features:
            params = {}
            params["personality"] = dbpersona
            if link.model:
                params["model"] = link.model
            if link.interface_name:
                params["interface_name"] = link.interface_name
            add_link(session, logger, link.feature, params)

        ## service maps
        q = session.query(PersonalityServiceMap).filter_by(personality=dbfrom_persona)
        for sm in q.all():
            dbmap = PersonalityServiceMap(service_instance=sm.service_instance,
                                          location=sm.location,
                                          network=sm.network,
                                          personality=dbpersona)
            session.add(dbmap)

        ## required services
        dbpersona.services.extend(dbfrom_persona.services)

    session.flush()

    plenary = PlenaryPersonality(dbpersona, logger=logger)
    plenary.write()
    return
dbpersona.cluster_required = cluster_required if host_environment is not None: if dbpersona.host_environment.name == 'legacy': dbhost_env = HostEnvironment.get_instance(session, host_environment) Personality.validate_env_in_name(personality, dbhost_env.name) dbpersona.host_environment = dbhost_env else: raise ArgumentError("The personality '{0!s}' already has env set to '{1!s}'" " and cannot be updated" .format(dbpersona, dbpersona.host_environment)) plenaries = PlenaryCollection(logger=logger) if grn or eon_id: dbgrn = lookup_grn(session, grn, eon_id, logger=logger, config=self.config) old_grn = dbpersona.owner_grn dbpersona.owner_grn = dbgrn if not leave_existing: # If this is a public personality, then there may be hosts with # various GRNs inside the personality, so make sure we preserve # those GRNs by filtering on the original GRN of the personality q = session.query(Host) q = q.filter_by(personality=dbpersona, owner_grn=old_grn) for dbhost in q.all(): dbhost.owner_grn = dbgrn plenaries.append(Plenary.get_plenary(dbhost)) if config_override is not None and \ dbpersona.config_override != config_override:
def render(self, session, logger, hostname, machine, archetype, domain, sandbox, osname, osversion, buildstatus, personality, comments, zebra_interfaces, grn, eon_id, skip_dsdb_check=False, **arguments): dbarchetype = Archetype.get_unique(session, archetype, compel=True) section = "archetype_" + dbarchetype.name # This is for the various add_*_host commands if not domain and not sandbox: domain = self.config.get(section, "host_domain") (dbbranch, dbauthor) = get_branch_and_author(session, logger, domain=domain, sandbox=sandbox, compel=True) if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage: raise ArgumentError( "Adding hosts to {0:l} is not allowed.".format(dbbranch)) if not buildstatus: buildstatus = 'build' dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True) dbmachine = Machine.get_unique(session, machine, compel=True) oldinfo = DSDBRunner.snapshot_hw(dbmachine) if not personality: if self.config.has_option(section, "default_personality"): personality = self.config.get(section, "default_personality") else: personality = 'generic' dbpersonality = Personality.get_unique(session, name=personality, archetype=dbarchetype, compel=True) if not osname: if self.config.has_option(section, "default_osname"): osname = self.config.get(section, "default_osname") if not osversion: if self.config.has_option(section, "default_osversion"): osversion = self.config.get(section, "default_osversion") if not osname or not osversion: raise ArgumentError("Can not determine a sensible default OS " "for archetype %s. Please use the " "--osname and --osversion parameters." 
% (dbarchetype.name)) dbos = OperatingSystem.get_unique(session, name=osname, version=osversion, archetype=dbarchetype, compel=True) if (dbmachine.model.machine_type == 'aurora_node' and dbpersonality.archetype.name != 'aurora'): raise ArgumentError("Machines of type aurora_node can only be " "added with archetype aurora.") if dbmachine.host: raise ArgumentError("{0:c} {0.label} is already allocated to " "{1:l}.".format(dbmachine, dbmachine.host)) if grn or eon_id: dbgrn = lookup_grn(session, grn, eon_id, logger=logger, config=self.config) else: dbgrn = dbpersonality.owner_grn dbhost = Host(machine=dbmachine, branch=dbbranch, owner_grn=dbgrn, sandbox_author=dbauthor, personality=dbpersonality, status=dbstatus, operating_system=dbos, comments=comments) session.add(dbhost) if self.config.has_option("archetype_" + archetype, "default_grn_target"): dbhost.grns.append((dbhost, dbgrn, self.config.get("archetype_" + archetype, "default_grn_target"))) if zebra_interfaces: # --autoip does not make sense for Zebra (at least not the way it's # implemented currently) dbinterface = None else: dbinterface = get_boot_interface(dbmachine) # This method is allowed to return None. This can only happen # (currently) using add_aurora_host, add_windows_host, or possibly by # bypassing the aq client and posting a request directly. 
audit_results = [] ip = generate_ip(session, logger, dbinterface, audit_results=audit_results, **arguments) dbdns_rec, newly_created = grab_address(session, hostname, ip, allow_restricted_domain=True, allow_reserved=True, preclude=True) dbmachine.primary_name = dbdns_rec # Fix up auxiliary addresses to point to the primary name by default if ip: dns_env = dbdns_rec.fqdn.dns_environment for addr in dbmachine.all_addresses(): if addr.interface.interface_type == "management": continue if addr.service_address_id: # pragma: no cover continue for rec in addr.dns_records: if rec.fqdn.dns_environment == dns_env: rec.reverse_ptr = dbdns_rec.fqdn if zebra_interfaces: if not ip: raise ArgumentError( "Zebra configuration requires an IP address.") dbsrv_addr = self.assign_zebra_address(session, dbmachine, dbdns_rec, zebra_interfaces) else: if ip: if not dbinterface: raise ArgumentError( "You have specified an IP address for the " "host, but {0:l} does not have a bootable " "interface.".format(dbmachine)) assign_address(dbinterface, ip, dbdns_rec.network) dbsrv_addr = None session.flush() plenaries = PlenaryCollection(logger=logger) plenaries.append(Plenary.get_plenary(dbmachine)) if dbmachine.vm_container: plenaries.append(Plenary.get_plenary(dbmachine.vm_container)) if dbsrv_addr: plenaries.append(Plenary.get_plenary(dbsrv_addr)) key = plenaries.get_write_key() try: lock_queue.acquire(key) plenaries.write(locked=True) # XXX: This (and some of the code above) is horrible. There # should be a generic/configurable hook here that could kick # in based on archetype and/or domain. dsdb_runner = DSDBRunner(logger=logger) if dbhost.archetype.name == 'aurora': # For aurora, check that DSDB has a record of the host. if not skip_dsdb_check: try: dsdb_runner.show_host(hostname) except ProcessException, e: raise ArgumentError("Could not find host in DSDB: %s" % e) elif not dbmachine.primary_ip: logger.info("No IP for %s, not adding to DSDB." % dbmachine.fqdn)
def render(self, session, logger, hostname, machine, archetype, domain, sandbox, osname, osversion, buildstatus, personality, comments, zebra_interfaces, grn, eon_id, skip_dsdb_check=False, **arguments): dbarchetype = Archetype.get_unique(session, archetype, compel=True) section = "archetype_" + dbarchetype.name # This is for the various add_*_host commands if not domain and not sandbox: domain = self.config.get(section, "host_domain") (dbbranch, dbauthor) = get_branch_and_author(session, logger, domain=domain, sandbox=sandbox, compel=True) if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage: raise ArgumentError("Adding hosts to {0:l} is not allowed." .format(dbbranch)) if not buildstatus: buildstatus = 'build' dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True) dbmachine = Machine.get_unique(session, machine, compel=True) oldinfo = DSDBRunner.snapshot_hw(dbmachine) if not personality: if self.config.has_option(section, "default_personality"): personality = self.config.get(section, "default_personality") else: personality = 'generic' dbpersonality = Personality.get_unique(session, name=personality, archetype=dbarchetype, compel=True) if not osname: if self.config.has_option(section, "default_osname"): osname = self.config.get(section, "default_osname") if not osversion: if self.config.has_option(section, "default_osversion"): osversion = self.config.get(section, "default_osversion") if not osname or not osversion: raise ArgumentError("Can not determine a sensible default OS " "for archetype %s. Please use the " "--osname and --osversion parameters." 
% (dbarchetype.name)) dbos = OperatingSystem.get_unique(session, name=osname, version=osversion, archetype=dbarchetype, compel=True) if (dbmachine.model.machine_type == 'aurora_node' and dbpersonality.archetype.name != 'aurora'): raise ArgumentError("Machines of type aurora_node can only be " "added with archetype aurora.") if dbmachine.host: raise ArgumentError("{0:c} {0.label} is already allocated to " "{1:l}.".format(dbmachine, dbmachine.host)) if grn or eon_id: dbgrn = lookup_grn(session, grn, eon_id, logger=logger, config=self.config) else: dbgrn = dbpersonality.owner_grn dbhost = Host(machine=dbmachine, branch=dbbranch, owner_grn=dbgrn, sandbox_author=dbauthor, personality=dbpersonality, status=dbstatus, operating_system=dbos, comments=comments) session.add(dbhost) if self.config.has_option("archetype_" + archetype, "default_grn_target"): dbhost.grns.append((dbhost, dbgrn, self.config.get("archetype_" + archetype, "default_grn_target"))) if zebra_interfaces: # --autoip does not make sense for Zebra (at least not the way it's # implemented currently) dbinterface = None else: dbinterface = get_boot_interface(dbmachine) # This method is allowed to return None. This can only happen # (currently) using add_aurora_host, add_windows_host, or possibly by # bypassing the aq client and posting a request directly. 
audit_results = [] ip = generate_ip(session, logger, dbinterface, audit_results=audit_results, **arguments) dbdns_rec, newly_created = grab_address(session, hostname, ip, allow_restricted_domain=True, allow_reserved=True, preclude=True) dbmachine.primary_name = dbdns_rec # Fix up auxiliary addresses to point to the primary name by default if ip: dns_env = dbdns_rec.fqdn.dns_environment for addr in dbmachine.all_addresses(): if addr.interface.interface_type == "management": continue if addr.service_address_id: # pragma: no cover continue for rec in addr.dns_records: if rec.fqdn.dns_environment == dns_env: rec.reverse_ptr = dbdns_rec.fqdn if zebra_interfaces: if not ip: raise ArgumentError("Zebra configuration requires an IP address.") dbsrv_addr = self.assign_zebra_address(session, dbmachine, dbdns_rec, zebra_interfaces) else: if ip: if not dbinterface: raise ArgumentError("You have specified an IP address for the " "host, but {0:l} does not have a bootable " "interface.".format(dbmachine)) assign_address(dbinterface, ip, dbdns_rec.network) dbsrv_addr = None session.flush() plenaries = PlenaryCollection(logger=logger) plenaries.append(Plenary.get_plenary(dbmachine)) if dbmachine.vm_container: plenaries.append(Plenary.get_plenary(dbmachine.vm_container)) if dbsrv_addr: plenaries.append(Plenary.get_plenary(dbsrv_addr)) key = plenaries.get_write_key() try: lock_queue.acquire(key) plenaries.write(locked=True) # XXX: This (and some of the code above) is horrible. There # should be a generic/configurable hook here that could kick # in based on archetype and/or domain. dsdb_runner = DSDBRunner(logger=logger) if dbhost.archetype.name == 'aurora': # For aurora, check that DSDB has a record of the host. if not skip_dsdb_check: try: dsdb_runner.show_host(hostname) except ProcessException, e: raise ArgumentError("Could not find host in DSDB: %s" % e) elif not dbmachine.primary_ip: logger.info("No IP for %s, not adding to DSDB." % dbmachine.fqdn)
def render(self, session, logger, hostname, osname, osversion, archetype,
           personality, buildstatus, keepbindings, grn, eon_id,
           **arguments):
    """Reconfigure a host: personality, OS, build status and/or owner GRN.

    Raises ArgumentError on inconsistent archetype/personality/OS
    combinations or personalities not allowed by the host's cluster.
    """
    dbhost = hostname_to_host(session, hostname)

    # Currently, for the Host to be created it *must* be associated with
    # a Machine already.  If that ever changes, need to check here and
    # bail if dbhost.machine does not exist.

    if archetype and archetype != dbhost.archetype.name:
        # Switching archetype invalidates the current personality, so a
        # new one must be named explicitly.
        if not personality:
            raise ArgumentError("Changing archetype also requires "
                                "specifying --personality.")
    if personality:
        if archetype:
            dbarchetype = Archetype.get_unique(session, archetype,
                                               compel=True)
            if dbarchetype.cluster_type is not None:
                raise ArgumentError("Archetype %s is a cluster archetype" %
                                    dbarchetype.name)
        else:
            dbarchetype = dbhost.archetype

        # The current OS must belong to the (possibly new) archetype,
        # unless a new OS is being selected at the same time.
        if not osname and not osversion and \
           dbhost.operating_system.archetype != dbarchetype:
            raise ArgumentError("{0} belongs to {1:l}, not {2:l}. Please "
                                "specify --osname/--osversion.".format(
                                    dbhost.operating_system,
                                    dbhost.operating_system.archetype,
                                    dbarchetype))
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbarchetype,
                                               compel=True)
        # Respect the cluster's personality whitelist, if it has one.
        if dbhost.cluster and dbhost.cluster.allowed_personalities and \
           dbpersonality not in dbhost.cluster.allowed_personalities:
            allowed = ["%s/%s" % (p.archetype.name, p.name) for p in
                       dbhost.cluster.allowed_personalities]
            raise ArgumentError("The {0:l} is not allowed by {1}. "
                                "Specify one of {2}.".format(
                                    dbpersonality, dbhost.cluster,
                                    allowed))
        dbhost.personality = dbpersonality

    # Default to the current OS name so a bare --osversion works.
    if not osname:
        osname = dbhost.operating_system.name
    if osname and osversion:
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbhost.archetype,
                                          compel=True)
        # Hmm... no cluster constraint here...
        dbhost.operating_system = dbos
    elif osname != dbhost.operating_system.name:
        # A new OS name without a version is ambiguous.
        raise ArgumentError("Please specify a version to use for OS %s." %
                            osname)

    if buildstatus:
        dbstatus = HostLifecycle.get_unique(session, buildstatus,
                                            compel=True)
        dbhost.status.transition(dbhost, dbstatus)

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                           config=self.config)
        dbhost.owner_grn = dbgrn

    session.flush()

    if dbhost.archetype.is_compileable:
        self.compile(session, dbhost, logger, keepbindings)
    return
def render(self, session, logger, hostname, machine, archetype,
           buildstatus, personality, osname, osversion, service, instance,
           model, machine_type, vendor, serial, cluster, guest_on_cluster,
           guest_on_share, member_cluster_share, domain, sandbox, branch,
           sandbox_owner, dns_domain, shortname, mac, ip, networkip,
           network_environment, exact_location, server_of_service,
           server_of_instance, grn, eon_id, fullinfo, **arguments):
    """Search for hosts matching the given criteria.

    Returns the full Host objects when fullinfo is set, otherwise a
    SimpleHostList.  Raises NotFoundException for unknown share names.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)

    q = session.query(Host)

    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(machine=dbmachine)

    # Add the machine definition and the primary name. Use aliases to make
    # sure the end result will be ordered by primary name.
    PriDns = aliased(DnsRecord)
    PriFqdn = aliased(Fqdn)
    PriDomain = aliased(DnsDomain)
    q = q.join(Machine,
               (PriDns, PriDns.id == Machine.primary_name_id),
               (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
               (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
    q = q.order_by(PriFqdn.name, PriDomain.name)
    q = q.options(contains_eager('machine'),
                  contains_eager('machine.primary_name', alias=PriDns),
                  contains_eager('machine.primary_name.fqdn', alias=PriFqdn),
                  contains_eager('machine.primary_name.fqdn.dns_domain',
                                 alias=PriDomain))
    q = q.reset_joinpoint()

    # Hardware-specific filters
    dblocation = get_location(session, **arguments)
    if dblocation:
        if exact_location:
            q = q.filter(Machine.location == dblocation)
        else:
            childids = dblocation.offspring_ids()
            q = q.filter(Machine.location_id.in_(childids))

    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))

    if serial:
        self.deprecated_option("serial",
                               "Please use search machine --serial instead.",
                               logger=logger, **arguments)
        q = q.filter(Machine.serial_no == serial)

    # DNS IP address related filters
    if mac or ip or networkip or hostname or dns_domain or \
            shortname:
        # Inner joins are cheaper than outer joins, so make some effort to
        # use inner joins when possible
        if mac or ip or networkip:
            q = q.join(Interface)
        else:
            q = q.outerjoin(Interface)
        if ip or networkip:
            q = q.join(AddressAssignment, Network, from_joinpoint=True)
        else:
            q = q.outerjoin(AddressAssignment, Network, from_joinpoint=True)

        if mac:
            self.deprecated_option("mac", "Please use search machine "
                                   "--mac instead.", logger=logger,
                                   **arguments)
            q = q.filter(Interface.mac == mac)
        if ip:
            q = q.filter(AddressAssignment.ip == ip)
            q = q.filter(Network.network_environment == dbnet_env)
        if networkip:
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            q = q.filter(AddressAssignment.network == dbnetwork)

        dbdns_domain = None
        if hostname:
            (shortname, dbdns_domain) = parse_fqdn(session, hostname)
        if dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                                compel=True)

        if shortname or dbdns_domain:
            # Match either the primary name or any A record assigned to
            # one of the host's addresses.
            ARecAlias = aliased(ARecord)
            ARecFqdn = aliased(Fqdn)
            q = q.outerjoin((ARecAlias,
                             and_(ARecAlias.ip == AddressAssignment.ip,
                                  ARecAlias.network_id ==
                                  AddressAssignment.network_id)),
                            (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
            if shortname:
                q = q.filter(or_(ARecFqdn.name == shortname,
                                 PriFqdn.name == shortname))
            if dbdns_domain:
                q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                 PriFqdn.dns_domain == dbdns_domain))
        q = q.reset_joinpoint()

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 branch=branch)
    if sandbox_owner:
        dbauthor = get_user_principal(session, sandbox_owner)
    if dbbranch:
        q = q.filter_by(branch=dbbranch)
    if dbauthor:
        q = q.filter_by(sandbox_author=dbauthor)

    if archetype:
        # Added to the searches as appropriate below.
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    if personality and archetype:
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        q = q.filter_by(personality=dbpersonality)
    elif personality:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(name=personality)
        q = q.reset_joinpoint()
    elif archetype:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(archetype=dbarchetype)
        q = q.reset_joinpoint()

    if buildstatus:
        dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                 compel=True)
        q = q.filter_by(status=dbbuildstatus)

    if osname and osversion and archetype:
        # archetype was already resolved above
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)
        q = q.filter_by(operating_system=dbos)
    elif osname or osversion:
        q = q.join('operating_system')
        if osname:
            q = q.filter_by(name=osname)
        if osversion:
            q = q.filter_by(version=osversion)
        q = q.reset_joinpoint()

    if service:
        dbservice = Service.get_unique(session, service, compel=True)
        if instance:
            dbsi = get_service_instance(session, dbservice, instance)
            q = q.filter(Host.services_used.contains(dbsi))
        else:
            q = q.join('services_used')
            q = q.filter_by(service=dbservice)
            q = q.reset_joinpoint()
    elif instance:
        q = q.join('services_used')
        q = q.filter_by(name=instance)
        q = q.reset_joinpoint()

    if server_of_service:
        dbserver_service = Service.get_unique(session, server_of_service,
                                              compel=True)
        if server_of_instance:
            dbssi = get_service_instance(session, dbserver_service,
                                         server_of_instance)
            q = q.join('_services_provided')
            q = q.filter_by(service_instance=dbssi)
            q = q.reset_joinpoint()
        else:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(service=dbserver_service)
            q = q.reset_joinpoint()
    elif server_of_instance:
        q = q.join('_services_provided', 'service_instance')
        q = q.filter_by(name=server_of_instance)
        q = q.reset_joinpoint()

    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if isinstance(dbcluster, MetaCluster):
            q = q.join('_cluster', 'cluster', '_metacluster')
            q = q.filter_by(metacluster=dbcluster)
        else:
            q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_cluster:
        # TODO: this does not handle metaclusters according to Wes
        dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                       compel=True)
        q = q.join('machine', VirtualMachine, ClusterResource)
        q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_share:
        #v2
        v2shares = session.query(Share.id).filter_by(
            name=guest_on_share).all()
        if not v2shares:
            raise NotFoundException("No shares found with name {0}."
                                    .format(guest_on_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if member_cluster_share:
        #v2
        v2shares = session.query(Share.id).filter_by(
            name=member_cluster_share).all()
        if not v2shares:
            # Bug fix: the error previously reported guest_on_share
            # instead of the share name actually searched for.
            raise NotFoundException("No shares found with name {0}."
                                    .format(member_cluster_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                   'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

        # A host matches if it is owned by / mapped to the GRN directly,
        # or belongs to a personality owned by / mapped to the GRN.
        persq = session.query(Personality.id)
        persq = persq.outerjoin(PersonalityGrnMap)
        persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                 PersonalityGrnMap.eon_id == dbgrn.eon_id))
        q = q.outerjoin(HostGrnMap)
        q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                         HostGrnMap.eon_id == dbgrn.eon_id,
                         Host.personality_id.in_(persq.subquery())))
        q = q.reset_joinpoint()

    if fullinfo:
        return q.all()
    return SimpleHostList(q.all())
host_environment, "Unknown environment name") Personality.validate_env_in_name(personality, host_environment) dbpersona.host_environment = HostEnvironment.get_unique( session, host_environment, compel=True) else: raise ArgumentError( "The personality '{0}' already has env set to '{1}'" " and cannot be updated".format(str(dbpersona), host_environment)) plenaries = PlenaryCollection(logger=logger) if grn or eon_id: dbgrn = lookup_grn(session, grn, eon_id, logger=logger, config=self.config) old_grn = dbpersona.owner_grn dbpersona.owner_grn = dbgrn if not leave_existing: # If this is a public personality, then there may be hosts with # various GRNs inside the personality, so make sure we preserve # those GRNs by filtering on the original GRN of the personality q = session.query(Host) q = q.filter_by(personality=dbpersona, owner_grn=old_grn) for dbhost in q.all(): dbhost.owner_grn = dbgrn plenaries.append(Plenary.get_plenary(dbhost))
def render(self, session, logger, hostname, osname, osversion, archetype,
           personality, buildstatus, keepbindings, grn, eon_id,
           **arguments):
    """Reconfigure a host: personality, OS, build status and/or owner GRN.

    Raises ArgumentError on inconsistent archetype/personality/OS
    combinations or personalities not allowed by the host's cluster.
    """
    dbhost = hostname_to_host(session, hostname)

    # Currently, for the Host to be created it *must* be associated with
    # a Machine already.  If that ever changes, need to check here and
    # bail if dbhost.machine does not exist.

    if archetype and archetype != dbhost.archetype.name:
        # Switching archetype invalidates the current personality, so a
        # new one must be named explicitly.
        if not personality:
            raise ArgumentError("Changing archetype also requires "
                                "specifying --personality.")
    if personality:
        if archetype:
            dbarchetype = Archetype.get_unique(session, archetype,
                                               compel=True)
            if dbarchetype.cluster_type is not None:
                raise ArgumentError("Archetype %s is a cluster archetype" %
                                    dbarchetype.name)
        else:
            dbarchetype = dbhost.archetype

        # The current OS must belong to the (possibly new) archetype,
        # unless a new OS is being selected at the same time.
        if not osname and not osversion and \
           dbhost.operating_system.archetype != dbarchetype:
            raise ArgumentError("{0} belongs to {1:l}, not {2:l}. Please "
                                "specify --osname/--osversion."
                                .format(dbhost.operating_system,
                                        dbhost.operating_system.archetype,
                                        dbarchetype))
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbarchetype,
                                               compel=True)
        # Respect the cluster's personality whitelist, if it has one.
        if dbhost.cluster and dbhost.cluster.allowed_personalities and \
           dbpersonality not in dbhost.cluster.allowed_personalities:
            allowed = ["%s/%s" % (p.archetype.name, p.name) for p in
                       dbhost.cluster.allowed_personalities]
            raise ArgumentError("The {0:l} is not allowed by {1}. "
                                "Specify one of {2}.".format(
                                    dbpersonality, dbhost.cluster,
                                    allowed))
        dbhost.personality = dbpersonality

    # Default to the current OS name so a bare --osversion works.
    if not osname:
        osname = dbhost.operating_system.name
    if osname and osversion:
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbhost.archetype,
                                          compel=True)
        # Hmm... no cluster constraint here...
        dbhost.operating_system = dbos
    elif osname != dbhost.operating_system.name:
        # A new OS name without a version is ambiguous.
        raise ArgumentError("Please specify a version to use for OS %s." %
                            osname)

    if buildstatus:
        dbstatus = HostLifecycle.get_unique(session, buildstatus,
                                            compel=True)
        dbhost.status.transition(dbhost, dbstatus)

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                           config=self.config)
        dbhost.owner_grn = dbgrn

    session.flush()

    if dbhost.archetype.is_compileable:
        self.compile(session, dbhost, logger, keepbindings)
    return
def render(self, session, logger, hostname, machine, archetype,
           buildstatus, personality, osname, osversion, service, instance,
           model, machine_type, vendor, serial, cluster, guest_on_cluster,
           guest_on_share, member_cluster_share, domain, sandbox, branch,
           sandbox_owner, dns_domain, shortname, mac, ip, networkip,
           network_environment, exact_location, server_of_service,
           server_of_instance, grn, eon_id, fullinfo, **arguments):
    """Search for hosts matching any combination of the given filters.

    Builds one SQLAlchemy query incrementally, adding a filter (and the
    joins it needs) per supplied option.  Returns the full Host objects
    when fullinfo is set, otherwise a SimpleHostList of the results.

    Raises NotFoundException when a named share does not exist, and
    propagates ArgumentError/NotFoundException from the various
    get_unique() lookups for bad filter values.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)

    q = session.query(Host)

    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(machine=dbmachine)

    # Add the machine definition and the primary name. Use aliases to make
    # sure the end result will be ordered by primary name.
    PriDns = aliased(DnsRecord)
    PriFqdn = aliased(Fqdn)
    PriDomain = aliased(DnsDomain)
    q = q.join(Machine,
               (PriDns, PriDns.id == Machine.primary_name_id),
               (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
               (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
    q = q.order_by(PriFqdn.name, PriDomain.name)
    # contains_eager() makes the ORM populate the relations from the
    # joins above instead of issuing extra queries per row.
    q = q.options(contains_eager('machine'),
                  contains_eager('machine.primary_name', alias=PriDns),
                  contains_eager('machine.primary_name.fqdn',
                                 alias=PriFqdn),
                  contains_eager('machine.primary_name.fqdn.dns_domain',
                                 alias=PriDomain))
    q = q.reset_joinpoint()

    # Hardware-specific filters
    dblocation = get_location(session, **arguments)
    if dblocation:
        if exact_location:
            q = q.filter(Machine.location == dblocation)
        else:
            # Include every sub-location of the requested one.
            childids = dblocation.offspring_ids()
            q = q.filter(Machine.location_id.in_(childids))

    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))

    if serial:
        self.deprecated_option(
            "serial", "Please use search machine --serial instead.",
            logger=logger, **arguments)
        q = q.filter(Machine.serial_no == serial)

    # DNS IP address related filters
    if mac or ip or networkip or hostname or dns_domain or shortname:
        # Inner joins are cheaper than outer joins, so make some effort to
        # use inner joins when possible
        if mac or ip or networkip:
            q = q.join(Interface)
        else:
            q = q.outerjoin(Interface)
        if ip or networkip:
            q = q.join(AddressAssignment, Network, from_joinpoint=True)
        else:
            q = q.outerjoin(AddressAssignment, Network,
                            from_joinpoint=True)

        if mac:
            self.deprecated_option(
                "mac", "Please use search machine --mac instead.",
                logger=logger, **arguments)
            q = q.filter(Interface.mac == mac)
        if ip:
            q = q.filter(AddressAssignment.ip == ip)
            q = q.filter(Network.network_environment == dbnet_env)
        if networkip:
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            q = q.filter(AddressAssignment.network == dbnetwork)

        dbdns_domain = None
        if hostname:
            (shortname, dbdns_domain) = parse_fqdn(session, hostname)
        if dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                                compel=True)

        if shortname or dbdns_domain:
            # Match either an address record on any assigned IP or the
            # machine's primary name.
            ARecAlias = aliased(ARecord)
            ARecFqdn = aliased(Fqdn)
            q = q.outerjoin(
                (ARecAlias, and_(ARecAlias.ip == AddressAssignment.ip,
                                 ARecAlias.network_id ==
                                 AddressAssignment.network_id)),
                (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
            if shortname:
                q = q.filter(or_(ARecFqdn.name == shortname,
                                 PriFqdn.name == shortname))
            if dbdns_domain:
                q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                 PriFqdn.dns_domain == dbdns_domain))
        q = q.reset_joinpoint()

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 branch=branch)
    if sandbox_owner:
        dbauthor = get_user_principal(session, sandbox_owner)

    if dbbranch:
        q = q.filter_by(branch=dbbranch)
    if dbauthor:
        q = q.filter_by(sandbox_author=dbauthor)

    if archetype:
        # Added to the searches as appropriate below.
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    if personality and archetype:
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        q = q.filter_by(personality=dbpersonality)
    elif personality:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(name=personality)
        q = q.reset_joinpoint()
    elif archetype:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(archetype=dbarchetype)
        q = q.reset_joinpoint()

    if buildstatus:
        dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                 compel=True)
        q = q.filter_by(status=dbbuildstatus)

    if osname and osversion and archetype:
        # archetype was already resolved above
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)
        q = q.filter_by(operating_system=dbos)
    elif osname or osversion:
        q = q.join('operating_system')
        if osname:
            q = q.filter_by(name=osname)
        if osversion:
            q = q.filter_by(version=osversion)
        q = q.reset_joinpoint()

    if service:
        dbservice = Service.get_unique(session, service, compel=True)
        if instance:
            dbsi = get_service_instance(session, dbservice, instance)
            q = q.filter(Host.services_used.contains(dbsi))
        else:
            q = q.join('services_used')
            q = q.filter_by(service=dbservice)
            q = q.reset_joinpoint()
    elif instance:
        # Instance name only -- may match instances of several services.
        q = q.join('services_used')
        q = q.filter_by(name=instance)
        q = q.reset_joinpoint()

    if server_of_service:
        dbserver_service = Service.get_unique(session, server_of_service,
                                              compel=True)
        if server_of_instance:
            dbssi = get_service_instance(session, dbserver_service,
                                         server_of_instance)
            q = q.join('_services_provided')
            q = q.filter_by(service_instance=dbssi)
            q = q.reset_joinpoint()
        else:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(service=dbserver_service)
            q = q.reset_joinpoint()
    elif server_of_instance:
        q = q.join('_services_provided', 'service_instance')
        q = q.filter_by(name=server_of_instance)
        q = q.reset_joinpoint()

    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if isinstance(dbcluster, MetaCluster):
            q = q.join('_cluster', 'cluster', '_metacluster')
            q = q.filter_by(metacluster=dbcluster)
        else:
            q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_cluster:
        # TODO: this does not handle metaclusters according to Wes
        dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                       compel=True)
        q = q.join('machine', VirtualMachine, ClusterResource)
        q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_share:
        #v2
        v2shares = session.query(Share.id).filter_by(
            name=guest_on_share).all()
        if not v2shares:
            raise NotFoundException("No shares found with name {0}."
                                    .format(guest_on_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(
            NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if member_cluster_share:
        #v2
        v2shares = session.query(Share.id).filter_by(
            name=member_cluster_share).all()
        if not v2shares:
            # Bug fix: this message previously formatted guest_on_share,
            # reporting the wrong (usually empty) share name.
            raise NotFoundException("No shares found with name {0}."
                                    .format(member_cluster_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                   'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(
            NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

        # A host matches if it is owned by the GRN, mapped to the GRN, or
        # its personality is owned by / mapped to the GRN.
        persq = session.query(Personality.id)
        persq = persq.outerjoin(PersonalityGrnMap)
        persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                 PersonalityGrnMap.eon_id == dbgrn.eon_id))
        q = q.outerjoin(HostGrnMap)
        q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                         HostGrnMap.eon_id == dbgrn.eon_id,
                         Host.personality_id.in_(persq.subquery())))
        q = q.reset_joinpoint()

    if fullinfo:
        return q.all()
    return SimpleHostList(q.all())
def reconfigure_list(self, session, logger, dbhosts, archetype, personality, keepbindings, buildstatus, osname, osversion, grn, eon_id, cleargrn, **arguments): failed = [] # Check all the parameters up front. # Some of these could be more intelligent about defaults # (either by checking for unique entries or relying on the list) # - starting simple. if archetype: dbarchetype = Archetype.get_unique(session, archetype, compel=True) if dbarchetype.cluster_type is not None: raise ArgumentError("Archetype %s is a cluster archetype" % dbarchetype.name) # TODO: Once OS is a first class object this block needs # to check that either OS is also being reset or that the # OS is valid for the new archetype. else: dbarchetype = None if personality: dbpersonality = Personality.get_unique(session, name=personality, archetype=dbarchetype, compel=True) if osname and not osversion: raise ArgumentError("Please specify --osversion for OS %s." % osname) if osversion: if not osname: raise ArgumentError("Please specify --osname to use with " "OS version %s." % osversion) # Linux model names are the same under aurora and aquilon, so # allowing to omit --archetype would not be useful if not archetype: raise ArgumentError("Please specify --archetype for OS " "%s, version %s." % (osname, osversion)) dbos = OperatingSystem.get_unique(session, name=osname, version=osversion, archetype=dbarchetype, compel=True) else: dbos = None if buildstatus: dbstatus = HostLifecycle.get_instance(session, buildstatus) if grn or eon_id: dbgrn = lookup_grn(session, grn, eon_id, logger=logger, config=self.config) # Take a shortcut if there's nothing to do, but only after all the other # parameters have been checked if not dbhosts: return dbbranch, dbauthor = validate_branch_author(dbhosts) personalities = {} # Do any final cross-list or dependency checks before entering # the Chooser loop. 
for dbhost in dbhosts: if dbos and not dbarchetype and dbhost.archetype != dbos.archetype: failed.append("{0}: Cannot change operating system because it " "needs {1:l} instead of " "{2:l}.".format(dbhost.fqdn, dbhost.archetype, dbos.archetype)) if dbarchetype and not dbos and \ dbhost.operating_system.archetype != dbarchetype: failed.append("{0}: Cannot change archetype because {1:l} needs " "{2:l}.".format(dbhost.fqdn, dbhost.operating_system, dbhost.operating_system.archetype)) if (personality and dbhost.cluster and len(dbhost.cluster.allowed_personalities) > 0 and dbpersonality not in dbhost.cluster.allowed_personalities): allowed = ["%s/%s" % (p.archetype.name, p.name) for p in dbhost.cluster.allowed_personalities] failed.append("{0}: The {1:l} is not allowed by {2}. " "Specify one of {3}.".format( dbhost.fqdn, dbpersonality, dbhost.cluster, allowed)) if personality: personalities[dbhost.fqdn] = dbpersonality elif archetype: # This is a strange case - changing archetype while keeping # the personality try: pers = Personality.get_unique(session, name=dbhost.personality.name, archetype=dbarchetype, compel=True) personalities[dbhost.fqdn] = pers except NotFoundException, err: failed.append("%s: %s" % (dbhost.fqdn, err)) if grn or eon_id: dbhost.owner_grn = dbgrn if cleargrn: dbhost.owner_grn = None