def adjust_slot(self, session, logger, dbmachine, dbchassis, slot, multislot):
    """Place dbmachine into the given chassis slot.

    If the machine already occupies exactly this slot, do nothing.
    Without --multislot, any existing slot assignments are cleared first
    (after logging them); with --multislot, the new slot is added to the
    machine's existing list.

    Raises ArgumentError if the machine spans several slots and
    --multislot was not given, or if the target slot is already occupied
    by a different machine.
    """
    # Noop when the machine is already recorded in the requested slot.
    # Technically, this could be a request to trim the list down to just
    # this one slot - in that case --clearchassis will be required.
    for existing in dbmachine.chassis_slot:
        if existing.chassis == dbchassis and existing.slot_number == slot:
            return

    # A machine in more than one slot needs explicit --multislot intent.
    if not multislot and len(dbmachine.chassis_slot) > 1:
        raise ArgumentError("Use --multislot to support a machine in more "
                            "than one slot, or --clearchassis to remove "
                            "current chassis slot information.")

    if not multislot:
        # Single-slot mode: log and drop whatever slots were recorded.
        occupied = ", ".join(str(s.slot_number)
                             for s in dbmachine.chassis_slot)
        logger.info("Clearing {0:l} out of {1:l} slot(s) "
                    "{2}".format(dbmachine, dbchassis, occupied))
        del dbmachine.chassis_slot[:]

    # Reuse an existing ChassisSlot row for this chassis/slot if there is
    # one; otherwise create a fresh one.
    target = session.query(ChassisSlot).filter_by(chassis=dbchassis,
                                                  slot_number=slot).first()
    if target is None:
        target = ChassisSlot(chassis=dbchassis, slot_number=slot)
    elif target.machine:
        # Slot row exists and is already taken by some machine.
        raise ArgumentError("{0} slot {1} already has machine "
                            "{2}.".format(dbchassis, slot,
                                          target.machine.label))
    dbmachine.chassis_slot.append(target)
    return
def render(self, session, logger, machine, model, vendor, serial, chassis,
           slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
           comments, **arguments):
    """Add a new machine (command entry point, chassis/cluster variant).

    Validates the chassis/slot and cluster options against the requested
    location and model, creates the Machine row, wires up the chassis
    slot and/or the cluster's VirtualMachine resource, then flushes the
    session and writes the affected plenary templates.

    Raises ArgumentError on any option conflict (chassis without slot,
    location mismatch, wrong machine type, decommissioned cluster, ...).
    """
    # Location may come from generic location options in **arguments;
    # chassis/cluster below can override it after consistency checks.
    dblocation = get_location(session,
                              query_options=[subqueryload('parents'),
                                             joinedload('parents.dns_maps')],
                              **arguments)
    if chassis:
        dbchassis = Chassis.get_unique(session, chassis, compel=True)
        if slot is None:
            raise ArgumentError("The --chassis option requires a --slot.")
        # An explicitly given location must agree with the chassis.
        if dblocation and dblocation != dbchassis.location:
            raise ArgumentError("{0} conflicts with chassis location "
                                "{1}.".format(dblocation, dbchassis.location))
        dblocation = dbchassis.location
    elif slot is not None:
        raise ArgumentError("The --slot option requires a --chassis.")

    dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                               compel=True)

    # Only true machine models may be added here; other hardware types
    # have their own add commands.
    if dbmodel.machine_type not in ['blade', 'rackmount', 'workstation',
                                    'aurora_node', 'virtual_machine']:
        raise ArgumentError("The add_machine command cannot add machines "
                            "of type %(type)s. Try 'add %(type)s'." %
                            {"type": dbmodel.machine_type})

    if cluster:
        if dbmodel.machine_type != 'virtual_machine':
            raise ArgumentError("Only virtual machines can have a cluster "
                                "attribute.")
        dbcluster = Cluster.get_unique(session, cluster,
                                       compel=ArgumentError)
        # This test could be either archetype or cluster_type
        if dbcluster.personality.archetype.name != 'esx_cluster':
            raise ArgumentError("Can only add virtual machines to "
                                "clusters with archetype esx_cluster.")
        # TODO implement the same to vmhosts.
        if dbcluster.status.name == 'decommissioned':
            raise ArgumentError("Cannot add virtual machines to "
                                "decommissioned clusters.")
        # A VM always lives at its cluster's location constraint.
        if dblocation and dbcluster.location_constraint != dblocation:
            raise ArgumentError("Cannot override cluster location {0} "
                                "with location {1}.".format(
                                    dbcluster.location_constraint,
                                    dblocation))
        dblocation = dbcluster.location_constraint
    elif dbmodel.machine_type == 'virtual_machine':
        raise ArgumentError("Virtual machines must be assigned to a "
                            "cluster.")

    # Fail early if a machine with this label already exists.
    Machine.get_unique(session, machine, preclude=True)
    dbmachine = create_machine(session, machine, dblocation, dbmodel,
                               cpuname, cpuvendor, cpuspeed, cpucount,
                               memory, serial, comments)

    if chassis:
        # FIXME: Are virtual machines allowed to be in a chassis?
        # Reuse an existing slot row if present, else create one.
        dbslot = session.query(ChassisSlot).filter_by(chassis=dbchassis,
                                                      slot_number=slot).first()
        if not dbslot:
            dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
        dbslot.machine = dbmachine
        session.add(dbslot)

    if cluster:
        # The cluster needs a resource holder before a VM resource can
        # be attached to it.
        if not dbcluster.resholder:
            dbcluster.resholder = ClusterResource(cluster=dbcluster)
        dbvm = VirtualMachine(machine=dbmachine, name=dbmachine.label,
                              holder=dbcluster.resholder)
        # Re-check cluster constraints now that the VM has been added.
        dbcluster.validate()

    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbmachine))
    if cluster:
        plenaries.append(Plenary.get_plenary(dbcluster))
        plenaries.append(Plenary.get_plenary(dbvm))

    # The check to make sure a plenary file is not written out for
    # dummy aurora hardware is within the call to write().  This way
    # it is consistent without altering (and forgetting to alter)
    # all the calls to the method.
    plenaries.write()

    return
def render(self, session, logger, machine, model, vendor, serial, chassis,
           slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
           vmhost, uri, comments, **arguments):
    """Add a new machine (command entry point, vmhost/uri variant).

    Like the cluster-only variant, but a virtual machine may be held
    either by a cluster or by a vmhost (a host's resource holder), and
    virtual appliances may carry a URI.

    Raises ArgumentError on any option conflict (chassis without slot,
    cluster and vmhost together, location mismatch, wrong model type,
    decommissioned container, URI on a non-appliance model, ...).
    """
    # Location may come from generic location options in **arguments;
    # chassis or the VM container below can override it after checks.
    dblocation = get_location(session,
                              query_options=[subqueryload('parents'),
                                             joinedload('parents.dns_maps')],
                              **arguments)
    if chassis:
        dbchassis = Chassis.get_unique(session, chassis, compel=True)
        if slot is None:
            raise ArgumentError("The --chassis option requires a --slot.")
        # An explicitly given location must agree with the chassis.
        if dblocation and dblocation != dbchassis.location:
            raise ArgumentError("{0} conflicts with chassis location "
                                "{1}.".format(dblocation, dbchassis.location))
        dblocation = dbchassis.location
    elif slot is not None:
        raise ArgumentError("The --slot option requires a --chassis.")

    dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                               compel=True)

    if not dbmodel.model_type.isMachineType():
        raise ArgumentError("The add_machine command cannot add machines "
                            "of type %s." % str(dbmodel.model_type))

    vmholder = None

    if cluster or vmhost:
        if cluster and vmhost:
            raise ArgumentError("Cluster and vmhost cannot be specified "
                                "together.")
        if not dbmodel.model_type.isVirtualMachineType():
            raise ArgumentError("{0} is not a virtual machine."
                                .format(dbmodel))

        # TODO: do we need VMs inside resource groups?
        vmholder = get_resource_holder(session, hostname=vmhost,
                                       cluster=cluster, resgroup=None,
                                       compel=False)

        if vmholder.holder_object.status.name == 'decommissioned':
            raise ArgumentError("Cannot add virtual machines to "
                                "decommissioned clusters.")

        # The VM inherits its container's location: the cluster's
        # location constraint, or the vmhost machine's location.
        if cluster:
            container_loc = vmholder.holder_object.location_constraint
        else:
            container_loc = vmholder.holder_object.hardware_entity.location

        if dblocation and dblocation != container_loc:
            raise ArgumentError("Cannot override container location {0} "
                                "with location {1}.".format(container_loc,
                                                            dblocation))
        dblocation = container_loc
    elif dbmodel.model_type.isVirtualMachineType():
        raise ArgumentError("Virtual machines must be assigned to a "
                            "cluster or a host.")

    # Fail early if a machine with this label already exists.
    Machine.get_unique(session, machine, preclude=True)
    dbmachine = create_machine(session, machine, dblocation, dbmodel,
                               cpuname, cpuvendor, cpuspeed, cpucount,
                               memory, serial, comments)

    # Only virtual appliances may carry a URI.
    if uri and not dbmodel.model_type.isVirtualAppliance():
        raise ArgumentError("URI can be specified only for virtual "
                            "appliances and the model's type is %s." %
                            dbmodel.model_type)

    dbmachine.uri = uri

    if chassis:
        # FIXME: Are virtual machines allowed to be in a chassis?
        # Reuse an existing slot row if present, else create one.
        dbslot = session.query(ChassisSlot).filter_by(chassis=dbchassis,
                                                      slot_number=slot).first()
        if not dbslot:
            dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
        dbslot.machine = dbmachine
        session.add(dbslot)

    if vmholder:
        dbvm = VirtualMachine(machine=dbmachine, name=dbmachine.label,
                              holder=vmholder)
        # Clusters re-check their constraints via validate(); holders
        # without a validate() method (e.g. hosts) are left alone.
        if hasattr(vmholder.holder_object, "validate") and \
           callable(vmholder.holder_object.validate):
            vmholder.holder_object.validate()

    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbmachine))
    if vmholder:
        plenaries.append(Plenary.get_plenary(vmholder.holder_object))
        plenaries.append(Plenary.get_plenary(dbvm))

    # The check to make sure a plenary file is not written out for
    # dummy aurora hardware is within the call to write().  This way
    # it is consistent without altering (and forgetting to alter)
    # all the calls to the method.
    plenaries.write()

    return
def render(self, session, logger, machine, model, vendor, serial, chassis,
           slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
           comments, **arguments):
    """Add a new machine (command entry point, chassis/cluster variant).

    NOTE(review): this appears to duplicate the earlier render() with the
    same signature — presumably two revisions of the same command were
    concatenated into this file; confirm which one is live.

    Validates the chassis/slot and cluster options against the requested
    location and model, creates the Machine row, wires up the chassis
    slot and/or the cluster's VirtualMachine resource, then flushes the
    session and writes the affected plenary templates.

    Raises ArgumentError on any option conflict (chassis without slot,
    location mismatch, wrong machine type, decommissioned cluster, ...).
    """
    # Location may come from generic location options in **arguments;
    # chassis/cluster below can override it after consistency checks.
    dblocation = get_location(session,
                              query_options=[subqueryload('parents'),
                                             joinedload('parents.dns_maps')],
                              **arguments)
    if chassis:
        dbchassis = Chassis.get_unique(session, chassis, compel=True)
        if slot is None:
            raise ArgumentError("The --chassis option requires a --slot.")
        # An explicitly given location must agree with the chassis.
        if dblocation and dblocation != dbchassis.location:
            raise ArgumentError("{0} conflicts with chassis location "
                                "{1}.".format(dblocation, dbchassis.location))
        dblocation = dbchassis.location
    elif slot is not None:
        raise ArgumentError("The --slot option requires a --chassis.")

    dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                               compel=True)

    # Only true machine models may be added here; other hardware types
    # have their own add commands.
    if dbmodel.machine_type not in ['blade', 'rackmount', 'workstation',
                                    'aurora_node', 'virtual_machine']:
        raise ArgumentError("The add_machine command cannot add machines "
                            "of type %(type)s. Try 'add %(type)s'." %
                            {"type": dbmodel.machine_type})

    if cluster:
        if dbmodel.machine_type != 'virtual_machine':
            raise ArgumentError("Only virtual machines can have a cluster "
                                "attribute.")
        dbcluster = Cluster.get_unique(session, cluster,
                                       compel=ArgumentError)
        # This test could be either archetype or cluster_type
        if dbcluster.personality.archetype.name != 'esx_cluster':
            raise ArgumentError("Can only add virtual machines to "
                                "clusters with archetype esx_cluster.")
        # TODO implement the same to vmhosts.
        if dbcluster.status.name == 'decommissioned':
            raise ArgumentError("Cannot add virtual machines to "
                                "decommissioned clusters.")
        # A VM always lives at its cluster's location constraint.
        if dblocation and dbcluster.location_constraint != dblocation:
            raise ArgumentError("Cannot override cluster location {0} "
                                "with location {1}.".format(
                                    dbcluster.location_constraint,
                                    dblocation))
        dblocation = dbcluster.location_constraint
    elif dbmodel.machine_type == 'virtual_machine':
        raise ArgumentError("Virtual machines must be assigned to a "
                            "cluster.")

    # Fail early if a machine with this label already exists.
    Machine.get_unique(session, machine, preclude=True)
    dbmachine = create_machine(session, machine, dblocation, dbmodel,
                               cpuname, cpuvendor, cpuspeed, cpucount,
                               memory, serial, comments)

    if chassis:
        # FIXME: Are virtual machines allowed to be in a chassis?
        # Reuse an existing slot row if present, else create one.
        dbslot = session.query(ChassisSlot).filter_by(chassis=dbchassis,
                                                      slot_number=slot).first()
        if not dbslot:
            dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
        dbslot.machine = dbmachine
        session.add(dbslot)

    if cluster:
        # The cluster needs a resource holder before a VM resource can
        # be attached to it.
        if not dbcluster.resholder:
            dbcluster.resholder = ClusterResource(cluster=dbcluster)
        dbvm = VirtualMachine(machine=dbmachine, name=dbmachine.label,
                              holder=dbcluster.resholder)
        # Re-check cluster constraints now that the VM has been added.
        dbcluster.validate()

    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbmachine))
    if cluster:
        plenaries.append(Plenary.get_plenary(dbcluster))
        plenaries.append(Plenary.get_plenary(dbvm))

    # The check to make sure a plenary file is not written out for
    # dummy aurora hardware is within the call to write().  This way
    # it is consistent without altering (and forgetting to alter)
    # all the calls to the method.
    plenaries.write()

    return
class CommandAddAuroraHost(CommandAddHost): required_parameters = ["hostname"] # Look for node name like <building><rack_id>c<chassis_id>n<node_num> nodename_re = re.compile(r'^\s*([a-zA-Z]+)(\d+)c(\d+)n(\d+)\s*$') def render(self, session, logger, hostname, osname, osversion, **kwargs): # Pull relevant info out of dsdb... dsdb_runner = DSDBRunner(logger=logger) try: fields = dsdb_runner.show_host(hostname) except ProcessException, e: raise ArgumentError("Could not find %s in DSDB: %s" % (hostname, e)) fqdn = fields["fqdn"] dsdb_lookup = fields["dsdb_lookup"] if fields["node"]: machine = fields["node"] elif fields["primary_name"]: machine = fields["primary_name"] else: machine = dsdb_lookup # Create a machine dbmodel = Model.get_unique(session, name="aurora_model", vendor="aurora_vendor", compel=True) dbmachine = Machine.get_unique(session, machine) dbslot = None if not dbmachine: m = self.nodename_re.search(machine) if m: (building, rid, cid, nodenum) = m.groups() dbbuilding = session.query(Building).filter_by( name=building).first() if not dbbuilding: raise ArgumentError("Failed to find building %s for " "node %s, please add an Aurora " "machine manually and follow with " "add_host." % (building, machine)) rack = building + rid dbrack = session.query(Rack).filter_by(name=rack).first() if not dbrack: try: rack_fields = dsdb_runner.show_rack(rack) dbrack = Rack(name=rack, fullname=rack, parent=dbbuilding, rack_row=rack_fields["rack_row"], rack_column=rack_fields["rack_col"]) session.add(dbrack) except (ProcessException, ValueError), e: logger.client_info("Rack %s not defined in DSDB." 
% rack) dblocation = dbrack or dbbuilding chassis = rack + "c" + cid dbdns_domain = session.query(DnsDomain).filter_by( name="ms.com").first() dbchassis = Chassis.get_unique(session, chassis) if not dbchassis: dbchassis_model = Model.get_unique(session, name="aurora_chassis_model", vendor="aurora_vendor", compel=True) dbchassis = Chassis(label=chassis, location=dblocation, model=dbchassis_model) session.add(dbchassis) dbfqdn = Fqdn.get_or_create(session, name=chassis, dns_domain=dbdns_domain, preclude=True) dbdns_rec = ReservedName(fqdn=dbfqdn) session.add(dbdns_rec) dbchassis.primary_name = dbdns_rec dbslot = session.query(ChassisSlot).filter_by( chassis=dbchassis, slot_number=nodenum).first() # Note: Could be even more persnickity here and check that # the slot is currently empty. Seems like overkill. if not dbslot: dbslot = ChassisSlot(chassis=dbchassis, slot_number=nodenum) session.add(dbslot) else: dbnet_env = NetworkEnvironment.get_unique_or_default(session) try: host_ip = gethostbyname(hostname) except gaierror, e: raise ArgumentError("Error when looking up host: %d, %s" % (e.errno, e.strerror)) dbnetwork = get_net_id_from_ip(session, IPv4Address(host_ip), dbnet_env) dblocation = dbnetwork.location.building