def render(self, session, logger, chassis, model, rack, ip, vendor,
           serial, comments, **arguments):
    """Update attributes of an existing chassis and synchronize DSDB.

    All parameters except the chassis name are optional; only the
    attributes that were explicitly passed are modified.
    """
    dbchassis = Chassis.get_unique(session, chassis, compel=True)

    # Capture the pre-update hardware state first: DSDB is updated at the
    # end with the difference between this snapshot and the final state.
    oldinfo = DSDBRunner.snapshot_hw(dbchassis)

    # A vendor change without an explicit model means "same model name,
    # different vendor" - re-resolve the model under the new vendor.
    if vendor and not model:
        model = dbchassis.model.name
    if model:
        dbchassis.model = Model.get_unique(session, name=model,
                                           vendor=vendor,
                                           model_type=ChassisType.Chassis,
                                           compel=True)

    dblocation = get_location(session, rack=rack)
    if dblocation:
        dbchassis.location = dblocation

    # serial/comments use "is not None" so an empty string can be used to
    # clear the stored value.
    if serial is not None:
        dbchassis.serial_no = serial
    if ip:
        update_primary_ip(session, logger, dbchassis, ip)
    if comments is not None:
        dbchassis.comments = comments

    session.flush()

    dsdb_runner = DSDBRunner(logger=logger)
    dsdb_runner.update_host(dbchassis, oldinfo)
    dsdb_runner.commit_or_rollback("Could not update chassis in DSDB")
    return
def render(self, session, logger, chassis, model, rack, ip, vendor,
           serial, comments, **arguments):
    """Update attributes of an existing chassis and synchronize DSDB.

    Only the attributes explicitly passed on the command line are
    modified.
    """
    dbchassis = Chassis.get_unique(session, chassis, compel=True)
    # Snapshot the hardware state before any change so DSDB can be
    # updated with the delta at the end.
    oldinfo = DSDBRunner.snapshot_hw(dbchassis)

    # Vendor-only change: keep the current model name, re-resolve it
    # under the new vendor.
    if vendor and not model:
        model = dbchassis.model.name
    if model:
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   machine_type='chassis', compel=True)
        dbchassis.model = dbmodel

    dblocation = get_location(session, rack=rack)
    if dblocation:
        dbchassis.location = dblocation

    # "is not None" allows clearing the value with an empty string.
    if serial is not None:
        dbchassis.serial_no = serial
    if ip:
        update_primary_ip(session, dbchassis, ip)
    if comments is not None:
        dbchassis.comments = comments

    # Note: no session.add() is needed here - dbchassis was loaded
    # through this session, so it is already persistent and any
    # attribute changes are picked up by flush().
    session.flush()

    dsdb_runner = DSDBRunner(logger=logger)
    dsdb_runner.update_host(dbchassis, oldinfo)
    dsdb_runner.commit_or_rollback("Could not update chassis in DSDB")
    return
def render(self, session, logger, switch, model, rack, type, ip, vendor,
           serial, rename_to, discovered_macs, clear, discover, comments,
           **arguments):
    """Update an existing switch: hardware attributes, discovered MACs,
    optional rename, plenary templates and DSDB.
    """
    dbswitch = Switch.get_unique(session, switch, compel=True)
    # Snapshot the hardware state before modifications; DSDB is updated
    # at the end with the delta against this snapshot.
    oldinfo = DSDBRunner.snapshot_hw(dbswitch)

    if discover:
        discover_switch(session, logger, self.config, dbswitch, False)

    # Vendor-only change implies re-resolving the current model name
    # under the new vendor.
    if vendor and not model:
        model = dbswitch.model.name
    if model:
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   machine_type='switch', compel=True)
        dbswitch.model = dbmodel

    dblocation = get_location(session, rack=rack)
    if dblocation:
        dbswitch.location = dblocation

    # "is not None" allows clearing the serial with an empty string.
    if serial is not None:
        dbswitch.serial_no = serial

    # FIXME: What do the error messages for an invalid enum (switch_type)
    # look like?
    if type:
        Switch.check_type(type)
        dbswitch.switch_type = type

    if ip:
        update_primary_ip(session, dbswitch, ip)

    if comments is not None:
        dbswitch.comments = comments

    remove_plenary = None
    if rename_to:
        # Handling alias renaming would not be difficult in AQDB, but the
        # DSDB synchronization would be painful, so don't do that for now.
        # In theory we should check all configured IP addresses for aliases,
        # but this is the most common case
        if dbswitch.primary_name and dbswitch.primary_name.fqdn.aliases:
            raise ArgumentError("The switch has aliases and it cannot be "
                                "renamed. Please remove all aliases first.")
        # Remember the plenary under the old name so it can be removed
        # once the rename is flushed.
        remove_plenary = Plenary.get_plenary(dbswitch, logger=logger)
        rename_hardware(session, dbswitch, rename_to)

    if clear:
        session.query(ObservedMac).filter_by(switch=dbswitch).delete()

    if discovered_macs:
        now = datetime.now()
        for (macaddr, port) in discovered_macs:
            update_or_create_observed_mac(session, dbswitch, port,
                                          macaddr, now)

    session.flush()

    switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)
    key = switch_plenary.get_write_key()
    if remove_plenary:
        # One compile key covering both the removal of the old plenary
        # and the write of the new one.
        key = CompileKey.merge([key, remove_plenary.get_remove_key()])
    try:
        lock_queue.acquire(key)
        if remove_plenary:
            remove_plenary.stash()
            remove_plenary.remove(locked=True)
        switch_plenary.write(locked=True)
        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbswitch, oldinfo)
        dsdb_runner.commit_or_rollback("Could not update switch in DSDB")
    except:
        # Roll the on-disk templates back to their stashed state if
        # anything (including the DSDB update) failed.
        if remove_plenary:
            remove_plenary.restore_stash()
        switch_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    return
def render(self, session, logger, machine, model, vendor, serial, chassis,
           slot, clearchassis, multislot, vmhost, cluster,
           allow_metacluster_change, cpuname, cpuvendor, cpuspeed,
           cpucount, memory, ip, **arguments):
    """Update an existing machine: chassis slot, location, model, CPU,
    memory, primary IP and (for VMs) the containing cluster/host, then
    refresh plenaries and DSDB.
    """
    dbmachine = Machine.get_unique(session, machine, compel=True)
    plenaries = PlenaryCollection(logger=logger)
    # Snapshot the hardware before changes; DSDB is updated with the
    # delta at the end.
    oldinfo = DSDBRunner.snapshot_hw(dbmachine)

    if clearchassis:
        del dbmachine.chassis_slot[:]

    # Plenaries that must be removed because the machine moves to a
    # different path in the template tree.
    remove_plenaries = PlenaryCollection(logger=logger)

    if chassis:
        dbchassis = Chassis.get_unique(session, chassis, compel=True)
        if machine_plenary_will_move(old=dbmachine.location,
                                     new=dbchassis.location):
            remove_plenaries.append(Plenary.get_plenary(dbmachine))
        dbmachine.location = dbchassis.location
        if slot is None:
            raise ArgumentError("Option --chassis requires --slot "
                                "information.")
        self.adjust_slot(session, logger, dbmachine, dbchassis, slot,
                         multislot)
    elif slot is not None:
        # --slot without --chassis: only valid if the machine is already
        # in exactly one chassis.
        dbchassis = None
        for dbslot in dbmachine.chassis_slot:
            if dbchassis and dbslot.chassis != dbchassis:
                raise ArgumentError("Machine in multiple chassis, please "
                                    "use --chassis argument.")
            dbchassis = dbslot.chassis
        if not dbchassis:
            raise ArgumentError("Option --slot requires --chassis "
                                "information.")
        self.adjust_slot(session, logger, dbmachine, dbchassis, slot,
                         multislot)

    dblocation = get_location(session, **arguments)
    if dblocation:
        # A new location must not contradict the chassis' location; if
        # the location was given without chassis/slot, drop the chassis
        # association instead.
        loc_clear_chassis = False
        for dbslot in dbmachine.chassis_slot:
            dbcl = dbslot.chassis.location
            if dbcl != dblocation:
                if chassis or slot is not None:
                    raise ArgumentError("{0} conflicts with chassis {1!s} "
                                        "location {2}.".format(dblocation,
                                                               dbslot.chassis,
                                                               dbcl))
                else:
                    loc_clear_chassis = True
        if loc_clear_chassis:
            del dbmachine.chassis_slot[:]
        if machine_plenary_will_move(old=dbmachine.location,
                                     new=dblocation):
            remove_plenaries.append(Plenary.get_plenary(dbmachine))
        dbmachine.location = dblocation

    if model or vendor:
        # If overriding model, should probably overwrite default
        # machine specs as well.
        if not model:
            model = dbmachine.model.name
        if not vendor:
            vendor = dbmachine.model.vendor.name
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)
        if dbmodel.machine_type not in ['blade', 'rackmount',
                                        'workstation', 'aurora_node',
                                        'virtual_machine']:
            raise ArgumentError("The update_machine command cannot update "
                                "machines of type %s." %
                                dbmodel.machine_type)
        # We probably could do this by forcing either cluster or
        # location data to be available as appropriate, but really?
        # Failing seems reasonable.
        if dbmodel.machine_type != dbmachine.model.machine_type and \
           'virtual_machine' in [dbmodel.machine_type,
                                 dbmachine.model.machine_type]:
            raise ArgumentError("Cannot change machine from %s to %s." %
                                (dbmachine.model.machine_type,
                                 dbmodel.machine_type))

        # Interfaces inheriting the old model's default NIC follow the
        # new model's default NIC.
        old_nic_model = dbmachine.model.nic_model
        new_nic_model = dbmodel.nic_model
        if old_nic_model != new_nic_model:
            for iface in dbmachine.interfaces:
                if iface.model == old_nic_model:
                    iface.model = new_nic_model

        dbmachine.model = dbmodel

    if cpuname or cpuvendor or cpuspeed is not None:
        dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                               speed=cpuspeed, compel=True)
        dbmachine.cpu = dbcpu

    if cpucount is not None:
        dbmachine.cpu_quantity = cpucount
    if memory is not None:
        dbmachine.memory = memory
    if serial:
        dbmachine.serial_no = serial
    if ip:
        update_primary_ip(session, dbmachine, ip)

    # FIXME: For now, if a machine has its interface(s) in a portgroup
    # this command will need to be followed by an update_interface to
    # re-evaluate the portgroup for overflow.
    # It would be better to have --pg and --autopg options to let it
    # happen at this point.
    if cluster or vmhost:
        if not dbmachine.vm_container:
            raise ArgumentError("Cannot convert a physical machine to "
                                "virtual.")

        old_holder = dbmachine.vm_container.holder.holder_object
        resholder = get_resource_holder(session, hostname=vmhost,
                                        cluster=cluster, compel=False)
        new_holder = resholder.holder_object

        # TODO: do we want to allow moving machines between the cluster and
        # metacluster level?
        if new_holder.__class__ != old_holder.__class__:
            raise ArgumentError("Cannot move a VM between a cluster and a "
                                "stand-alone host.")

        if cluster:
            if new_holder.metacluster != old_holder.metacluster \
               and not allow_metacluster_change:
                raise ArgumentError("Current {0:l} does not match "
                                    "new {1:l}."
                                    .format(old_holder.metacluster,
                                            new_holder.metacluster))

        remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        dbmachine.vm_container.holder = resholder

        # Re-home each virtual disk's share onto an equivalent share of
        # the new holder.
        for dbdisk in dbmachine.disks:
            if not isinstance(dbdisk, VirtualDisk):
                continue
            old_share = dbdisk.share
            if isinstance(old_share.holder, BundleResource):
                resourcegroup = old_share.holder.name
            else:
                resourcegroup = None
            new_share = find_share(new_holder, resourcegroup,
                                   old_share.name, error=ArgumentError)
            # If the shares are registered at the metacluster level and both
            # clusters are in the same metacluster, then there will be no
            # real change here
            if new_share != old_share:
                old_share.disks.remove(dbdisk)
                new_share.disks.append(dbdisk)

        if isinstance(new_holder, Cluster):
            dbmachine.location = new_holder.location_constraint
        else:
            dbmachine.location = new_holder.location

        session.flush()
        plenaries.append(Plenary.get_plenary(old_holder))
        plenaries.append(Plenary.get_plenary(new_holder))

    if dbmachine.vm_container:
        plenaries.append(Plenary.get_plenary(dbmachine.vm_container))

    session.flush()

    # Check if the changed parameters still meet cluster capacity
    # requiremets
    if dbmachine.cluster:
        dbmachine.cluster.validate()
        if allow_metacluster_change:
            dbmachine.cluster.metacluster.validate()
    if dbmachine.host and dbmachine.host.cluster:
        dbmachine.host.cluster.validate()

    # The check to make sure a plenary file is not written out for
    # dummy aurora hardware is within the call to write(). This way
    # it is consistent without altering (and forgetting to alter)
    # all the calls to the method.
    plenaries.append(Plenary.get_plenary(dbmachine))
    if remove_plenaries.plenaries and dbmachine.host:
        plenaries.append(Plenary.get_plenary(dbmachine.host))

    key = CompileKey.merge([plenaries.get_write_key(),
                            remove_plenaries.get_remove_key()])
    try:
        lock_queue.acquire(key)
        remove_plenaries.stash()
        plenaries.write(locked=True)
        remove_plenaries.remove(locked=True)

        if dbmachine.host:
            # XXX: May need to reconfigure.
            pass

        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbmachine, oldinfo)
        dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
    except:
        # Restore the on-disk templates on any failure (including DSDB).
        plenaries.restore_stash()
        remove_plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    return
def render(self, session, logger, switch, model, rack, type, ip, vendor,
           serial, rename_to, discovered_macs, clear, discover, comments,
           **arguments):
    """Update an existing switch: hardware attributes, discovered MACs,
    optional rename, plenary templates and DSDB.
    """
    dbswitch = Switch.get_unique(session, switch, compel=True)
    # Snapshot the hardware state before modifications; DSDB is updated
    # at the end with the delta against this snapshot.
    oldinfo = DSDBRunner.snapshot_hw(dbswitch)

    if discover:
        discover_switch(session, logger, self.config, dbswitch, False)

    # Vendor-only change implies re-resolving the current model name
    # under the new vendor.
    if vendor and not model:
        model = dbswitch.model.name
    if model:
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   machine_type='switch', compel=True)
        dbswitch.model = dbmodel

    dblocation = get_location(session, rack=rack)
    if dblocation:
        dbswitch.location = dblocation

    # "is not None" allows clearing the serial with an empty string.
    if serial is not None:
        dbswitch.serial_no = serial

    # FIXME: What do the error messages for an invalid enum (switch_type)
    # look like?
    if type:
        Switch.check_type(type)
        dbswitch.switch_type = type

    if ip:
        update_primary_ip(session, dbswitch, ip)

    if comments is not None:
        dbswitch.comments = comments

    remove_plenary = None
    if rename_to:
        # Handling alias renaming would not be difficult in AQDB, but the
        # DSDB synchronization would be painful, so don't do that for now.
        # In theory we should check all configured IP addresses for aliases,
        # but this is the most common case
        if dbswitch.primary_name and dbswitch.primary_name.fqdn.aliases:
            raise ArgumentError(
                "The switch has aliases and it cannot be "
                "renamed. Please remove all aliases first.")
        # Remember the plenary under the old name so it can be removed
        # after the rename.
        remove_plenary = Plenary.get_plenary(dbswitch, logger=logger)
        rename_hardware(session, dbswitch, rename_to)

    if clear:
        session.query(ObservedMac).filter_by(switch=dbswitch).delete()

    if discovered_macs:
        now = datetime.now()
        for (macaddr, port) in discovered_macs:
            update_or_create_observed_mac(session, dbswitch, port,
                                          macaddr, now)

    session.flush()

    switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)
    key = switch_plenary.get_write_key()
    if remove_plenary:
        # One compile key covering both removal of the old plenary and
        # the write of the new one.
        key = CompileKey.merge([key, remove_plenary.get_remove_key()])
    try:
        lock_queue.acquire(key)
        if remove_plenary:
            remove_plenary.stash()
            remove_plenary.remove(locked=True)
        switch_plenary.write(locked=True)
        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbswitch, oldinfo)
        dsdb_runner.commit_or_rollback("Could not update switch in DSDB")
    except:
        # Roll the on-disk templates back on any failure (incl. DSDB).
        if remove_plenary:
            remove_plenary.restore_stash()
        switch_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    return
def render(self, session, logger, machine, model, vendor, serial, chassis,
           slot, clearchassis, multislot, vmhost, cluster,
           allow_metacluster_change, cpuname, cpuvendor, cpuspeed,
           cpucount, memory, ip, uri, **arguments):
    """Update an existing machine: chassis slot, location, model, CPU,
    memory, primary IP, URI and (for VMs) the containing cluster/host,
    then refresh plenaries and DSDB.
    """
    dbmachine = Machine.get_unique(session, machine, compel=True)
    # Snapshot the hardware before changes; DSDB is updated with the
    # delta at the end.
    oldinfo = DSDBRunner.snapshot_hw(dbmachine)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbmachine))

    if dbmachine.vm_container:
        plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
    if dbmachine.host:
        # Using PlenaryHostData directly, to avoid warnings if the host has
        # not been configured yet
        plenaries.append(PlenaryHostData.get_plenary(dbmachine.host))

    if clearchassis:
        del dbmachine.chassis_slot[:]

    if chassis:
        dbchassis = Chassis.get_unique(session, chassis, compel=True)
        dbmachine.location = dbchassis.location
        if slot is None:
            raise ArgumentError("Option --chassis requires --slot "
                                "information.")
        self.adjust_slot(session, logger, dbmachine, dbchassis, slot,
                         multislot)
    elif slot is not None:
        # --slot without --chassis: only valid if the machine is already
        # in exactly one chassis.
        dbchassis = None
        for dbslot in dbmachine.chassis_slot:
            if dbchassis and dbslot.chassis != dbchassis:
                raise ArgumentError("Machine in multiple chassis, please "
                                    "use --chassis argument.")
            dbchassis = dbslot.chassis
        if not dbchassis:
            raise ArgumentError("Option --slot requires --chassis "
                                "information.")
        self.adjust_slot(session, logger, dbmachine, dbchassis, slot,
                         multislot)

    dblocation = get_location(session, **arguments)
    if dblocation:
        # A new location must not contradict the chassis' location; if
        # the location was given without chassis/slot, drop the chassis
        # association instead.
        loc_clear_chassis = False
        for dbslot in dbmachine.chassis_slot:
            dbcl = dbslot.chassis.location
            if dbcl != dblocation:
                if chassis or slot is not None:
                    raise ArgumentError("{0} conflicts with chassis {1!s} "
                                        "location {2}."
                                        .format(dblocation, dbslot.chassis,
                                                dbcl))
                else:
                    loc_clear_chassis = True
        if loc_clear_chassis:
            del dbmachine.chassis_slot[:]
        dbmachine.location = dblocation

        # Virtual machines hosted on this machine follow its location.
        if dbmachine.host:
            for vm in dbmachine.host.virtual_machines:
                plenaries.append(Plenary.get_plenary(vm))
                vm.location = dblocation

    if model or vendor:
        # If overriding model, should probably overwrite default
        # machine specs as well.
        if not model:
            model = dbmachine.model.name
        if not vendor:
            vendor = dbmachine.model.vendor.name
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)
        if not dbmodel.model_type.isMachineType():
            raise ArgumentError("The update_machine command cannot update "
                                "machines of type %s." %
                                dbmodel.model_type)
        # We probably could do this by forcing either cluster or
        # location data to be available as appropriate, but really?
        # Failing seems reasonable.
        if dbmodel.model_type != dbmachine.model.model_type and \
           (dbmodel.model_type.isVirtualMachineType() or
            dbmachine.model.model_type.isVirtualMachineType()):
            raise ArgumentError("Cannot change machine from %s to %s." %
                                (dbmachine.model.model_type,
                                 dbmodel.model_type))

        # Interfaces inheriting the old model's default NIC follow the
        # new model's default NIC.
        old_nic_model = dbmachine.model.nic_model
        new_nic_model = dbmodel.nic_model
        if old_nic_model != new_nic_model:
            for iface in dbmachine.interfaces:
                if iface.model == old_nic_model:
                    iface.model = new_nic_model

        dbmachine.model = dbmodel

    if cpuname or cpuvendor or cpuspeed is not None:
        dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                               speed=cpuspeed, compel=True)
        dbmachine.cpu = dbcpu

    if cpucount is not None:
        dbmachine.cpu_quantity = cpucount
    if memory is not None:
        dbmachine.memory = memory
    if serial:
        dbmachine.serial_no = serial
    if ip:
        update_primary_ip(session, logger, dbmachine, ip)

    # URI only makes sense for virtual appliances.
    if uri and not dbmachine.model.model_type.isVirtualAppliance():
        raise ArgumentError("URI can be specified only for virtual "
                            "appliances and the model's type is %s" %
                            dbmachine.model.model_type)
    if uri:
        dbmachine.uri = uri

    # FIXME: For now, if a machine has its interface(s) in a portgroup
    # this command will need to be followed by an update_interface to
    # re-evaluate the portgroup for overflow.
    # It would be better to have --pg and --autopg options to let it
    # happen at this point.
    if cluster or vmhost:
        if not dbmachine.vm_container:
            raise ArgumentError("Cannot convert a physical machine to "
                                "virtual.")

        old_holder = dbmachine.vm_container.holder.holder_object
        resholder = get_resource_holder(session, hostname=vmhost,
                                        cluster=cluster, compel=False)
        new_holder = resholder.holder_object

        if self.get_metacluster(new_holder) != self.get_metacluster(old_holder) \
           and not allow_metacluster_change:
            raise ArgumentError("Current {0:l} does not match "
                                "new {1:l}."
                                .format(self.get_metacluster(old_holder),
                                        self.get_metacluster(new_holder)))

        plenaries.append(Plenary.get_plenary(old_holder))
        plenaries.append(Plenary.get_plenary(new_holder))

        dbmachine.vm_container.holder = resholder

        # Re-home each virtual disk's backing resource (share or
        # filesystem) onto an equivalent resource of the new holder.
        for dbdisk in dbmachine.disks:
            if isinstance(dbdisk, VirtualNasDisk):
                old_share = dbdisk.share
                if isinstance(old_share.holder, BundleResource):
                    resourcegroup = old_share.holder.resourcegroup.name
                else:
                    resourcegroup = None
                new_share = find_resource(Share, new_holder, resourcegroup,
                                          old_share.name,
                                          error=ArgumentError)
                # If the shares are registered at the metacluster level and both
                # clusters are in the same metacluster, then there will be no
                # real change here
                if new_share != old_share:
                    old_share.disks.remove(dbdisk)
                    new_share.disks.append(dbdisk)
            if isinstance(dbdisk, VirtualLocalDisk):
                old_filesystem = dbdisk.filesystem
                new_filesystem = find_resource(Filesystem, new_holder, None,
                                               old_filesystem.name,
                                               error=ArgumentError)
                if new_filesystem != old_filesystem:
                    old_filesystem.disks.remove(dbdisk)
                    new_filesystem.disks.append(dbdisk)

        if isinstance(new_holder, Cluster):
            dbmachine.location = new_holder.location_constraint
        else:
            # vmhost
            dbmachine.location = new_holder.hardware_entity.location

    session.flush()

    # Check if the changed parameters still meet cluster capacity
    # requiremets
    if dbmachine.cluster:
        dbmachine.cluster.validate()
        if allow_metacluster_change:
            dbmachine.cluster.metacluster.validate()
    if dbmachine.host and dbmachine.host.cluster:
        dbmachine.host.cluster.validate()

    # The check to make sure a plenary file is not written out for
    # dummy aurora hardware is within the call to write(). This way
    # it is consistent without altering (and forgetting to alter)
    # all the calls to the method.
    with plenaries.get_key():
        plenaries.stash()
        try:
            plenaries.write(locked=True)
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
        except:
            # Restore on-disk templates on any failure (including DSDB).
            plenaries.restore_stash()
            raise
    return
def render(self, session, logger, network_device, model, type, ip, vendor,
           serial, rename_to, discovered_macs, clear, discover, comments,
           **arguments):
    """Update an existing network device: hardware attributes, discovered
    MACs, optional rename, plenary template and DSDB.
    """
    dbnetdev = NetworkDevice.get_unique(session, network_device,
                                        compel=True)
    # Snapshot the hardware state before modifications; DSDB is updated
    # at the end with the delta against this snapshot.
    oldinfo = DSDBRunner.snapshot_hw(dbnetdev)
    plenary = Plenary.get_plenary(dbnetdev, logger=logger)

    if discover:
        discover_network_device(session, logger, self.config, dbnetdev,
                                False)

    # Vendor-only change implies re-resolving the current model name
    # under the new vendor.
    if vendor and not model:
        model = dbnetdev.model.name
    if model:
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   model_type=NetworkDeviceType.Switch,
                                   compel=True)
        dbnetdev.model = dbmodel

    dblocation = get_location(session, **arguments)
    if dblocation:
        dbnetdev.location = dblocation

    # "is not None" allows clearing the serial with an empty string.
    if serial is not None:
        dbnetdev.serial_no = serial

    # FIXME: What do the error messages for an invalid enum (switch_type)
    # look like?
    if type:
        NetworkDevice.check_type(type)
        dbnetdev.switch_type = type

    if ip:
        update_primary_ip(session, logger, dbnetdev, ip)

    if comments is not None:
        dbnetdev.comments = comments

    if rename_to:
        # Handling alias renaming would not be difficult in AQDB, but the
        # DSDB synchronization would be painful, so don't do that for now.
        # In theory we should check all configured IP addresses for aliases,
        # but this is the most common case
        if dbnetdev.primary_name and dbnetdev.primary_name.fqdn.aliases:
            raise ArgumentError("The network device has aliases and it cannot be "
                                "renamed. Please remove all aliases first.")
        rename_hardware(session, dbnetdev, rename_to)

    if clear:
        session.query(ObservedMac).filter_by(network_device=dbnetdev).delete()

    if discovered_macs:
        now = datetime.now()
        for (macaddr, port) in discovered_macs:
            update_or_create_observed_mac(session, dbnetdev, port,
                                          macaddr, now)

    session.flush()

    with plenary.get_key():
        plenary.stash()
        try:
            plenary.write(locked=True)
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbnetdev, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update network device in DSDB")
        except:
            # Restore the on-disk template on any failure (incl. DSDB).
            plenary.restore_stash()
            raise
    return