def render(self, session, logger, city, timezone, campus,
           default_dns_domain, comments, **arguments):
    """Update attributes of an existing city.

    Handles timezone, comments and default DNS domain changes in-place;
    a campus change re-parents the city and forces a rewrite of all
    machine templates underneath it.  DSDB is notified of campus moves.

    Fix: removed a dead ``dsdb_runner = None`` assignment that was
    immediately overwritten by the DSDBRunner construction.
    """
    dbcity = get_location(session, city=city)

    # Updating machine templates is expensive, so only do that if needed
    update_machines = False

    if timezone is not None:
        dbcity.timezone = timezone
    if comments is not None:
        dbcity.comments = comments
    if default_dns_domain is not None:
        # Empty string clears the default; a non-empty value must name
        # an existing DNS domain.
        if default_dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, default_dns_domain,
                                                compel=True)
            dbcity.default_dns_domain = dbdns_domain
        else:
            dbcity.default_dns_domain = None

    prev_campus = None
    dsdb_runner = DSDBRunner(logger=logger)
    if campus is not None:
        dbcampus = get_location(session, campus=campus)
        # This one would change the template's locations hence forbidden
        if dbcampus.hub != dbcity.hub:
            # Doing this both to reduce user error and to limit
            # testing required.
            raise ArgumentError("Cannot change campus. {0} is in {1:l}, "
                                "while {2:l} is in {3:l}.".format(
                                    dbcampus, dbcampus.hub,
                                    dbcity, dbcity.hub))
        if dbcity.campus:
            prev_campus = dbcity.campus
        dbcity.update_parent(parent=dbcampus)
        update_machines = True

    session.flush()

    if campus is not None:
        if prev_campus:
            prev_name = prev_campus.name
        else:
            prev_name = None
        dsdb_runner.update_city(city, dbcampus.name, prev_name)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcity))

    if update_machines:
        q = session.query(Machine)
        q = q.filter(Machine.location_id.in_(dbcity.offspring_ids()))
        logger.client_info("Updating %d machines..." % q.count())
        for dbmachine in q:
            plenaries.append(Plenary.get_plenary(dbmachine))

    count = plenaries.write()
    dsdb_runner.commit_or_rollback()
    logger.client_info("Flushed %d templates." % count)
def render(self, session, machine, model, vendor, machine_type, chassis,
           slot, **arguments):
    """Search for machines matching the given filters.

    Returns all matching Machine rows, ordered by label.  Location
    filters are taken from **arguments via get_location().
    """
    q = session.query(Machine)
    if machine:
        # TODO: This command still mixes search/show facilities.
        # For now, give an error if machine name not found, but
        # also allow the command to be used to check if the machine has
        # the requested attributes (via the standard query filters).
        # In the future, this should be clearly separated as 'show machine'
        # and 'search machine'.
        machine = AqStr.normalize(machine)
        Machine.check_label(machine)
        # Raises if the machine does not exist at all.
        Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(label=machine)
    dblocation = get_location(session, **arguments)
    if dblocation:
        q = q.filter_by(location=dblocation)
    if chassis:
        dbchassis = Chassis.get_unique(session, chassis, compel=True)
        q = q.join('chassis_slot')
        q = q.filter_by(chassis=dbchassis)
        q = q.reset_joinpoint()
    if slot is not None:
        q = q.join('chassis_slot')
        q = q.filter_by(slot_number=slot)
        q = q.reset_joinpoint()
    if model or vendor or machine_type:
        # Subquery of matching model IDs; compel raises if nothing matches.
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))
    return q.order_by(Machine.label).all()
def render(self, session, logger, chassis, model, rack, ip, vendor, serial,
           comments, **arguments):
    """Update an existing chassis and propagate the change to DSDB."""
    dbchassis = Chassis.get_unique(session, chassis, compel=True)

    # Snapshot the hardware state first so DSDB can be sent a delta.
    oldinfo = DSDBRunner.snapshot_hw(dbchassis)

    # --vendor alone keeps the current model name but re-resolves it
    # against the new vendor.
    if vendor and not model:
        model = dbchassis.model.name
    if model:
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   model_type=ChassisType.Chassis,
                                   compel=True)
        dbchassis.model = dbmodel

    dblocation = get_location(session, rack=rack)
    if dblocation:
        dbchassis.location = dblocation

    if serial is not None:
        dbchassis.serial_no = serial

    if ip:
        update_primary_ip(session, logger, dbchassis, ip)

    if comments is not None:
        dbchassis.comments = comments

    session.flush()

    dsdb_runner = DSDBRunner(logger=logger)
    dsdb_runner.update_host(dbchassis, oldinfo)
    dsdb_runner.commit_or_rollback("Could not update chassis in DSDB")

    return
def render(self, session, logger, building, dryrun, incremental,
           **arguments):
    """Refresh network definitions from a QIP subnet dump.

    Runs the external qip_dump_subnetdata tool into a temp directory,
    feeds the resulting subnetdata.txt to QIPRefresh, then flushes (or
    rolls back, under --dryrun).  The whole operation runs under the
    "network" sync lock.

    Fix: the subnetdata file handle was never closed; it is now closed
    via a context manager.  Also replaced the Python-2-only ``file()``
    builtin with ``open()``, which behaves identically here.
    """
    if building:
        dbbuilding = get_location(session, building=building)
    else:
        dbbuilding = None

    # --dryrun and --incremental do not mix well
    if dryrun and incremental:
        raise ArgumentError("--dryrun and --incremental cannot be given "
                            "simultaneously.")

    key = SyncKey(data="network", logger=logger)
    lock_queue.acquire(key)

    rundir = self.config.get("broker", "rundir")
    tempdir = mkdtemp(prefix="refresh_network_", dir=rundir)
    try:
        args = [self.config.get("broker", "qip_dump_subnetdata"),
                "--datarootdir", tempdir, "--format", "txt", "--noaudit"]
        run_command(args, logger=logger)

        with open(os.path.join(tempdir, "subnetdata.txt"), "r") as subnetdata:
            refresher = QIPRefresh(session, logger, dbbuilding, dryrun,
                                   incremental)
            refresher.refresh(subnetdata)

        session.flush()

        if dryrun:
            session.rollback()
    finally:
        lock_queue.release(key)
        remove_dir(tempdir, logger=logger)
def render(self, session, logger, chassis, model, rack, ip, vendor, serial,
           comments, **arguments):
    """Update an existing chassis and propagate the change to DSDB."""
    dbchassis = Chassis.get_unique(session, chassis, compel=True)

    # Snapshot the hardware state first so DSDB can be sent a delta.
    oldinfo = DSDBRunner.snapshot_hw(dbchassis)

    # --vendor alone keeps the current model name but re-resolves it
    # against the new vendor.
    if vendor and not model:
        model = dbchassis.model.name
    if model:
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   machine_type='chassis', compel=True)
        dbchassis.model = dbmodel

    dblocation = get_location(session, rack=rack)
    if dblocation:
        dbchassis.location = dblocation

    if serial is not None:
        dbchassis.serial_no = serial

    if ip:
        update_primary_ip(session, dbchassis, ip)

    if comments is not None:
        dbchassis.comments = comments

    # NOTE(review): dbchassis came from the session, so this add() looks
    # redundant (a no-op for an already-persistent object) — confirm.
    session.add(dbchassis)
    session.flush()

    dsdb_runner = DSDBRunner(logger=logger)
    dsdb_runner.update_host(dbchassis, oldinfo)
    dsdb_runner.commit_or_rollback("Could not update chassis in DSDB")

    return
def render(self, session, network, ip, network_environment, all, style,
           type=False, hosts=False, **arguments):
    """Show networks, either one selected by name/IP or a filtered list."""
    options = [undefer('comments'), joinedload('location')]
    # Host output needs the address assignments eagerly loaded.
    if hosts or style == "proto":
        options.extend([subqueryload("assignments"),
                        joinedload("assignments.interface"),
                        joinedload("assignments.dns_records"),
                        subqueryload("dynamic_stubs")])
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    # --network / --ip select a single network.  The and/or chaining
    # means an --ip hit overrides a --network hit; each is None when the
    # corresponding option was not given.
    dbnetwork = network and get_network_byname(session, network, dbnet_env,
                                               query_options=options) or None
    dbnetwork = ip and get_network_byip(session, ip, dbnet_env,
                                        query_options=options) or dbnetwork
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    q = q.options(*options)
    if dbnetwork:
        if hosts:
            return NetworkHostList([dbnetwork])
        else:
            return dbnetwork
    if type:
        q = q.filter_by(network_type=type)
    dblocation = get_location(session, **arguments)
    if dblocation:
        # Include networks in all child locations as well.
        childids = dblocation.offspring_ids()
        q = q.filter(Network.location_id.in_(childids))
    q = q.order_by(Network.ip)
    q = q.options(*options)
    if hosts:
        return NetworkHostList(q.all())
    else:
        return SimpleNetworkList(q.all())
def render(self, session, network_environment, dns_environment, comments,
           **arguments):
    """Create a new network environment bound to a DNS environment."""
    validate_basic("network environment", network_environment)
    # preclude=True: fail if the name is already taken.
    NetworkEnvironment.get_unique(session, network_environment,
                                  preclude=True)
    dbdns_env = DnsEnvironment.get_unique(session, dns_environment,
                                          compel=True)
    # Currently input.xml lists --building only, but that may change
    location = get_location(session, **arguments)
    dbnet_env = NetworkEnvironment(name=network_environment,
                                   dns_environment=dbdns_env,
                                   location=location, comments=comments)
    # The default network environment and default DNS environment must
    # go together.
    if dbdns_env.is_default != dbnet_env.is_default:
        raise ArgumentError("Only the default network environment may be "
                            "associated with the default DNS environment.")
    session.add(dbnet_env)
    session.flush()
    return
def render(self, session, logger, city, **arguments):
    """Delete a city, its plenary template and its DSDB record."""
    dbcity = get_location(session, city=city)

    # Save attributes before the row is deleted; DSDB needs them below.
    name = dbcity.name
    country = dbcity.country.name
    fullname = dbcity.fullname
    plenary = PlenaryCity(dbcity, logger=logger)

    # Delegate the actual location removal to the generic command.
    CommandDelLocation.render(self, session=session, name=city,
                              type='city', **arguments)
    session.flush()

    key = plenary.get_remove_key()
    try:
        lock_queue.acquire(key)
        plenary.remove(locked=True)

        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.del_city(name, country, fullname)
        dsdb_runner.commit_or_rollback()
    except:
        # Put the plenary file back if DSDB (or removal) failed.
        plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return
def render(self, session, machine, model, vendor, machine_type, chassis,
           slot, **arguments):
    """Search for machines matching the given filters.

    Returns all matching Machine rows, ordered by label.  Location
    filters are taken from **arguments via get_location().
    """
    q = session.query(Machine)
    if machine:
        # TODO: This command still mixes search/show facilities.
        # For now, give an error if machine name not found, but
        # also allow the command to be used to check if the machine has
        # the requested attributes (via the standard query filters).
        # In the future, this should be clearly separated as 'show machine'
        # and 'search machine'.
        machine = AqStr.normalize(machine)
        Machine.check_label(machine)
        # Raises if the machine does not exist at all.
        Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(label=machine)
    dblocation = get_location(session, **arguments)
    if dblocation:
        q = q.filter_by(location=dblocation)
    if chassis:
        dbchassis = Chassis.get_unique(session, chassis, compel=True)
        q = q.join('chassis_slot')
        q = q.filter_by(chassis=dbchassis)
        q = q.reset_joinpoint()
    if slot is not None:
        q = q.join('chassis_slot')
        q = q.filter_by(slot_number=slot)
        q = q.reset_joinpoint()
    if model or vendor or machine_type:
        # Subquery of matching model IDs; compel raises if nothing matches.
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        model_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))
    return q.order_by(Machine.label).all()
def render(self, session, logger, switch, label, model, rack, type, ip,
           interface, mac, vendor, serial, comments, **arguments):
    """Create a new switch, its primary name/address, plenary and DSDB entry."""
    dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                               machine_type='switch', compel=True)
    dblocation = get_location(session, rack=rack)

    # Reserve the primary DNS name/IP; preclude ensures it is not in use.
    dbdns_rec, newly_created = grab_address(session, switch, ip,
                                            allow_restricted_domain=True,
                                            allow_reserved=True,
                                            preclude=True)
    if not label:
        # Derive the hardware label from the short DNS name.
        label = dbdns_rec.fqdn.name
        try:
            Switch.check_label(label)
        except ArgumentError:
            raise ArgumentError("Could not deduce a valid hardware label "
                                "from the switch name. Please specify "
                                "--label.")

    # FIXME: What do the error messages for an invalid enum (switch_type)
    # look like?
    dbswitch = Switch(label=label, switch_type=type, location=dblocation,
                      model=dbmodel, serial_no=serial, comments=comments)
    session.add(dbswitch)
    dbswitch.primary_name = dbdns_rec

    # FIXME: get default name from the model
    iftype = "oa"
    if not interface:
        interface = "xge"
        ifcomments = "Created automatically by add_switch"
    else:
        ifcomments = None
    # Loopback interfaces get their own type; everything else is "oa".
    if interface.lower().startswith("lo"):
        iftype = "loopback"

    dbinterface = get_or_create_interface(session, dbswitch,
                                          name=interface, mac=mac,
                                          interface_type=iftype,
                                          comments=ifcomments)
    dbnetwork = get_net_id_from_ip(session, ip)
    # TODO: should we call check_ip_restrictions() here?
    assign_address(dbinterface, ip, dbnetwork)

    session.flush()

    plenary = PlenarySwitch(dbswitch, logger=logger)
    with plenary.get_write_key() as key:
        plenary.stash()
        try:
            plenary.write(locked=True)
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbswitch, None)
            dsdb_runner.commit_or_rollback("Could not add switch to DSDB")
        except:
            # Undo the plenary write if DSDB failed.
            plenary.restore_stash()
            raise

    return
def search_hardware_entity_query(session, hardware_type=HardwareEntity,
                                 subquery=False,
                                 model=None, vendor=None, machine_type=None,
                                 exact_location=False,
                                 mac=None, pg=None, serial=None,
                                 interface_model=None, interface_vendor=None,
                                 **kwargs):
    """Build a query over hardware entities matching the given filters.

    Returns the (unexecuted) SQLAlchemy query.  Location filters come
    from **kwargs via get_location().  With subquery=True the ORDER BY
    is omitted so the result can be embedded in another query.
    """
    q = session.query(hardware_type)
    if hardware_type is HardwareEntity:
        q = q.with_polymorphic('*')
    # The ORM deduplicates the result if we query full objects, but not if we
    # query just the label
    q = q.distinct()
    dblocation = get_location(session, **kwargs)
    if dblocation:
        if exact_location:
            q = q.filter_by(location=dblocation)
        else:
            # Include hardware in all child locations as well.
            childids = dblocation.offspring_ids()
            q = q.filter(HardwareEntity.location_id.in_(childids))
    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(HardwareEntity.model_id.in_(subq))
    if mac or pg or interface_vendor or interface_model:
        q = q.join('interfaces')
        if mac:
            q = q.filter_by(mac=mac)
        if pg:
            q = q.filter_by(port_group=pg)
        if interface_model or interface_vendor:
            # HardwareEntity also has a .model relation, so we have to be
            # explicit here
            q = q.join(Interface.model)
            if interface_model:
                q = q.filter_by(name=interface_model)
            if interface_vendor:
                a_vendor = aliased(Vendor)
                q = q.join(a_vendor)
                q = q.filter_by(name=interface_vendor)
        q = q.reset_joinpoint()
    if serial:
        q = q.filter_by(serial_no=serial)
    if not subquery:
        # Oracle does not like "ORDER BY" in a sub-select, so we have to
        # suppress it if we want to use this query as a subquery
        q = q.order_by(HardwareEntity.label)
    return q
def render(self, session, logger, chassis, label, rack, model, vendor, ip,
           interface, mac, serial, comments, **arguments):
    """Create a new chassis with its primary name and optional IP."""
    # Reserve the primary DNS name/IP; preclude ensures it is not in use.
    dbdns_rec, newly_created = grab_address(session, chassis, ip,
                                            allow_restricted_domain=True,
                                            allow_reserved=True,
                                            preclude=True)
    if not label:
        # Derive the hardware label from the short DNS name.
        label = dbdns_rec.fqdn.name
        try:
            Chassis.check_label(label)
        except ArgumentError:
            raise ArgumentError("Could not deduce a valid hardware label "
                                "from the chassis name. Please specify "
                                "--label.")

    dblocation = get_location(session, rack=rack)
    dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                               machine_type='chassis', compel=True)

    # FIXME: Precreate chassis slots?
    dbchassis = Chassis(label=label, location=dblocation, model=dbmodel,
                        serial_no=serial, comments=comments)
    session.add(dbchassis)
    dbchassis.primary_name = dbdns_rec

    # FIXME: get default name from the model
    if not interface:
        interface = "oa"
        ifcomments = "Created automatically by add_chassis"
    else:
        ifcomments = None
    dbinterface = get_or_create_interface(session, dbchassis,
                                          name=interface, mac=mac,
                                          interface_type="oa",
                                          comments=ifcomments)
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip)
        check_ip_restrictions(dbnetwork, ip)
        assign_address(dbinterface, ip, dbnetwork)

    session.flush()

    # Only registered in DSDB when there is an address to register.
    if ip:
        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbchassis, None)
        dsdb_runner.commit_or_rollback("Could not add chassis to DSDB")

    return
def render(self, session, logger, network_device, label, model, type, ip,
           interface, iftype, mac, vendor, serial, comments, **arguments):
    """Create a new network device, its primary name, plenary and DSDB entry."""
    dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                               compel=True)

    if not dbmodel.model_type.isNetworkDeviceType():
        raise ArgumentError("This command can only be used to "
                            "add network devices.")

    dblocation = get_location(session, compel=True, **arguments)

    # Reserve the primary DNS name/IP; preclude ensures it is not in use.
    dbdns_rec, newly_created = grab_address(session, network_device, ip,
                                            allow_restricted_domain=True,
                                            allow_reserved=True,
                                            preclude=True)
    if not label:
        # Derive the hardware label from the short DNS name.
        label = dbdns_rec.fqdn.name
        try:
            NetworkDevice.check_label(label)
        except ArgumentError:
            raise ArgumentError("Could not deduce a valid hardware label "
                                "from the network device name. Please specify "
                                "--label.")

    # FIXME: What do the error messages for an invalid enum (switch_type)
    # look like?
    dbnetdev = NetworkDevice(label=label, switch_type=type,
                             location=dblocation, model=dbmodel,
                             serial_no=serial, comments=comments)
    session.add(dbnetdev)
    dbnetdev.primary_name = dbdns_rec

    check_netdev_iftype(iftype)

    dbinterface = get_or_create_interface(session, dbnetdev,
                                          name=interface, mac=mac,
                                          interface_type=iftype)
    dbnetwork = get_net_id_from_ip(session, ip)
    # TODO: should we call check_ip_restrictions() here?
    assign_address(dbinterface, ip, dbnetwork, logger=logger)

    session.flush()

    plenary = Plenary.get_plenary(dbnetdev, logger=logger)
    with plenary.get_key():
        plenary.stash()
        try:
            plenary.write(locked=True)
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbnetdev, None)
            dsdb_runner.commit_or_rollback("Could not add network device to DSDB")
        except:
            # Undo the plenary write if DSDB failed.
            plenary.restore_stash()
            raise

    return
def render(self, session, logger, rack, row, column, room, building, bunker,
           fullname, default_dns_domain, comments, **arguments):
    """Update attributes of an existing rack.

    Re-parenting (--room/--building/--bunker) is allowed only within the
    same building; afterwards all machine plenaries under the rack are
    rewritten.
    """
    dbrack = get_location(session, rack=rack)

    if row is not None:
        dbrack.rack_row = row
    if column is not None:
        dbrack.rack_column = column
    if fullname is not None:
        dbrack.fullname = fullname
    if comments is not None:
        dbrack.comments = comments
    if default_dns_domain is not None:
        # Empty string clears the default; a non-empty value must name
        # an existing DNS domain.
        if default_dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, default_dns_domain,
                                                compel=True)
            dbrack.default_dns_domain = dbdns_domain
        else:
            dbrack.default_dns_domain = None

    if bunker or room or building:
        dbparent = get_location(session, bunker=bunker, room=room,
                                building=building)
        # This one would change the template's locations hence forbidden
        if dbparent.building != dbrack.building:
            # Doing this both to reduce user error and to limit
            # testing required.
            raise ArgumentError("Cannot change buildings. {0} is in {1} "
                                "while {2} is in {3}.".format(
                                    dbparent, dbparent.building,
                                    dbrack, dbrack.building))
        dbrack.update_parent(parent=dbparent)

    session.flush()

    # Rewrite every machine plenary under the rack so template paths
    # reflect any location change.
    plenaries = PlenaryCollection(logger=logger)
    q = session.query(Machine)
    q = q.filter(Machine.location_id.in_(dbrack.offspring_ids()))
    for dbmachine in q:
        plenaries.append(Plenary.get_plenary(dbmachine))
    plenaries.write()
def render(self, session, logger, rack, type, clear, vlan, **arguments):
    """Poll all network devices located in a rack, optionally by type."""
    dblocation = get_location(session, rack=rack)
    NetworkDevice.check_type(type)
    query = session.query(NetworkDevice).filter_by(location=dblocation)
    if type:
        query = query.filter_by(switch_type=type)
    netdevs = query.all()
    if not netdevs:
        raise NotFoundException("No network device found.")
    return self.poll(session, logger, netdevs, clear, vlan)
def render(self, session, logger, rack, type, clear, vlan, **arguments):
    """Poll all switches located in a rack, optionally by type."""
    dblocation = get_location(session, rack=rack)
    Switch.check_type(type)
    query = session.query(Switch).filter_by(location=dblocation)
    if type:
        query = query.filter_by(switch_type=type)
    switches = query.all()
    if not switches:
        raise NotFoundException("No switch found.")
    return self.poll(session, logger, switches, clear, vlan)
def render(self, session, network_environment, **arguments):
    """List network environments, optionally filtered by name/location."""
    query = session.query(NetworkEnvironment)
    # Eager-load related data to avoid per-row queries during display.
    query = query.options(undefer('comments'),
                          joinedload('dns_environment'),
                          undefer('dns_environment.comments'),
                          joinedload('location'))
    if network_environment:
        query = query.filter_by(name=network_environment)
    dblocation = get_location(session, **arguments)
    if dblocation:
        query = query.filter_by(location=dblocation)
    return query.order_by(NetworkEnvironment.name).all()
def search_hardware_entity_query(session, hardware_type=HardwareEntity,
                                 subquery=False,
                                 model=None, vendor=None, machine_type=None,
                                 exact_location=False, ip=None,
                                 mac=None, pg=None, serial=None,
                                 interface_model=None, interface_vendor=None,
                                 **kwargs):
    """Build a query over hardware entities matching the given filters.

    Returns the (unexecuted) SQLAlchemy query.  Location filters come
    from **kwargs via get_location().  With subquery=True the ORDER BY
    is omitted so the result can be embedded in another query.
    """
    q = session.query(hardware_type)
    if hardware_type is HardwareEntity:
        q = q.with_polymorphic('*')
    # The ORM deduplicates the result if we query full objects, but not if we
    # query just the label
    q = q.distinct()
    dblocation = get_location(session, **kwargs)
    if dblocation:
        if exact_location:
            q = q.filter_by(location=dblocation)
        else:
            # Include hardware in all child locations as well.
            childids = dblocation.offspring_ids()
            q = q.filter(HardwareEntity.location_id.in_(childids))
    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        model_type=machine_type,
                                        compel=True)
        q = q.filter(HardwareEntity.model_id.in_(subq))
    if ip or mac or pg or interface_vendor or interface_model:
        q = q.join('interfaces')
        if mac:
            q = q.filter_by(mac=mac)
        if pg:
            q = q.filter_by(port_group=pg)
        if ip:
            # Filter on the address assigned to the joined interface.
            q = q.join(AddressAssignment)
            q = q.filter(AddressAssignment.ip == ip)
        if interface_model or interface_vendor:
            # HardwareEntity also has a .model relation, so we have to be
            # explicit here
            q = q.join(Interface.model)
            if interface_model:
                q = q.filter_by(name=interface_model)
            if interface_vendor:
                a_vendor = aliased(Vendor)
                q = q.join(a_vendor)
                q = q.filter_by(name=interface_vendor)
        q = q.reset_joinpoint()
    if serial:
        q = q.filter_by(serial_no=serial)
    if not subquery:
        # Oracle does not like "ORDER BY" in a sub-select, so we have to
        # suppress it if we want to use this query as a subquery
        q = q.order_by(HardwareEntity.label)
    return q
def render(self, session, logger, campus, **arguments):
    """Delete a campus and its DSDB record."""
    dbcampus = get_location(session, campus=campus)
    # Save the name before the row is deleted; DSDB needs it below.
    name = dbcampus.name

    # Delegate the actual location removal to the generic command.
    result = CommandDelLocation.render(self, session=session, name=name,
                                       type='campus', **arguments)
    session.flush()

    dsdb_runner = DSDBRunner(logger=logger)
    dsdb_runner.del_campus(name)
    dsdb_runner.commit_or_rollback()

    return result
def update_cluster_location(session, logger, dbcluster, fix_location,
                            plenaries, remove_plenaries, **arguments):
    """Update a cluster's location constraint.

    Returns True if the constraint changed.  Plenaries for machines that
    physically move template paths are queued on remove_plenaries (old
    path) and plenaries (new path).
    """
    location_updated = False
    dblocation = get_location(session, **arguments)
    if fix_location:
        # Infer the tightest location that covers all member hosts.
        dblocation = dbcluster.minimum_location
        if not dblocation:
            raise ArgumentError("Cannot infer the cluster location from "
                                "the host locations.")
    if dblocation:
        errors = []
        if not dblocation.campus:
            errors.append("{0} is not within a campus.".format(dblocation))

        # Every member must already be at or below the new constraint.
        if dbcluster.cluster_type != 'meta':
            for host in dbcluster.hosts:
                if host.machine.location != dblocation and \
                   dblocation not in host.machine.location.parents:
                    errors.append("{0} has location {1}.".format(
                        host, host.machine.location))
        else:
            for cluster in dbcluster.members:
                if cluster.location_constraint != dblocation and \
                   dblocation not in cluster.location_constraint.parents:
                    errors.append("{0} has location {1}.".format(
                        cluster, cluster.location_constraint))

        if errors:
            raise ArgumentError("Cannot set {0} location constraint to "
                                "{1}:\n{2}".format(dbcluster, dblocation,
                                                   "\n".join(errors)))

        if dbcluster.location_constraint != dblocation:
            if machine_plenary_will_move(old=dbcluster.location_constraint,
                                         new=dblocation):
                for dbmachine in dbcluster.machines:
                    # This plenary will have a path to the old location.
                    plenary = Plenary.get_plenary(dbmachine, logger=logger)
                    remove_plenaries.append(plenary)
                    dbmachine.location = dblocation
                    session.add(dbmachine)
                    # This plenary will have a path to the new location.
                    plenaries.append(Plenary.get_plenary(dbmachine))
                    # Update the path to the machine plenary in the
                    # container resource
                    plenaries.append(
                        Plenary.get_plenary(dbmachine.vm_container))
            dbcluster.location_constraint = dblocation
            location_updated = True

    return location_updated
def update_cluster_location(session, logger, dbcluster, fix_location,
                            plenaries, remove_plenaries, **arguments):
    """Update a cluster's location constraint.

    Returns True if the constraint changed.  Plenaries for machines that
    physically move template paths are queued on remove_plenaries (old
    path) and plenaries (new path).
    """
    location_updated = False
    dblocation = get_location(session, **arguments)
    if fix_location:
        # Infer the tightest location that covers all member hosts.
        dblocation = dbcluster.minimum_location
        if not dblocation:
            raise ArgumentError("Cannot infer the cluster location from "
                                "the host locations.")
    if dblocation:
        errors = []
        if not dblocation.campus:
            errors.append("{0} is not within a campus.".format(dblocation))

        # Every member must already be at or below the new constraint.
        if dbcluster.cluster_type != 'meta':
            for host in dbcluster.hosts:
                if host.machine.location != dblocation and \
                   dblocation not in host.machine.location.parents:
                    errors.append("{0} has location {1}.".format(host,
                                  host.machine.location))
        else:
            for cluster in dbcluster.members:
                if cluster.location_constraint != dblocation and \
                   dblocation not in cluster.location_constraint.parents:
                    errors.append("{0} has location {1}.".format(cluster,
                                  cluster.location_constraint))

        if errors:
            raise ArgumentError("Cannot set {0} location constraint to "
                                "{1}:\n{2}".format(dbcluster, dblocation,
                                                   "\n".join(errors)))

        if dbcluster.location_constraint != dblocation:
            if machine_plenary_will_move(old=dbcluster.location_constraint,
                                         new=dblocation):
                for dbmachine in dbcluster.machines:
                    # This plenary will have a path to the old location.
                    plenary = Plenary.get_plenary(dbmachine, logger=logger)
                    remove_plenaries.append(plenary)
                    dbmachine.location = dblocation
                    session.add(dbmachine)
                    # This plenary will have a path to the new location.
                    plenaries.append(Plenary.get_plenary(dbmachine))
                    # Update the path to the machine plenary in the
                    # container resource
                    plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbcluster.location_constraint = dblocation
            location_updated = True

    return location_updated
def get_or_create_rack(session, rackid, rackrow, rackcolumn,
                       building=None, room=None, bunker=None,
                       fullname=None, comments=None, preclude=False):
    """Look up a rack by id within a building, creating it if missing.

    The rack name is the building name plus rackid (unless rackid
    already carries the building prefix).  With preclude=True an
    existing rack raises ArgumentError instead of being returned.
    """
    dblocation = get_location(session, building=building, room=room,
                              bunker=bunker, compel=True)
    dbbuilding = dblocation.building
    if not dbbuilding:  # pragma: no cover
        raise ArgumentError("The rack must be inside a building.")

    # The database contains normalized values so we have to normalize the input
    # before doing any comparisons.
    if rackrow is not None:
        rackrow = str(rackrow).strip().lower()
    if rackcolumn is not None:
        rackcolumn = str(rackcolumn).strip().lower()
    if rackid is not None:
        rackid = str(rackid).strip().lower()

    # Because of http, rackid comes in as a string.  It just
    # gets treated as such here.
    # Check for redundancy...
    if len(rackid) > len(dbbuilding.name) and rackid.startswith(
            dbbuilding.name):
        rack = rackid
    else:
        rack = dbbuilding.name + rackid

    try:
        dbrack = session.query(Rack).filter_by(name=rack).one()
        # An existing rack must agree with any row/column supplied.
        if rackrow is not None and rackrow != dbrack.rack_row:
            raise ArgumentError("Found rack with name %s, but the current "
                                "row %s does not match given row %s." %
                                (dbrack.name, dbrack.rack_row, rackrow))
        if rackcolumn is not None and rackcolumn != dbrack.rack_column:
            raise ArgumentError("Found rack with name %s, but the current "
                                "column %s does not match given column %s." %
                                (dbrack.name, dbrack.rack_column,
                                 rackcolumn))
        if preclude:
            raise ArgumentError("{0} already exists.".format(dbrack))
        return dbrack
    except NoResultFound:
        pass

    if fullname is None:
        fullname = rack
    dbrack = Rack(name=rack, fullname=fullname, parent=dblocation,
                  rack_row=rackrow, rack_column=rackcolumn,
                  comments=comments)
    session.add(dbrack)
    return dbrack
def render(self, session, network_environment, clear_location, comments,
           **arguments):
    """Update a network environment's location binding and comments.

    --clear_location wins over a simultaneously supplied location.
    """
    dbnet_env = NetworkEnvironment.get_unique(session, network_environment,
                                              compel=True)

    # Currently input.xml lists --building only, but that may change
    dblocation = get_location(session, **arguments)
    if dblocation:
        dbnet_env.location = dblocation
    if clear_location:
        dbnet_env.location = None

    if comments is not None:
        dbnet_env.comments = comments

    session.flush()
    return
def render(self, session, service, instance, archetype, personality,
           networkip, **kwargs):
    """Map a service instance to a location (and optionally a network).

    With --personality the mapping is personality-specific, otherwise it
    is a plain service map.  Creating an already-existing mapping is a
    silent no-op.
    """
    dbservice = Service.get_unique(session, service, compel=True)
    dblocation = get_location(session, **kwargs)
    dbinstance = get_service_instance(session, dbservice, instance)
    if networkip:
        dbnet_env = NetworkEnvironment.get_unique_or_default(session)
        dbnetwork = get_network_byip(session, networkip, dbnet_env)
    else:
        dbnetwork = None
    if archetype is None and personality:
        # Can't get here with the standard aq client.
        raise ArgumentError("Specifying --personality requires you to "
                            "also specify --archetype.")

    # Extra constructor arguments for the personality-specific map.
    kwargs = {}

    if archetype and personality:
        dbpersona = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)
        map_class = PersonalityServiceMap
        query = session.query(map_class).filter_by(personality=dbpersona)
        kwargs["personality"] = dbpersona
    else:
        map_class = ServiceMap
        query = session.query(map_class)

    dbmap = query.filter_by(location=dblocation,
                            service_instance=dbinstance,
                            network=dbnetwork).first()

    if not dbmap:
        dbmap = map_class(service_instance=dbinstance, location=dblocation,
                          network=dbnetwork, **kwargs)
        session.add(dbmap)

    session.flush()
    return
def get_or_create_rack(session, rackid, rackrow, rackcolumn,
                       building=None, room=None, bunker=None,
                       fullname=None, comments=None):
    """Look up a rack by id within a building, creating it if missing.

    The rack name is the building name plus rackid (unless rackid
    already carries the building prefix).
    """
    # NOTE(review): no compel=True here, so get_location may return None
    # when no location argument was given, making the .building access
    # below fail — confirm callers always pass a location.
    dblocation = get_location(session, building=building, room=room,
                              bunker=bunker)
    dbbuilding = dblocation.building
    if not dbbuilding:  # pragma: no cover
        raise ArgumentError("The rack must be inside a building.")

    # The database contains normalized values so we have to normalize the input
    # before doing any comparisons.
    if rackrow is not None:
        rackrow = str(rackrow).strip().lower()
    if rackcolumn is not None:
        rackcolumn = str(rackcolumn).strip().lower()
    if rackid is not None:
        rackid = str(rackid).strip().lower()

    # Because of http, rackid comes in as a string.  It just
    # gets treated as such here.
    # Check for redundancy...
    if len(rackid) > len(dbbuilding.name) and rackid.startswith(
            dbbuilding.name):
        rack = rackid
    else:
        rack = dbbuilding.name + rackid

    try:
        dbrack = session.query(Rack).filter_by(name=rack).one()
        # An existing rack must agree with any row/column supplied.
        if rackrow is not None and rackrow != dbrack.rack_row:
            raise ArgumentError("Found rack with name %s, but the current "
                                "row %s does not match given row %s." %
                                (dbrack.name, dbrack.rack_row, rackrow))
        if rackcolumn is not None and rackcolumn != dbrack.rack_column:
            raise ArgumentError("Found rack with name %s, but the current "
                                "column %s does not match given column %s." %
                                (dbrack.name, dbrack.rack_column,
                                 rackcolumn))
        return dbrack
    except NoResultFound:
        pass

    if fullname is None:
        fullname = rack
    dbrack = Rack(name=rack, fullname=fullname, parent=dblocation,
                  rack_row=rackrow, rack_column=rackcolumn,
                  comments=comments)
    session.add(dbrack)
    return dbrack
def update_cluster_location(session, logger, dbcluster, fix_location,
                            plenaries, **arguments):
    """Update a cluster's location constraint.

    Validates that every member host (or member cluster, for a
    MetaCluster) already sits at or below the new location, then moves
    the cluster's virtual machines and updates the constraint.
    """
    dblocation = get_location(session, **arguments)
    if fix_location:
        # Infer the tightest location that covers all member hosts.
        dblocation = dbcluster.minimum_location
        if not dblocation:
            raise ArgumentError("Cannot infer the cluster location from "
                                "the host locations.")
    if dblocation:
        errors = []
        if not dblocation.campus:
            errors.append("{0} is not within a campus.".format(dblocation))

        # Every member must already be at or below the new constraint.
        if not isinstance(dbcluster, MetaCluster):
            for host in dbcluster.hosts:
                if host.hardware_entity.location != dblocation and \
                   dblocation not in host.hardware_entity.location.parents:
                    errors.append("{0} has location {1}."
                                  .format(host,
                                          host.hardware_entity.location))
        else:
            for cluster in dbcluster.members:
                if cluster.location_constraint != dblocation and \
                   dblocation not in cluster.location_constraint.parents:
                    errors.append("{0} has location {1}."
                                  .format(cluster,
                                          cluster.location_constraint))

        if errors:
            raise ArgumentError("Cannot set {0} location constraint to "
                                "{1}:\n{2}".format(dbcluster, dblocation,
                                                   "\n".join(errors)))

        if dbcluster.location_constraint != dblocation:
            for dbmachine in dbcluster.virtual_machines:
                # The plenary objects should be created before changing the
                # location, so they can track the change
                plenaries.append(Plenary.get_plenary(dbmachine,
                                                     logger=logger))

                # Update the path to the machine plenary in the container
                # resource
                plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
                dbmachine.location = dblocation

            dbcluster.location_constraint = dblocation

    return
def render(self, session, network_environment, fullinfo, style, **arguments):
    """List network environments; names only for terse raw output."""
    query = session.query(NetworkEnvironment)
    # Eager-load related data to avoid per-row queries during display.
    query = query.options(undefer('comments'),
                          joinedload('dns_environment'),
                          undefer('dns_environment.comments'),
                          joinedload('location'))
    if network_environment:
        query = query.filter_by(name=network_environment)
    dblocation = get_location(session, **arguments)
    if dblocation:
        query = query.filter_by(location=dblocation)
    query = query.order_by(NetworkEnvironment.name)
    results = query.all()
    if fullinfo or style != "raw":
        return results
    return StringAttributeList(results, "name")
def render(self, session, network, ip, network_environment, all, style,
           type=False, hosts=False, **arguments):
    """Show networks, either one selected by name/IP or a filtered list."""
    options = [undefer('comments'), joinedload('location')]
    # Host output needs the address assignments eagerly loaded.
    if hosts or style == "proto":
        options.extend([
            subqueryload("assignments"),
            joinedload("assignments.interface"),
            joinedload("assignments.dns_records"),
            subqueryload("dynamic_stubs")
        ])
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    # --network / --ip select a single network.  The and/or chaining
    # means an --ip hit overrides a --network hit; each is None when the
    # corresponding option was not given.
    dbnetwork = network and get_network_byname(
        session, network, dbnet_env, query_options=options) or None
    dbnetwork = ip and get_network_byip(
        session, ip, dbnet_env, query_options=options) or dbnetwork

    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    q = q.options(*options)

    if dbnetwork:
        if hosts:
            return NetworkHostList([dbnetwork])
        else:
            return dbnetwork

    if type:
        q = q.filter_by(network_type=type)

    dblocation = get_location(session, **arguments)
    if dblocation:
        # Include networks in all child locations as well.
        childids = dblocation.offspring_ids()
        q = q.filter(Network.location_id.in_(childids))

    q = q.order_by(Network.ip)
    q = q.options(*options)

    if hosts:
        return NetworkHostList(q.all())
    else:
        return SimpleNetworkList(q.all())
def render(self, session, logger, rack, row, column, fullinfo, **arguments):
    """Search racks by name, row, column and containing location."""
    dbparent = get_location(session, **arguments)
    query = session.query(Rack)
    if rack:
        query = query.filter_by(name=rack)
    if row:
        query = query.filter_by(rack_row=row)
    if column:
        query = query.filter_by(rack_column=column)
    if dbparent:
        # Restrict to racks somewhere under the given location.
        query = query.filter(Location.parents.contains(dbparent))
    results = query.all()
    if fullinfo:
        return results
    return SimpleLocationList(results)
def render(self, session, rack, row, column, fullinfo, style, **arguments):
    """List racks; raw style without --fullinfo yields only the names."""
    query = session.query(Rack)
    # Apply only the filters the caller actually supplied.
    for attr, value in (('name', rack),
                        ('rack_row', row),
                        ('rack_column', column)):
        if value:
            query = query.filter_by(**{attr: value})
    parent = get_location(session, **arguments)
    if parent:
        # Restrict to racks somewhere underneath the given location.
        query = query.filter(Location.parents.contains(parent))
    racks = query.all()
    if fullinfo or style != "raw":
        return racks
    return StringAttributeList(racks, "name")
def render(self, session, logger, building, **arguments):
    """Delete a building, then mirror the removal in DSDB."""
    dbbuilding = get_location(session, building=building)

    # Capture everything DSDB needs before the row disappears.
    dbcity = dbbuilding.city
    old_address = dbbuilding.address
    dbcampus = dbbuilding.campus

    result = CommandDelLocation.render(self, session=session, name=building,
                                       type='building', **arguments)
    session.flush()

    dsdb_runner = DSDBRunner(logger=logger)
    if dbcampus:
        # Detach from the campus before removing the building itself.
        dsdb_runner.del_campus_building(dbcampus.name, building)
    dsdb_runner.del_building(building, dbcity.name, old_address)
    dsdb_runner.commit_or_rollback()

    return result
def render(self, session, dns_domain, position, comments, **kw):
    """Map a DNS domain to a location, optionally at a given search position."""
    dbdns_domain = DnsDomain.get_unique(session, name=dns_domain,
                                        compel=True)

    dblocation = get_location(session,
                              query_options=[subqueryload('dns_maps')],
                              **kw)
    if not dblocation:
        raise ArgumentError("Please specify a location.")

    # Refuse to create a duplicate mapping for the same domain/location.
    DnsMap.get_unique(session, dns_domain=dbdns_domain,
                      location=dblocation, preclude=True)

    new_map = DnsMap(dns_domain=dbdns_domain, comments=comments)
    if position is None:
        dblocation.dns_maps.append(new_map)
    else:
        # Insert at the requested index in the ordered map list.
        dblocation.dns_maps.insert(position, new_map)

    session.flush()
    return
def render(self, session, dns_domain, **kw):
    """Remove the mapping of a DNS domain to a location, if one exists."""
    dbdns_domain = DnsDomain.get_unique(session, name=dns_domain,
                                        compel=True)

    dblocation = get_location(session,
                              query_options=[subqueryload('dns_maps')],
                              **kw)
    if not dblocation:
        raise ArgumentError("Please specify a location.")

    # Find the entry for this domain; silently succeed if there is none.
    match = next((entry for entry in dblocation.dns_maps
                  if entry.dns_domain == dbdns_domain), None)
    if match:
        dblocation.dns_maps.remove(match)

    session.flush()
    return
def render(self, session, service, instance, archetype, personality,
           networkip, **arguments):
    """Delete a (personality) service map entry for the given scope."""
    dbservice = Service.get_unique(session, service, compel=True)
    dbinstance = ServiceInstance.get_unique(session, service=dbservice,
                                            name=instance, compel=True)
    dblocation = get_location(session, **arguments)

    dbnetwork = None
    if networkip:
        dbnet_env = NetworkEnvironment.get_unique_or_default(session)
        dbnetwork = get_network_byip(session, networkip, dbnet_env)

    if personality:
        if not archetype:
            # Can't get here with the standard aq client.
            raise ArgumentError("Specifying --personality requires you to "
                                "also specify --archetype.")
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        # Personality-scoped maps live in a separate table.
        q = session.query(PersonalityServiceMap)
        q = q.filter_by(personality=dbpersonality)
    else:
        q = session.query(ServiceMap)

    q = q.filter_by(location=dblocation, service_instance=dbinstance,
                    network=dbnetwork)
    dbmap = q.first()
    if dbmap:
        session.delete(dbmap)

    session.flush()
    return
def render(self, session, network_environment, dns_environment, comments,
           **arguments):
    """Create a new network environment bound to a DNS environment."""
    validate_nlist_key("network environment", network_environment)
    # Fail if an environment with this name already exists.
    NetworkEnvironment.get_unique(session, network_environment,
                                  preclude=True)
    dns_env = DnsEnvironment.get_unique(session, dns_environment,
                                        compel=True)
    # Currently input.xml lists --building only, but that may change
    dblocation = get_location(session, **arguments)
    net_env = NetworkEnvironment(name=network_environment,
                                 dns_environment=dns_env,
                                 location=dblocation, comments=comments)
    # The default network environment and the default DNS environment
    # must go together.
    if dns_env.is_default != net_env.is_default:
        raise ArgumentError("Only the default network environment may be "
                            "associated with the default DNS environment.")
    session.add(net_env)
    session.flush()
    return
def render(self, session, dbuser, network, ip, network_environment, type,
           side, comments, **arguments):
    """Update type, side, location or comments on the selected network(s).

    Raises ArgumentError when neither --network nor --ip was given, and
    NotFoundException when nothing matches.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)

    if not network and not ip:
        raise ArgumentError("Please specify either --network or --ip.")

    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    if network:
        q = q.filter_by(name=network)
    if ip:
        q = q.filter_by(ip=ip)

    networks = q.all()
    if not networks:
        raise NotFoundException("No matching network was found.")

    dblocation = get_location(session, **arguments)

    # Iterate over the rows already fetched instead of executing the query
    # a second time (the original called q.all() twice).
    for dbnetwork in networks:
        if type:
            dbnetwork.network_type = type
        if side:
            dbnetwork.side = side
        if dblocation:
            dbnetwork.location = dblocation
        if comments is not None:
            # An empty or whitespace-only string clears the comments.
            if comments.strip() == "":
                dbnetwork.comments = None
            else:
                dbnetwork.comments = comments

    session.flush()
    return
def render(self, session, dbuser, network, ip, network_environment, type,
           side, comments, **arguments):
    """Update type, side, location or comments on the selected network(s).

    Raises ArgumentError when neither --network nor --ip was given, and
    NotFoundException when nothing matches.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)

    if not network and not ip:
        raise ArgumentError("Please specify either --network or --ip.")

    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    if network:
        q = q.filter_by(name=network)
    if ip:
        q = q.filter_by(ip=ip)

    networks = q.all()
    if not networks:
        raise NotFoundException("No matching network was found.")

    dblocation = get_location(session, **arguments)

    # Iterate over the rows already fetched instead of executing the query
    # a second time (the original called q.all() twice).
    for dbnetwork in networks:
        if type:
            dbnetwork.network_type = type
        if side:
            dbnetwork.side = side
        if dblocation:
            dbnetwork.location = dblocation
        if comments is not None:
            # An empty or whitespace-only string clears the comments.
            if comments.strip() == "":
                dbnetwork.comments = None
            else:
                dbnetwork.comments = comments

    session.flush()
    return
def render(self, session, dns_domain, include_parents, **kwargs):
    """Show DNS domain/location mappings, optionally filtered."""
    dblocation = get_location(session, **kwargs)

    q = session.query(DnsMap)
    q = q.options(undefer('comments'))

    if dblocation:
        if include_parents:
            # Match the location itself plus all of its ancestors.
            ids = [ancestor.id for ancestor in dblocation.parents]
            ids.append(dblocation.id)
            q = q.filter(DnsMap.location_id.in_(ids))
        else:
            q = q.filter_by(location=dblocation)

    if dns_domain:
        dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                            compel=True)
        q = q.filter_by(dns_domain=dbdns_domain)

    # Join the related tables so they can be loaded eagerly and used in
    # the ordering below.
    q = q.join(DnsDomain).options(contains_eager('dns_domain'))
    q = q.join((Location, DnsMap.location_id == Location.id))
    q = q.options(contains_eager('location'))
    q = q.order_by(Location.location_type, Location.name, DnsMap.position)
    return q.all()
def render(self, session, logger, city, **arguments):
    """Delete a city: remove the DB row, its plenary and its DSDB record."""
    dbcity = get_location(session, city=city)

    # Save what DSDB needs before the database row is deleted.
    name = dbcity.name
    country = dbcity.country.name
    fullname = dbcity.fullname

    plenary = Plenary.get_plenary(dbcity, logger=logger)

    CommandDelLocation.render(self, session=session, name=city,
                              type='city', **arguments)
    session.flush()

    # Remove the plenary and update DSDB under the plenary's key; if
    # anything fails, put the template back and re-raise.
    with plenary.get_key():
        try:
            plenary.remove(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.del_city(name, country, fullname)
            dsdb_runner.commit_or_rollback()
        except:
            plenary.restore_stash()
            raise

    return
def render(self, session, logger, building, dryrun, incremental,
           **arguments):
    """Refresh network data from a QIP dump, optionally for one building.

    Raises ArgumentError if --dryrun and --incremental are combined.
    """
    if building:
        dbbuilding = get_location(session, building=building)
    else:
        dbbuilding = None

    # --dryrun and --incremental do not mix well
    if dryrun and incremental:
        raise ArgumentError("--dryrun and --incremental cannot be given "
                            "simultaneously.")

    key = SyncKey(data="network", logger=logger)
    lock_queue.acquire(key)

    rundir = self.config.get("broker", "rundir")
    tempdir = mkdtemp(prefix="refresh_network_", dir=rundir)
    try:
        args = [self.config.get("broker", "qip_dump_subnetdata"),
                "--datarootdir", tempdir, "--format", "txt", "--noaudit"]
        run_command(args, logger=logger)

        # The original leaked this file handle; make sure it is closed
        # even if the refresh fails.
        subnetdata = open(os.path.join(tempdir, "subnetdata.txt"), "r")
        try:
            refresher = QIPRefresh(session, logger, dbbuilding, dryrun,
                                   incremental)
            refresher.refresh(subnetdata)
        finally:
            subnetdata.close()

        session.flush()

        if dryrun:
            # Dry run: throw away every change the refresh made.
            session.rollback()
    finally:
        lock_queue.release(key)
        remove_dir(tempdir, logger=logger)
def render(self, session, logger, hostname, machine, archetype, buildstatus,
           personality, osname, osversion, service, instance, model,
           machine_type, vendor, serial, cluster, guest_on_cluster,
           guest_on_share, member_cluster_share, domain, sandbox, branch,
           sandbox_owner, dns_domain, shortname, mac, ip, networkip,
           network_environment, exact_location, server_of_service,
           server_of_instance, grn, eon_id, fullinfo, **arguments):
    """Search for hosts matching any combination of the given filters.

    Returns either the full Host objects (--fullinfo) or a SimpleHostList,
    ordered by primary name.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)

    q = session.query(Host)

    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(machine=dbmachine)

    # Add the machine definition and the primary name. Use aliases to make
    # sure the end result will be ordered by primary name.
    PriDns = aliased(DnsRecord)
    PriFqdn = aliased(Fqdn)
    PriDomain = aliased(DnsDomain)
    q = q.join(Machine,
               (PriDns, PriDns.id == Machine.primary_name_id),
               (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
               (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
    q = q.order_by(PriFqdn.name, PriDomain.name)
    q = q.options(contains_eager('machine'),
                  contains_eager('machine.primary_name', alias=PriDns),
                  contains_eager('machine.primary_name.fqdn',
                                 alias=PriFqdn),
                  contains_eager('machine.primary_name.fqdn.dns_domain',
                                 alias=PriDomain))
    q = q.reset_joinpoint()

    # Hardware-specific filters
    dblocation = get_location(session, **arguments)
    if dblocation:
        if exact_location:
            q = q.filter(Machine.location == dblocation)
        else:
            childids = dblocation.offspring_ids()
            q = q.filter(Machine.location_id.in_(childids))

    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))

    if serial:
        self.deprecated_option("serial", "Please use search machine "
                               "--serial instead.", logger=logger,
                               **arguments)
        q = q.filter(Machine.serial_no == serial)

    # DNS/IP address related filters
    if mac or ip or networkip or hostname or dns_domain or shortname:
        # Inner joins are cheaper than outer joins, so make some effort to
        # use inner joins when possible
        if mac or ip or networkip:
            q = q.join(Interface)
        else:
            q = q.outerjoin(Interface)
        if ip or networkip:
            q = q.join(AddressAssignment, Network, from_joinpoint=True)
        else:
            q = q.outerjoin(AddressAssignment, Network,
                            from_joinpoint=True)

        if mac:
            self.deprecated_option("mac", "Please use search machine "
                                   "--mac instead.", logger=logger,
                                   **arguments)
            q = q.filter(Interface.mac == mac)
        if ip:
            q = q.filter(AddressAssignment.ip == ip)
            q = q.filter(Network.network_environment == dbnet_env)
        if networkip:
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            q = q.filter(AddressAssignment.network == dbnetwork)

        dbdns_domain = None
        if hostname:
            (shortname, dbdns_domain) = parse_fqdn(session, hostname)
        if dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                                compel=True)

        if shortname or dbdns_domain:
            # Match either the primary name or any A record on an
            # assigned address.
            ARecAlias = aliased(ARecord)
            ARecFqdn = aliased(Fqdn)
            q = q.outerjoin(
                (ARecAlias, and_(ARecAlias.ip == AddressAssignment.ip,
                                 ARecAlias.network_id ==
                                 AddressAssignment.network_id)),
                (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
            if shortname:
                q = q.filter(or_(ARecFqdn.name == shortname,
                                 PriFqdn.name == shortname))
            if dbdns_domain:
                q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                 PriFqdn.dns_domain == dbdns_domain))
        q = q.reset_joinpoint()

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 branch=branch)
    if sandbox_owner:
        dbauthor = get_user_principal(session, sandbox_owner)
    if dbbranch:
        q = q.filter_by(branch=dbbranch)
    if dbauthor:
        q = q.filter_by(sandbox_author=dbauthor)

    if archetype:
        # Added to the searches as appropriate below.
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    if personality and archetype:
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        q = q.filter_by(personality=dbpersonality)
    elif personality:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(name=personality)
        q = q.reset_joinpoint()
    elif archetype:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(archetype=dbarchetype)
        q = q.reset_joinpoint()

    if buildstatus:
        dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                 compel=True)
        q = q.filter_by(status=dbbuildstatus)

    if osname and osversion and archetype:
        # archetype was already resolved above
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)
        q = q.filter_by(operating_system=dbos)
    elif osname or osversion:
        q = q.join('operating_system')
        if osname:
            q = q.filter_by(name=osname)
        if osversion:
            q = q.filter_by(version=osversion)
        q = q.reset_joinpoint()

    if service:
        dbservice = Service.get_unique(session, service, compel=True)
        if instance:
            dbsi = get_service_instance(session, dbservice, instance)
            q = q.filter(Host.services_used.contains(dbsi))
        else:
            q = q.join('services_used')
            q = q.filter_by(service=dbservice)
            q = q.reset_joinpoint()
    elif instance:
        q = q.join('services_used')
        q = q.filter_by(name=instance)
        q = q.reset_joinpoint()

    if server_of_service:
        dbserver_service = Service.get_unique(session, server_of_service,
                                              compel=True)
        if server_of_instance:
            dbssi = get_service_instance(session, dbserver_service,
                                         server_of_instance)
            q = q.join('_services_provided')
            q = q.filter_by(service_instance=dbssi)
            q = q.reset_joinpoint()
        else:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(service=dbserver_service)
            q = q.reset_joinpoint()
    elif server_of_instance:
        q = q.join('_services_provided', 'service_instance')
        q = q.filter_by(name=server_of_instance)
        q = q.reset_joinpoint()

    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if isinstance(dbcluster, MetaCluster):
            q = q.join('_cluster', 'cluster', '_metacluster')
            q = q.filter_by(metacluster=dbcluster)
        else:
            q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_cluster:
        # TODO: this does not handle metaclusters according to Wes
        dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                       compel=True)
        q = q.join('machine', VirtualMachine, ClusterResource)
        q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_share:
        v2shares = session.query(Share.id).filter_by(
            name=guest_on_share).all()
        if not v2shares:
            raise NotFoundException("No shares found with name {0}."
                                    .format(guest_on_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_([s[0] for s in v2shares]))
        q = q.reset_joinpoint()

    if member_cluster_share:
        v2shares = session.query(Share.id).filter_by(
            name=member_cluster_share).all()
        if not v2shares:
            # Bug fix: report the name that was actually searched for;
            # the original formatted guest_on_share here.
            raise NotFoundException("No shares found with name {0}."
                                    .format(member_cluster_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                   'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_([s[0] for s in v2shares]))
        q = q.reset_joinpoint()

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

        # Match hosts owned by the GRN directly, through a host GRN map,
        # or through their personality.
        persq = session.query(Personality.id)
        persq = persq.outerjoin(PersonalityGrnMap)
        persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                 PersonalityGrnMap.eon_id == dbgrn.eon_id))
        q = q.outerjoin(HostGrnMap)
        q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                         HostGrnMap.eon_id == dbgrn.eon_id,
                         Host.personality_id.in_(persq.subquery())))
        q = q.reset_joinpoint()

    if fullinfo:
        return q.all()
    return SimpleHostList(q.all())
netmask = arguments["prefixlen"] elif arguments.get("mask"): netmask = 32 - int(math.log(arguments["mask"], 2)) try: address = IPv4Network("%s/%s" % (ip, netmask)) except AddressValueError, e: raise ArgumentError("Failed to parse the network address: %s" % e) except NetmaskValueError, e: raise ArgumentError("Failed to parse the netmask: %s" % e) if ip != address.network: raise ArgumentError("IP address %s is not a network address. " "Maybe you meant %s?" % (ip, address.network)) location = get_location(session, **arguments) if not type: type = 'unknown' if not side: side = 'a' dbnet_env = NetworkEnvironment.get_unique_or_default( session, network_environment) self.az.check_network_environment(dbuser, dbnet_env) # Check if the name is free. Network names are not unique in QIP and # there is no uniqueness constraint in AQDB, so only warn if the name is # already in use. q = session.query(Network).filter_by(name=network) dbnetwork = q.first() if dbnetwork:
def render(self, session, logger, building, city, address, fullname,
           default_dns_domain, comments, **arguments):
    """Update attributes of a building, optionally moving it to a new city."""
    dbbuilding = get_location(session, building=building)

    old_city = dbbuilding.city

    dsdb_runner = DSDBRunner(logger=logger)
    if address is not None:
        old_address = dbbuilding.address
        dbbuilding.address = address
        dsdb_runner.update_building(dbbuilding.name, dbbuilding.address,
                                    old_address)
    if fullname is not None:
        dbbuilding.fullname = fullname
    if comments is not None:
        dbbuilding.comments = comments
    if default_dns_domain is not None:
        if default_dns_domain:
            # A non-empty value selects a new default DNS domain...
            dbdns_domain = DnsDomain.get_unique(session,
                                                default_dns_domain,
                                                compel=True)
            dbbuilding.default_dns_domain = dbdns_domain
        else:
            # ...an empty string clears it.
            dbbuilding.default_dns_domain = None

    plenaries = PlenaryCollection(logger=logger)
    if city:
        dbcity = get_location(session, city=city)

        # This one would change the template's locations hence forbidden
        if dbcity.hub != dbbuilding.hub:
            # Doing this both to reduce user error and to limit
            # testing required.
            raise ArgumentError("Cannot change hubs. {0} is in {1} "
                                "while {2} is in {3}.".format(
                                    dbcity, dbcity.hub,
                                    dbbuilding, dbbuilding.hub))

        # issue svcmap warnings
        maps = 0
        for map_type in [ServiceMap, PersonalityServiceMap]:
            maps = maps + session.query(map_type).\
                filter_by(location=old_city).count()

        if maps > 0:
            logger.client_info("There are {0} service(s) mapped to the "
                               "old location of the ({1:l}), please "
                               "review and manually update mappings for "
                               "the new location as needed.".format(
                                   maps, dbbuilding.city))

        dbbuilding.update_parent(parent=dbcity)

        # NOTE(review): the delete-building command passes campus.name to
        # del_campus_building; here the campus objects themselves are
        # passed -- confirm DSDB accepts that (string conversion?).
        if old_city.campus and (old_city.campus != dbcity.campus):
            dsdb_runner.del_campus_building(old_city.campus, building)

        if dbcity.campus and (old_city.campus != dbcity.campus):
            dsdb_runner.add_campus_building(dbcity.campus, building)

        # All machines under the new city need their templates refreshed.
        query = session.query(Machine)
        query = query.filter(
            Machine.location_id.in_(dbcity.offspring_ids()))

        for dbmachine in query:
            plenaries.append(PlenaryMachineInfo(dbmachine, logger=logger))

    session.flush()

    if plenaries.plenaries:
        with plenaries.get_write_key() as key:
            plenaries.stash()
            try:
                plenaries.write(locked=True)
                dsdb_runner.commit_or_rollback()
            except:
                # NOTE(review): the exception is swallowed here after the
                # stash is restored; the del_city path re-raises instead.
                # Confirm the missing re-raise is intentional.
                plenaries.restore_stash()
    else:
        dsdb_runner.commit_or_rollback()

    return
def render(self, session, logger, switch, model, rack, type, ip, vendor,
           serial, rename_to, discovered_macs, clear, discover, comments,
           **arguments):
    """Update attributes of a switch and synchronize plenaries and DSDB."""
    dbswitch = Switch.get_unique(session, switch, compel=True)

    # Snapshot the current state so DSDB can be told what changed.
    oldinfo = DSDBRunner.snapshot_hw(dbswitch)

    if discover:
        discover_switch(session, logger, self.config, dbswitch, False)

    if vendor and not model:
        # Vendor alone cannot identify a model; keep the current name.
        model = dbswitch.model.name

    if model:
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   machine_type='switch', compel=True)
        dbswitch.model = dbmodel

    dblocation = get_location(session, rack=rack)
    if dblocation:
        dbswitch.location = dblocation

    if serial is not None:
        dbswitch.serial_no = serial

    # FIXME: What do the error messages for an invalid enum (switch_type)
    # look like?
    if type:
        Switch.check_type(type)
        dbswitch.switch_type = type

    if ip:
        update_primary_ip(session, dbswitch, ip)

    if comments is not None:
        dbswitch.comments = comments

    remove_plenary = None
    if rename_to:
        # Handling alias renaming would not be difficult in AQDB, but the
        # DSDB synchronization would be painful, so don't do that for now.
        # In theory we should check all configured IP addresses for
        # aliases, but this is the most common case
        if dbswitch.primary_name and dbswitch.primary_name.fqdn.aliases:
            raise ArgumentError("The switch has aliases and it cannot be "
                                "renamed. Please remove all aliases "
                                "first.")

        # Keep a handle on the old plenary so it can be removed after the
        # rename.
        remove_plenary = Plenary.get_plenary(dbswitch, logger=logger)
        rename_hardware(session, dbswitch, rename_to)

    if clear:
        session.query(ObservedMac).filter_by(switch=dbswitch).delete()

    if discovered_macs:
        now = datetime.now()
        for (macaddr, port) in discovered_macs:
            update_or_create_observed_mac(session, dbswitch, port, macaddr,
                                          now)

    session.flush()

    # Write the (possibly renamed) plenary and remove the old one under a
    # single merged compile key, restoring everything on failure.
    switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)
    key = switch_plenary.get_write_key()
    if remove_plenary:
        key = CompileKey.merge([key, remove_plenary.get_remove_key()])
    try:
        lock_queue.acquire(key)
        if remove_plenary:
            remove_plenary.stash()
            remove_plenary.remove(locked=True)
        switch_plenary.write(locked=True)

        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbswitch, oldinfo)
        dsdb_runner.commit_or_rollback("Could not update switch in DSDB")
    except:
        if remove_plenary:
            remove_plenary.restore_stash()
        switch_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return
def render(self, session, logger, machine, model, vendor, serial, chassis,
           slot, clearchassis, multislot, vmhost, cluster,
           allow_metacluster_change, cpuname, cpuvendor, cpuspeed,
           cpucount, memory, ip, **arguments):
    """Update a machine's hardware definition, location and VM placement."""
    dbmachine = Machine.get_unique(session, machine, compel=True)
    plenaries = PlenaryCollection(logger=logger)

    # Snapshot the current state so DSDB can be told what changed.
    oldinfo = DSDBRunner.snapshot_hw(dbmachine)

    if clearchassis:
        # Drop all chassis slot records for this machine.
        del dbmachine.chassis_slot[:]

    remove_plenaries = PlenaryCollection(logger=logger)

    if chassis:
        dbchassis = Chassis.get_unique(session, chassis, compel=True)
        if machine_plenary_will_move(old=dbmachine.location,
                                     new=dbchassis.location):
            remove_plenaries.append(Plenary.get_plenary(dbmachine))
        dbmachine.location = dbchassis.location
        if slot is None:
            raise ArgumentError("Option --chassis requires --slot "
                                "information.")
        self.adjust_slot(session, logger, dbmachine, dbchassis, slot,
                         multislot)
    elif slot is not None:
        # --slot without --chassis: only valid when the machine is
        # already in exactly one chassis.
        dbchassis = None
        for dbslot in dbmachine.chassis_slot:
            if dbchassis and dbslot.chassis != dbchassis:
                raise ArgumentError("Machine in multiple chassis, please "
                                    "use --chassis argument.")
            dbchassis = dbslot.chassis
        if not dbchassis:
            raise ArgumentError("Option --slot requires --chassis "
                                "information.")
        self.adjust_slot(session, logger, dbmachine, dbchassis, slot,
                         multislot)

    dblocation = get_location(session, **arguments)
    if dblocation:
        loc_clear_chassis = False
        for dbslot in dbmachine.chassis_slot:
            dbcl = dbslot.chassis.location
            if dbcl != dblocation:
                if chassis or slot is not None:
                    raise ArgumentError("{0} conflicts with chassis {1!s} "
                                        "location {2}.".format(
                                            dblocation, dbslot.chassis,
                                            dbcl))
                else:
                    loc_clear_chassis = True
        if loc_clear_chassis:
            # The new location contradicts the recorded chassis; drop the
            # stale slot records.
            del dbmachine.chassis_slot[:]
        if machine_plenary_will_move(old=dbmachine.location,
                                     new=dblocation):
            remove_plenaries.append(Plenary.get_plenary(dbmachine))
        dbmachine.location = dblocation

    if model or vendor:
        # If overriding model, should probably overwrite default
        # machine specs as well.
        if not model:
            model = dbmachine.model.name
        if not vendor:
            vendor = dbmachine.model.vendor.name
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)
        if dbmodel.machine_type not in ['blade', 'rackmount',
                                        'workstation', 'aurora_node',
                                        'virtual_machine']:
            raise ArgumentError("The update_machine command cannot update "
                                "machines of type %s." %
                                dbmodel.machine_type)
        # We probably could do this by forcing either cluster or
        # location data to be available as appropriate, but really?
        # Failing seems reasonable.
        if dbmodel.machine_type != dbmachine.model.machine_type and \
           'virtual_machine' in [dbmodel.machine_type,
                                 dbmachine.model.machine_type]:
            raise ArgumentError("Cannot change machine from %s to %s." %
                                (dbmachine.model.machine_type,
                                 dbmodel.machine_type))

        # Interfaces that used the old model's default NIC follow the new
        # model's default NIC.
        old_nic_model = dbmachine.model.nic_model
        new_nic_model = dbmodel.nic_model
        if old_nic_model != new_nic_model:
            for iface in dbmachine.interfaces:
                if iface.model == old_nic_model:
                    iface.model = new_nic_model

        dbmachine.model = dbmodel

    if cpuname or cpuvendor or cpuspeed is not None:
        dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                               speed=cpuspeed, compel=True)
        dbmachine.cpu = dbcpu

    if cpucount is not None:
        dbmachine.cpu_quantity = cpucount
    if memory is not None:
        dbmachine.memory = memory
    if serial:
        dbmachine.serial_no = serial
    if ip:
        update_primary_ip(session, dbmachine, ip)

    # FIXME: For now, if a machine has its interface(s) in a portgroup
    # this command will need to be followed by an update_interface to
    # re-evaluate the portgroup for overflow.
    # It would be better to have --pg and --autopg options to let it
    # happen at this point.
    if cluster or vmhost:
        if not dbmachine.vm_container:
            raise ArgumentError("Cannot convert a physical machine to "
                                "virtual.")

        old_holder = dbmachine.vm_container.holder.holder_object
        resholder = get_resource_holder(session, hostname=vmhost,
                                        cluster=cluster, compel=False)
        new_holder = resholder.holder_object

        # TODO: do we want to allow moving machines between the cluster
        # and metacluster level?
        if new_holder.__class__ != old_holder.__class__:
            raise ArgumentError("Cannot move a VM between a cluster and "
                                "a stand-alone host.")

        if cluster:
            if new_holder.metacluster != old_holder.metacluster \
               and not allow_metacluster_change:
                raise ArgumentError("Current {0:l} does not match "
                                    "new {1:l}."
                                    .format(old_holder.metacluster,
                                            new_holder.metacluster))

        remove_plenaries.append(Plenary.get_plenary(
            dbmachine.vm_container))
        dbmachine.vm_container.holder = resholder

        # Move any virtual disks to the equivalent share under the new
        # holder.
        for dbdisk in dbmachine.disks:
            if not isinstance(dbdisk, VirtualDisk):
                continue
            old_share = dbdisk.share
            if isinstance(old_share.holder, BundleResource):
                resourcegroup = old_share.holder.name
            else:
                resourcegroup = None
            new_share = find_share(new_holder, resourcegroup,
                                   old_share.name, error=ArgumentError)
            # If the shares are registered at the metacluster level and
            # both clusters are in the same metacluster, then there will
            # be no real change here
            if new_share != old_share:
                old_share.disks.remove(dbdisk)
                new_share.disks.append(dbdisk)

        if isinstance(new_holder, Cluster):
            dbmachine.location = new_holder.location_constraint
        else:
            dbmachine.location = new_holder.location

        session.flush()
        plenaries.append(Plenary.get_plenary(old_holder))
        plenaries.append(Plenary.get_plenary(new_holder))

    if dbmachine.vm_container:
        plenaries.append(Plenary.get_plenary(dbmachine.vm_container))

    session.flush()

    # Check if the changed parameters still meet cluster capacity
    # requiremets
    if dbmachine.cluster:
        dbmachine.cluster.validate()
        if allow_metacluster_change:
            dbmachine.cluster.metacluster.validate()
    if dbmachine.host and dbmachine.host.cluster:
        dbmachine.host.cluster.validate()

    # The check to make sure a plenary file is not written out for
    # dummy aurora hardware is within the call to write(). This way
    # it is consistent without altering (and forgetting to alter)
    # all the calls to the method.
    plenaries.append(Plenary.get_plenary(dbmachine))
    if remove_plenaries.plenaries and dbmachine.host:
        plenaries.append(Plenary.get_plenary(dbmachine.host))

    # Write new plenaries and remove stale ones under a single merged
    # compile key; restore everything if anything fails.
    key = CompileKey.merge([plenaries.get_write_key(),
                            remove_plenaries.get_remove_key()])
    try:
        lock_queue.acquire(key)
        remove_plenaries.stash()
        plenaries.write(locked=True)
        remove_plenaries.remove(locked=True)

        if dbmachine.host:
            # XXX: May need to reconfigure.
            pass

        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbmachine, oldinfo)
        dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
    except:
        plenaries.restore_stash()
        remove_plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return