def render(self, **arguments):  # pragma: no cover
    """
    Placeholder for the actual command implementation.

    Concrete broker commands override this method.  The base
    __init__ wraps every implementation via _update_render() so
    that the class-level requires_* flags are enforced uniformly.
    """
    module = self.__class__.__module__
    if module == 'aquilon.worker.broker':
        # The bare base class carries no command name worth echoing back.
        raise UnimplementedError("Command has not been implemented.")
    raise UnimplementedError("%s has not been implemented" % module)
def render(self, session, logger, interface, chassis, mac, comments,
           rename_to, **arguments):
    """
    Update an interface that belongs to a chassis.

    Rejects any host-only options, applies the requested changes to
    the interface, and pushes the resulting hardware state to DSDB.
    """
    # Options that only make sense for host interfaces are refused.
    for bad_arg in self.invalid_parameters:
        if arguments.get(bad_arg) is not None:
            raise UnimplementedError("update_interface --chassis cannot use "
                                     "the --%s option." % bad_arg)

    dbchassis = Chassis.get_unique(session, chassis, compel=True)
    dbinterface = Interface.get_unique(session, hardware_entity=dbchassis,
                                       name=interface, compel=True)

    # Snapshot the hardware before mutating anything, so DSDB can be
    # updated with an old-vs-new diff below.
    oldinfo = DSDBRunner.snapshot_hw(dbchassis)

    for attr, value in (("comments", comments), ("mac", mac)):
        if value:
            setattr(dbinterface, attr, value)
    if rename_to:
        rename_interface(session, dbinterface, rename_to)

    session.flush()

    dsdb = DSDBRunner(logger=logger)
    dsdb.update_host(dbchassis, oldinfo)
    dsdb.commit_or_rollback("Could not update chassis in DSDB")
    return
def generate_mac(self, session, dbmachine):
    """
    Pick an unused MAC address for virtual hardware.

    Strategy:
    * Look up the highest MAC already recorded inside the vendor range.
    * If none exists (or it sorts below the range start), hand out the
      first address of the range.
    * If it sorts below the range end, hand out its successor.
    * Otherwise rescan the range in ascending order and return the first
      gap; if no gap remains, give up.  (Even then we are not completely
      stuck - an operator can still assign a MAC manually.)
    """
    if dbmachine.model.machine_type != "virtual_machine":
        raise ArgumentError("Can only automatically generate MAC "
                            "addresses for virtual hardware.")
    if not dbmachine.cluster or dbmachine.cluster.cluster_type != 'esx':
        raise UnimplementedError("MAC address auto-generation has only "
                                 "been enabled for ESX Clusters.")

    # FIXME: These values should probably be configurable.
    prefix = "00:50:56"
    range_start = MACAddress(prefix + ":01:20:00")
    range_end = MACAddress(prefix + ":3f:ff:ff")

    query = session.query(Interface.mac)
    query = query.filter(Interface.mac.between(str(range_start),
                                               str(range_end)))

    # First pass: only the highest in-range address is needed.
    row = query.order_by(desc(Interface.mac)).first()
    if not row:
        return str(range_start)
    top = MACAddress(row[0])
    if top < range_start:
        return str(range_start)
    if top < range_end:
        return str(top.next())

    # Range is full at the top; scan upwards looking for a hole.
    candidate = range_start
    for row in query.order_by(asc(Interface.mac)).all():
        used = MACAddress(row[0])
        if used < range_start:
            continue
        if candidate < used:
            return str(candidate)
        candidate = used.next()
    raise ArgumentError("All MAC addresses between %s and %s inclusive "
                        "are currently in use." % (range_start, range_end))
def render(self, session, feature, type, post_personality, comments,
           **arguments):
    """
    Create a new feature of the requested type.

    Validates the feature name, enforces that post_personality is only
    used where the subclass allows it, and precludes duplicates before
    persisting the new feature.
    """
    feature_cls = Feature.polymorphic_subclass(type, "Unknown feature type")

    if _name_re.search(feature):
        raise ArgumentError("Path components in the feature name must not "
                            "start with a dot.")

    if post_personality and not feature_cls.post_personality_allowed:
        raise UnimplementedError("The post_personality attribute is "
                                 "implemented only for host features.")

    # Fail early if a feature with this name already exists.
    feature_cls.get_unique(session, name=feature, preclude=True)

    dbfeature = feature_cls(name=feature,
                            post_personality=post_personality,
                            comments=comments)
    session.add(dbfeature)

    session.flush()
    return
def render(self, session, logger, model, vendor, newmodel, newvendor,
           comments, leave_existing, **arguments):
    """
    Update an existing hardware model, optionally renaming it and/or
    creating or adjusting its machine specs.

    When the model is renamed (or moved to a new vendor), all machines
    using it are collected so their plenary templates can be rewritten
    at the end.  --leave_existing suppresses propagation of spec
    changes to existing machines but is incompatible with renaming.
    """
    for (arg, value) in arguments.items():
        # Cleaning the strings isn't strictly necessary but allows
        # for simple equality checks below and removes the need to
        # call refresh().
        if arg in ['newmodel', 'newvendor', 'machine_type', 'cpuname',
                   'cpuvendor', 'disktype', 'diskcontroller', 'nicmodel',
                   'nicvendor']:
            if value is not None:
                arguments[arg] = value.lower().strip()

    dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                               compel=True)

    if leave_existing and (newmodel or newvendor):
        raise ArgumentError("Cannot update model name or vendor without "
                            "updating any existing machines.")

    # fix_existing controls whether spec changes are pushed down to
    # machines already using this model.
    fix_existing = not leave_existing
    # Machines whose plenaries need rewriting after a rename.
    dbmachines = set()

    # The sub-branching here is a little difficult to read...
    # Basically, there are three different checks to handle
    # setting a new vendor, a new name, or both.
    if newvendor:
        dbnewvendor = Vendor.get_unique(session, newvendor, compel=True)
        if newmodel:
            # Moving and renaming: the target (newmodel, newvendor)
            # combination must not exist yet.
            Model.get_unique(session, name=newmodel, vendor=dbnewvendor,
                             preclude=True)
        else:
            # Moving only: the current name must be free under the
            # new vendor.
            Model.get_unique(session, name=dbmodel.name, vendor=dbnewvendor,
                             preclude=True)
        dbmodel.vendor = dbnewvendor
    if newmodel:
        if not newvendor:
            # Renaming only: the new name must be free under the
            # current vendor.
            Model.get_unique(session, name=newmodel, vendor=dbmodel.vendor,
                             preclude=True)
        dbmodel.name = newmodel
    if newvendor or newmodel:
        q = session.query(Machine).filter_by(model=dbmodel)
        dbmachines.update(q.all())

    # For now, can't update machine_type. There are too many spots
    # that special case things like aurora_node or virtual_machine to
    # know that the transistion is safe. If there is enough need we
    # can always add those transitions later.
    if arguments['machine_type'] is not None:
        raise UnimplementedError("Cannot (yet) change a model's "
                                 "machine type.")

    if comments:
        dbmodel.comments = comments
        # The comments also do not affect the templates.

    # Gather the CPU, NIC and generic spec arguments into keyword
    # dictionaries keyed by their DB-side names (argument_lookup maps
    # command option names to attribute names).
    cpu_args = ['cpuname', 'cpuvendor', 'cpuspeed']
    cpu_info = dict([(self.argument_lookup[arg], arguments[arg])
                     for arg in cpu_args])
    cpu_values = [v for v in cpu_info.values() if v is not None]
    nic_args = ['nicmodel', 'nicvendor']
    nic_info = dict([(self.argument_lookup[arg], arguments[arg])
                     for arg in nic_args])
    nic_values = [v for v in nic_info.values() if v is not None]
    spec_args = ['cpunum', 'memory', 'disktype', 'diskcontroller',
                 'disksize', 'nics']
    specs = dict([(self.argument_lookup[arg], arguments[arg])
                  for arg in spec_args])
    spec_values = [v for v in specs.values() if v is not None]

    if not dbmodel.machine_specs:
        # No specs recorded yet: creating them requires the complete
        # set of CPU/disk/RAM/NIC information in one go.
        if cpu_values or nic_values or spec_values:
            if not cpu_values or len(spec_values) < len(spec_args):
                raise ArgumentError("Missing required parameters to store "
                                    "machine specs for the model. Please "
                                    "give all CPU, disk, RAM, and NIC "
                                    "count information.")
            dbcpu = Cpu.get_unique(session, compel=True, **cpu_info)
            if nic_values:
                dbnic = Model.get_unique(session, compel=True,
                                         machine_type='nic', **nic_info)
            else:
                dbnic = Model.default_nic_model(session)
            dbmachine_specs = MachineSpecs(model=dbmodel, cpu=dbcpu,
                                           nic_model=dbnic, **specs)
            session.add(dbmachine_specs)

    # Anything below that updates specs should have been verified above.
    if cpu_values:
        dbcpu = Cpu.get_unique(session, compel=True, **cpu_info)
        self.update_machine_specs(model=dbmodel, dbmachines=dbmachines,
                                  attr='cpu', value=dbcpu,
                                  fix_existing=fix_existing)
    for arg in ['memory', 'cpunum']:
        if arguments[arg] is not None:
            self.update_machine_specs(model=dbmodel, dbmachines=dbmachines,
                                      attr=self.argument_lookup[arg],
                                      value=arguments[arg],
                                      fix_existing=fix_existing)
    if arguments['disktype']:
        # Disk type changes cannot be propagated to existing machines,
        # so they are only allowed with --leave_existing.
        if fix_existing:
            raise ArgumentError("Please specify --leave_existing to "
                                "change the model disktype. This cannot "
                                "be converted automatically.")
        dbmodel.machine_specs.disk_type = arguments['disktype']
    for arg in ['diskcontroller', 'disksize']:
        if arguments[arg] is not None:
            self.update_disk_specs(model=dbmodel, dbmachines=dbmachines,
                                   attr=self.argument_lookup[arg],
                                   value=arguments[arg],
                                   fix_existing=fix_existing)
    if nic_values:
        dbnic = Model.get_unique(session, compel=True, **nic_info)
        self.update_interface_specs(model=dbmodel, dbmachines=dbmachines,
                                    value=dbnic, fix_existing=fix_existing)
    if arguments['nics'] is not None:
        dbmodel.machine_specs.nic_count = arguments['nics']

    session.flush()

    # Rewrite the plenary templates of every machine affected by a
    # rename/vendor change or by propagated spec updates.
    plenaries = PlenaryCollection(logger=logger)
    for dbmachine in dbmachines:
        plenaries.append(PlenaryMachineInfo(dbmachine, logger=logger))
    plenaries.write()

    return
def render(self, session, logger, service_address, ip, name, interfaces,
           hostname, cluster, resourcegroup, network_environment,
           map_to_primary, comments, **arguments):
    """
    Add a service address resource to a host, cluster or resource group.

    Grabs/creates the DNS record for the address, creates the
    ServiceAddress resource, and applies the address assignment to the
    named interfaces of the underlying host(s).
    """
    validate_basic("name", name)

    # TODO: generalize the error message - Layer-3 failover may be
    # implemented by other software, not just Zebra.
    if name == "hostname":
        raise ArgumentError("The hostname service address is reserved for "
                            "Zebra. Please specify the --zebra_interfaces "
                            "option when calling add_host if you want the "
                            "primary name of the host to be managed by "
                            "Zebra.")

    # Interface names arrive as a comma-separated list; normalize them.
    ifnames = [ifname.strip().lower() for ifname in interfaces.split(",")]
    if not ifnames:
        raise ArgumentError("Please specify at least one interface name.")

    holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                 compel=False)

    # Address assignments should be added based on the host/cluster, so we
    # have to resolve resource groups first
    if isinstance(holder.holder_object, ResourceGroup):
        real_holder = holder.holder_object.holder.holder_object
    else:
        real_holder = holder.holder_object

    # No two service addresses with the same name on the same holder.
    ServiceAddress.get_unique(session, name=name, holder=holder,
                              preclude=True)

    # TODO: add allow_multi=True
    dbdns_rec, newly_created = grab_address(session, service_address, ip,
                                            network_environment)
    ip = dbdns_rec.ip
    dbnetwork = dbdns_rec.network

    if map_to_primary:
        if not isinstance(real_holder, Host):
            raise ArgumentError("The --map_to_primary option works only "
                                "for host-based service addresses.")
        # Point the reverse PTR at the host's primary name instead of
        # the service address itself.
        dbdns_rec.reverse_ptr = real_holder.machine.primary_name.fqdn

    # Disable autoflush, since the ServiceAddress object won't be complete
    # until add_resource() is called
    with session.no_autoflush:
        dbsrv = ServiceAddress(name=name, dns_record=dbdns_rec,
                               comments=comments)
        holder.resources.append(dbsrv)

        # oldinfo is only captured for host holders; clusters have no
        # single hardware snapshot to diff against.
        oldinfo = None
        if isinstance(real_holder, Cluster):
            if not real_holder.hosts:
                # The interface names are only stored in the
                # AddressAssignment objects, so we can't handle a cluster
                # with no hosts and thus no interfaces
                raise ArgumentError("Cannot assign a service address to a "
                                    "cluster that has no members.")
            for host in real_holder.hosts:
                apply_service_address(host, ifnames, dbsrv)
        elif isinstance(real_holder, Host):
            oldinfo = DSDBRunner.snapshot_hw(real_holder.machine)
            apply_service_address(real_holder, ifnames, dbsrv)
        else:  # pragma: no cover
            raise UnimplementedError("{0} as a resource holder is not "
                                     "implemented.".format(real_holder))

    # add_resource() flushes, writes plenaries and notifies DSDB via the
    # supplied callback.
    add_resource(session, logger, holder, dbsrv,
                 dsdb_callback=add_srv_dsdb_callback,
                 real_holder=real_holder, oldinfo=oldinfo,
                 newly_created=newly_created, comments=comments)

    return
def render(self, session, logger, feature, archetype, personality, model,
           vendor, interface, justification, user, **arguments):
    """
    Bind a feature to a personality or archetype (optionally scoped to a
    model and/or a named interface), then rewrite the plenary templates
    of every affected personality.
    """
    # Binding a feature to a named interface makes sense in the scope of a
    # personality, but not for a whole archetype.
    if interface and not personality:
        raise ArgumentError("Binding to a named interface needs "
                            "a personality.")

    # q collects the personalities whose templates must be recompiled.
    q = session.query(Personality)
    dbarchetype = None

    # Default feature type; may be narrowed below based on the scope.
    feature_type = "host"

    justification_required = True

    # Warning: order matters here!
    params = {}
    if personality:
        # A single personality only affects itself, so no justification
        # is needed.
        justification_required = False
        dbpersonality = Personality.get_unique(session,
                                               name=personality,
                                               archetype=archetype,
                                               compel=True)
        params["personality"] = dbpersonality
        if interface:
            params["interface_name"] = interface
            feature_type = "interface"
        dbarchetype = dbpersonality.archetype
        q = q.filter_by(archetype=dbarchetype)
        q = q.filter_by(name=personality)
    elif archetype:
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        params["archetype"] = dbarchetype
        q = q.filter_by(archetype=dbarchetype)
    else:
        # It's highly unlikely that a feature template would work for
        # _any_ archetype, so disallow this case for now. As I can't
        # rule out that such a case will not have some uses in the
        # future, the restriction is here and not in the model.
        raise ArgumentError("Please specify either an archetype or "
                            "a personality when binding a feature.")

    if model:
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)

        # NIC models take interface features; everything else takes
        # hardware features.
        if dbmodel.machine_type == "nic":
            feature_type = "interface"
        else:
            feature_type = "hardware"

        params["model"] = dbmodel

    if dbarchetype and not dbarchetype.is_compileable:
        raise UnimplementedError("Binding features to non-compilable "
                                 "archetypes is not implemented.")

    # Defensive check only: feature_type is always set above.
    if not feature_type:  # pragma: no cover
        raise InternalError("Feature type is not known.")

    dbfeature = Feature.get_unique(session, name=feature,
                                   feature_type=feature_type, compel=True)

    cnt = q.count()
    # TODO: should the limit be configurable?
    if justification_required and cnt > 0:
        if not justification:
            raise AuthorizationException("Changing feature bindings for more "
                                         "than just a personality requires "
                                         "--justification.")
        validate_justification(user, justification)

    self.do_link(session, logger, dbfeature, params)

    session.flush()

    idx = 0
    written = 0
    # Track successes so their plenaries can be restored if any fail.
    successful = []
    failed = []

    with CompileKey(logger=logger):
        personalities = q.all()

        for personality in personalities:
            idx += 1
            # Progress feedback for very large binding operations.
            if idx % 1000 == 0:  # pragma: no cover
                logger.client_info("Processing personality %d of %d..." %
                                   (idx, cnt))

            if not personality.archetype.is_compileable:  # pragma: no cover
                continue

            try:
                plenary_personality = PlenaryPersonality(personality)
                written += plenary_personality.write(locked=True)
                successful.append(plenary_personality)
            except IncompleteError:
                # Personality not fully configured yet; skip silently.
                pass
            except Exception, err:  # pragma: no cover
                failed.append("{0} failed: {1}".format(personality, err))

        if failed:  # pragma: no cover
            # Roll back the plenaries that were already written.
            for plenary in successful:
                plenary.restore_stash()
            raise PartialError([], failed)
def updated_render(self, *args, **kwargs): principal = kwargs["user"] request = kwargs["request"] logger = kwargs["logger"] raising_exception = None rollback_failed = False try: if self.requires_transaction or self.requires_azcheck: # Set up a session... if not "session" in kwargs: if self.is_lock_free: kwargs["session"] = self.dbf.NLSession() else: kwargs["session"] = self.dbf.Session() session = kwargs["session"] if session.bind.dialect.name == "oracle": # Make the name of the command and the request ID # available in v$session. Trying to set a value longer # than the allowed length will generate ORA-24960, so # do an explicit truncation. conn = session.connection() dbapi_con = conn.connection.connection dbapi_con.action = str(self.action)[:32] # TODO: we should include the command number as well, # since that is easier to find in the logs dbapi_con.clientinfo = str(kwargs["requestid"])[:64] # This does a COMMIT, which in turn invalidates the session. # We should therefore avoid looking up anything in the DB # before this point which might be used later. self._record_xtn(session, logger.get_status()) dbuser = get_or_create_user_principal(session, principal, commitoncreate=True) kwargs["dbuser"] = dbuser if self.requires_azcheck: self.az.check(principal=principal, dbuser=dbuser, action=self.action, resource=request.path) if self.requires_readonly: self._set_readonly(session) # begin() is only required if session transactional=False #session.begin() if self.badmode: # pragma: no cover raise UnimplementedError("Command %s not available on " "a %s broker." % (self.command, self.badmode)) for key in kwargs.keys(): if key in self.parameter_checks: kwargs[key] = self.parameter_checks[key]("--" + key, kwargs[key]) # Command is an instance method already having self... 
retval = command(*args, **kwargs) if self.requires_format: style = kwargs.get("style", None) retval = self.formatter.format(style, retval, request) if "session" in kwargs: session.commit() return retval except Exception, e: raising_exception = e # Need to close after the rollback, or the next time session # is accessed it tries to commit the transaction... (?) if "session" in kwargs: try: session.rollback() except: # pragma: no cover rollback_failed = True raise session.close() raise