def wrap_failure(failure):
    """Translate known pod failures into `PodProblem` errors.

    Builds a driver-specific message for `UnknownPodType`,
    `NotImplementedError` and `PodActionFail`; any other failure is
    returned unchanged so later errbacks can handle it.
    """
    prefix = (
        f"Unable to send commissioning results for {name}({pod_id}) because"
    )
    if failure.check(UnknownPodType):
        message = f"{prefix} `{pod_type}` is an unknown Pod type."
    elif failure.check(NotImplementedError):
        message = (
            f"{prefix} `{pod_type}` driver does not implement the "
            "'send_pod_commissioning_results' method."
        )
    elif failure.check(PodActionFail):
        message = prefix + ": " + str(failure.value)
    else:
        # Not a pod-specific failure; propagate untouched.
        return failure
    raise PodProblem(message)
def wrap_failure(failure):
    """Convert known decompose failures into `PodProblem` errors.

    Failures of unrecognized types are passed through unchanged.
    """
    prefix = "Unable to decompose machine because"
    if failure.check(UnknownPodType):
        raise PodProblem(
            "%s '%s' is an unknown pod type." % (prefix, pod_type))
    if failure.check(NotImplementedError):
        raise PodProblem(
            "%s '%s' driver does not implement the 'decompose' method."
            % (prefix, pod_type))
    if failure.check(PodActionFail):
        raise PodProblem("%s: %s" % (prefix, str(failure.value)))
    # Unknown failure type; let someone else deal with it.
    return failure
def catch_no_racks(result):
    """Fail with `PodProblem` when discovery produced no pod.

    A `None` discovered pod means no rack controller was available to
    run the discovery; otherwise the result is passed through.
    """
    discovered_pod, discovered = result
    if discovered_pod is not None:
        return discovered_pod, discovered
    raise PodProblem(
        "Unable to start the pod discovery process. "
        "No rack controllers connected.")
def get_requested_machine(self, known_host_interfaces):
    """Return the `RequestedMachine`."""
    constraints = get_storage_constraints_from_string(
        self.get_value_for("storage"))
    # LXD Pods currently only support one block device.
    if self.pod.power_type == "lxd" and len(constraints) > 1:
        raise PodProblem(
            "LXD Pod virtual machines currently only support one block device."
        )
    block_devices = [
        RequestedMachineBlockDevice(
            size=size, tags=[] if tags is None else tags)
        for _, size, tags in constraints
    ]
    # Interface constraints are optional; fall back to a single
    # unconstrained interface when none were requested.
    label_map = self.get_value_for("interfaces")
    if label_map is None:
        interfaces = [RequestedMachineInterface()]
    else:
        interfaces = self._get_requested_machine_interfaces_via_constraints(
            label_map)
    return RequestedMachine(
        hostname=self.get_value_for("hostname"),
        architecture=self.get_value_for("architecture"),
        cores=self.get_value_for("cores"),
        memory=self.get_value_for("memory"),
        cpu_speed=self.get_value_for("cpu_speed"),
        block_devices=block_devices,
        interfaces=interfaces,
        known_host_interfaces=known_host_interfaces,
    )
def db_work(client):
    """Run the database-side checks and lookups for composition.

    Returns the `client` passed in together with the pod host's known
    interfaces so later callbacks have both available.
    """
    # Refuse to compose when the pod would be over-committed.
    message = self.pod.check_over_commit_ratios(
        requested_cores=self.get_value_for("cores"),
        requested_memory=self.get_value_for("memory"),
    )
    if message:
        raise PodProblem(
            "Unable to compose KVM instance in '%s'. %s"
            % (self.pod.name, message))
    # Pass the pod's default storage pool on to the driver.
    pool = self.pod.default_storage_pool
    if pool is not None:
        power_parameters["default_storage_pool_id"] = pool.pool_id
    # Collect the pod host's known interfaces, when a host is set.
    if self.pod.host is None:
        interfaces = []
    else:
        interfaces = get_known_host_interfaces(self.pod.host)
    return client, interfaces
def check_over_commit_ratios(result):
    """Raise `PodProblem` when the pod would be over-committed.

    Passes `result` through untouched on success so it can sit in a
    Deferred callback chain.
    """
    message = self.pod.check_over_commit_ratios(
        requested_cores=self.get_value_for('cores'),
        requested_memory=self.get_value_for('memory'))
    if message:
        raise PodProblem(message)
    return result
async def compose(self, params):
    """Compose a machine in a Pod.

    Permission checking, form validation and final rendering each run in
    their own database thread via `deferToDatabase`; only the compose
    call itself happens on the reactor.
    """

    @transactional
    def get_object(params):
        # Running inside new database thread, be sure the rbac cache is
        # cleared so accessing information will not be already cached.
        rbac.clear()
        obj = self.get_object(params)
        if not self.user.has_perm(PodPermission.compose, obj):
            raise HandlerPermissionError()
        return obj

    @transactional
    def get_form(obj, params):
        # Build a synthetic request so the form sees the websocket user.
        request = HttpRequest()
        request.user = self.user
        form = ComposeMachineForm(pod=obj, data=params, request=request)
        if not form.is_valid():
            raise HandlerValidationError(form.errors)
        return form

    @transactional
    def render_obj(obj):
        # Reload to pick up changes committed by the compose operation.
        return self.full_dehydrate(reload_object(obj))

    pod = await deferToDatabase(get_object, params)
    if Capabilities.COMPOSABLE not in pod.capabilities:
        raise HandlerValidationError("Pod does not support composability.")
    form = await deferToDatabase(get_form, pod, params)
    try:
        await form.compose(
            skip_commissioning=params.get("skip_commissioning", False)
        )
    except Exception as error:
        # Log the original failure before surfacing it as a PodProblem.
        log.err(error, "Failed to compose machine.")
        raise PodProblem("Pod unable to compose machine: %s" % str(error))
    return await deferToDatabase(render_obj, pod)
def wrap_errors(failure):
    """Re-raise any composition failure as a `PodProblem`."""
    message = "Pod unable to compose machine: %s" % str(failure.value)
    raise PodProblem(message)
def compose(self, timeout=120, creation_type=NODE_CREATION_TYPE.MANUAL,
            skip_commissioning=None):
    """Compose the machine.

    Internal operation of this form is asynchronous. It will block the
    calling thread until the asynchronous operation is complete. Adjust
    `timeout` to minimize the maximum wait for the asynchronous
    operation.
    """
    if skip_commissioning is None:
        skip_commissioning = self.get_value_for('skip_commissioning')

    def check_over_commit_ratios(result):
        # Check over commit ratios.
        over_commit_message = self.pod.check_over_commit_ratios(
            requested_cores=self.get_value_for('cores'),
            requested_memory=self.get_value_for('memory'))
        if over_commit_message:
            raise PodProblem(over_commit_message)
        return result

    def create_and_sync(result):
        # Persist the machine the driver composed and refresh the pod's
        # resource hints with what the driver reported back.
        discovered_machine, pod_hints = result
        created_machine = self.pod.create_machine(
            discovered_machine, self.request.user,
            skip_commissioning=skip_commissioning,
            creation_type=creation_type,
            domain=self.get_value_for('domain'),
            pool=self.get_value_for('pool'),
            zone=self.get_value_for('zone'))
        self.pod.sync_hints(pod_hints)
        return created_machine

    # Copy so the default-storage-pool entry does not leak back into the
    # pod's stored power parameters.
    power_parameters = self.pod.power_parameters.copy()

    def _set_default_pool_id():
        if self.pod.default_storage_pool is not None:
            power_parameters['default_storage_pool_id'] = (
                self.pod.default_storage_pool.pool_id)

    if isInIOThread():
        # Running under the twisted reactor, before the work from inside.
        d = deferToDatabase(transactional(self.pod.get_client_identifiers))
        d.addCallback(getClientFromIdentifiers)
        d.addCallback(
            partial(deferToDatabase, transactional(check_over_commit_ratios)))
        d.addCallback(callOutToDatabase, _set_default_pool_id)
        d.addCallback(
            compose_machine, self.pod.power_type, power_parameters,
            self.get_requested_machine(),
            pod_id=self.pod.id, name=self.pod.name)
        d.addCallback(
            partial(deferToDatabase, transactional(create_and_sync)))
        return d
    else:
        # Running outside of reactor. Do the work inside and then finish
        # the work outside.

        @asynchronous
        def wrap_compose_machine(
                client_idents, pod_type, parameters, request, pod_id, name):
            """Wrapper to get the client."""
            d = getClientFromIdentifiers(client_idents)
            d.addCallback(
                partial(
                    deferToDatabase,
                    transactional(check_over_commit_ratios)))
            d.addCallback(
                compose_machine, pod_type, parameters, request,
                pod_id=pod_id, name=name)
            return d

        # NOTE(review): unlike the reactor path, this call is not routed
        # through callOutToDatabase/transactional — presumably the caller
        # already holds a database transaction here; confirm.
        _set_default_pool_id()
        try:
            result = wrap_compose_machine(
                self.pod.get_client_identifiers(),
                self.pod.power_type,
                power_parameters,
                self.get_requested_machine(),
                pod_id=self.pod.id,
                name=self.pod.name).wait(timeout)
        except crochet.TimeoutError:
            raise PodProblem(
                "Unable to compose a machine because '%s' driver "
                "timed out after %d seconds." % (self.pod.power_type, timeout))
        return create_and_sync(result)
def wrap_errors(failure):
    """Let `PodProblem` failures pass through; wrap everything else."""
    if failure.check(PodProblem):
        return failure
    raise PodProblem(str(failure.value))
def discover_and_sync_pod(self):
    """Discover and sync the pod information.

    Runs the pod discovery either on the reactor (returning a Deferred)
    or synchronously in the calling thread, then syncs the discovered
    data into the database and records which rack controllers can route
    to the pod.
    """

    def update_db(result):
        # Persist the discovered pod and the per-rack routability map.
        discovered_pod, discovered = result
        # When called with an instance that has no name, be sure to set
        # it before going any further. If this is a new instance this
        # will also create it in the database.
        if not self.instance.name:
            self.instance.set_random_name()
        self.instance.sync(discovered_pod, self.request.user)

        # Save which rack controllers can route and which cannot.
        discovered_rack_ids = [
            rack_id for rack_id, _ in discovered[0].items()]
        for rack_controller in RackController.objects.all():
            routable = rack_controller.system_id in discovered_rack_ids
            bmc_route_model = BMCRoutableRackControllerRelationship
            relation, created = (
                bmc_route_model.objects.get_or_create(
                    bmc=self.instance.as_bmc(),
                    rack_controller=rack_controller,
                    defaults={'routable': routable}))
            if not created and relation.routable != routable:
                relation.routable = routable
                relation.save()
        return self.instance

    if isInIOThread():
        # Running in twisted reactor, do the work inside the reactor.
        d = discover_pod(
            self.instance.power_type, self.instance.power_parameters,
            pod_id=self.instance.id, name=self.instance.name)
        d.addCallback(
            lambda discovered: (
                get_best_discovered_result(discovered), discovered))

        def catch_no_racks(result):
            discovered_pod, discovered = result
            if discovered_pod is None:
                raise PodProblem(
                    "Unable to start the pod discovery process. "
                    "No rack controllers connected.")
            return discovered_pod, discovered

        def wrap_errors(failure):
            if failure.check(PodProblem):
                return failure
            else:
                raise PodProblem(str(failure.value))

        d.addCallback(catch_no_racks)
        d.addCallback(partial(deferToDatabase, transactional(update_db)))
        d.addErrback(wrap_errors)
        return d
    else:
        # Perform the actions inside the executing thread.
        try:
            discovered = discover_pod(
                self.instance.power_type, self.instance.power_parameters,
                pod_id=self.instance.id, name=self.instance.name)
        except Exception as exc:
            raise PodProblem(str(exc)) from exc

        # Use the first discovered pod object. All other objects are
        # ignored. The other rack controllers that also provided a result
        # can route to the pod.
        try:
            discovered_pod = get_best_discovered_result(discovered)
        except Exception as error:
            # Chain the cause explicitly, matching the handler above.
            raise PodProblem(str(error)) from error

        if discovered_pod is None:
            raise PodProblem(
                "Unable to start the pod discovery process. "
                "No rack controllers connected.")
        return update_db((discovered_pod, discovered))
def wrap_errors(failure):
    """Pass `PodProblem` through untouched; log and wrap anything else."""
    if failure.check(PodProblem):
        return failure
    log.err(failure, "Failed to discover pod.")
    raise PodProblem(str(failure.value))
def wrap_errors(failure):
    """Log the failure, then surface it as a `PodProblem`."""
    log.err(failure, "Failed to compose machine.")
    message = "Pod unable to compose machine: %s" % str(failure.value)
    raise PodProblem(message)
def compose(
    self,
    timeout=120,
    creation_type=NODE_CREATION_TYPE.MANUAL,
    skip_commissioning=None,
):
    """Compose the machine.

    Internal operation of this form is asynchronous. It will block the
    calling thread until the asynchronous operation is complete. Adjust
    `timeout` to minimize the maximum wait for the asynchronous
    operation.
    """
    if skip_commissioning is None:
        skip_commissioning = self.get_value_for("skip_commissioning")

    def db_work(client):
        # Check overcommit ratios.
        over_commit_message = self.pod.check_over_commit_ratios(
            requested_cores=self.get_value_for("cores"),
            requested_memory=self.get_value_for("memory"),
        )
        if over_commit_message:
            raise PodProblem(
                "Unable to compose KVM instance in '%s'. %s"
                % (self.pod.name, over_commit_message))
        # Update the default storage pool.
        if self.pod.default_storage_pool is not None:
            power_parameters[
                "default_storage_pool_id"
            ] = self.pod.default_storage_pool.pool_id
        # Find the pod's known host interfaces.
        if self.pod.host is not None:
            interfaces = get_known_host_interfaces(self.pod.host)
        else:
            interfaces = []
        return client, interfaces

    def create_and_sync(result):
        # Persist the composed machine and refresh the pod's hints.
        requested_machine, result = result
        discovered_machine, pod_hints = result
        created_machine = self.pod.create_machine(
            discovered_machine,
            self.request.user,
            skip_commissioning=skip_commissioning,
            creation_type=creation_type,
            interfaces=self.get_value_for("interfaces"),
            requested_machine=requested_machine,
            domain=self.get_value_for("domain"),
            pool=self.get_value_for("pool"),
            zone=self.get_value_for("zone"),
        )
        self.pod.sync_hints(pod_hints)
        return created_machine

    # NOTE(review): "power_paramaters" is a typo kept from the original;
    # it is a local parameter name only, so behavior is unaffected.
    @inlineCallbacks
    def async_compose_machine(result, power_type, power_paramaters, **kwargs):
        client, result = result
        # Build the RequestedMachine in a database thread; it needs the
        # known host interfaces returned by db_work.
        requested_machine = yield deferToDatabase(
            self.get_requested_machine, result)
        result = yield compose_machine(
            client,
            power_type,
            power_paramaters,
            requested_machine,
            **kwargs,
        )
        return requested_machine, result

    # Copy so the default-storage-pool entry does not leak back into the
    # pod's stored power parameters.
    power_parameters = self.pod.power_parameters.copy()

    if isInIOThread():
        # Running under the twisted reactor, before the work from inside.
        d = deferToDatabase(transactional(self.pod.get_client_identifiers))
        d.addCallback(getClientFromIdentifiers)
        d.addCallback(partial(deferToDatabase, transactional(db_work)))
        d.addCallback(
            async_compose_machine,
            self.pod.power_type,
            power_parameters,
            pod_id=self.pod.id,
            name=self.pod.name,
        )
        d.addCallback(
            partial(deferToDatabase, transactional(create_and_sync)))
        # NOTE(review): request_commissioning_results(self.pod) is
        # evaluated here, while the chain is being built — i.e. before
        # create_and_sync has run — and its return value is merely passed
        # as the extra argument the lambda discards. Confirm this early
        # firing is intentional.
        d.addCallback(
            lambda created_machine, _: created_machine,
            request_commissioning_results(self.pod),
        )
        return d
    else:
        # Running outside of reactor. Do the work inside and then finish
        # the work outside.

        @asynchronous
        def wrap_compose_machine(
                client_idents, pod_type, parameters, request, pod_id, name):
            """Wrapper to get the client."""
            d = getClientFromIdentifiers(client_idents)
            d.addCallback(
                compose_machine,
                pod_type,
                parameters,
                request,
                pod_id=pod_id,
                name=name,
            )
            return d

        _, result = db_work(None)
        try:
            requested_machine = self.get_requested_machine(result)
            result = wrap_compose_machine(
                self.pod.get_client_identifiers(),
                self.pod.power_type,
                power_parameters,
                requested_machine,
                pod_id=self.pod.id,
                name=self.pod.name,
            ).wait(timeout)
        except crochet.TimeoutError:
            raise PodProblem(
                "Unable to compose a machine because '%s' driver "
                "timed out after %d seconds." % (self.pod.power_type, timeout))
        created_machine = create_and_sync((requested_machine, result))
        # Request commissioning results only after the transaction
        # commits, so the composed machine is visible to the handler.
        post_commit_do(
            reactor.callLater, 0, request_commissioning_results, self.pod)
        return created_machine
def discover_and_sync_pod(self):
    """Discover and sync the pod information.

    Runs discovery on the reactor (returning a Deferred) when called
    from the IO thread, otherwise synchronously; either way the
    discovered data is synced to the database, per-rack routability is
    recorded, and commissioning results are requested afterwards.
    """

    def update_db(result):
        discovered_pod, discovered = result
        # Prefer the HTTP request's user when one is present (API call);
        # fall back to the user set directly on the form.
        if self.request is not None:
            user = self.request.user
        else:
            user = self.user
        # If this is a new instance it will be stored in the database
        # at the end of sync.
        self.instance.sync(discovered_pod, user)

        # Save which rack controllers can route and which cannot.
        discovered_rack_ids = [
            rack_id for rack_id, _ in discovered[0].items()]
        for rack_controller in RackController.objects.all():
            routable = rack_controller.system_id in discovered_rack_ids
            bmc_route_model = BMCRoutableRackControllerRelationship
            relation, created = bmc_route_model.objects.get_or_create(
                bmc=self.instance.as_bmc(),
                rack_controller=rack_controller,
                defaults={"routable": routable},
            )
            if not created and relation.routable != routable:
                relation.routable = routable
                relation.save()
        return self.instance

    if isInIOThread():
        # Running in twisted reactor, do the work inside the reactor.
        d = discover_pod(
            self.instance.power_type,
            self.instance.power_parameters,
            pod_id=self.instance.id,
            name=self.instance.name,
        )
        d.addCallback(lambda discovered: (
            get_best_discovered_result(discovered),
            discovered,
        ))

        def catch_no_racks(result):
            discovered_pod, discovered = result
            if discovered_pod is None:
                raise PodProblem(
                    "Unable to start the pod discovery process. "
                    "No rack controllers connected.")
            return discovered_pod, discovered

        def wrap_errors(failure):
            if failure.check(PodProblem):
                return failure
            else:
                log.err(failure, "Failed to discover pod.")
                raise PodProblem(str(failure.value))

        d.addCallback(catch_no_racks)
        d.addCallback(partial(deferToDatabase, transactional(update_db)))
        d.addCallback(request_commissioning_results)
        d.addErrback(wrap_errors)
        return d
    else:
        # Perform the actions inside the executing thread.
        try:
            discovered = discover_pod(
                self.instance.power_type,
                self.instance.power_parameters,
                pod_id=self.instance.id,
                name=self.instance.name,
            )
        except Exception as exc:
            raise PodProblem(str(exc)) from exc

        # Use the first discovered pod object. All other objects are
        # ignored. The other rack controllers that also provided a result
        # can route to the pod.
        try:
            discovered_pod = get_best_discovered_result(discovered)
        except Exception as error:
            # Chain the cause explicitly, matching the handler above.
            raise PodProblem(str(error)) from error

        if discovered_pod is None:
            raise PodProblem(
                "Unable to start the pod discovery process. "
                "No rack controllers connected.")
        update_db((discovered_pod, discovered))
        # The data isn't committed to the database until the transaction
        # is complete. The commissioning results must be sent after the
        # transaction completes so the metadata server can process the
        # data.
        post_commit_do(
            reactor.callLater,
            0,
            request_commissioning_results,
            self.instance,
        )
        # Run commissioning request here
        return self.instance
def compose(self, timeout=120, skip_commissioning=False,
            creation_type=NODE_CREATION_TYPE.MANUAL):
    """Compose the machine.

    Internal operation of this form is asynchronously. It will block the
    calling thread until the asynchronous operation is complete. Adjust
    `timeout` to minimize the maximum wait for the asynchronous
    operation.
    """

    def create_and_sync(result):
        # Persist the machine the driver composed and refresh the pod's
        # resource hints with what the driver reported back.
        discovered_machine, pod_hints = result
        created_machine = self.pod.create_machine(
            discovered_machine, self.request.user,
            skip_commissioning=skip_commissioning,
            creation_type=creation_type,
            domain=self.get_value_for('domain'),
            zone=self.get_value_for('zone'))
        self.pod.sync_hints(pod_hints)
        return created_machine

    if isInIOThread():
        # Running under the twisted reactor, before the work from inside.
        d = deferToDatabase(transactional(self.pod.get_client_identifiers))
        d.addCallback(getClientFromIdentifiers)
        d.addCallback(
            compose_machine, self.pod.power_type,
            self.pod.power_parameters, self.get_requested_machine(),
            pod_id=self.pod.id, name=self.pod.name)
        d.addCallback(
            partial(deferToDatabase, transactional(create_and_sync)))
        return d
    else:
        # Running outside of reactor. Do the work inside and then finish
        # the work outside.

        @asynchronous
        def wrap_compose_machine(
                client_idents, pod_type, parameters, request, pod_id, name):
            """Wrapper to get the client."""
            d = getClientFromIdentifiers(client_idents)
            d.addCallback(
                compose_machine, pod_type, parameters, request,
                pod_id=pod_id, name=name)
            return d

        try:
            result = wrap_compose_machine(
                self.pod.get_client_identifiers(),
                self.pod.power_type,
                self.pod.power_parameters,
                self.get_requested_machine(),
                pod_id=self.pod.id,
                name=self.pod.name).wait(timeout)
        except crochet.TimeoutError:
            raise PodProblem(
                "Unable to compose a machine because '%s' driver "
                "timed out after %d seconds." % (self.pod.power_type, timeout))
        return create_and_sync(result)