def _execute(self, cmd, args):
    name = yield db.get(self.context, '__name__')
    parent = yield db.get(self.context, '__parent__')

    submitter = IVirtualizationContainerSubmitter(parent)
    yield submitter.submit(IUndeployVM, name)

    @db.transact
    def finalize_vm():
        ippools = db.get_root()['oms_root']['ippools']
        ip = netaddr.IPAddress(self.context.ipv4_address.split('/')[0])
        if ippools.free(ip):
            ulog = UserLogger(principal=cmd.protocol.interaction.participations[0].principal,
                              subject=self.context, owner=self.context.__owner__)
            ulog.log('Deallocated IP: %s', ip)

        vm = traverse1(canonical_path(self.context))
        if vm is not None:
            noLongerProvides(vm, IDeployed)
            alsoProvides(vm, IUndeployed)

    yield finalize_vm()

    vm_parameters = yield self.get_parameters()

    utils = getAllUtilitiesRegisteredFor(IPostUndeployHook)
    for util in utils:
        yield defer.maybeDeferred(util.execute, self.context, cmd, vm_parameters)

def _execute(self, cmd, args):
    name = yield db.get(self.context, '__name__')
    parent = yield db.get(self.context, '__parent__')

    submitter = IVirtualizationContainerSubmitter(parent)
    yield submitter.submit(IUndeployVM, name)

    @db.transact
    def finalize_vm():
        ippools = db.get_root()['oms_root']['ippools']
        ip = netaddr.IPAddress(self.context.ipv4_address.split('/')[0])
        log.msg('Attempting to deallocate IP %s from the pools' % ip, system='undeploy-action')
        if ippools.free(ip):
            ulog = UserLogger(principal=cmd.protocol.interaction.participations[0].principal,
                              subject=self.context, owner=self.context.__owner__)
            ulog.log('Deallocated IP: %s', ip)
            log.msg('Deallocated IP %s' % ip, system='ippool')

        vm = traverse1(canonical_path(self.context))
        if vm is not None:
            noLongerProvides(vm, IDeployed)
            alsoProvides(vm, IUndeployed)

    yield finalize_vm()

    vm_parameters = yield self.get_parameters()

    utils = getAllUtilitiesRegisteredFor(IPostUndeployHook)
    for util in utils:
        yield defer.maybeDeferred(util.execute, self.context, cmd, vm_parameters)

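# A minimal, self-contained sketch (not part of this codebase) of the
# defer.maybeDeferred() idiom used above for the IPostUndeployHook utilities:
# it wraps plain return values in an already-fired Deferred, so synchronous
# and asynchronous hooks can be yielded uniformly. The hook functions below
# are illustrative assumptions.
from twisted.internet import defer

def sync_hook(context, cmd, parameters):
    return 'sync hook done for %s' % context

def async_hook(context, cmd, parameters):
    return defer.succeed('async hook done for %s' % context)

@defer.inlineCallbacks
def run_post_hooks(context, cmd, parameters):
    for hook in (sync_hook, async_hook):
        # maybeDeferred hides the difference between the two hook styles
        result = yield defer.maybeDeferred(hook, context, cmd, parameters)
        print(result)
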
def _sync_virtual(self):
    parent = yield db.get(self.context, '__parent__')
    uuid = yield db.get(self.context, '__name__')
    submitter = IVirtualizationContainerSubmitter(parent)
    vm = yield submitter.submit(IInfoVM, uuid)
    yield self.sync_owner(vm)
    yield self._sync_vm(vm)

def _sync_virtual(self):
    parent = yield db.get(self.context, '__parent__')
    name = yield db.get(self.context, '__name__')
    submitter = IVirtualizationContainerSubmitter(parent)
    vmlist = yield submitter.submit(IListVMS)
    for vm in vmlist:
        if vm['uuid'] == name:
            yield self.sync_owner(vm)
            yield self._sync_vm(vm)

def handle_virtual_compute_config_change_request(compute, event):
    c = sudo(compute)
    compute_p = yield db.get(c, '__parent__')
    compute_type = yield db.get(compute_p, 'backend')
    # At the moment we only handle openvz backend updates (OMS-568)
    if compute_type != 'openvz':
        return

    update_param_whitelist = ['diskspace', 'memory', 'num_cores', 'swap_size']

    param_modifier = {'diskspace': lambda d: d['total']}

    unit_corrections_coeff = {'memory': 1 / 1024.0,
                              'swap_size': 1 / 1024.0,
                              'diskspace': 1 / 1024.0}

    params_to_update = filter(lambda (k, v): k in update_param_whitelist, event.modified.iteritems())

    if len(params_to_update) == 0:
        return

    # correct unit coefficients (usually MB -> GB)
    params_to_update = map(lambda (k, v): (k, param_modifier.get(k, lambda x: x)(v)), params_to_update)
    params_to_update = map(lambda (k, v): (k, unit_corrections_coeff.get(k) * v
                                           if k in unit_corrections_coeff else v), params_to_update)

    @db.transact
    def update_vm_limits(cpu_limit):
        logger.debug("Setting cpu_limit to %s, previous value %s" % (cpu_limit / 100.0, c.cpu_limit))
        c.cpu_limit = cpu_limit / 100.0

    cores_setting = filter(lambda (k, v): k == 'num_cores', params_to_update)
    if len(cores_setting) == 1:
        # adjust cpu_limit to follow the number of cores as well
        cpu_limit = int(cores_setting[0][1] * get_config().getfloat('vms', 'cpu_limit', 80))
        log.msg("Updating cpulimit to %s" % cpu_limit, system='vm-configuration-update')

        params_to_update.append(('cpu_limit', cpu_limit))
        yield update_vm_limits(cpu_limit)

    submitter = IVirtualizationContainerSubmitter((yield db.get(compute, '__parent__')))

    try:
        yield submitter.submit(IUpdateVM, (yield db.get(compute, '__name__')), dict(params_to_update))
    except Exception as e:
        @db.transact
        def reset_to_original_values():
            for mk, mv in event.modified.iteritems():
                setattr(compute, mk, event.original[mk])

        yield reset_to_original_values()
        raise e  # must re-throw, because sys.exc_info seems to get erased with the yield
    else:
        owner = (yield db.get(compute, '__owner__'))
        UserLogger(subject=compute, owner=owner).log('Compute "%s" configuration changed' % compute)

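# Worked example of the cpu_limit derivation above (values are illustrative
# assumptions): with num_cores == 4 and the 'vms'/'cpu_limit' config factor at
# its default of 80, the agent is sent cpu_limit = int(4 * 80) == 320, which
# appears to be interpreted as a percentage, while update_vm_limits() stores
# 320 / 100.0 == 3.2 on the compute model.
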
def _execute(self, cmd, args):
    name = yield db.get(self.context, '__name__')
    parent = yield db.get(self.context, '__parent__')

    submitter = IVirtualizationContainerSubmitter(parent)
    try:
        vm = yield submitter.submit(IInfoVM, name)
        max_key_len = max(len(key) for key in vm)
        for key, value in vm.items():
            cmd.write("%s %s\n" % ((key + ':').ljust(max_key_len), value))
    except Exception as e:
        cmd.write("%s\n" % format_error(e))
        log.err(system='action-info')

def _execute(self, cmd, args):
    name = yield db.get(self.context, '__name__')
    parent = yield db.get(self.context, '__parent__')

    submitter = IVirtualizationContainerSubmitter(parent)
    try:
        for vm in (yield submitter.submit(IListVMS)):
            if vm['uuid'] == name:
                max_key_len = max(len(key) for key in vm)
                for key, value in vm.items():
                    cmd.write("%s %s\n" % ((key + ':').ljust(max_key_len), value))
    except Exception as e:
        cmd.write("%s\n" % format_error(e))

def delete_virtual_compute(model, event):
    if not ICompute.providedBy(model.__parent__.__parent__):
        return

    if IDeployed.providedBy(model):
        log.msg('Deleting compute %s which is in IDeployed state, shutting down and '
                'undeploying first' % model.hostname, system='compute-backend')
        yield DestroyComputeAction(model).execute(DetachedProtocol(), object())
        yield UndeployAction(model).execute(DetachedProtocol(), object())
    else:
        log.msg('Deleting compute %s which is already in IUndeployed state' % model.hostname,
                system='compute-backend')

    owner = (yield db.get(model, '__owner__'))
    ulog = UserLogger(subject=model, owner=owner)
    ulog.log('Deleted %s' % model)

    @db.transact
    def deallocate_ip():
        ippools = db.get_root()['oms_root']['ippools']
        ip = netaddr.IPAddress(model.ipv4_address.split('/')[0])
        if ippools.free(ip):
            ulog.log('Deallocated IP: %s', ip)

    yield deallocate_ip()

def gather_phy(self):
    name = yield db.get(self.context, 'hostname')

    try:
        data = yield IGetHostMetrics(self.context).run(__killhook=self._killhook)

        log.msg('%s: host metrics received: %s' % (name, len(data)),
                system='metrics', logLevel=logging.DEBUG)
        timestamp = int(time.time() * 1000)

        # db transact is needed only to traverse the zodb.
        @db.ro_transact
        def get_streams():
            streams = []
            host_metrics = self.context['metrics']
            if host_metrics:
                for k in data:
                    if host_metrics[k]:
                        streams.append((IStream(host_metrics[k]), (timestamp, data[k])))
            return streams

        for stream, data_point in (yield get_streams()):
            stream.add(data_point)
    except OperationRemoteError as e:
        log.msg('%s: remote error: %s' % (name, e), system='metrics', logLevel=logging.WARNING)
    except Exception:
        log.msg("%s: error gathering host metrics" % name, system='metrics', logLevel=logging.ERROR)
        if get_config().getboolean('debug', 'print_exceptions'):
            log.err(system='metrics')

def delete_virtual_compute(model, event):
    if not ICompute.providedBy(model.__parent__.__parent__):
        return

    if IDeployed.providedBy(model):
        log.msg('Deleting compute %s which is in IDeployed state, shutting down and '
                'undeploying first' % model.hostname, system='compute-backend')
        yield DestroyComputeAction(model).execute(DetachedProtocol(), object())
        yield UndeployAction(model).execute(DetachedProtocol(), object())
    else:
        log.msg('Deleting compute %s which is already in IUndeployed state' % model.hostname,
                system='compute-backend')

    owner = (yield db.get(model, '__owner__'))
    ulog = UserLogger(subject=model, owner=owner)
    ulog.log('Deleted %s' % model)

    @db.transact
    def deallocate_ip():
        ippools = db.get_root()['oms_root']['ippools']
        ip = netaddr.IPAddress(model.ipv4_address.split('/')[0])
        if ippools.free(ip):
            ulog.log('Deallocated IP: %s', ip)

    yield deallocate_ip()

def _execute(self, cmd, args):
    action_name = getattr(self, 'action_name', self._name + "ing")

    name = yield db.get(self.context, '__name__')
    parent = yield db.get(self.context, '__parent__')

    yield self.set_inprogress()

    self._action_log(cmd, '%s %s' % (action_name, name))

    submitter = IVirtualizationContainerSubmitter(parent)
    try:
        yield submitter.submit(self.job, name)
    except Exception as e:
        self._action_log(cmd, '%s' % (format_error(e)))
        raise

def add_log_event(self, cmd, msg, *args, **kwargs):
    self._action_log(cmd, msg)
    owner = yield db.get(self.context, '__owner__')
    ulog = UserLogger(principal=cmd.protocol.interaction.participations[0].principal,
                      subject=self.context, owner=owner)
    ulog.log(msg, *args, **kwargs)

def gather_vms(self):

    @db.ro_transact
    def get_vms_if_not_empty():
        vms = follow_symlinks(self.context['vms']) or []

        for vm in vms:
            if IVirtualCompute.providedBy(vm):
                return vms

        log.msg('%s: no VMs' % (self.context.hostname), system='metrics', logLevel=logging.DEBUG)

    vms = yield get_vms_if_not_empty()

    # get the metrics for all running VMS
    if not vms or self.context.state != u'active':
        return

    name = yield db.get(self.context, 'hostname')

    try:
        log.msg('%s: gather VM metrics' % (name), system='metrics', logLevel=logging.DEBUG)
        submitter = IVirtualizationContainerSubmitter(vms)
        metrics = yield submitter.submit(IGetGuestMetrics, __killhook=self._killhook)
    except OperationRemoteError as e:
        log.msg('%s: remote error: %s' % (name, e), system='metrics', logLevel=logging.DEBUG)
        if e.remote_tb:
            log.msg(e.remote_tb, system='metrics', logLevel=logging.DEBUG)
        return
    except Exception:
        log.msg("%s: error gathering VM metrics" % name, system='metrics', logLevel=logging.ERROR)
        if get_config().getboolean('debug', 'print_exceptions'):
            log.err(system='metrics')
        return

    if not metrics:
        log.msg('%s: no VM metrics received!' % name, system='metrics', logLevel=logging.WARNING)
        return

    log.msg('%s: VM metrics received: %s' % (name, len(metrics)), system='metrics')
    timestamp = int(time.time() * 1000)

    # db transact is needed only to traverse the zodb.
    @db.ro_transact
    def get_streams():
        streams = []
        for uuid, data in metrics.items():
            if vms[uuid] and vms[uuid]['metrics']:
                vm_metrics = vms[uuid]['metrics']
                for k in data:
                    if vm_metrics[k]:
                        streams.append((IStream(vm_metrics[k]), (timestamp, data[k])))
        return streams

    # streams could defer the data appending but we don't care
    for stream, data_point in (yield get_streams()):
        stream.add(data_point)

def gather_vms(self):

    @db.ro_transact
    def get_vms_if_not_empty():
        vms = follow_symlinks(self.context['vms']) or []

        for vm in vms:
            if IVirtualCompute.providedBy(vm):
                return vms

        log.msg('%s: no VMs' % (self.context.hostname), system='metrics', logLevel=logging.DEBUG)

    vms = yield get_vms_if_not_empty()

    # get the metrics for all running VMS
    if not vms or self.context.state != u'active':
        return

    name = yield db.get(self.context, 'hostname')

    try:
        log.msg('%s: gather VM metrics' % (name), system='metrics', logLevel=logging.DEBUG)
        submitter = IVirtualizationContainerSubmitter(vms)
        metrics = yield submitter.submit(IGetGuestMetrics, __killhook=self._killhook)
    except OperationRemoteError as e:
        log.msg('%s: remote error: %s' % (name, e), system='metrics', logLevel=logging.DEBUG)
        if e.remote_tb:
            log.msg(e.remote_tb, system='metrics', logLevel=logging.DEBUG)
        return
    except Exception:
        log.msg("%s: error gathering VM metrics" % name, system='metrics', logLevel=logging.ERROR)
        if get_config().getboolean('debug', 'print_exceptions'):
            log.err(system='metrics')
        # bail out: 'metrics' is unbound if the submit above failed
        return

    if not metrics:
        log.msg('%s: no VM metrics received!' % name, system='metrics', logLevel=logging.WARNING)
        return

    log.msg('%s: VM metrics received: %s' % (name, len(metrics)), system='metrics')
    timestamp = int(time.time() * 1000)

    # db transact is needed only to traverse the zodb.
    @db.ro_transact
    def get_streams():
        streams = []
        for uuid, data in metrics.items():
            if vms[uuid] and vms[uuid]['metrics']:
                vm_metrics = vms[uuid]['metrics']
                for k in data:
                    if vm_metrics[k]:
                        streams.append((IStream(vm_metrics[k]), (timestamp, data[k])))
        return streams

    # streams could defer the data appending but we don't care
    for stream, data_point in (yield get_streams()):
        stream.add(data_point)

def sync_owner(self, vm):
    owner = yield db.get(self.context, '__owner__')
    parent = yield db.get(self.context, '__parent__')
    uuid = yield db.get(self.context, '__name__')

    if vm.get('owner'):
        if owner != vm['owner']:
            @db.transact
            def pull_owner():
                compute = TmpObj(self.context)
                newowner = getUtility(IAuthentication).getPrincipal(vm['owner'])
                compute.__owner__ = newowner
                compute.apply()

            yield pull_owner()
    elif owner is not None:
        log.msg('Attempting to push owner (%s) of %s to agent' % (owner, self.context), system='sync')
        submitter = IVirtualizationContainerSubmitter(parent)
        yield submitter.submit(ISetOwner, uuid, owner)
        log.msg('Owner pushing for %s successful' % self.context, system='sync')

def _execute(self, cmd, args):
    action_name = getattr(self, 'action_name', self._name + "ing")

    name = yield db.get(self.context, '__name__')

    if not self.context.license_activated:
        self._action_log(cmd, '%s %s failed: VM license is not activated yet' % (action_name, name))
        return

    parent = yield db.get(self.context, '__parent__')

    yield self.set_inprogress()

    self._action_log(cmd, '%s %s' % (action_name, name))

    submitter = IVirtualizationContainerSubmitter(parent)
    try:
        yield submitter.submit(self.job, name)
    except Exception as e:
        self._action_log(cmd, '%s' % (format_error(e)))
        raise

def gather_machines(self):

    @db.ro_transact
    def get_gatherers():
        oms_root = db.get_root()["oms_root"]
        computes = filter(lambda c: c and ICompute.providedBy(c) and not c.failure,
                          map(follow_symlinks, oms_root["computes"].listcontent()))
        gatherers = filter(None, (queryAdapter(c, IMetricsGatherer) for c in computes))
        return gatherers

    def handle_success(r, c):
        self.log_msg("%s: metrics gathered" % (c), logLevel=logging.DEBUG)
        if str(c) in self.outstanding_requests:
            del self.outstanding_requests[str(c)]

    def handle_errors(e, c):
        e.trap(Exception)
        self.log_msg("%s: got exception when gathering metrics: %s" % (c, e), logLevel=logging.ERROR)
        self.log_err()
        if str(c) in self.outstanding_requests:
            del self.outstanding_requests[str(c)]

    for g in (yield get_gatherers()):
        hostname = yield db.get(g.context, "hostname")
        targetkey = str(g.context)
        if targetkey not in self.outstanding_requests or self.outstanding_requests[targetkey][2] > 5:
            if targetkey in self.outstanding_requests and self.outstanding_requests[targetkey][2] > 5:
                self.log_msg("Killing all previous requests to %s (%s)" % (hostname, targetkey))
                self.outstanding_requests[targetkey][3].kill()

            self.log_msg("%s: gathering metrics %s"
                         % (hostname,
                            "(after timeout!)" if targetkey in self.outstanding_requests else ""),
                         logLevel=logging.DEBUG)

            d = g.gather()
            curtime = datetime.datetime.now().isoformat()
            self.outstanding_requests[targetkey] = [d, curtime, 0, g]
            d.addCallback(handle_success, g.context)
            d.addErrback(handle_errors, g.context)
        else:
            self.outstanding_requests[targetkey][2] += 1
            self.log_msg('Skipping: another outstanding request to "%s" (%s) is found from %s.'
                         % (g.context, hostname, self.outstanding_requests[targetkey][1]),
                         logLevel=logging.DEBUG)

def handle_virtual_compute_config_change_request(compute, event):
    update_param_whitelist = ['cpu_limit', 'diskspace', 'memory', 'num_cores', 'swap_size']

    param_modifier = {'diskspace': lambda d: d['total']}

    unit_corrections_coeff = {'memory': 1 / 1024.0,
                              'swap_size': 1 / 1024.0,
                              'diskspace': 1 / 1024.0}

    params_to_update = filter(lambda (k, v): k in update_param_whitelist, event.modified.iteritems())

    if len(params_to_update) == 0:
        return

    # correct unit coefficients (usually MB -> GB)
    params_to_update = map(lambda (k, v): (k, param_modifier.get(k, lambda x: x)(v)), params_to_update)
    params_to_update = map(lambda (k, v): (k, unit_corrections_coeff.get(k) * v
                                           if k in unit_corrections_coeff else v), params_to_update)

    submitter = IVirtualizationContainerSubmitter((yield db.get(compute, '__parent__')))
    try:
        yield submitter.submit(IUpdateVM, (yield db.get(compute, '__name__')), dict(params_to_update))
    except Exception as e:
        @db.transact
        def reset_to_original_values():
            for mk, mv in event.modified.iteritems():
                setattr(compute, mk, event.original[mk])

        yield reset_to_original_values()
        raise e  # must re-throw, because sys.exc_info seems to get erased with the yield
    else:
        owner = (yield db.get(compute, '__owner__'))
        UserLogger(subject=compute, owner=owner).log('Compute "%s" configuration changed' % compute)

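# Hedged, self-contained illustration of the whitelist + unit-correction step
# above, using a hypothetical event.modified payload (values assumed to arrive
# in MB and to be stored in GB); written with comprehensions instead of the
# Python 2 tuple-unpacking lambdas purely for readability.
modified = {'memory': 2048, 'hostname': 'vm01', 'diskspace': {'total': 10240}}
whitelist = ['cpu_limit', 'diskspace', 'memory', 'num_cores', 'swap_size']
modifier = {'diskspace': lambda d: d['total']}
coeff = {'memory': 1 / 1024.0, 'swap_size': 1 / 1024.0, 'diskspace': 1 / 1024.0}

params = [(k, v) for k, v in modified.items() if k in whitelist]       # drop 'hostname'
params = [(k, modifier.get(k, lambda x: x)(v)) for k, v in params]     # diskspace -> its 'total'
params = [(k, coeff[k] * v if k in coeff else v) for k, v in params]   # MB -> GB
# params now contains ('memory', 2.0) and ('diskspace', 10.0)
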
def _check_vm_pre(self, cmd, name, destination_hostname, destination_vms):
    vmlist = yield self._get_vmlist(destination_vms)

    if (name in map(lambda x: x['uuid'], vmlist)):
        self._action_log(cmd, 'Failed migration of %s to %s: destination already contains this VM'
                         % (name, destination_hostname))
        defer.returnValue(False)

    if ((yield db.get(self.context, 'ctid')) in map(lambda x: x.get('ctid'), vmlist)):
        self._action_log(cmd, 'Failed migration of %s to %s: destination container ID conflict'
                         % (name, destination_hostname))
        defer.returnValue(False)

    defer.returnValue(True)

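# Context for the early exits above: _check_vm_pre is assumed to run under
# Twisted's @defer.inlineCallbacks, where defer.returnValue() supplies the
# generator's result. A minimal, self-contained sketch of that idiom (names
# below are illustrative, not from this codebase):
from twisted.internet import defer

@defer.inlineCallbacks
def check_name_free(name, lookup_deferred):
    existing = yield lookup_deferred          # wait for the asynchronous lookup
    if name in existing:
        defer.returnValue(False)              # early negative result
    defer.returnValue(True)

# check_name_free('vm-1', defer.succeed(['vm-1', 'vm-2'])) fires with False.
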
def execute(self, cmd, args):
    yield BaseHostRequestAction.execute(self, cmd, args)
    hostname = yield db.get(self.context, 'hostname')
    # Acceptance of a new HN should trigger its syncing
    uuid = yield register_machine(hostname, mgt_stack=ISaltInstalled)
    cmd.write('Host %s accepted. Syncing shortly...\n' % hostname)
    log.msg('Host %s accepted. Syncing in 5 seconds...' % hostname, system='action-accept')
    yield async_sleep(5)
    compute = yield get_machine_by_uuid(uuid)
    assert compute is not None, 'Machine not found after accept: %s' % uuid

    log.msg('Syncing NOW...', system='action-accept')
    syncaction = SyncAction(compute)
    syncaction._do_not_enqueue = False
    args = argparse.Namespace()
    args.full = True
    yield syncaction.execute(DetachedProtocol(), args)

def gather_machines(self):

    @db.ro_transact
    def get_gatherers():
        oms_root = db.get_root()['oms_root']
        computes = filter(lambda c: c and ICompute.providedBy(c) and not c.failure,
                          map(follow_symlinks, oms_root['computes'].listcontent()))
        gatherers = filter(None, (queryAdapter(c, IMetricsGatherer) for c in computes))
        return gatherers

    def handle_success(r, c):
        self.log_msg('%s: metrics gathered' % (c), logLevel=logging.DEBUG)
        if str(c) in self.outstanding_requests:
            del self.outstanding_requests[str(c)]

    def handle_errors(e, c):
        e.trap(Exception)
        self.log_msg("%s: got exception when gathering metrics: %s" % (c, e), logLevel=logging.ERROR)
        self.log_err()
        if str(c) in self.outstanding_requests:
            del self.outstanding_requests[str(c)]

    for g in (yield get_gatherers()):
        hostname = yield db.get(g.context, 'hostname')
        targetkey = str(g.context)
        if (targetkey not in self.outstanding_requests
                or self.outstanding_requests[targetkey][2] > 5):
            if (targetkey in self.outstanding_requests
                    and self.outstanding_requests[targetkey][2] > 5):
                self.log_msg('Killing all previous requests to %s (%s)' % (hostname, targetkey))
                self.outstanding_requests[targetkey][3].kill()

            self.log_msg('%s: gathering metrics %s'
                         % (hostname,
                            '(after timeout!)' if targetkey in self.outstanding_requests else ''),
                         logLevel=logging.DEBUG)

            d = g.gather()
            curtime = datetime.datetime.now().isoformat()
            self.outstanding_requests[targetkey] = [d, curtime, 0, g]
            d.addCallback(handle_success, g.context)
            d.addErrback(handle_errors, g.context)
        else:
            self.outstanding_requests[targetkey][2] += 1
            self.log_msg('Skipping: another outstanding request to "%s" (%s) is found from %s.'
                         % (g.context, hostname, self.outstanding_requests[targetkey][1]),
                         logLevel=logging.DEBUG)

def execute(self, cmd, args):
    hostname = yield db.get(self.context, 'hostname')
    remote_salt_key_cmd = get_config().getstring('salt', 'remote_key_command', None)
    if remote_salt_key_cmd:
        try:
            output = subprocess.check_output([remote_salt_key_cmd, self._remote_option, hostname,
                                              '--no-color', '--out=raw'])
            log.msg('Salt output: %s' % output, system='action-accept')
        except subprocess.CalledProcessError as e:
            cmd.write("%s\n" % format_error(e))
    else:
        try:
            import salt.config
            from salt.key import Key
            c_path = get_config().getstring('salt', 'master_config_path', '/etc/salt/master')
            opts = salt.config.client_config(c_path)
            yield getattr(Key(opts), self._action)(hostname)
        except Exception as e:
            cmd.write("%s\n" % format_error(e))

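# The config lookups above imply a [salt] section in the OMS configuration.
# A hypothetical sketch of the relevant options (option names are taken from
# the code; the values shown are assumptions, not defaults):
#
#   [salt]
#   # optional wrapper command; when set, keys are managed via subprocess
#   remote_key_command = /usr/local/bin/oms-salt-key
#   # used only by the in-process salt.key.Key fallback path
#   master_config_path = /etc/salt/master
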
def _execute(self, cmd, args):

    @db.ro_transact
    def get_destination():
        return (args.__parent__ if IVirtualizationContainer.providedBy(args)
                else cmd.traverse(args.dest_path))

    @db.ro_transact
    def get_hostname(target):
        return target.hostname

    name = yield db.get(self.context, '__name__')
    source_vms = yield db.get(self.context, '__parent__')

    destination = yield get_destination()
    assert ICompute.providedBy(destination), 'Destination must be a Compute'
    assert not IVirtualCompute.providedBy(destination), 'Cannot migrate to a VM'

    destination_hostname = yield get_hostname(destination)
    destination_vms = follow_symlinks(destination['vms'])
    assert (yield db.get(destination_vms, 'backend')) == (yield db.get(source_vms, 'backend')), \
        'Destination backend is different from source'

    @db.transact
    def set_additional_keys():
        self._additional_keys = [canonical_path(destination_vms), canonical_path(destination)]
    yield set_additional_keys()

    yield self.reacquire_until_clear()

    log.msg('Initiating migration for %s to %s' % (name, destination_hostname), system='migrate')

    try:
        if not (yield self._check_vm_pre(cmd, name, destination_hostname, destination_vms)):
            return

        source_submitter = IVirtualizationContainerSubmitter(source_vms)
        yield source_submitter.submit(IMigrateVM, name, destination_hostname, (not args.offline), False)
        log.msg('Migration done. Checking... %s' % destination_vms, system='migrate')

        if (yield self._check_vm_post(cmd, name, destination_hostname, destination_vms)):
            log.msg('Migration finished successfully!', system='migrate')

            @db.transact
            def mv_and_inherit():
                machines = db.get_root()['oms_root']['machines']
                computes = db.get_root()['oms_root']['computes']
                try:
                    destination_compute = machines[destination.__name__]
                    vm_compute = follow_symlinks(computes[self.context.__name__])
                    vm_compute.failure = destination_compute.failure
                    vm_compute.suspicious = destination_compute.suspicious
                    dvms = follow_symlinks(destination_compute['vms'])
                    dvms.add(vm_compute)
                    log.msg('Model moved.', system='migrate')
                except IndexError:
                    log.msg('Model NOT moved: destination compute or vms do not exist',
                            system='migrate')
                except KeyError:
                    log.msg('Model NOT moved: already moved by sync?', system='migrate')
            yield mv_and_inherit()
    except OperationRemoteError as e:
        self._action_log(cmd, 'Failed migration of %s to %s: remote error %s'
                         % (name, destination_hostname,
                            '\n%s' % e.remote_tb if e.remote_tb else ''),
                         logLevel=ERROR, system='migrate')

def handle_virtual_compute_config_change_request(compute, event):
    c = sudo(compute)
    compute_p = yield db.get(c, '__parent__')
    compute_type = yield db.get(compute_p, 'backend')
    # At the moment we only handle openvz backend updates (OMS-568)
    if compute_type != 'openvz':
        return

    update_param_whitelist = ['diskspace', 'memory', 'num_cores', 'swap_size']

    param_modifier = {'diskspace': lambda d: d['total']}

    unit_corrections_coeff = {'memory': 1 / 1024.0,
                              'swap_size': 1 / 1024.0,
                              'diskspace': 1 / 1024.0}

    params_to_update = filter(lambda (k, v): k in update_param_whitelist, event.modified.iteritems())

    if len(params_to_update) == 0:
        return

    # correct unit coefficients (usually MB -> GB)
    params_to_update = map(lambda (k, v): (k, param_modifier.get(k, lambda x: x)(v)), params_to_update)
    params_to_update = map(lambda (k, v): (k, unit_corrections_coeff.get(k) * v
                                           if k in unit_corrections_coeff else v), params_to_update)

    @db.transact
    def update_vm_limits(cpu_limit):
        logger.debug("Setting cpu_limit to %s, previous value %s" % (cpu_limit / 100.0, c.cpu_limit))
        c.cpu_limit = cpu_limit / 100.0

    cores_setting = filter(lambda (k, v): k == 'num_cores', params_to_update)
    if len(cores_setting) == 1:
        # adjust cpu_limit to follow the number of cores as well
        cpu_limit = int(cores_setting[0][1] * get_config().getfloat('vms', 'cpu_limit', 80))
        log.msg("Updating cpulimit to %s" % cpu_limit, system='vm-configuration-update')

        params_to_update.append(('cpu_limit', cpu_limit))
        yield update_vm_limits(cpu_limit)

    submitter = IVirtualizationContainerSubmitter((yield db.get(compute, '__parent__')))

    try:
        yield submitter.submit(IUpdateVM, (yield db.get(compute, '__name__')), dict(params_to_update))
    except Exception as e:
        @db.transact
        def reset_to_original_values():
            for mk, mv in event.modified.iteritems():
                setattr(compute, mk, event.original[mk])

        yield reset_to_original_values()
        raise e  # must re-throw, because sys.exc_info seems to get erased with the yield
    else:
        owner = (yield db.get(compute, '__owner__'))
        UserLogger(subject=compute, owner=owner).log('Compute "%s" configuration changed' % compute)

def _execute(self, cmd, args):
    template = yield db.get(self.context, 'template')

    if not template:
        self._action_log(cmd, 'Cannot deploy %s (%s) because no template was specified'
                         % (self.context.hostname, self.context), system='deploy', logLevel=ERROR)
        return

    if (yield db.ro_transact(IDeployed.providedBy)(self.context)):
        log.msg('Attempt to deploy a deployed compute: %s' % (self.context), system='deploy')
        return

    @db.transact
    def allocate_ip_address():
        ippools = db.get_root()['oms_root']['ippools']
        ip = ippools.allocate()
        if ip is not None:
            self._action_log(cmd, 'Allocated IP: %s for %s' % (ip, self.context), system='deploy')
            ulog = UserLogger(principal=cmd.protocol.interaction.participations[0].principal,
                              subject=self.context, owner=self.context.__owner__)
            ulog.log('Allocated IP for %s: %s' % (self.context, ip))
            return ip
        else:
            raise Exception('Could not allocate IP for the new compute: pools exhausted or undefined')

    @db.transact
    def cleanup_root_password():
        if getattr(self.context, 'root_password', None) is not None:
            self.context.root_password = None

    target = (args if IVirtualizationContainer.providedBy(args)
              else (yield db.get(self.context, '__parent__')))

    try:
        yield db.transact(alsoProvides)(self.context, IDeploying)

        vm_parameters = yield self.get_parameters()

        ipaddr = netaddr.IPAddress(vm_parameters['ip_address'])
        if vm_parameters['ip_address'] in (None, u'0.0.0.0/32', u'0.0.0.0', '0.0.0.0/32', '0.0.0.0'):
            ipaddr = yield allocate_ip_address()
            vm_parameters.update({'ip_address': str(ipaddr)})

        utils = getAllUtilitiesRegisteredFor(IPreDeployHook)
        for util in utils:
            yield defer.maybeDeferred(util.execute, self.context, cmd, vm_parameters)

        log.msg('Deploying %s to %s: issuing agent command' % (self.context, target), system='deploy')
        res = yield IVirtualizationContainerSubmitter(target).submit(IDeployVM, vm_parameters)
        yield cleanup_root_password()

        name = yield db.get(self.context, '__name__')
        hostname = yield db.get(self.context, 'hostname')
        owner = yield db.get(self.context, '__owner__')
        owner_obj = getUtility(IAuthentication).getPrincipal(owner)

        log.msg('Checking post-deploy...', system='deploy')

        if not (yield self._check_vm_post(cmd, name, hostname, target)):
            self._action_log(cmd, 'Deployment failed. Deployment request result: %s' % res,
                             system='deploy')
            return

        @db.transact
        def add_deployed_model_remove_from_hangar(c, target):
            path = canonical_path(target)
            target = traverse1(path)

            cpath = canonical_path(c)
            c = traverse1(cpath)
            if c is None:
                raise Exception('Compute not found: "%s"' % cpath)

            new_compute = Compute(unicode(hostname), u'inactive')
            new_compute.__name__ = name
            new_compute.__owner__ = owner_obj
            new_compute.template = unicode(template)
            new_compute._ipv4_address = unicode(ipaddr)
            new_compute.mac_address = getattr(c, 'mac_address', None)
            new_compute.memory = getattr(c, 'memory', 0)
            new_compute.diskspace = getattr(c, 'diskspace', {u'total': 0})
            new_compute.num_cores = getattr(c, 'num_cores', 0)

            alsoProvides(new_compute, IVirtualCompute)
            alsoProvides(new_compute, IDeployed)
            noLongerProvides(new_compute, IManageable)
            target.add(new_compute)

            container = c.__parent__
            del container[name]

            timestamp = int(time.time() * 1000)
            IStream(new_compute).add((timestamp, {'event': 'change',
                                                  'name': 'features',
                                                  'value': new_compute.features,
                                                  'old_value': self.context.features}))
            IStream(new_compute).add((timestamp, {'event': 'change',
                                                  'name': 'ipv4_address',
                                                  'value': new_compute._ipv4_address,
                                                  'old_value': self.context._ipv4_address}))

        yield add_deployed_model_remove_from_hangar(self.context, target)

        self._action_log(cmd, 'Deployment of "%s"(%s) is finished'
                         % (vm_parameters['hostname'], self.context.__name__), system='deploy')

        auto_allocate = get_config().getboolean('vms', 'auto_allocate', True)
        if not auto_allocate and not get_config().getboolean('stats', 'only_report_on_sync', True):
            yield defer.maybeDeferred(getUtility(IUserStatisticsProvider).update, owner)
    except Exception as e:
        log.err(system='deploy')

        @db.transact
        def cleanup_deploying():
            noLongerProvides(self.context, IDeploying)
        yield cleanup_deploying()
        raise e

def _execute(self, cmd, args):

    @db.ro_transact
    def get_destination():
        return (args.__parent__ if IVirtualizationContainer.providedBy(args)
                else cmd.traverse(args.dest_path))

    @db.ro_transact
    def get_hostname(target):
        return target.hostname

    name = yield db.get(self.context, '__name__')
    source_vms = yield db.get(self.context, '__parent__')

    destination = yield get_destination()
    assert ICompute.providedBy(destination), 'Destination must be a Compute'
    assert not IVirtualCompute.providedBy(destination), 'Cannot migrate to a VM'

    destination_hostname = yield get_hostname(destination)
    destination_vms = follow_symlinks(destination['vms'])
    assert (yield db.get(destination_vms, 'backend')) == (yield db.get(source_vms, 'backend')), \
        'Destination backend is different from source'

    @db.transact
    def set_additional_keys():
        self._additional_keys = [canonical_path(destination_vms), canonical_path(destination)]
    yield set_additional_keys()

    yield self.reacquire_until_clear()

    log.msg('Initiating migration for %s to %s' % (name, destination_hostname), system='migrate')

    try:
        if not (yield self._check_vm_pre(name, destination_hostname, destination_vms)):
            return

        source_submitter = IVirtualizationContainerSubmitter(source_vms)
        yield source_submitter.submit(IMigrateVM, name, destination_hostname, (not args.offline), False)
        log.msg('Migration done. Checking... %s' % destination_vms, system='migrate')

        if (yield self._check_vm_post(cmd, name, destination_hostname, destination_vms)):
            log.msg('Migration finished successfully!', system='migrate')

            @db.transact
            def mv_and_inherit():
                machines = db.get_root()['oms_root']['machines']
                computes = db.get_root()['oms_root']['computes']
                try:
                    destination_compute = machines[destination.__name__]
                    vm_compute = follow_symlinks(computes[self.context.__name__])
                    vm_compute.failure = destination_compute.failure
                    vm_compute.suspicious = destination_compute.suspicious
                    dvms = follow_symlinks(destination_compute['vms'])
                    dvms.add(vm_compute)
                    log.msg('Model moved.', system='migrate')
                except IndexError:
                    log.msg('Model NOT moved: destination compute or vms do not exist',
                            system='migrate')
                except KeyError:
                    log.msg('Model NOT moved: already moved by sync?', system='migrate')
            yield mv_and_inherit()
    except OperationRemoteError as e:
        self._action_log(cmd, 'Failed migration of %s to %s: remote error %s'
                         % (name, destination_hostname,
                            '\n%s' % e.remote_tb if e.remote_tb else ''),
                         logLevel=ERROR, system='migrate')

def _execute(self, cmd, args):
    template = yield db.get(self.context, 'template')

    if not template:
        self._action_log(cmd, 'Cannot deploy %s (%s) because no template was specified'
                         % (self.context.hostname, self.context), system='deploy', logLevel=ERROR)
        return

    if (yield db.ro_transact(IDeployed.providedBy)(self.context)):
        log.msg('Attempt to deploy a deployed compute: %s' % (self.context), system='deploy')
        return

    @db.transact
    def allocate_ip_address():
        ippools = db.get_root()['oms_root']['ippools']
        ip = ippools.allocate()
        if ip is not None:
            self._action_log(cmd, 'Allocated IP: %s for %s' % (ip, self.context), system='deploy')
            ulog = UserLogger(principal=cmd.protocol.interaction.participations[0].principal,
                              subject=self.context, owner=self.context.__owner__)
            ulog.log('Allocated IP for %s: %s' % (self.context, ip))
            return ip
        else:
            raise Exception('Could not allocate IP for the new compute: pools exhausted or undefined')

    @db.transact
    def cleanup_root_password():
        if getattr(self.context, 'root_password', None) is not None:
            self.context.root_password = None

    @db.transact
    def adjust_cpulimit():
        """Set cpulimit to a configured percentage * cores"""
        cores = getattr(self.context, 'num_cores', 1)
        cpu_limit_factor = get_config().getfloat('vms', 'cpu_limit', 80)
        cpu_limit = cores * cpu_limit_factor / 100.0
        log.msg("Updating cpulimit to %s" % cpu_limit, system='deploy')
        self.context.cpu_limit = cpu_limit

    target = (args if IVirtualizationContainer.providedBy(args)
              else (yield db.get(self.context, '__parent__')))

    try:
        yield db.transact(alsoProvides)(self.context, IDeploying)

        vm_parameters = yield self.get_parameters()

        ipaddr = netaddr.IPAddress(vm_parameters['ip_address'])
        if vm_parameters['ip_address'] in (None, u'0.0.0.0/32', u'0.0.0.0', '0.0.0.0/32', '0.0.0.0'):
            ipaddr = yield allocate_ip_address()
            vm_parameters.update({'ip_address': str(ipaddr)})

        utils = getAllUtilitiesRegisteredFor(IPreDeployHook)
        for util in utils:
            yield defer.maybeDeferred(util.execute, self.context, cmd, vm_parameters)

        log.msg('Deploying %s to %s: issuing agent command' % (self.context, target), system='deploy')
        res = yield IVirtualizationContainerSubmitter(target).submit(IDeployVM, vm_parameters)
        yield cleanup_root_password()
        yield adjust_cpulimit()

        name = yield db.get(self.context, '__name__')
        hostname = yield db.get(self.context, 'hostname')
        owner = yield db.get(self.context, '__owner__')
        owner_obj = getUtility(IAuthentication).getPrincipal(owner)

        log.msg('Checking post-deploy...', system='deploy')

        @db.transact
        def set_notify_admin():
            if self.context.notify_admin:
                self.context.license_activated = False
                admin_logger.warning('%s (hostname=%s; owner=%s; targethost=%s(%s); ipaddr=%s) '
                                     'requires activation!',
                                     self.context, self.context.hostname, self.context.__owner__,
                                     target.__parent__, target.__parent__.hostname,
                                     vm_parameters['ip_address'])
        yield set_notify_admin()

        if not (yield self._check_vm_post(cmd, name, hostname, target)):
            self._action_log(cmd, 'Deployment failed. Request result: %s' % res, system='deploy')
            return

        @db.transact
        def add_deployed_model_remove_from_hangar(c, target):
            path = canonical_path(target)
            target = traverse1(path)

            cpath = canonical_path(c)
            c = traverse1(cpath)
            if c is None:
                raise Exception('Compute not found: "%s"' % cpath)

            new_compute = Compute(unicode(hostname), u'inactive')
            new_compute.__name__ = name
            new_compute.__owner__ = owner_obj
            new_compute.template = unicode(template)
            new_compute._ipv4_address = unicode(ipaddr)
            new_compute.mac_address = getattr(c, 'mac_address', None)
            new_compute.memory = getattr(c, 'memory', 0)
            new_compute.diskspace = getattr(c, 'diskspace', {u'total': 0})
            new_compute.num_cores = getattr(c, 'num_cores', 0)
            new_compute.license_activated = getattr(c, 'license_activated', True)

            alsoProvides(new_compute, IVirtualCompute)
            alsoProvides(new_compute, IDeployed)
            noLongerProvides(new_compute, IManageable)
            target.add(new_compute)

            container = c.__parent__
            del container[name]

            timestamp = int(time.time() * 1000)
            IStream(new_compute).add((timestamp, {'event': 'change',
                                                  'name': 'features',
                                                  'value': new_compute.features,
                                                  'old_value': self.context.features}))
            IStream(new_compute).add((timestamp, {'event': 'change',
                                                  'name': 'ipv4_address',
                                                  'value': new_compute._ipv4_address,
                                                  'old_value': self.context._ipv4_address}))

        yield add_deployed_model_remove_from_hangar(self.context, target)

        self._action_log(cmd, 'Deployment of "%s"(%s) is finished'
                         % (vm_parameters['hostname'], self.context.__name__), system='deploy')

        auto_allocate = get_config().getboolean('vms', 'auto_allocate', True)
        if not auto_allocate and not get_config().getboolean('stats', 'only_report_on_sync', True):
            yield defer.maybeDeferred(getUtility(IUserStatisticsProvider).update, owner)
    except Exception as e:
        log.err(system='deploy')

        @db.transact
        def cleanup_deploying():
            noLongerProvides(self.context, IDeploying)
        yield cleanup_deploying()
        raise e

def add_log_event(self, cmd, msg, *args, **kwargs):
    self._action_log(cmd, msg)
    owner = yield db.get(self.context, '__owner__')
    ulog = UserLogger(principal=cmd.protocol.interaction.participations[0].principal,
                      subject=self.context, owner=owner)
    ulog.log(msg, *args, **kwargs)