def mv_and_inherit():
    """Move the VM model under the destination compute and inherit its
    failure/suspicious health flags; failures are logged, never raised."""
    root = db.get_root()['oms_root']
    machines = root['machines']
    computes = root['computes']
    try:
        dest = machines[destination.__name__]
        vm = follow_symlinks(computes[self.context.__name__])
        # Inherit health flags from the destination host.
        vm.failure = dest.failure
        vm.suspicious = dest.suspicious
        follow_symlinks(dest['vms']).add(vm)
        log.msg('Model moved.', system='migrate')
    except IndexError:
        log.msg('Model NOT moved: destination compute or vms do not exist',
                system='migrate')
    except KeyError:
        log.msg('Model NOT moved: already moved by sync?', system='migrate')
def get_compute_ips():
    """Mark each compute's primary IPv4 address as used in its IP pool."""
    try:
        links = db.get_root()['oms_root']['computes'].listcontent()
        computes = [follow_symlinks(c) for c in links
                    if ICompute.providedBy(follow_symlinks(c))]
        pools = db.get_root()['oms_root']['ippools']
        for compute in computes:
            address = IPAddress(compute.ipv4_address.split('/')[0])
            pool = pools.find_pool(address)
            # Only reserve addresses that are not already taken.
            if pool is not None and not pool.get(address):
                log.msg('Marking %s as used...' % address, system='sync-ippool')
                pool.use(address)
    except Exception:
        log.err(system='sync-ippool')
        raise
def get_computes():
    """Return (compute, hostname) pairs for every compute under oms_root."""
    oms_root = db.get_root()['oms_root']
    result = []
    for link in oms_root['computes'].listcontent():
        target = follow_symlinks(link)
        if ICompute.providedBy(target):
            result.append((target, target.hostname))
    return result
def _items(self):
    """Collect every compute reachable below /machines, keyed by name,
    as Symlink entries."""
    machines = db.get_root()['oms_root']['machines']
    computes = {}

    def allowed_classes_gen(item):
        # Lazily imported to avoid import cycles at module load time.
        from opennode.knot.model.machines import Machines
        from opennode.knot.model.virtualizationcontainer import IVirtualizationContainer
        yield isinstance(item, Machines)
        yield isinstance(item, Computes)
        yield ICompute.providedBy(item)
        yield IVirtualizationContainer.providedBy(item)
        yield IHangar.providedBy(item)

    def collect(container):
        visited = set()
        for item in container.listcontent():
            if ICompute.providedBy(item):
                computes[item.__name__] = Symlink(item.__name__, item)
            # Recurse only into container types we know about.
            if any(allowed_classes_gen(item)) and item.__name__ not in visited:
                visited.add(item.__name__)
                collect(item)

    collect(machines)
    return computes
def execute(self, args):
    """Run InstallSaltAction on every physical (non-virtual) compute."""
    for machine in db.get_root()['oms_root']['machines']:
        is_physical = (ICompute.providedBy(machine)
                       and not IVirtualCompute.providedBy(machine))
        if is_physical:
            InstallSaltAction(machine).execute(DetachedProtocol(), object())
def _items(self):
    """Collect every template reachable below /machines, keyed by name,
    as Symlink entries."""
    # break an import cycle
    from opennode.oms.zodb import db
    machines = db.get_root()['oms_root']['machines']
    templates = {}

    def allowed_classes_gen(item):
        # Lazily imported to avoid import cycles at module load time.
        from opennode.knot.model.compute import ICompute, IVirtualCompute
        from opennode.knot.model.machines import Machines
        from opennode.knot.model.virtualizationcontainer import IVirtualizationContainer
        yield isinstance(item, Machines)
        yield isinstance(item, Templates)
        yield IVirtualizationContainer.providedBy(item)
        yield ICompute.providedBy(item)
        yield IVirtualCompute.providedBy(item)

    def collect(container):
        visited = set()
        for item in container.listcontent():
            # First occurrence of a template name wins.
            if ITemplate.providedBy(item) and item.__name__ not in templates:
                templates[item.__name__] = Symlink(item.__name__, item)
            # Recurse only into container types we know about.
            if any(allowed_classes_gen(item)) and item.__name__ not in visited:
                visited.add(item.__name__)
                collect(item)

    collect(machines)
    return templates
def render(self, request):
    """Render the event streams requested by the client.

    The request body is a JSON list of model paths; the response is
    ``[timestamp, {index: events}]``.  A subscription hash caches the
    parsed body so pollers can resend only the hash.
    """
    # Millisecond timestamp used both for 'delete' events and the reply.
    timestamp = int(time.time() * 1000)
    oms_root = db.get_root()['oms_root']
    # Query-string knobs; twisted request.args values are lists.
    limit = int(request.args.get('limit', ['100'])[0])
    after = int(request.args.get('after', ['0'])[0])
    subscription_hash = request.args.get('subscription_hash', [''])[0]
    if subscription_hash:
        # Client resent a previously registered subscription.
        if subscription_hash in self.cached_subscriptions:
            data = self.cached_subscriptions[subscription_hash]
        else:
            raise BadRequest("Unknown subscription hash")
    elif not request.content.getvalue():
        # No hash and no body: nothing to stream.
        return {}
    else:
        # New subscription: parse the body, cache it under its SHA1 and
        # hand the hash back to the client for subsequent polls.
        data = json.load(request.content)
        subscription_hash = sha1(request.content.getvalue()).hexdigest()
        self.cached_subscriptions[subscription_hash] = data
        request.responseHeaders.addRawHeader('X-OMS-Subscription-Hash', subscription_hash)

    def val(r):
        # Resolve one subscribed path; a dangling path yields a synthetic
        # 'delete' event so the client can drop it.
        objs, unresolved_path = traverse_path(oms_root, r)
        if unresolved_path:
            return [(timestamp, dict(event='delete', name=os.path.basename(r), url=r))]
        return IStream(objs[-1]).events(after, limit=limit)

    # ONC wants it in ascending time order
    # while internally we prefer to keep it newest first to
    # speed up filtering.
    # Reversed is not json serializable so we have to reify to list.
    res = [list(reversed(val(resource))) for resource in data]
    # Keep only non-empty streams, keyed by their position in the request.
    res = [(i, v) for i, v in enumerate(res) if v]
    return [timestamp, dict(res)]
def test_modify_compute(self):
    """`set` updates the hostname; bad paths and schemaless targets error."""
    container = db.get_root()['oms_root']['computes']
    compute_id = container.add(self.make_compute())
    transaction.commit()

    self._cmd('set computes/%s hostname=TUX-FOR-TEST' % compute_id)
    self.terminal.reset_mock()
    self._cmd('cat computes/%s' % compute_id)
    with assert_mock(self.terminal) as t:
        t.write('Host name: TUX-FOR-TEST\n')
        whatever(t)
        t.write('Architecture: x86_64, linux, centos\n')
        whatever(t)
        t.write('State: active\n')
        whatever(t)
        t.write('RAM Size: 2000\n')

    self.terminal.reset_mock()
    self._cmd('set computes/123')
    with assert_mock(self.terminal) as t:
        t.write("No such object: computes/123\n")

    self.terminal.reset_mock()
    self._cmd('set computes')
    with assert_mock(self.terminal) as t:
        t.write("No schema found for object\n")
def test_rm_compute(self):
    """`rm` removes a compute; `cat` and repeated `rm` then report it missing."""
    self._cmd('cat computes/1')
    with assert_mock(self.terminal) as t:
        t.write("No such object: computes/1\n")

    self.terminal.reset_mock()
    container = db.get_root()['oms_root']['computes']
    compute_id = container.add(self.make_compute())
    transaction.commit()

    self._cmd('cat computes/%s' % compute_id)
    with assert_mock(self.terminal) as t:
        t.write('Host name: tux-for-test\n')
        whatever(t)
        t.write('Architecture: x86_64, linux, centos\n')
        whatever(t)
        t.write('State: active\n')
        whatever(t)
        t.write('RAM Size: 2000\n')

    self._cmd('rm computes/%s' % compute_id)
    self.terminal.reset_mock()
    self._cmd('cat computes/%s' % compute_id)
    with assert_mock(self.terminal) as t:
        t.write("No such object: computes/%s\n" % compute_id)

    self.terminal.reset_mock()
    self._cmd('rm computes/%s' % compute_id)
    with assert_mock(self.terminal) as t:
        t.write("No such object: computes/%s\n" % compute_id)
def _items(self):
    """Gather every template reachable below /machines, keyed by name."""
    # break an import cycle
    from opennode.oms.zodb import db
    machines = db.get_root()['oms_root']['machines']
    found = {}

    def is_traversable(item):
        # Deferred imports avoid circular dependencies at module load time.
        from opennode.knot.model.compute import ICompute, IVirtualCompute
        from opennode.knot.model.machines import Machines
        from opennode.knot.model.virtualizationcontainer import IVirtualizationContainer
        return (isinstance(item, Machines)
                or isinstance(item, Templates)
                or IVirtualizationContainer.providedBy(item)
                or ICompute.providedBy(item)
                or IVirtualCompute.providedBy(item))

    def walk(container):
        visited = set()
        for item in container.listcontent():
            # First occurrence of a template name wins.
            if ITemplate.providedBy(item) and item.__name__ not in found:
                found[item.__name__] = Symlink(item.__name__, item)
            if is_traversable(item) and item.__name__ not in visited:
                visited.add(item.__name__)
                walk(item)

    walk(machines)
    return found
def get_computes():
    """Return a list of (compute, hostname) tuples for every compute."""
    oms_root = db.get_root()['oms_root']
    followed = map(follow_symlinks, oms_root['computes'].listcontent())
    return [(c, c.hostname) for c in followed if ICompute.providedBy(c)]
def _add(self, item):
    """Add item under /machines; virtual computes go into the hangar."""
    machines = db.get_root()['oms_root']['machines']
    # TODO: fix adding computes to vms instead of hangar
    if not machines.hangar['vms']:
        pass
    if IVirtualCompute.providedBy(item):
        target = machines.hangar
    else:
        target = machines
    return target.add(item)
def test_modify_compute_tags(self):
    """Tags are added with `tags=a,b` and removed with a `-` prefix."""
    container = db.get_root()['oms_root']['computes']
    compute = self.make_compute()
    compute_id = container.add(compute)
    transaction.commit()

    self._cmd('set computes/%s tags=taga,tagb' % compute_id)
    self.terminal.reset_mock()
    self._cmd('cat computes/%s' % compute_id)
    with assert_mock(self.terminal) as t:
        whatever(t)
        t.write('Tags: arch:centos, arch:linux, arch:x86_64, label:taga, label:tagb, state:active, type:compute\n')

    self._cmd('set computes/%s tags=taga,-tagb' % compute_id)
    self.terminal.reset_mock()
    self._cmd('cat computes/%s' % compute_id)
    with assert_mock(self.terminal) as t:
        whatever(t)
        t.write('Tags: arch:centos, arch:linux, arch:x86_64, label:taga, state:active, type:compute\n')
def get_credit(self, username):
    """Return the credit of the given user's profile, or 0 when the
    profile lookup yields nothing."""
    profile = db.get_root()['oms_root']['home'][username]
    if not profile:
        log.warning('%s is not found among user profiles under /home!', username)
        return 0
    return profile.credit
def get_gatherers():
    """Return IMetricsGatherer adapters for all healthy (non-failed) computes."""
    oms_root = db.get_root()["oms_root"]
    healthy = []
    for link in oms_root["computes"].listcontent():
        compute = follow_symlinks(link)
        if compute and ICompute.providedBy(compute) and not compute.failure:
            healthy.append(compute)
    # Drop computes that have no gatherer adapter registered.
    return [g for g in (queryAdapter(c, IMetricsGatherer) for c in healthy) if g]
def test_modify_compute_errors(self):
    """Schema validation errors from `set` are reported to the terminal."""
    container = db.get_root()['oms_root']['computes']
    compute_id = container.add(self.make_compute())
    transaction.commit()
    self._cmd('set computes/%s hostname=x' % compute_id)
    with assert_mock(self.terminal) as t:
        t.write("hostname: Value is too short\n")
def test_complete_container_symlink(self):
    """Tab-completing a symlinked container path appends a trailing slash."""
    container = db.get_root()['oms_root']['computes']
    compute_id = container.add(self.make_compute())
    transaction.commit()
    self._tab_after('cd /computes/%s' % compute_id)
    with assert_mock(self.terminal) as t:
        t.write('/')
def get_compute_ips():
    """Reserve each compute's IPv4 address in the corresponding IP pool."""
    try:
        contents = db.get_root()['oms_root']['computes'].listcontent()
        targets = [c for c in map(follow_symlinks, contents)
                   if ICompute.providedBy(c)]
        pools = db.get_root()['oms_root']['ippools']
        for compute in targets:
            addr = IPAddress(compute.ipv4_address.split('/')[0])
            pool = pools.find_pool(addr)
            # Only reserve addresses that are not already taken.
            if pool is not None and not pool.get(addr):
                log.msg('Marking %s as used...' % addr, system='sync-ippool')
                pool.use(addr)
    except Exception:
        log.err(system='sync-ippool')
        raise
def get_computes(self, args):
    """Return the virtual computes owned by the user given in args.u."""
    computes = db.get_root()['oms_root']['computes']
    return [c for c in map(follow_symlinks, computes.listcontent())
            if IVirtualCompute.providedBy(c) and c.__owner__ == args.u]
def get_user_log():
    """Write the current user's event log to the terminal, newest first."""
    eventlog = db.get_root()['oms_root']['eventlog']
    if self.user.id not in eventlog.listnames():
        return
    events = sorted(eventlog[self.user.id].listcontent(),
                    key=lambda ev: ev.timestamp, reverse=True)
    for ev in events:
        self.write('%s %s %s\n' % (ev.timestamp, ev.levelname, ev.message))
def get_computes(self, username):
    """Return the deployed virtual computes owned by username."""
    computes = db.get_root()['oms_root']['computes']
    return [c for c in map(follow_symlinks, computes.listcontent())
            if IVirtualCompute.providedBy(c)
            and c.__owner__ == username
            and IDeployed.providedBy(c)]
def test_context_dependent_help(self):
    """`set <obj> -h` lists the object's schema-derived keyword switches."""
    container = db.get_root()['oms_root']['computes']
    compute_id = container.add(self.make_compute())
    transaction.commit()
    self.terminal.reset_mock()
    self._cmd('set computes/%s -h' % compute_id)
    with assert_mock(self.terminal) as t:
        assert 'hostname=' in current_call(t).arg
def render_GET(self, request):
    """Serve search results for ?q=...; fall back to the default GET
    rendering when the query is empty."""
    query = request.args.get('q', [''])[0]
    if not query:
        return super(SearchView, self).render_GET(request)
    search = db.get_root()['oms_root']['search']
    result = SearchResult(search, query.decode('utf-8'))
    return IHttpRestView(result).render_GET(request)
def mv_and_inherit():
    """Relocate the VM model to the destination compute, copying over its
    failure/suspicious flags; problems are logged rather than raised."""
    root = db.get_root()['oms_root']
    machines = root['machines']
    computes = root['computes']
    try:
        dest_compute = machines[destination.__name__]
        moved = follow_symlinks(computes[self.context.__name__])
        moved.failure = dest_compute.failure
        moved.suspicious = dest_compute.suspicious
        destination_vms = follow_symlinks(dest_compute['vms'])
        destination_vms.add(moved)
        log.msg('Model moved.', system='migrate')
    except IndexError:
        log.msg('Model NOT moved: destination compute or vms do not exist',
                system='migrate')
    except KeyError:
        log.msg('Model NOT moved: already moved by sync?', system='migrate')
def flush():
    """Drain all queued log records into the event log."""
    eventlog = db.get_root()['oms_root']['eventlog']
    try:
        # Empty is still caught in case the queue races between the
        # empty() check and get_nowait().
        while not self.queue.empty():
            eventlog.add_event(self.queue.get_nowait())
    except Empty:
        pass
def allocate_ip_address():
    """Allocate an IP from the pools, log it and return it; raise when
    the pools are exhausted or undefined."""
    ippools = db.get_root()['oms_root']['ippools']
    ip = ippools.allocate()
    if ip is None:
        raise Exception('Could not allocate IP for the new compute: pools exhausted or undefined')
    self._action_log(cmd, 'Allocated IP: %s for %s' % (ip, self.context), system='deploy')
    ulog = UserLogger(principal=cmd.protocol.interaction.participations[0].principal,
                      subject=self.context, owner=self.context.__owner__)
    ulog.log('Allocated IP for %s: %s' % (self.context, ip))
    return ip
def finalize_vm():
    """Free the compute's IP address and flip its deployment markers from
    IDeployed to IUndeployed."""
    ippools = db.get_root()['oms_root']['ippools']
    ip = netaddr.IPAddress(self.context.ipv4_address.split('/')[0])
    if ippools.free(ip):
        logger = UserLogger(principal=cmd.protocol.interaction.participations[0].principal,
                            subject=self.context, owner=self.context.__owner__)
        logger.log('Deallocated IP: %s', ip)
    vm = traverse1(canonical_path(self.context))
    if vm is not None:
        noLongerProvides(vm, IDeployed)
        alsoProvides(vm, IUndeployed)
def handle_request(self, request):
    """Takes a request, maps it to a domain object and a
    corresponding IHttpRestView and returns the rendered output
    of that view.
    """
    principal = self.check_auth(request)
    oms_root = db.get_root()['oms_root']
    # Resolve the URL against the model tree; whatever cannot be traversed
    # is left in unresolved_path for the view to interpret.
    objs, unresolved_path = traverse_path(oms_root, request.path[1:])
    if not objs and unresolved_path:
        # Nothing matched at all: fall back to the root object.
        objs = [oms_root]
    obj = objs[-1]
    interaction = self.get_interaction(request, principal)
    request.interaction = interaction
    if self.use_security_proxy:
        obj = proxy_factory(obj, interaction)
    view = self.find_view(obj, unresolved_path, request)
    # Must be queried before the view is wrapped in a security proxy.
    needs_rw_transaction = view.rw_transaction(request)
    # create a security proxy if we have a secured interaction
    if interaction:
        try:
            view = proxy_factory(view, interaction)
        except:
            # XXX: TODO: define a real exception for this proxy creation error
            # right now we want to ignore security when there are no declared rules
            # on how to secure a view
            pass

    def get_renderer(view, method):
        # Attribute access may be vetoed by the security proxy.
        try:
            return getattr(view, method, None)
        except zope.security.interfaces.Unauthorized:
            raise Forbidden('User does not have permission to access this resource')

    # Try the most specific renderer first: render_GET, render_get, render.
    for method in ('render_' + request.method, 'render_' + request.method.lower(), 'render'):
        renderer = get_renderer(view, method)
        if renderer:
            from opennode.oms.endpoint.httprest.auth import AuthView
            # AuthView's generic render additionally needs the keystone flag.
            if isinstance(view, AuthView) and renderer.__name__ == 'render':
                res = renderer(request, self.use_keystone_tokens)
            else:
                res = renderer(request)
            # Read-only renders are wrapped so the transaction rolls back.
            return res if needs_rw_transaction else db.RollbackValue(res)
    raise NotImplementedError("Method %s is not implemented in %s\n" % (request.method, view))
def run():
    """Script entry point: build a chart of the OMS model into the file
    named on the command line."""
    dbroot = db.get_root()
    oms_root = dbroot['oms_root']
    import sys
    if len(sys.argv) < 2:
        # Python 2 print statement; this module predates print().
        print "Usage: %s output_file_name" % sys.argv[0]
    else:
        setup_environ()
        output_file = sys.argv[1]
        build_chart(oms_root, output_file)
def get_matching_machines(container): all_machines = db.get_root()['oms_root']['machines'] param = unicode(get_config().getstring('allocate', 'diskspace_filter_param', default=u'/storage')) def condition_generator(m): yield ICompute.providedBy(m) yield find_compute_v12n_container(m, container) yield not getattr(m, 'exclude_from_allocation', None) if not get_config().getboolean('overcommit', 'memory', False): yield self.context.memory_usage < m.memory else: log.msg('Memory filtering is disabled.', system='action-allocate') if not get_config().getboolean('overcommit', 'disk', False): yield sum(map(lambda (pk, pv): pv, filter(lambda (pk, pv): pk != 'total', self.context.diskspace.iteritems()))) < (m.diskspace.get(param, 0) - m.diskspace_usage.get(param, 0)) else: log.msg('Diskspace filtering is disabled.', system='action-allocate') if not get_config().getboolean('overcommit', 'cores', False): yield self.context.num_cores <= m.num_cores else: log.msg('\'Total # of cores\' filtering is disabled.', system='action-allocate') templates = m['vms-%s' % container]['templates'] yield self.context.template in map(lambda t: t.name, filter(lambda t: ITemplate.providedBy(t), templates.listcontent() if templates else [])) def unwind_until_false(generator): fail_description = ['Not a compute', 'No virt container %s' % container, 'Excluded from allocation', 'Has less than %s MB memory' % self.context.memory_usage, 'Not enough diskspace', 'Not enough CPU cores', 'Template is unavailable'] try: for idx, r in enumerate(generator): if not r: return 'Fail at %d: %s' % (idx, fail_description[idx]) return 'Match' except Exception as e: log.err(system='action-allocate') return 'Fail (exception)' % (fail_description, e) results = map(lambda m: (str(m), unwind_until_false(condition_generator(m))), all_machines) log.msg('Searching in: %s' % (results), logLevel=DEBUG, system='action-allocate') return filter(lambda m: all(condition_generator(m)), all_machines)
def _process(self):
    """Index every queued (model, event) pair against the search utility."""
    log.msg("indexing a batch of objects", system="indexer")
    searcher = db.get_root()['oms_root']['search']
    # Drain the deque from the left so items are indexed in arrival order.
    while self.queue:
        model, event = self.queue.popleft()
        self.index(searcher, model, event)
    log.msg("done", system="indexer")
def get_users_with_vms_to_update():
    """List names of profiles whose VM stats are older than the cooldown."""
    home = db.get_root()['oms_root']['home']
    update_list = []
    for profile in home.listcontent():
        stamp = profile.vm_stats_timestamp
        if stamp:
            # Timestamp is split on any non-digit; the trailing fragment
            # is dropped before feeding datetime().
            fields = map(int, re.split('[^\d]', stamp)[:-1])
            timeout = datetime(*fields) + timedelta(seconds=credit_check_cooldown)
        else:
            timeout = datetime.min
        if timeout < datetime.now():
            update_list.append(profile.name)
    return update_list
def run():
    """Script entry point: build a chart of the OMS model into the file
    named on the command line."""
    dbroot = db.get_root()
    oms_root = dbroot["oms_root"]
    import sys
    if len(sys.argv) < 2:
        # Python 2 print statement; this module predates print().
        print "Usage: %s output_file_name" % sys.argv[0]
    else:
        setup_environ()
        output_file = sys.argv[1]
        build_chart(oms_root, output_file)
def ensure_hangar_v12ncontainers():
    """Ensure the hangar holds a VirtualizationContainer for every backend
    used by any machine."""
    machines = db.get_root()['oms_root']['machines']
    backends = set()
    for machine in machines.listcontent():
        for child in machine.listcontent():
            if IVirtualizationContainer.providedBy(child):
                backends.add(child.backend)
    hangar = machines['hangar']
    for backend in map(unicode, backends):
        # Re-scan the hangar each time so freshly added containers count.
        present = [vms.backend for vms in hangar.listcontent()]
        if backend not in present:
            hangar.add(VirtualizationContainer(backend))
def get_users_with_vms_to_update():
    """Collect names of profiles whose VM stats cooldown has expired."""
    home = db.get_root()['oms_root']['home']
    names = []
    for profile in home.listcontent():
        if profile.vm_stats_timestamp:
            # Any non-digit separates the timestamp fields; the trailing
            # fragment is discarded before constructing the datetime.
            parts = map(int, re.split('[^\d]', profile.vm_stats_timestamp)[:-1])
            expiry = datetime(*parts) + timedelta(seconds=credit_check_cooldown)
        else:
            expiry = datetime.min
        if expiry < datetime.now():
            names.append(profile.name)
    return names
def get_computes(self, username):
    """Return the deployed, license-activated virtual computes owned by
    username."""
    computes = db.get_root()['oms_root']['computes']
    return [c for c in map(follow_symlinks, computes.listcontent())
            if IVirtualCompute.providedBy(c)
            and c.license_activated
            and c.__owner__ == username
            and IDeployed.providedBy(c)]
def get_users():
    """Sync auth principals into /home: create missing profiles and keep
    uid/groups of existing ones up to date."""
    home = db.get_root()['oms_root']['home']
    auth = getUtility(IAuthentication)
    for pname, pobj in auth.principals.iteritems():
        # Only plain User principals are mirrored into /home.
        if type(pobj) is not User:
            continue
        if pobj.id not in home.listnames():
            up = UserProfile(pobj.id, pobj.groups, uid=pobj.uid)
            log.msg('Adding %s to /home' % (up))
            home.add(up)
        else:
            profile = home[pobj.id]
            if pobj.uid != profile.uid:
                profile.uid = pobj.uid
            if pobj.groups != profile.groups:
                profile.groups = pobj.groups
def test_rename_compute(self):
    """`mv` renames a compute; the new path serves the same object."""
    container = db.get_root()['oms_root']['computes']
    compute = self.make_compute()
    compute_id = container.add(compute)
    transaction.commit()
    self._cmd('mv /machines/%s /machines/123' % compute_id)
    eq_(compute.__name__, '123')
    self.terminal.reset_mock()
    self._cmd('cat /machines/123')
    with assert_mock(self.terminal) as t:
        t.write('Host name: tux-for-test\n')
def test_complete_keyword_switches(self):
    """Tab completion expands `st` to `state=` and `ina` to `inactive `."""
    container = db.get_root()['oms_root']['computes']
    compute_id = container.add(self.make_compute())
    transaction.commit()
    self._tab_after('set /computes/%s st' % compute_id)
    with assert_mock(self.terminal) as t:
        t.write('ate=')
        no_more_calls(t)
    self._tab_after('ina')
    with assert_mock(self.terminal) as t:
        t.write('ctive ')
        no_more_calls(t)
def test_modify_compute_verbose(self):
    """`set -v` echoes each assignment before applying it."""
    container = db.get_root()['oms_root']['computes']
    compute_id = container.add(self.make_compute())
    transaction.commit()
    self._cmd('set computes/%s hostname=TUX-FOR-TEST -v' % compute_id)
    with assert_mock(self.terminal) as t:
        t.write("Setting hostname=TUX-FOR-TEST\n")
    self.terminal.reset_mock()
    self._cmd('cat computes/%s' % compute_id)
    with assert_mock(self.terminal) as t:
        t.write('Host name: TUX-FOR-TEST\n')
def test_cd_errors(self):
    """`cd` into a nonexistent path reports an error."""
    container = db.get_root()['oms_root']['computes']
    container.add(self.make_compute())
    # TODO: reenable this when we'll have another leaf object.
    #self._cmd('cd /computes/%s' % cid)
    #with assert_mock(self.terminal) as t:
    #    t.write('Cannot cd to a non-container\n')
    #self.terminal.reset_mock()
    self._cmd('cd /nonexisting')
    with assert_mock(self.terminal) as t:
        t.write('No such object: /nonexisting\n')
def doit():
    """Rebuild the search index from scratch by walking the whole model tree."""
    search = db.get_root()['oms_root']['search']
    search.clear()
    indexable = set()

    def walk(container):
        for item in container.listcontent():
            # HACK, handle non indexable stuff:
            if IContainer.providedBy(item) and not isinstance(item, Container):
                continue
            if IModel.providedBy(item) and not isinstance(item, Symlink):
                indexable.add(item)
            if IContainer.providedBy(item):
                walk(item)

    walk(db.get_root()['oms_root'])
    for obj in indexable:
        search.index_object(obj)
    cmd.write("reindexed %s objects\n" % (len(indexable)))
def traverse1(path):
    """Provides a shortcut for absolute path traversals without needing
    to pass in the root object.

    Returns the traversed object, or None when the path does not fully
    resolve.
    """
    # Do it here just in case; to avoid circular imports:
    from opennode.oms.zodb import db
    oms_root = db.get_root()['oms_root']
    objs, remaining = traverse_path(oms_root, path)
    if not objs or remaining:
        return None
    return objs[-1]