def list_cloud_machines(request):
    """
    Tags: machines
    ---
    Lists machines on cloud along with their metadata.
    Check Permissions takes place in filter_list_machines.
    READ permission required on cloud.
    READ permission required on machine.
    ---
    cloud:
      in: path
      required: true
      type: string
    """
    auth_context = auth_context_from_request(request)
    cloud_id = request.matchdict['cloud']
    use_cache = bool(params_from_request(request).get('cached', False))

    # SEC get filtered resources based on auth_context
    try:
        Cloud.objects.get(owner=auth_context.owner, id=cloud_id,
                          deleted=None)
    except Cloud.DoesNotExist:
        raise NotFoundError('Cloud does not exist')

    # Permission filtering happens inside filter_list_machines.
    return methods.filter_list_machines(auth_context, cloud_id,
                                        cached=use_cache)
def list_machines(request):
    """
    Tags: machines
    ---
    Gets machines and their metadata from all clouds.
    Check Permissions take place in filter_list_machines.
    READ permission required on cloud.
    READ permission required on location.
    READ permission required on machine.
    """
    auth_context = auth_context_from_request(request)
    params = params_from_request(request)
    # Serve cached results unless the caller explicitly asks for fresh.
    use_cached = not params.get('fresh', False)
    # Check the cloud-wide permission once up front instead of
    # iterating over every cloud.
    auth_context.check_perm("cloud", "read", None)
    all_machines = []
    for cloud in filter_list_clouds(auth_context):
        if not cloud.get('enabled'):
            continue
        try:
            all_machines.extend(
                methods.filter_list_machines(auth_context,
                                             cloud.get('id'),
                                             cached=use_cached))
        except (CloudUnavailableError, CloudUnauthorizedError):
            # Best-effort: skip clouds that can't be reached/authorized.
            pass
    return all_machines
def list_machines(request):
    """
    List machines on cloud
    Gets machines and their metadata from a cloud
    Check Permissions take place in filter_list_machines
    READ permission required on cloud.
    READ permission required on machine.
    ---
    cloud:
      in: path
      required: true
      type: string
    """
    auth_context = auth_context_from_request(request)
    cloud_id = request.matchdict['cloud']

    # SEC get filtered resources based on auth_context
    try:
        cloud = Cloud.objects.get(owner=auth_context.owner,
                                  id=cloud_id, deleted=None)
    except Cloud.DoesNotExist:
        raise NotFoundError('Cloud does not exist')

    machine_list = methods.filter_list_machines(auth_context, cloud_id)
    count = len(machine_list)
    # Keep the cloud's stored machine count in sync, best-effort.
    if cloud.machine_count != count:
        try:
            tasks.update_machine_count.delay(auth_context.owner.id,
                                             cloud_id, count)
        except Exception as e:
            log.error('Cannot update machine count for user %s: %r'
                      % (auth_context.owner.id, e))
    return machine_list
def find_metrics_by_resource_type(auth_context, resource_type, tags):
    """Return the metrics of all resources of the given type.

    Machines are gathered cloud-by-cloud through filter_list_machines;
    every other resource type goes through filter_list_resources. The
    resulting resources are optionally narrowed down by `tags` before
    their metrics are fetched asynchronously.

    :param auth_context: the requesting user's auth context; used for
        permission filtering by the filter_list_* helpers.
    :param resource_type: e.g. "machine"; any other value is delegated
        to filter_list_resources.
    :param tags: optional tags to filter the resources by.
    :return: whatever async_find_metrics produces for the resources.
    """
    from mist.api.clouds.methods import filter_list_clouds
    from mist.api.machines.methods import filter_list_machines
    resources = []
    if resource_type == "machine":
        clouds = filter_list_clouds(auth_context, as_dict=False)
        for cloud in clouds:
            try:
                resources += filter_list_machines(auth_context, cloud.id,
                                                  cached=True,
                                                  as_dict=False)
            except Cloud.DoesNotExist:
                log.error("Cloud with id=%s does not exist" % cloud.id)
    else:
        resources = filter_list_resources(resource_type, auth_context,
                                          as_dict=False)
    if tags and resources:
        resources = filter_resources_by_tags(resources, tags)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    # FIX: close the loop even if async_find_metrics raises; the
    # original leaked the event loop on failure.
    try:
        metrics = loop.run_until_complete(async_find_metrics(resources))
    finally:
        loop.close()
    return metrics
def list_clouds(self):
    # Emit the (permission-filtered) cloud list to this session, then
    # emit machine lists (from the poller's cache when it is active)
    # and schedule the remaining periodic listing tasks.
    if config.ACTIVATE_POLLER:
        self.update_poller()
    self.send('list_clouds', filter_list_clouds(self.auth_context))
    clouds = Cloud.objects(owner=self.owner, enabled=True, deleted=None)
    log.info(clouds)
    periodic_tasks = []
    if not config.ACTIVATE_POLLER:
        # No poller: machines get listed via a periodic task instead.
        periodic_tasks.append(('list_machines', tasks.ListMachines()))
    else:
        # Poller active: serve machines straight from its cache, limited
        # to machines seen within the last day.
        for cloud in clouds:
            after = datetime.datetime.utcnow() - datetime.timedelta(days=1)
            machines = Machine.objects(cloud=cloud, missing_since=None,
                                       last_seen__gt=after)
            # Re-apply this session's read permissions before emitting.
            machines = filter_list_machines(
                self.auth_context, cloud_id=cloud.id,
                machines=[machine.as_dict() for machine in machines]
            )
            if machines:
                log.info("Emitting list_machines from poller's cache.")
                self.send('list_machines', {'cloud_id': cloud.id,
                                            'machines': machines})
    periodic_tasks.extend([('list_images', tasks.ListImages()),
                           ('list_sizes', tasks.ListSizes()),
                           ('list_networks', tasks.ListNetworks()),
                           ('list_zones', tasks.ListZones()),
                           ('list_locations', tasks.ListLocations()),
                           ('list_projects', tasks.ListProjects())])
    for key, task in periodic_tasks:
        for cloud in clouds:
            # smart_delay presumably schedules the task and returns any
            # cached result for immediate emission — TODO confirm.
            cached = task.smart_delay(self.owner.id, cloud.id)
            if cached is not None:
                log.info("Emitting %s from cache", key)
                if key == 'list_machines':
                    # Cached machines must be re-filtered per session;
                    # None means nothing is visible to this user.
                    cached['machines'] = filter_list_machines(
                        self.auth_context, **cached
                    )
                    if cached['machines'] is None:
                        continue
                self.send(key, cached)
def find_metrics_by_resource_id(auth_context, resource_id, resource_type):
    """Find the metrics of the resource with the given id.

    If `resource_type` is falsy and the id matches a cloud, the metrics
    of all of that cloud's (visible) machines are returned merged into a
    single dict. Otherwise each candidate type is tried in turn and the
    metrics of the first matching, readable resource are returned.

    :raises NotFoundError: if no readable resource matches the id.
    """
    from mist.api.machines.methods import filter_list_machines
    resource_types = ['cloud', 'machine']
    if resource_type:
        resource_types = [resource_type]
    else:
        # If we have an id which corresponds to a cloud but no resource
        # type we return all the metrics of all resources of that cloud
        metrics = {}
        try:
            # SEC require permission READ on resource
            auth_context.check_perm("cloud", "read", resource_id)
            resource_objs = list_resources_by_id("cloud", resource_id,
                                                 as_dict=False)
            if resource_objs:
                # FIX(idiom): filter_list_machines already yields the
                # filtered machines; the wrapping list comprehension was
                # a redundant copy.
                machines = filter_list_machines(auth_context, resource_id,
                                                as_dict=False)
                for machine in machines:
                    metrics.update(find_metrics(machine))
                return metrics
        except Cloud.DoesNotExist:
            pass
    # Loop variable renamed so it no longer shadows the parameter.
    for rtype in resource_types:
        try:
            # SEC require permission READ on resource
            auth_context.check_perm(rtype, "read", resource_id)
            resource_objs = list_resources_by_id(rtype, resource_id,
                                                 as_dict=False)
            if resource_objs:
                return find_metrics(resource_objs[0])
        except NotFoundError:
            pass
        except PolicyUnauthorizedError:
            pass
    raise NotFoundError("resource with id:%s" % resource_id)
def machine_actions(request):
    """
    Tags: machines
    ---
    Calls a machine action on cloud that supports it.
    READ permission required on cloud.
    ACTION permission required on machine(ACTION can be START,
    STOP, DESTROY, REBOOT or RESIZE, RENAME for some providers).
    ---
    machine_uuid:
      in: path
      required: true
      type: string
    action:
      enum:
      - start
      - stop
      - reboot
      - destroy
      - resize
      - rename
      - create_snapshot
      - remove_snapshot
      - revert_to_snapshot
      required: true
      type: string
    name:
      description: The new name of the renamed machine
      type: string
    size:
      description: The size id of the plan to resize
      type: string
    snapshot_name:
      description: The name of the snapshot to create/remove/revert_to
    snapshot_description:
      description: The description of the snapshot to create
    snapshot_dump_memory:
      description: Dump the machine's memory in the snapshot
      default: false
    snapshot_quiesce:
      description: Enable guest file system quiescing
      default: false
    """
    cloud_id = request.matchdict.get('cloud')
    params = params_from_request(request)
    action = params.get('action', '')
    name = params.get('name', '')
    size_id = params.get('size', '')
    memory = params.get('memory', '')
    cpus = params.get('cpus', '')
    cpu_shares = params.get('cpu_shares', '')
    cpu_units = params.get('cpu_units', '')
    snapshot_name = params.get('snapshot_name')
    snapshot_description = params.get('snapshot_description')
    snapshot_dump_memory = params.get('snapshot_dump_memory')
    snapshot_quiesce = params.get('snapshot_quiesce')
    auth_context = auth_context_from_request(request)

    if cloud_id:
        # Deprecated (cloud_id, machine_id) route.
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid)
            # VMs in libvirt can be started no matter if they are terminated
            if machine.state == 'terminated' and not isinstance(
                    machine.cloud, LibvirtCloud):
                raise NotFoundError("Machine %s has been terminated"
                                    % machine_uuid)
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)
        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    if machine.cloud.owner != auth_context.owner:
        raise NotFoundError("Machine %s doesn't exist" % machine.id)

    # SEC require the action-specific permission on the machine.
    auth_context.check_perm("machine", action, machine.id)

    actions = ('start', 'stop', 'reboot', 'destroy', 'resize',
               'rename', 'undefine', 'suspend', 'resume', 'remove',
               'list_snapshots', 'create_snapshot', 'remove_snapshot',
               'revert_to_snapshot', 'clone')
    if action not in actions:
        raise BadRequestError("Action '%s' should be "
                              "one of %s" % (action, actions))

    if not methods.run_pre_action_hooks(machine, action, auth_context.user):
        return OK  # webhook requires stopping action propagation

    if action == 'destroy':
        result = methods.destroy_machine(auth_context.owner, cloud_id,
                                         machine.machine_id)
    elif action == 'remove':
        log.info('Removing machine %s in cloud %s'
                 % (machine.machine_id, cloud_id))
        # if machine has monitoring, disable it
        if machine.monitoring.hasmonitoring:
            try:
                # FIX: use machine.machine_id — the local `machine_id` is
                # only bound on the deprecated cloud route and raised
                # NameError when coming through the machine_uuid route.
                disable_monitoring(auth_context.owner, cloud_id,
                                   machine.machine_id, no_ssh=True)
            except Exception as exc:
                log.warning("Didn't manage to disable monitoring, maybe the "
                            "machine never had monitoring enabled. "
                            "Error: %r" % exc)
        result = machine.ctl.remove()
        # Schedule a UI update
        trigger_session_update(auth_context.owner, ['clouds'])
    elif action in ('start', 'stop', 'reboot', 'clone', 'undefine',
                    'suspend', 'resume'):
        result = getattr(machine.ctl, action)()
    elif action == 'rename':
        if not name:
            raise BadRequestError("You must give a name!")
        result = getattr(machine.ctl, action)(name)
    elif action == 'resize':
        _, constraints = auth_context.check_perm("machine", "resize",
                                                 machine.id)
        # check cost constraint
        cost_constraint = constraints.get('cost', {})
        if cost_constraint:
            try:
                from mist.rbac.methods import check_cost
                check_cost(auth_context.org, cost_constraint)
            except ImportError:
                pass
        kwargs = {}
        if memory:
            kwargs['memory'] = memory
        if cpus:
            kwargs['cpus'] = cpus
        if cpu_shares:
            kwargs['cpu_shares'] = cpu_shares
        if cpu_units:
            kwargs['cpu_units'] = cpu_units
        result = getattr(machine.ctl, action)(size_id, kwargs)
    elif action == 'list_snapshots':
        return machine.ctl.list_snapshots()
    elif action in ('create_snapshot', 'remove_snapshot',
                    'revert_to_snapshot'):
        kwargs = {}
        if snapshot_description:
            kwargs['description'] = snapshot_description
        if snapshot_dump_memory:
            kwargs['dump_memory'] = bool(snapshot_dump_memory)
        if snapshot_quiesce:
            kwargs['quiesce'] = bool(snapshot_quiesce)
        result = getattr(machine.ctl, action)(snapshot_name, **kwargs)

    methods.run_post_action_hooks(machine, action, auth_context.user, result)

    # TODO: We shouldn't return list_machines, just OK. Save the API!
    return methods.filter_list_machines(auth_context, cloud_id)
def process_update(self, ch, method, properties, body):
    """Handle a single AMQP update message for this session.

    Dispatches on the routing key: resource listings are filtered per
    this session's auth_context before being sent to the client,
    'update' re-lists the touched sections, and 'patch_*' messages are
    rewritten to absolute JSON-patch paths and added to self.batch.
    """
    routing_key = method.routing_key
    try:
        result = json.loads(body)
    except Exception:
        # FIX: narrowed the bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); non-JSON payloads pass through.
        result = body
    log.info("Got %s", routing_key)
    if routing_key in set([
            'notify', 'probe', 'list_sizes', 'list_images',
            'list_networks', 'list_machines', 'list_zones',
            'list_locations', 'list_projects', 'ping',
            'list_resource_groups', 'list_storage_accounts'
    ]):
        if routing_key == 'list_machines':
            # probe newly discovered running machines
            machines = result['machines']
            cloud_id = result['cloud_id']
            filtered_machines = filter_list_machines(
                self.auth_context, cloud_id, machines)
            if filtered_machines is not None:
                self.send(routing_key, {
                    'cloud_id': cloud_id,
                    'machines': filtered_machines
                })
            # update cloud machine count in multi-user setups
            cloud = Cloud.objects.get(owner=self.owner, id=cloud_id,
                                      deleted=None)
            for machine in machines:
                bmid = (cloud_id, machine['machine_id'])
                if bmid in self.running_machines:
                    # machine was running
                    if machine['state'] != 'running':
                        # machine no longer running
                        self.running_machines.remove(bmid)
                    continue
                if machine['state'] != 'running':
                    # machine not running
                    continue
                # machine just started running
                self.running_machines.add(bmid)
                # FIX: materialize the IPs — in Python 3 `filter()`
                # returns a lazy iterator that is always truthy, so the
                # `if not ips` fallbacks below could never fire.
                ips = [ip for ip in machine.get('public_ips', [])
                       if ':' not in ip]
                if not ips:
                    # if not public IPs, search for private IPs, otherwise
                    # continue iterating over the list of machines
                    ips = [ip for ip in machine.get('private_ips', [])
                           if ':' not in ip]
                    if not ips:
                        continue
        elif routing_key == 'list_zones':
            zones = result['zones']
            cloud_id = result['cloud_id']
            filtered_zones = filter_list_zones(self.auth_context,
                                               cloud_id, zones)
            self.send(routing_key, filtered_zones)
        elif routing_key == 'list_networks':
            networks = result['networks']
            cloud_id = result['cloud_id']
            filtered_networks = filter_list_networks(
                self.auth_context, cloud_id, networks)
            self.send(routing_key, {
                'cloud_id': cloud_id,
                'networks': filtered_networks
            })
        else:
            self.send(routing_key, result)
    elif routing_key == 'update':
        self.owner.reload()
        sections = result
        if 'clouds' in sections:
            self.list_clouds()
        if 'keys' in sections:
            self.list_keys()
        if 'scripts' in sections:
            self.list_scripts()
        if 'schedules' in sections:
            self.list_schedules()
        if 'zones' in sections:
            task = tasks.ListZones()
            clouds = Cloud.objects(owner=self.owner, enabled=True,
                                   deleted=None)
            for cloud in clouds:
                if cloud.dns_enabled:
                    task.smart_delay(self.owner.id, cloud.id)
        if 'templates' in sections:
            self.list_templates()
        if 'stacks' in sections:
            self.list_stacks()
        if 'tunnels' in sections:
            self.list_tunnels()
        if 'notifications' in sections:
            self.update_notifications()
        if 'monitoring' in sections:
            self.check_monitoring()
        if 'user' in sections:
            self.auth_context.user.reload()
            self.update_user()
        if 'org' in sections:
            self.auth_context.org.reload()
            self.update_org()
    elif routing_key == 'patch_notifications':
        if result.get('user') == self.user.id:
            self.send('patch_notifications', result)
    elif routing_key == 'patch_machines':
        cloud_id = result['cloud_id']
        patch = result['patch']
        machine_ids = []
        # Paths arrive as "/<machine_id>-<rest>"; split the id off.
        for line in patch:
            machine_id, line['path'] = line['path'][1:].split('-', 1)
            machine_ids.append(machine_id)
        if not self.auth_context.is_owner():
            allowed_machine_ids = filter_machine_ids(
                self.auth_context, cloud_id, machine_ids)
        else:
            allowed_machine_ids = machine_ids
        patch = [
            line for line, m_id in zip(patch, machine_ids)
            if m_id in allowed_machine_ids
        ]
        for line in patch:
            line['path'] = '/clouds/%s/machines/%s' % (cloud_id,
                                                       line['path'])
        if patch:
            self.batch.extend(patch)
    elif routing_key in [
            'patch_locations', 'patch_sizes', 'patch_networks'
    ]:
        cloud_id = result['cloud_id']
        patch = result['patch']
        for line in patch:
            _id = line['path'][1:]
            if routing_key == 'patch_locations':
                line['path'] = '/clouds/%s/locations/%s' % (cloud_id, _id)
            elif routing_key == 'patch_sizes':
                line['path'] = '/clouds/%s/sizes/%s' % (cloud_id, _id)
            elif routing_key == 'patch_networks':
                line['path'] = '/clouds/%s/networks/%s' % (cloud_id, _id)
        if patch:
            self.batch.extend(patch)
def list_clouds(self):
    # Emit the (permission-filtered) cloud list, fetch each cloud's
    # cached resources over the internal HTTP API, and schedule the
    # remaining periodic listing tasks.
    self.update_poller()
    self.send('list_clouds', filter_list_clouds(self.auth_context))
    clouds = Cloud.objects(owner=self.owner, enabled=True, deleted=None)
    periodic_tasks = []
    for cloud in clouds:
        # NOTE: cloud_id=cloud.id binds the loop value as a lambda
        # default on purpose — otherwise every callback would see the
        # last cloud of the loop (late-binding closures).
        self.internal_request(
            'api/v1/clouds/%s/machines' % cloud.id,
            params={'cached': True},
            callback=lambda machines, cloud_id=cloud.id: self.send(
                'list_machines', {
                    'cloud_id': cloud_id,
                    'machines': machines
                }),
        )
        self.internal_request(
            'api/v1/clouds/%s/locations' % cloud.id,
            params={'cached': True},
            callback=lambda locations, cloud_id=cloud.id: self.send(
                'list_locations', {
                    'cloud_id': cloud_id,
                    'locations': locations
                }),
        )
        self.internal_request(
            'api/v1/clouds/%s/sizes' % cloud.id,
            params={'cached': True},
            callback=lambda sizes, cloud_id=cloud.id: self.send(
                'list_sizes', {
                    'cloud_id': cloud_id,
                    'sizes': sizes
                }),
        )
        self.internal_request(
            'api/v1/clouds/%s/networks' % cloud.id,
            params={'cached': True},
            callback=lambda networks, cloud_id=cloud.id: self.send(
                'list_networks', {
                    'cloud_id': cloud_id,
                    'networks': networks
                }),
        )
    periodic_tasks.extend([
        ('list_images', tasks.ListImages()),
        ('list_zones', tasks.ListZones()),
        ('list_resource_groups', tasks.ListResourceGroups()),
        ('list_storage_accounts', tasks.ListStorageAccounts()),
        ('list_projects', tasks.ListProjects())
    ])
    for key, task in periodic_tasks:
        for cloud in clouds:
            # Avoid submitting new celery tasks, when it's certain that
            # they will exit immediately without performing any actions.
            if not maybe_submit_cloud_task(cloud, key):
                continue
            cached = task.smart_delay(self.owner.id, cloud.id)
            if cached is not None:
                log.info("Emitting %s from cache", key)
                # Cached payloads are re-filtered for this session's
                # permissions before being emitted; an empty/None result
                # means nothing is visible and the emit is skipped.
                if key == 'list_machines':
                    cached['machines'] = filter_list_machines(
                        self.auth_context, **cached)
                    if cached['machines'] is None:
                        continue
                elif key == 'list_zones':
                    cached = filter_list_zones(self.auth_context,
                                               cloud.id, cached['zones'])
                    if cached is None:
                        continue
                elif key == 'list_networks':
                    cached['networks'] = filter_list_networks(
                        self.auth_context, **cached)
                    if not (cached['networks']['public'] or
                            cached['networks']['private']):
                        continue
                self.send(key, cached)
def machine_actions(request):
    """
    Call an action on machine
    Calls a machine action on cloud that support it
    READ permission required on cloud.
    ACTION permission required on machine(ACTION can be START,
    STOP, DESTROY, REBOOT).
    ---
    machine_uuid:
      in: path
      required: true
      type: string
    action:
      enum:
      - start
      - stop
      - reboot
      - destroy
      - resize
      - rename
      required: true
      type: string
    name:
      description: The new name of the renamed machine
      type: string
    size:
      description: The size id of the plan to resize
      type: string
    """
    cloud_id = request.matchdict.get('cloud')
    params = params_from_request(request)
    action = params.get('action', '')
    plan_id = params.get('plan_id', '')
    memory = params.get('memory', '')
    cpus = params.get('cpus', '')
    cpu_shares = params.get('cpu_shares', '')
    cpu_units = params.get('cpu_units', '')
    name = params.get('name', '')
    auth_context = auth_context_from_request(request)
    if cloud_id:
        # this is depracated, keep it for backwards compatibility
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid)
            # VMs in libvirt can be started no matter if they are terminated
            if machine.state == 'terminated' and not isinstance(
                    machine.cloud, LibvirtCloud):
                # FIX: a bare `raise` here has no active exception and
                # blew up with RuntimeError (a 500) instead of a 404.
                raise NotFoundError("Machine %s has been terminated"
                                    % machine_uuid)
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)
        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)
    if machine.cloud.owner != auth_context.owner:
        raise NotFoundError("Machine %s doesn't exist" % machine.id)
    # SEC require the action-specific permission on the machine.
    auth_context.check_perm("machine", action, machine.id)
    actions = ('start', 'stop', 'reboot', 'destroy', 'resize',
               'rename', 'undefine', 'suspend', 'resume')
    if action not in actions:
        raise BadRequestError("Action '%s' should be "
                              "one of %s" % (action, actions))
    if action == 'destroy':
        methods.destroy_machine(auth_context.owner, cloud_id,
                                machine.machine_id)
    elif action in ('start', 'stop', 'reboot', 'undefine', 'suspend',
                    'resume'):
        getattr(machine.ctl, action)()
    elif action == 'rename':
        if not name:
            raise BadRequestError("You must give a name!")
        getattr(machine.ctl, action)(name)
    elif action == 'resize':
        kwargs = {}
        if memory:
            kwargs['memory'] = memory
        if cpus:
            kwargs['cpus'] = cpus
        if cpu_shares:
            kwargs['cpu_shares'] = cpu_shares
        if cpu_units:
            kwargs['cpu_units'] = cpu_units
        getattr(machine.ctl, action)(plan_id, kwargs)
    # TODO: We shouldn't return list_machines, just OK. Save the API!
    return methods.filter_list_machines(auth_context, cloud_id)
def machine_actions(request):
    """
    Tags: machines
    ---
    Calls a machine action on cloud that supports it.
    READ permission required on cloud.
    ACTION permission required on machine(ACTION can be START,
    STOP, DESTROY, REBOOT or RESIZE, RENAME for some providers).
    ---
    machine_uuid:
      in: path
      required: true
      type: string
    action:
      enum:
      - start
      - stop
      - reboot
      - destroy
      - resize
      - rename
      required: true
      type: string
    name:
      description: The new name of the renamed machine
      type: string
    size:
      description: The size id of the plan to resize
      type: string
    """
    cloud_id = request.matchdict.get('cloud')
    params = params_from_request(request)
    action = params.get('action', '')
    size_id = params.get('size', params.get('plan_id', ''))
    memory = params.get('memory', '')
    cpus = params.get('cpus', '')
    cpu_shares = params.get('cpu_shares', '')
    cpu_units = params.get('cpu_units', '')
    name = params.get('name', '')
    auth_context = auth_context_from_request(request)
    if cloud_id:
        # this is depracated, keep it for backwards compatibility
        machine_id = request.matchdict['machine']
        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid)
            # VMs in libvirt can be started no matter if they are terminated
            if machine.state == 'terminated' and not isinstance(
                    machine.cloud, LibvirtCloud):
                raise NotFoundError("Machine %s has been terminated"
                                    % machine_uuid)
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)
        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)
    if machine.cloud.owner != auth_context.owner:
        raise NotFoundError("Machine %s doesn't exist" % machine.id)
    # SEC require the action-specific permission on the machine.
    auth_context.check_perm("machine", action, machine.id)
    actions = ('start', 'stop', 'reboot', 'destroy', 'resize',
               'rename', 'undefine', 'suspend', 'resume', 'remove')
    if action not in actions:
        raise BadRequestError("Action '%s' should be "
                              "one of %s" % (action, actions))
    if action == 'destroy':
        methods.destroy_machine(auth_context.owner, cloud_id,
                                machine.machine_id)
    elif action == 'remove':
        log.info('Removing machine %s in cloud %s'
                 % (machine.machine_id, cloud_id))
        if not machine.monitoring.hasmonitoring:
            machine.ctl.remove()
            # Schedule a UI update
            trigger_session_update(auth_context.owner, ['clouds'])
            return
        # if machine has monitoring, disable it. the way we disable depends on
        # whether this is a standalone io installation or not
        try:
            # FIX: use machine.machine_id — the local `machine_id` is only
            # bound on the deprecated cloud route and raised NameError
            # when coming through the machine_uuid route.
            disable_monitoring(auth_context.owner, cloud_id,
                               machine.machine_id, no_ssh=True)
        except Exception as exc:
            log.warning(
                "Didn't manage to disable monitoring, maybe the "
                "machine never had monitoring enabled. Error: %r", exc)
        machine.ctl.remove()
        # Schedule a UI update
        trigger_session_update(auth_context.owner, ['clouds'])
    elif action in ('start', 'stop', 'reboot', 'undefine', 'suspend',
                    'resume'):
        getattr(machine.ctl, action)()
    elif action == 'rename':
        if not name:
            raise BadRequestError("You must give a name!")
        getattr(machine.ctl, action)(name)
    elif action == 'resize':
        kwargs = {}
        if memory:
            kwargs['memory'] = memory
        if cpus:
            kwargs['cpus'] = cpus
        if cpu_shares:
            kwargs['cpu_shares'] = cpu_shares
        if cpu_units:
            kwargs['cpu_units'] = cpu_units
        getattr(machine.ctl, action)(size_id, kwargs)
    # TODO: We shouldn't return list_machines, just OK. Save the API!
    return methods.filter_list_machines(auth_context, cloud_id)
def process_update(self, ch, method, properties, body):
    """Handle a single AMQP update message for this session.

    Dispatches on the routing key: machine listings are filtered per
    this session's auth_context and newly running machines get probed /
    pinged; 'update' re-lists the touched sections.
    """
    routing_key = method.routing_key
    try:
        result = json.loads(body)
    except Exception:
        # FIX: narrowed the bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); non-JSON payloads pass through.
        result = body
    log.info("Got %s", routing_key)
    if routing_key in set(['notify', 'probe', 'list_sizes', 'list_images',
                           'list_networks', 'list_machines', 'list_zones',
                           'list_locations', 'list_projects', 'ping']):
        if routing_key == 'list_machines':
            # probe newly discovered running machines
            machines = result['machines']
            cloud_id = result['cloud_id']
            filtered_machines = filter_list_machines(
                self.auth_context, cloud_id, machines
            )
            if filtered_machines is not None:
                self.send(routing_key,
                          {'cloud_id': cloud_id,
                           'machines': filtered_machines})
            # update cloud machine count in multi-user setups
            cloud = Cloud.objects.get(owner=self.owner, id=cloud_id,
                                      deleted=None)
            for machine in machines:
                bmid = (cloud_id, machine['machine_id'])
                if bmid in self.running_machines:
                    # machine was running
                    if machine['state'] != 'running':
                        # machine no longer running
                        self.running_machines.remove(bmid)
                    continue
                if machine['state'] != 'running':
                    # machine not running
                    continue
                # machine just started running
                self.running_machines.add(bmid)
                # FIX: materialize into a list — in Python 3 `filter()`
                # returns a lazy iterator that is always truthy and not
                # subscriptable, so `if not ips` never fired and
                # `ips[0]` below raised TypeError.
                ips = [ip for ip in machine.get('public_ips', [])
                       if ':' not in ip]
                if not ips:
                    # if not public IPs, search for private IPs, otherwise
                    # continue iterating over the list of machines
                    ips = [ip for ip in machine.get('private_ips', [])
                           if ':' not in ip]
                    if not ips:
                        continue
                machine_obj = Machine.objects(
                    cloud=cloud, machine_id=machine['machine_id'],
                    key_associations__not__size=0
                ).first()
                if machine_obj:
                    cached = tasks.ProbeSSH().smart_delay(
                        self.owner.id, cloud_id, machine['machine_id'],
                        ips[0], machine['id']
                    )
                    if cached is not None:
                        self.send('probe', cached)
                cached = tasks.Ping().smart_delay(
                    self.owner.id, cloud_id, machine['machine_id'], ips[0]
                )
                if cached is not None:
                    self.send('ping', cached)
        else:
            self.send(routing_key, result)
    elif routing_key == 'update':
        self.owner.reload()
        sections = result
        if 'clouds' in sections:
            self.list_clouds()
        if 'keys' in sections:
            self.list_keys()
        if 'scripts' in sections:
            self.list_scripts()
        if 'schedules' in sections:
            self.list_schedules()
        if 'zones' in sections:
            task = tasks.ListZones()
            clouds = Cloud.objects(owner=self.owner, enabled=True,
                                   deleted=None)
            for cloud in clouds:
                if cloud.dns_enabled:
                    task.delay(self.owner.id, cloud.id)
        if 'templates' in sections:
            self.list_templates()
        if 'stacks' in sections:
            self.list_stacks()
        if 'tags' in sections:
            self.list_tags()
        if 'tunnels' in sections:
            self.list_tunnels()
        if 'monitoring' in sections:
            self.check_monitoring()
        if 'user' in sections:
            self.auth_context.user.reload()
            self.update_user()
        if 'org' in sections:
            self.auth_context.org.reload()
            self.update_org()