def list_networks(self, persist=True):
    """Return list of networks for cloud

    A list of networks is fetched from libcloud, data is processed,
    stored on network models, and a list of network models is returned.

    Subclasses SHOULD NOT override or extend this method.

    This method wraps `_list_networks` which contains the core
    implementation.
    """
    task_key = 'cloud:list_networks:%s' % self.cloud.id
    task = PeriodicTaskInfo.get_or_add(task_key)
    first_run = False if task.last_success else True

    async def _list_subnets_async(networks):
        # Run the blocking per-network subnet listing in the default
        # thread pool and wait for all of them to finish.
        loop = asyncio.get_event_loop()
        subnets = [
            loop.run_in_executor(None, network.ctl.list_subnets)
            for network in networks
        ]
        return await asyncio.gather(*subnets)

    with task.task_runner(persist=persist):
        # Get cached networks as dict.
        cached_networks = {'%s-%s' % (n.id, n.network_id): n.as_dict()
                           for n in self.list_cached_networks()}

        networks = self._list_networks()

        loop = asyncio.get_event_loop()
        loop.run_until_complete(_list_subnets_async(networks))

    new_networks = {'%s-%s' % (n.id, n.network_id): n.as_dict()
                    for n in networks}

    if cached_networks or new_networks:
        # Publish patches to rabbitmq.
        patch = jsonpatch.JsonPatch.from_diff(cached_networks,
                                              new_networks).patch
        if patch:
            if not first_run and self.cloud.observation_logs_enabled:
                from mist.api.logs.methods import log_observations
                log_observations(self.cloud.owner.id, self.cloud.id,
                                 'network', patch, cached_networks,
                                 new_networks)
            if amqp_owner_listening(self.cloud.owner.id):
                amqp_publish_user(self.cloud.owner.id,
                                  routing_key='patch_networks',
                                  data={'cloud_id': self.cloud.id,
                                        'patch': patch})

    return networks
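# Illustrative sketch (not part of the controller): the fan-out pattern used
# by `_list_subnets_async` above. Blocking, libcloud-style calls are pushed
# to the default thread-pool executor with `run_in_executor` and awaited
# together via `asyncio.gather`. `fetch_subnets` is a made-up stand-in for
# `network.ctl.list_subnets`.

import asyncio
import time


def fetch_subnets(network_name):
    time.sleep(0.1)  # stands in for a blocking libcloud HTTP call
    return ['%s-subnet-a' % network_name, '%s-subnet-b' % network_name]


async def fetch_all_subnets(network_names):
    loop = asyncio.get_event_loop()
    futures = [loop.run_in_executor(None, fetch_subnets, name)
               for name in network_names]
    return await asyncio.gather(*futures)

# Usage: asyncio.get_event_loop().run_until_complete(
#     fetch_all_subnets(['net-1', 'net-2']))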
def list_zones(self, persist=True):
    """Return list of zones for cloud

    A list of zones is fetched from libcloud, data is processed,
    stored on zone models, and a list of zone models is returned.

    Subclasses SHOULD NOT override or extend this method.

    This method wraps `_list_zones` which contains the core
    implementation.
    """
    task_key = 'cloud:list_zones:%s' % self.cloud.id
    task = PeriodicTaskInfo.get_or_add(task_key)
    first_run = False if task.last_success else True

    with task.task_runner(persist=persist):
        cached_zones = {'%s-%s' % (z.id, z.zone_id): z.as_dict()
                        for z in self.list_cached_zones()}

        zones = self._list_zones()
        for zone in zones:
            self.list_records(zone)

    # Only build and publish patches if someone is listening on the
    # owner's AMQP exchange.
    if amqp_owner_listening(self.cloud.owner.id):
        zones_dict = [z.as_dict() for z in zones]
        if cached_zones or zones_dict:
            # Publish patches to rabbitmq.
            new_zones = {'%s-%s' % (z['id'], z['zone_id']): z
                         for z in zones_dict}
            patch = jsonpatch.JsonPatch.from_diff(cached_zones,
                                                  new_zones).patch
            if patch:
                if not first_run and self.cloud.observation_logs_enabled:
                    from mist.api.logs.methods import log_observations
                    log_observations(self.cloud.owner.id, self.cloud.id,
                                     'zone', patch, cached_zones,
                                     new_zones)
                amqp_publish_user(self.cloud.owner.id,
                                  routing_key='patch_zones',
                                  data={'cloud_id': self.cloud.id,
                                        'patch': patch})

    return zones
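# Illustrative sketch (not part of the controller): how the cached/new zone
# dicts above turn into a JSON patch. Keys combine the mongo id with the
# provider-side zone id, so an entry whose id changes on either side shows
# up in the patch as a remove/add pair rather than an in-place modification.
# The ids and domains below are made up.

import jsonpatch


def _demo_zone_patch():
    cached = {'5f3a-zone1.example.com.': {'domain': 'zone1.example.com.'}}
    new = {
        '5f3a-zone1.example.com.': {'domain': 'zone1.example.com.'},
        '7c21-zone2.example.com.': {'domain': 'zone2.example.com.'},
    }
    patch = jsonpatch.JsonPatch.from_diff(cached, new).patch
    # patch == [{'op': 'add', 'path': '/7c21-zone2.example.com.',
    #            'value': {'domain': 'zone2.example.com.'}}]
    return patch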
def list_volumes(self, persist=True):
    """Return list of volumes for cloud

    A list of volumes is fetched from libcloud, data is processed,
    stored on volume models, and a list of volume models is returned.

    Subclasses SHOULD NOT override or extend this method.

    This method wraps `_list_volumes` which contains the core
    implementation.
    """
    task_key = 'cloud:list_volumes:%s' % self.cloud.id
    task = PeriodicTaskInfo.get_or_add(task_key)
    first_run = False if task.last_success else True

    with task.task_runner(persist=persist):
        cached_volumes = {'%s-%s' % (v.id, v.external_id): v.as_dict()
                          for v in self.list_cached_volumes()}

        volumes = self._list_volumes()

    volumes_dict = [v.as_dict() for v in volumes]
    if cached_volumes or volumes:
        # Publish patches to rabbitmq.
        new_volumes = {'%s-%s' % (v['id'], v['external_id']): v
                       for v in volumes_dict}
        patch = jsonpatch.JsonPatch.from_diff(cached_volumes,
                                              new_volumes).patch
        if patch:
            if not first_run and self.cloud.observation_logs_enabled:
                from mist.api.logs.methods import log_observations
                log_observations(self.cloud.owner.id, self.cloud.id,
                                 'volume', patch, cached_volumes,
                                 new_volumes)
            if amqp_owner_listening(self.cloud.owner.id):
                amqp_publish_user(self.cloud.owner.id,
                                  routing_key='patch_volumes',
                                  data={'cloud_id': self.cloud.id,
                                        'patch': patch})

    return volumes
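# Illustrative sketch (not part of the controller): what a hypothetical
# consumer of the 'patch_volumes' messages could do with the payload
# published above. `jsonpatch.apply_patch` is the real jsonpatch call; the
# callback name and the local cache structure are assumptions for the
# example.

import jsonpatch

volume_cache = {}  # per-cloud view, keyed like `new_volumes` above


def on_patch_volumes(message):
    cloud_id = message['cloud_id']
    cached = volume_cache.get(cloud_id, {})
    # Apply the RFC 6902 patch to bring the local copy up to date.
    volume_cache[cloud_id] = jsonpatch.apply_patch(cached, message['patch'])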