Example #1
def notify_user(owner, title, message="", email_notify=True, **kwargs):
    """Notify the connected owner via AMQP and, optionally, by email."""
    payload = {'title': title, 'message': message}
    payload.update(kwargs)
    if 'command' in kwargs:
        output = '%s\n' % kwargs['command']
        if 'output' in kwargs:
            output += '%s\n' % kwargs['output'].decode('utf-8', 'ignore')
        if 'retval' in kwargs:
            output += 'returned with exit code %s.\n' % kwargs['retval']
        payload['output'] = output
    amqp_publish_user(owner, routing_key='notify', data=payload)

    body = message + '\n' if message else ''
    if 'cloud_id' in kwargs:
        cloud_id = kwargs['cloud_id']
        body += "Cloud:\n"
        try:
            cloud = Cloud.objects.get(owner=owner, id=cloud_id, deleted=None)
            cloud_title = cloud.title
        except DoesNotExist:
            cloud_title = ''
            cloud = ''
        if cloud_title:
            body += "  Name: %s\n" % cloud_title
        body += "  Id: %s\n" % cloud_id
        if 'machine_id' in kwargs:
            machine_id = kwargs['machine_id']
            body += "Machine:\n"
            if kwargs.get('machine_name'):
                name = kwargs['machine_name']
            else:
                try:
                    name = Machine.objects.get(cloud=cloud,
                                               machine_id=machine_id).name
                except DoesNotExist:
                    name = ''
            if name:
                body += "  Name: %s\n" % name
            title += " for machine %s" % (name or machine_id)
            body += "  Id: %s\n" % machine_id
    if 'error' in kwargs:
        error = kwargs['error']
        body += "Result: %s\n" % ('Success' if not error else 'Error')
        if error and error is not True:
            body += "Error: %s" % error
    if 'command' in kwargs:
        body += "Command: %s\n" % kwargs['command']
    if 'retval' in kwargs:
        body += "Return value: %s\n" % kwargs['retval']
    if 'duration' in kwargs:
        body += "Duration: %.2f secs\n" % kwargs['duration']
    if 'output' in kwargs:
        body += "Output: %s\n" % kwargs['output'].decode('utf-8', 'ignore')

    if email_notify:
        from mist.api.helpers import send_email
        email = owner.email if hasattr(owner, 'email') else owner.get_email()
        send_email("[%s] %s" % (config.PORTAL_NAME, title),
                   body.encode('utf-8', 'ignore'), email)
Example #2
def set_machine_tags(request):
    """
    Set tags on a machine
    Set tags for a machine, given the cloud and machine id.
    READ permission required on cloud.
    EDIT_TAGS permission required on machine.
    ---
    cloud_id:
      in: path
      required: true
      type: string
    machine_id:
      in: path
      required: true
      type: string
    tags:
      items:
        type: object
      type: array
    """
    auth_context = auth_context_from_request(request)
    params = params_from_request(request)
    cloud_id = request.matchdict["cloud_id"]
    machine_id = request.matchdict["machine_id"]
    auth_context.check_perm("cloud", "read", cloud_id)
    try:
        machine = Machine.objects.get(cloud=cloud_id, machine_id=machine_id)
    except me.DoesNotExist:
        raise NotFoundError('Resource with that id does not exist')

    # SEC require EDIT_TAGS permission on machine
    auth_context.check_perm("machine", "edit_tags", machine.id)

    tags = params.get("tags")
    if not isinstance(tags, dict):
        raise BadRequestError('tags must be a dictionary')

    if not modify_security_tags(auth_context, tags, machine):
        auth_context._raise('machine', 'edit_security_tags')

    # FIXME: The Ember UI uses this method to update a machine's tags by
    # providing the entire list of tags to be re-set. However,
    # `add_tags_to_resource` simply appends the new tags without deleting
    # any of the existing ones.

    old_tags = get_tags_for_resource(auth_context.owner, machine)
    add_tags_to_resource(auth_context.owner, machine, tags.items())

    if config.MACHINE_PATCHES:
        new_tags = get_tags_for_resource(auth_context.owner, machine)

        patch = jsonpatch.JsonPatch.from_diff(old_tags, new_tags).patch
        for item in patch:
            item['path'] = '/%s-%s/tags%s' % (machine.id, machine.machine_id,
                                              item['path'])
        amqp_publish_user(auth_context.owner.id,
                          routing_key='patch_machines',
                          data={'cloud_id': cloud_id,
                                'patch': patch})
    return {}
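
The jsonpatch idiom that recurs throughout these examples is easy to show in
isolation. A minimal sketch using the real `jsonpatch` package; the tag
values and ids are made up:

import jsonpatch

old_tags = {'env': 'staging'}
new_tags = {'env': 'production', 'team': 'backend'}

# from_diff() yields RFC 6902 operations that turn old_tags into new_tags,
# e.g. [{'op': 'replace', 'path': '/env', 'value': 'production'},
#       {'op': 'add', 'path': '/team', 'value': 'backend'}]
patch = jsonpatch.JsonPatch.from_diff(old_tags, new_tags).patch

# The handler above prefixes each path with '/<id>-<external_id>/tags' so
# the UI can route every operation to the right machine entry.
for item in patch:
    item['path'] = '/%s-%s/tags%s' % ('mongo-id', 'ext-id', item['path'])
print(patch)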
Example #3
    def list_networks(self, persist=True):
        """Return list of networks for cloud

        A list of networks is fetched from libcloud, data is processed, stored
        on network models, and a list of network models is returned.

        Subclasses SHOULD NOT override or extend this method.

        This method wraps `_list_networks` which contains the core
        implementation.

        """
        task_key = 'cloud:list_networks:%s' % self.cloud.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        first_run = not task.last_success

        async def _list_subnets_async(networks):
            loop = asyncio.get_event_loop()
            subnets = [
                loop.run_in_executor(None, network.ctl.list_subnets)
                for network in networks
            ]
            return await asyncio.gather(*subnets)

        with task.task_runner(persist=persist):
            # Get cached networks as dict
            cached_networks = {
                '%s-%s' % (n.id, n.network_id): n.as_dict()
                for n in self.list_cached_networks()
            }
            networks = self._list_networks()
            loop = asyncio.get_event_loop()
            loop.run_until_complete(_list_subnets_async(networks))

        new_networks = {
            '%s-%s' % (n.id, n.network_id): n.as_dict()
            for n in networks
        }
        if cached_networks or new_networks:
            # Publish patches to rabbitmq.
            patch = jsonpatch.JsonPatch.from_diff(cached_networks,
                                                  new_networks).patch
            if patch:
                if not first_run and self.cloud.observation_logs_enabled:
                    from mist.api.logs.methods import log_observations
                    log_observations(self.cloud.owner.id, self.cloud.id,
                                     'network', patch, cached_networks,
                                     new_networks)
                if amqp_owner_listening(self.cloud.owner.id):
                    amqp_publish_user(self.cloud.owner.id,
                                      routing_key='patch_networks',
                                      data={
                                          'cloud_id': self.cloud.id,
                                          'patch': patch
                                      })
        return networks
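
The `_list_subnets_async` helper above fans blocking `list_subnets` calls out
to a thread pool. A self-contained sketch of the same pattern, where
`fetch_subnets` is a made-up stand-in for `network.ctl.list_subnets`:

import asyncio

def fetch_subnets(network):
    # Blocking call, e.g. an HTTP request to the cloud provider.
    return ['%s-subnet-a' % network, '%s-subnet-b' % network]

async def list_all_subnets(networks):
    loop = asyncio.get_event_loop()
    # Run each blocking call in the default ThreadPoolExecutor and wait
    # for all of them concurrently.
    futures = [loop.run_in_executor(None, fetch_subnets, n) for n in networks]
    return await asyncio.gather(*futures)

print(asyncio.run(list_all_subnets(['net1', 'net2'])))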
Example #4
    def ping_probe(self, persist=True):
        if not self.machine.cloud.enabled:
            return False
        from mist.api.methods import ping
        from mist.api.machines.models import PingProbe

        def _get_probe_dict():
            data = {}
            if self.machine.ping_probe is not None:
                data = self.machine.ping_probe.as_dict()
            return {
                '%s-%s' % (self.machine.id, self.machine.machine_id): {
                    'probe': {
                        'ping': data
                    }
                }
            }

        try:
            host = self.machine.ctl.get_host()
            if host in ['localhost', '127.0.0.1']:
                return
        except RuntimeError:
            return

        old_probe_data = _get_probe_dict()

        task_key = 'machine:ping_probe:%s' % self.machine.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        with task.task_runner(persist=persist):
            try:
                data = ping(self.machine.cloud.owner, self.get_host())
            except Exception:
                probe = self.machine.ping_probe
                if probe is not None:
                    probe.unreachable_since = datetime.datetime.now()
                raise
            else:
                probe = PingProbe()
                probe.update_from_dict(data)
            finally:
                self.machine.ping_probe = probe
                self.machine.save()
                new_probe_data = _get_probe_dict()
                patch = jsonpatch.JsonPatch.from_diff(old_probe_data,
                                                      new_probe_data).patch
                if patch:
                    amqp_publish_user(self.machine.cloud.owner.id,
                                      routing_key='patch_machines',
                                      data={
                                          'cloud_id': self.machine.cloud.id,
                                          'patch': patch
                                      })
        probe_result = self.machine.ping_probe
        return probe_result and probe_result.as_dict()
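
The try/except/else/finally flow is the heart of both probe methods. A
stripped-down sketch of just that control flow, with a hypothetical `Probe`
class and `measure` callable standing in for `PingProbe` and `ping`:

import datetime

def run_probe(machine, measure, Probe):
    probe = machine.ping_probe
    try:
        data = measure()              # may raise if the host is unreachable
    except Exception:
        if probe is not None:
            # Keep the stale probe but record when it became unreachable.
            probe.unreachable_since = datetime.datetime.now()
        raise
    else:
        probe = Probe()               # a fresh measurement replaces the probe
        probe.update_from_dict(data)
    finally:
        # Persist (and, in the original, diff and publish) either way.
        machine.ping_probe = probe
        machine.save()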
Example #5
    def list_networks(self, persist=True):
        """Return list of networks for cloud

        A list of networks is fetched from libcloud, data is processed, stored
        on network models, and a list of network models is returned.

        Subclasses SHOULD NOT override or extend this method.

        This method wraps `_list_networks` which contains the core
        implementation.

        """
        task_key = 'cloud:list_networks:%s' % self.cloud.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        with task.task_runner(persist=persist):
            cached_networks = {
                '%s' % n.id: n.as_dict()
                for n in self.list_cached_networks()
            }

            networks = self._list_networks()

        if amqp_owner_listening(self.cloud.owner.id):
            # Initialize AMQP connection to reuse for multiple messages.
            amqp_conn = Connection(config.AMQP_URI)
            networks_dict = [n.as_dict() for n in networks]
            if cached_networks and networks_dict:
                # Publish patches to rabbitmq.
                new_networks = {'%s' % n['id']: n for n in networks_dict}
                patch = jsonpatch.JsonPatch.from_diff(cached_networks,
                                                      new_networks).patch
                if patch:
                    amqp_publish_user(self.cloud.owner.id,
                                      routing_key='patch_networks',
                                      connection=amqp_conn,
                                      data={
                                          'cloud_id': self.cloud.id,
                                          'patch': patch
                                      })
            else:
                # TODO: remove this block, once patches
                # are implemented in the UI
                amqp_publish_user(self.cloud.owner.id,
                                  routing_key='list_networks',
                                  connection=amqp_conn,
                                  data={
                                      'cloud_id': self.cloud.id,
                                      'networks': networks_dict
                                  })
        return networks
Example #6
    def ssh_probe(self, persist=True):
        from mist.api.methods import probe_ssh_only
        from mist.api.machines.models import SSHProbe

        def _get_probe_dict():
            data = {}
            if self.machine.ssh_probe is not None:
                data = self.machine.ssh_probe.as_dict()
            return {
                '%s-%s' % (self.machine.id, self.machine.machine_id): {
                    'probe': {
                        'ssh': data
                    }
                }
            }

        old_probe_data = _get_probe_dict()

        task_key = 'machine:ssh_probe:%s' % self.machine.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        with task.task_runner(persist=persist):
            try:
                data = probe_ssh_only(
                    self.machine.cloud.owner,
                    self.machine.cloud.id,
                    self.machine.machine_id,
                    self.get_host(),
                )
            except Exception:
                probe = self.machine.ssh_probe
                if probe is not None:
                    probe.unreachable_since = datetime.datetime.now()
                raise
            else:
                probe = SSHProbe()
                probe.update_from_dict(data)
            finally:
                self.machine.ssh_probe = probe
                self.machine.save()
                new_probe_data = _get_probe_dict()
                patch = jsonpatch.JsonPatch.from_diff(old_probe_data,
                                                      new_probe_data).patch
                if patch:
                    amqp_publish_user(self.machine.cloud.owner.id,
                                      routing_key='patch_machines',
                                      data={
                                          'cloud_id': self.machine.cloud.id,
                                          'patch': patch
                                      })
        return self.machine.ssh_probe.as_dict()
Example #7
    def list_zones(self, persist=True):
        """Return list of zones for cloud

        A list of zones is fetched from libcloud, data is processed, stored
        on zone models, and a list of zone models is returned.

        Subclasses SHOULD NOT override or extend this method.

        This method wraps `_list_zones` which contains the core
        implementation.

        """
        task_key = 'cloud:list_zones:%s' % self.cloud.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        first_run = not task.last_success
        with task.task_runner(persist=persist):
            cached_zones = {
                '%s-%s' % (z.id, z.zone_id): z.as_dict()
                for z in self.list_cached_zones()
            }

            zones = self._list_zones()
            for zone in zones:
                self.list_records(zone)

        if amqp_owner_listening(self.cloud.owner.id):
            zones_dict = [z.as_dict() for z in zones]
            if cached_zones or zones_dict:
                # Publish patches to rabbitmq.
                new_zones = {
                    '%s-%s' % (z['id'], z['zone_id']): z
                    for z in zones_dict
                }
                patch = jsonpatch.JsonPatch.from_diff(cached_zones,
                                                      new_zones).patch
                if patch:
                    if not first_run and self.cloud.observation_logs_enabled:
                        from mist.api.logs.methods import log_observations
                        log_observations(self.cloud.owner.id, self.cloud.id,
                                         'zone', patch, cached_zones,
                                         new_zones)
                    amqp_publish_user(self.cloud.owner.id,
                                      routing_key='patch_zones',
                                      data={
                                          'cloud_id': self.cloud.id,
                                          'patch': patch
                                      })
        return zones
Example #8
    def list_volumes(self, persist=True):
        """Return list of volumes for cloud

        A list of volumes is fetched from libcloud, data is processed, stored
        on volume models, and a list of volume models is returned.

        Subclasses SHOULD NOT override or extend this method.

        This method wraps `_list_volumes` which contains the core
        implementation.

        """
        task_key = 'cloud:list_volumes:%s' % self.cloud.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        first_run = not task.last_success
        with task.task_runner(persist=persist):
            cached_volumes = {
                '%s-%s' % (v.id, v.external_id): v.as_dict()
                for v in self.list_cached_volumes()
            }

            volumes = self._list_volumes()

        volumes_dict = [v.as_dict() for v in volumes]
        if cached_volumes or volumes:
            # Publish patches to rabbitmq.
            new_volumes = {
                '%s-%s' % (v['id'], v['external_id']): v
                for v in volumes_dict
            }
            patch = jsonpatch.JsonPatch.from_diff(cached_volumes,
                                                  new_volumes).patch
            if patch:
                if not first_run and self.cloud.observation_logs_enabled:
                    from mist.api.logs.methods import log_observations
                    log_observations(self.cloud.owner.id, self.cloud.id,
                                     'volume', patch, cached_volumes,
                                     new_volumes)
                if amqp_owner_listening(self.cloud.owner.id):
                    amqp_publish_user(self.cloud.owner.id,
                                      routing_key='patch_volumes',
                                      data={
                                          'cloud_id': self.cloud.id,
                                          'patch': patch
                                      })
        return volumes
Example #9
    def ping_probe(self, persist=True):
        from mist.api.methods import ping
        from mist.api.machines.models import PingProbe

        def _get_probe_dict():
            data = {}
            if self.machine.ping_probe is not None:
                data = self.machine.ping_probe.as_dict()
            return {
                '%s-%s' % (self.machine.id, self.machine.machine_id): {
                    'probe': {
                        'ping': data
                    }
                }
            }

        old_probe_data = _get_probe_dict()

        task_key = 'machine:ping_probe:%s' % self.machine.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        with task.task_runner(persist=persist):
            data = ping(self.machine.cloud.owner, self.get_host())

        probe = PingProbe()
        probe.update_from_dict(data)
        self.machine.ping_probe = probe
        self.machine.save()
        new_probe_data = _get_probe_dict()
        patch = jsonpatch.JsonPatch.from_diff(old_probe_data,
                                              new_probe_data).patch
        if patch:
            amqp_publish_user(self.machine.cloud.owner.id,
                              routing_key='patch_machines',
                              data={
                                  'cloud_id': self.machine.cloud.id,
                                  'patch': patch
                              })
        return self.machine.ping_probe.as_dict()
Example #10
    def _modify_and_notify(self, notification, modifier):
        user = notification.user

        old_notifications = [
            json.loads(obj.to_json())
            for obj in InAppNotification.objects(user=user, dismissed=False)
        ]
        modifier(notification)
        new_notifications = [
            json.loads(obj.to_json())
            for obj in InAppNotification.objects(user=user, dismissed=False)
        ]
        patch = jsonpatch.JsonPatch.from_diff(old_notifications,
                                              new_notifications).patch
        if patch:
            data = json.dumps({"user": user.id, "patch": patch},
                              cls=NotificationsEncoder)
            amqp_publish_user(notification.organization,
                              routing_key='patch_notifications',
                              data=data)
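
`NotificationsEncoder` is referenced but not shown. A plausible minimal
sketch of such an encoder (hypothetical; the real class may differ),
covering the datetime-style fields that `json.dumps` cannot serialize
natively:

import json
import datetime

class NotificationsEncoder(json.JSONEncoder):
    def default(self, obj):
        # default() is only called for objects json can't serialize natively.
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        try:
            return super().default(obj)
        except TypeError:
            return str(obj)  # last resort, e.g. ObjectId-like ids

print(json.dumps({'at': datetime.datetime(2020, 1, 1)},
                 cls=NotificationsEncoder))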
Example #11
    def send(self, users=None, dismiss=False):
        # FIXME Imported here due to circular dependency issues.
        from mist.api.notifications.models import InAppNotification
        from mist.api.notifications.models import UserNotificationPolicy

        # Get the list of `InAppNotifications`s in the current context before
        # any update takes place.
        owner_old_ntfs = list(InAppNotification.objects(owner=self.ntf.owner))

        if not users:
            users = self.ntf.owner.members
        elif not isinstance(users, list):
            users = [users]

        # Save/update/dismiss notifications. Compute the dismissal sets
        # up front, since they are referenced below even when dismiss=False.
        dismissed_by = set(self.ntf.dismissed_by)
        old_dismissed_by = list(dismissed_by)
        if dismiss:
            dismissed_by |= set(user.id for user in users)
            self.ntf.dismissed_by = list(dismissed_by)

        # Is anyone listening?
        if not amqp_owner_listening(self.ntf.owner.id):
            return

        # Initialize AMQP connection to reuse for multiple messages.
        amqp_conn = Connection(config.AMQP_URI)

        # Re-fetch all notifications in order to calculate the diff between
        # the two lists.
        owner_new_ntfs = list(InAppNotification.objects(owner=self.ntf.owner))

        # Apply each user's notification policy on the above lists to get rid
        # of notifications users are not interested in.
        for user in users:
            user_old_ntfs, user_new_ntfs = [], []
            try:
                np = UserNotificationPolicy.objects.get(user_id=user.id)
            except UserNotificationPolicy.DoesNotExist:
                log.debug('No UserNotificationPolicy found for %s', user)
                user_old_ntfs = [
                    ntf.as_dict() for ntf in owner_old_ntfs if not (
                        self.ntf.id == ntf.id and user.id in old_dismissed_by)
                ]
                user_new_ntfs = [
                    ntf.as_dict() for ntf in owner_new_ntfs
                    if not (self.ntf.id == ntf.id and user.id in dismissed_by)
                ]
            else:
                user_old_ntfs = [
                    ntf.as_dict() for ntf in owner_old_ntfs
                    if not np.has_blocked(ntf) and not (
                        self.ntf.id == ntf.id and user.id in old_dismissed_by)
                ]
                user_new_ntfs = [
                    ntf.as_dict() for ntf in owner_new_ntfs
                    if not np.has_blocked(ntf)
                    and not (self.ntf.id == ntf.id and user.id in dismissed_by)
                ]
            # Now we can save the dismissed notification
            self.ntf.save()

            # Calculate diff.
            patch = jsonpatch.JsonPatch.from_diff(user_old_ntfs,
                                                  user_new_ntfs).patch

            if patch:
                amqp_publish_user(self.ntf.owner.id,
                                  routing_key='patch_notifications',
                                  connection=amqp_conn,
                                  data={
                                      'user': user.id,
                                      'patch': patch
                                  })

        # Finally, try to close the AMQP connection.
        try:
            amqp_conn.close()
        except Exception as exc:
            log.exception(repr(exc))
Example #12
def tag_resources(request):
    """
    Tags: tags
    ---
    Batch operation for adding/removing tags from a list of resources.
    This api call provides the ability to modify the tags of a large number
    of resources. For each resource a list of dicts is passed with a key, a
    value and optionally an op field. The op field should be either '+' or '-'
    and defines whether the tag should be added to or removed from the
    resource. If no op value is defined in the dict then '+' is assumed.
    ---
    tags:
      required: true
      type: array
    resource:
      required: true
      type: object
    """

    auth_context = auth_context_from_request(request)
    params = params_from_request(request)

    # FIXME: This implementation is far from OK. We need to re-code the way
    # tags are handled and make sure that RBAC is properly enforced on tags
    for resource in params:
        # list of dicts of key-value pairs
        resource_tags = resource.get('tags', '')
        # dict of resource info
        resource_data = resource.get('resource', '')

        if not resource_data:
            raise RequiredParameterMissingError("resources")
        if not resource_tags:
            raise RequiredParameterMissingError("tags")
        if not resource_data.get('type') or not resource_data.get('item_id'):
            raise BadRequestError('No type or item_id provided for some of '
                                  'the resources')

        # The UI sends this var only for machine, image, network and volume.
        cloud_id = resource_data.get('cloud_id')

        if cloud_id:
            auth_context.check_perm('cloud', 'read', cloud_id)
        elif resource_data['type'] in [
                'machine', 'image', 'network', 'volume'
        ]:
            raise RequiredParameterMissingError("cloud_id")
        else:
            resource_data.pop('cloud_id', None)

        query = {}
        rtype = resource_data['type']
        rid = resource_data['item_id']
        if rtype == 'machine':
            query['machine_id'] = rid
        else:
            query['id'] = rid

        if cloud_id:
            query['cloud'] = cloud_id

        try:
            resource_obj = get_resource_model(rtype).objects.get(**query)
        except me.DoesNotExist:
            # if the resource can not be found just go on and process the next
            continue

        # SEC require EDIT_TAGS permission on resource
        auth_context.check_perm(rtype, 'edit_tags', resource_obj.id)

        # normalized_resources.append(resource_data)
        query['rtype'] = rtype

        # split the tags into two lists: those that will be added and those
        # that will be removed
        tags_to_add = [(tag['key'], tag['value']) for tag in resource_tags
                       if tag.get('op', '+') == '+']
        # also extract the keys from all the tags to be deleted
        tags_to_remove = [tag['key'] for tag in resource_tags
                          if tag.get('op', '+') == '-']

        # SEC only Org Owners may edit the secure tags
        tags = {tag[0]: tag[1] for tag in tags_to_add}
        if not modify_security_tags(auth_context, tags, resource_obj):
            auth_context._raise(rtype, 'edit_security_tags')

        old_tags = get_tags_for_resource(auth_context.owner, resource_obj)
        if tags_to_add:
            add_tags_to_resource(auth_context.owner, resource_obj, tags_to_add)
        if tags_to_remove:
            remove_tags_from_resource(auth_context.owner, resource_obj,
                                      tags_to_remove)

        if rtype in ['machine', 'network', 'volume', 'zone', 'record']:
            new_tags = get_tags_for_resource(auth_context.owner, resource_obj)
            try:
                external_id = getattr(resource_obj, rtype + '_id')
            except AttributeError:
                external_id = getattr(resource_obj, 'external_id')
            patch = jsonpatch.JsonPatch.from_diff(old_tags, new_tags).patch
            for item in patch:
                item['path'] = '/%s-%s/tags%s' % (resource_obj.id, external_id,
                                                  item['path'])
            if amqp_owner_listening(resource_obj.cloud.owner.id):
                amqp_publish_user(auth_context.owner.id,
                                  routing_key='patch_%ss' % rtype,
                                  data={
                                      'cloud_id': resource_obj.cloud.id,
                                      'patch': patch
                                  })
    return OK
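
A quick illustration of the op-based splitting above, with a made-up payload:

resource_tags = [
    {'key': 'env', 'value': 'prod'},                 # no 'op': '+' is assumed
    {'key': 'team', 'value': 'backend', 'op': '+'},
    {'key': 'legacy', 'value': '', 'op': '-'},
]

tags_to_add = [(t['key'], t['value']) for t in resource_tags
               if t.get('op', '+') == '+']
tags_to_remove = [t['key'] for t in resource_tags if t.get('op', '+') == '-']

print(tags_to_add)     # [('env', 'prod'), ('team', 'backend')]
print(tags_to_remove)  # ['legacy']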
Example #13
    def list_machines(self, persist=True):
        """Return list of machines for cloud

        A list of nodes is fetched from libcloud, the data is processed, stored
        on machine models, and a list of machine models is returned.

        Subclasses SHOULD NOT override or extend this method.

        This method wraps `_list_machines` which contains the core
        implementation.

        """

        task_key = 'cloud:list_machines:%s' % self.cloud.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        try:
            with task.task_runner(persist=persist):
                old_machines = {
                    '%s-%s' % (m.id, m.machine_id): m.as_dict()
                    for m in self.list_cached_machines()
                }
                machines = self._list_machines()
        except PeriodicTaskThresholdExceeded:
            self.cloud.disable()
            raise

        # Initialize AMQP connection to reuse for multiple messages.
        amqp_conn = Connection(config.AMQP_URI)

        if amqp_owner_listening(self.cloud.owner.id):
            if not config.MACHINE_PATCHES:
                amqp_publish_user(
                    self.cloud.owner.id,
                    routing_key='list_machines',
                    connection=amqp_conn,
                    data={
                        'cloud_id': self.cloud.id,
                        'machines': [machine.as_dict()
                                     for machine in machines]
                    })
            else:
                # Publish patches to rabbitmq.
                new_machines = {
                    '%s-%s' % (m.id, m.machine_id): m.as_dict()
                    for m in machines
                }
                # Exclude last seen and probe fields from patch.
                for md in old_machines, new_machines:
                    for m in md.values():
                        m.pop('last_seen', None)
                        m.pop('probe', None)
                patch = jsonpatch.JsonPatch.from_diff(old_machines,
                                                      new_machines).patch
                if patch:
                    amqp_publish_user(self.cloud.owner.id,
                                      routing_key='patch_machines',
                                      connection=amqp_conn,
                                      data={
                                          'cloud_id': self.cloud.id,
                                          'patch': patch
                                      })

        # Push historic information for inventory and cost reporting.
        for machine in machines:
            data = {
                'owner_id': self.cloud.owner.id,
                'machine_id': machine.id,
                'cost_per_month': machine.cost.monthly
            }
            amqp_publish(exchange='machines_inventory',
                         routing_key='',
                         auto_delete=False,
                         data=data,
                         connection=amqp_conn)

        return machines
Example #14
    def update(self, auth_context, params=None):
        params = params or {}
        if params.get('expiration'):
            """
            FIXME: we're recreating instead of updating existing expiration
                   schedules because updating them doesn't seem to affect the
                   actual expiration datetime.
            """
            from mist.api.schedules.models import Schedule
            exp_date = params['expiration']['date']
            exp_reminder = int(params['expiration'].get('notify', 0) or 0)
            exp_action = params['expiration'].get('action', 'stop')
            assert exp_action in ['stop', 'destroy'], 'Invalid action'
            if self.machine.expiration:  # Existing expiration schedule
                # Delete after removing db ref
                sched = self.machine.expiration
                self.machine.expiration = None
                self.machine.save()
                sched.delete()

            if exp_date:  # Create new expiration schedule
                params = {
                    'description': 'Scheduled to run when machine expires',
                    'task_enabled': True,
                    'schedule_type': 'one_off',
                    'schedule_entry': exp_date,
                    'action': exp_action,
                    'selectors': [{
                        'type': 'machines',
                        'ids': [self.machine.id]
                    }],
                    'notify': exp_reminder
                }
                name = self.machine.name + '-expiration-' + str(
                    randrange(1000))
                self.machine.expiration = Schedule.add(auth_context, name,
                                                       **params)
                self.machine.save()

            # Prepare exp date JSON patch to update the UI
            if not self.machine.expiration:
                patch = [{
                    'op': 'remove',
                    'path': '/%s-%s/expiration' % (self.machine.id,
                                                   self.machine.machine_id),
                }]
            else:
                expiration = self.machine.expiration
                patch = [{
                    'op': 'replace',
                    'path': '/%s-%s/expiration' % (self.machine.id,
                                                   self.machine.machine_id),
                    'value': {
                        'id': expiration.id,
                        'date': expiration.schedule_type.entry,
                        'action': expiration.task_type.action,
                        'notify': int(
                            (expiration.schedule_type.entry -
                             expiration.reminder.schedule_type.entry
                             ).total_seconds()
                        ) if expiration.reminder else 0,
                    },
                }]
            # Publish patches to rabbitmq.
            amqp_publish_user(self.machine.cloud.owner.id,
                              routing_key='patch_machines',
                              data={
                                  'cloud_id': self.machine.cloud.id,
                                  'patch': patch
                              })

        return self.machine
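
The `notify` field in the patch above is the number of seconds between the
reminder and the expiration itself. A tiny worked example with made-up
datetimes:

import datetime

exp_entry = datetime.datetime(2024, 6, 1, 12, 0)          # expiration entry
reminder_entry = exp_entry - datetime.timedelta(hours=1)  # reminder entry

notify = int((exp_entry - reminder_entry).total_seconds())
print(notify)  # 3600, i.e. notify one hour before expiration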
Example #15
def list_machines(schedule_id):
    """Perform list machines. Cloud controller stores results in mongodb."""

    # Fetch schedule and cloud from database.
    # FIXME: resolve circular deps error
    from mist.api.poller.models import ListMachinesPollingSchedule
    sched = ListMachinesPollingSchedule.objects.get(id=schedule_id)
    cloud = sched.cloud
    now = datetime.datetime.now()

    # Check if this cloud should be autodisabled.
    if sched.last_success:
        two_days = datetime.timedelta(days=2)
        if now - sched.last_success > two_days and sched.failure_count > 50:
            autodisable_cloud(sched.cloud)
            return
    elif sched.failure_count > 100:
        autodisable_cloud(sched.cloud)
        return

    # Find last run. If too recent, abort.
    if sched.last_success and sched.last_failure:
        last_run = max(sched.last_success, sched.last_failure)
    else:
        last_run = sched.last_success or sched.last_failure
    if last_run:
        if now - last_run < sched.interval.timedelta:
            log.warning("Running too soon for cloud %s, aborting!", cloud)
            return

    # Is another instance of the same task already running?
    if sched.last_attempt_started:
        # Another instance started recently; abort.
        if now - sched.last_attempt_started < datetime.timedelta(seconds=60):
            log.warning("Another instance of this task started recently, "
                        "aborting.")
            return
        # The other instance has been running for too long or has died;
        # ignore it and run normally.
        log.warning("Another instance of this task seems to have started, "
                    "but it's been quite a while, so it will be ignored.")
    sched.last_attempt_started = now
    sched.save()

    try:
        # Run list_machines.
        machines = cloud.ctl.compute.list_machines()
    except Exception as exc:
        # Store failure.
        log.warning("Failed to list_machines for cloud %s: %r", cloud, exc)
        sched.last_failure = datetime.datetime.now()
        sched.failure_count += 1
        sched.last_attempt_started = None
        sched.save()
        raise
    else:
        # Store success.
        log.info("Succeeded to list_machines for cloud %s", cloud)
        sched.last_success = datetime.datetime.now()
        sched.failure_count = 0
        sched.last_attempt_started = None
        sched.save()

    # Publish results to rabbitmq (for backwards compatibility).
    if amqp_owner_listening(cloud.owner.id):
        amqp_publish_user(cloud.owner.id, routing_key='list_machines',
                          data={'cloud_id': cloud.id,
                                'machines': [machine.as_dict()
                                             for machine in machines]})

    # Push historic information for inventory and cost reporting.
    for machine in machines:
        data = {'owner_id': machine.cloud.owner.id,
                'machine_id': machine.id,
                'cost_per_month': machine.cost.monthly}
        log.info("Will push to elastic: %s", data)
        amqp_publish(exchange='machines_inventory', routing_key='',
                     auto_delete=False, data=data)