Esempio n. 1
0
def list_keys(owner):
    """List owner's keys
    :param owner:
    :return:
    """
    clouds = Cloud.objects(owner=owner, deleted=None)
    key_objects = []
    # FIXME: This must be taken care of in Keys.as_dict
    for key in Key.objects(owner=owner, deleted=None):
        # FIXME: Need to optimize this! It's potentially invoked per ssh probe.
        # Can't we expose associations directly from Machine.key_associations?
        associated = Machine.objects(cloud__in=clouds,
                                     key_associations__keypair__exact=key)
        key_objects.append({
            'id': key.id,
            'name': key.name,
            'owned_by': key.owned_by.id if key.owned_by else '',
            'created_by': key.created_by.id if key.created_by else '',
            'isDefault': key.default,
            'machines': transform_key_machine_associations(associated, key),
            'tags': get_tags_for_resource(owner, key),
        })
    return key_objects
Esempio n. 2
0
def _get_multimachine_stats(owner, metric, start='', stop='', step='',
                            uuids=None):
    """Fetch `metric` for several monitored machines, keyed by machine uuid."""
    if not uuids:
        # Default to every monitored machine on the owner's live clouds.
        clouds = Cloud.objects(owner=owner, deleted=None)
        uuids = [m.id for m in Machine.objects(
            cloud__in=clouds, monitoring__hasmonitoring=True)]
    if not uuids:
        raise NotFoundError("No machine has monitoring enabled.")
    try:
        data = get_multi_uuid(uuids, metric, start=start, stop=stop,
                              interval_str=step)
    except Exception as exc:
        log.error("Error getting %s: %r", metric, exc)
        raise ServiceUnavailableError()
    ret = {}
    for item in data:
        # Target looks like "<prefix>.<uuid>..."; fall back to the whole
        # string when there is no dot-separated prefix.
        parts = item['target'].split('.')
        uuid = parts[1] if len(parts) > 1 else parts[0]
        item['name'] = uuid
        ret[uuid] = item
    return ret
def set_missing():
    """Declare machines, whose cloud has been marked as deleted, as missing.

    For every soft-deleted cloud, stamp each of its machines that is not
    already missing with the cloud's deletion timestamp.
    """
    # FIX: the original used Python 2 print statements, which are syntax
    # errors on Python 3; converted to print() calls.
    failed = succeeded = 0
    clouds = Cloud.objects(deleted__ne=None)

    print()
    print('Searching through %d clouds' % clouds.count())
    print()

    for c in clouds:
        try:
            print('Updating machines of', c, end=' ')
            updated = Machine.objects(
                cloud=c, missing_since=None).update(missing_since=c.deleted)
        except Exception:
            print('[ERROR]')
            traceback.print_exc()
            failed += 1
        else:
            print('[OK:%s]' % updated)
            succeeded += 1

    print()
    print('Failed:', failed)
    print('Succeeded:', succeeded)
    print()
    print('Completed %s' % ('with errors!' if failed else 'successfully!'))
    print()
Esempio n. 4
0
def get_load(owner, start='', stop='', step='', uuids=None):
    """Get shortterm load for all monitored machines.

    :param owner: the Owner whose machines are queried.
    :param start: start of the time range (backend-specific string).
    :param stop: end of the time range.
    :param step: sampling resolution string.
    :param uuids: optional list of machine uuids to restrict the query to.
    :returns: dict mapping machine uuid to its load series.
    :raises NotFoundError: when no matching machine has monitoring enabled.
    """
    clouds = Cloud.objects(owner=owner, deleted=None).only('id')
    machines = Machine.objects(cloud__in=clouds,
                               monitoring__hasmonitoring=True)
    if uuids:
        # FIX: QuerySet.filter returns a new queryset; the original call
        # discarded the result, so the uuids restriction was silently ignored.
        machines = machines.filter(id__in=uuids)

    # Split machines by monitoring backend.
    graphite_uuids = [machine.id for machine in machines
                      if machine.monitoring.method.endswith('-graphite')]
    influx_uuids = [machine.id for machine in machines
                    if machine.monitoring.method.endswith('-influxdb')]

    graphite_data = {}
    influx_data = {}
    if graphite_uuids:
        graphite_data = graphite_get_load(owner, start=start, stop=stop,
                                          step=step, uuids=graphite_uuids)
    if influx_uuids:
        # Transform "min" and "sec" to "m" and "s", respectively.
        _start, _stop, _step = [re.sub('in|ec', repl='', string=x) for x in (
            start.strip('-'), stop.strip('-'), step)]
        influx_data = InfluxMultiLoadHandler(influx_uuids).get_stats(
            metric='system.load1',
            start=_start, stop=_stop, step=_step,
        )

    if graphite_data or influx_data:
        # Merge both backends' results (keys are disjoint machine uuids).
        return dict(list(graphite_data.items()) + list(influx_data.items()))
    else:
        raise NotFoundError('No machine has monitoring enabled')
Esempio n. 5
0
def disassociate_key(request):
    """
    Disassociate a key from a machine
    Disassociates a key from a machine. If host is set it will also attempt to
    actually remove it from the machine.
    READ permission required on cloud.
    DISASSOCIATE_KEY permission required on machine.
    ---
    key:
      in: path
      required: true
      type: string
    machine:
      in: path
      required: true
      type: string
    """
    auth_context = auth_context_from_request(request)
    key_id = request.matchdict['key']
    cloud_id = request.matchdict.get('cloud')

    if not cloud_id:
        # Machine referenced directly by its uuid.
        machine_uuid = request.matchdict['machine']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)
    else:
        # Deprecated route, kept for backwards compatibility: machine is
        # referenced by cloud id plus external machine id.
        machine_id = request.matchdict['machine']
        try:
            Cloud.objects.get(owner=auth_context.owner, id=cloud_id,
                              deleted=None)
        except Cloud.DoesNotExist:
            raise NotFoundError('Cloud does not exist')

        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)

    auth_context.check_perm("machine", "disassociate_key", machine.id)

    key = Key.objects.get(owner=auth_context.owner, id=key_id, deleted=None)
    key.ctl.disassociate(machine)

    # Report the key's remaining machine associations.
    clouds = Cloud.objects(owner=auth_context.owner, deleted=None)
    associated = Machine.objects(cloud__in=clouds,
                                 key_associations__keypair__exact=key)
    return transform_key_machine_associations(associated, key)
Esempio n. 6
0
def list_clouds(owner):
    """Return the owner's non-deleted clouds as dicts, tags included."""
    cloud_dicts = [c.as_dict()
                   for c in Cloud.objects(owner=owner, deleted=None)]
    # FIXME: cloud must be a mongoengine object FFS!
    # Also, move into cloud model's as_dict method?
    for cloud_dict in cloud_dicts:
        cloud_dict['tags'] = get_tags_for_resource(owner, cloud_dict)
    return cloud_dicts
Esempio n. 7
0
def get_load(owner, start="", stop="", step="", uuids=None):
    """Get shortterm load for all monitored machines.

    :param owner: the Owner whose machines are queried.
    :param start: start of the time range (backend-specific string).
    :param stop: end of the time range.
    :param step: sampling resolution string.
    :param uuids: optional list of machine uuids to restrict the query to.
    :returns: dict mapping machine uuid to its load series.
    :raises NotFoundError: when no matching machine has monitoring enabled.
    """
    clouds = Cloud.objects(owner=owner, deleted=None).only("id")
    machines = Machine.objects(cloud__in=clouds,
                               monitoring__hasmonitoring=True)
    if uuids:
        # FIX: QuerySet.filter returns a new queryset; the original call
        # discarded the result, so the uuids restriction was silently ignored.
        machines = machines.filter(id__in=uuids)

    # Split machines by monitoring backend.
    graphite_uuids = [
        machine.id for machine in machines
        if machine.monitoring.method.endswith("-graphite")
    ]
    influx_uuids = [
        machine.id for machine in machines
        if machine.monitoring.method.endswith("-influxdb")
    ]
    fdb_uuids = [
        machine.id for machine in machines
        if machine.monitoring.method.endswith("-tsfdb")
    ]

    graphite_data = {}
    influx_data = {}
    fdb_data = {}

    if graphite_uuids:
        graphite_data = graphite_get_load(owner,
                                          start=start,
                                          stop=stop,
                                          step=step,
                                          uuids=graphite_uuids)
    if influx_uuids:

        # Transform "min" and "sec" to "m" and "s", respectively.
        _start, _stop, _step = [
            re.sub("in|ec", repl="", string=x)
            for x in (start.strip("-"), stop.strip("-"), step)
        ]
        metric = "system.load1"
        if step:
            # Aggregate within each step window.
            metric = "MEAN(%s)" % metric
        influx_data = InfluxMultiLoadHandler(influx_uuids).get_stats(
            metric=metric,
            start=_start,
            stop=_stop,
            step=_step,
        )

    if fdb_uuids:
        fdb_data = fdb_get_load(owner, fdb_uuids, start, stop, step)

    if graphite_data or influx_data or fdb_data:
        # Merge all backends' results (keys are disjoint machine uuids).
        return dict(
            list(graphite_data.items()) + list(influx_data.items()) +
            list(fdb_data.items()))
    else:
        raise NotFoundError("No machine has monitoring enabled")
Esempio n. 8
0
def check_monitoring(owner):
    """Return the monitored machines, enabled metrics, and user details.

    :param owner: the Owner whose monitoring state is reported.
    :returns: dict with machines, monitored_machines, rules, alerts_email,
        custom_metrics and (when a default monitoring method is configured)
        the builtin metrics of the corresponding backend.
    """

    custom_metrics = owner.get_metrics_dict()
    for metric in custom_metrics.values():
        metric['machines'] = []

    monitored_machines = []
    monitored_machines_2 = {}

    clouds = Cloud.objects(owner=owner, deleted=None)
    machines = Machine.objects(cloud__in=clouds,
                               monitoring__hasmonitoring=True)

    for machine in machines:
        monitored_machines.append([machine.cloud.id, machine.machine_id])
        try:
            commands = machine.monitoring.get_commands()
        except Exception as exc:
            log.error(exc)
            commands = {}
        monitored_machines_2[machine.id] = {
            'cloud_id': machine.cloud.id,
            'machine_id': machine.machine_id,
            'installation_status':
            machine.monitoring.installation_status.as_dict(),
            'commands': commands,
        }
        for metric_id in machine.monitoring.metrics:
            if metric_id in custom_metrics:
                # FIX: this used to rebind `machines`, shadowing the queryset
                # being iterated; use a distinct name instead.
                metric_machines = custom_metrics[metric_id]['machines']
                metric_machines.append((machine.cloud.id, machine.machine_id))

    ret = {
        'machines': monitored_machines,
        'monitored_machines': monitored_machines_2,
        'rules': owner.get_rules_dict(),
        'alerts_email': owner.alerts_email,
        'custom_metrics': custom_metrics,
    }
    if config.DEFAULT_MONITORING_METHOD.endswith('graphite'):
        ret.update({
            # Keep for backwards compatibility
            'builtin_metrics': config.GRAPHITE_BUILTIN_METRICS,
            'builtin_metrics_graphite': config.GRAPHITE_BUILTIN_METRICS,
            'builtin_metrics_influxdb': config.INFLUXDB_BUILTIN_METRICS,
        })
    elif config.DEFAULT_MONITORING_METHOD.endswith('influxdb'):
        ret.update({
            # Keep for backwards compatibility
            'builtin_metrics': config.INFLUXDB_BUILTIN_METRICS,
            'builtin_metrics_influxdb': config.INFLUXDB_BUILTIN_METRICS,
        })
    # FIX: 'builtin_metrics' is absent when DEFAULT_MONITORING_METHOD matches
    # neither backend above; ret.get() avoids the resulting KeyError.
    # Also renamed the loop variable so it no longer shadows builtin `id`.
    for key in ('rules', 'builtin_metrics', 'custom_metrics'):
        for item_id in ret.get(key, {}):
            ret[key][item_id]['id'] = item_id
    return ret
Esempio n. 9
0
def add_key(request):
    """
    Tags: keys
    ---
    Adds key.
    ADD permission required on key.
    ---
    name:
      description: The key's name
      required: true
      type: string
    priv:
      description: The private key
      required: true
      type: string
    certificate:
      description: The signed public key, when using signed ssh keys
      type: string
    """
    params = params_from_request(request)
    key_name = params.pop('name', None)
    private_key = params.get('priv', None)
    certificate = params.get('certificate', None)
    auth_context = auth_context_from_request(request)
    key_tags = auth_context.check_perm("key", "add", None)

    if not key_name:
        raise BadRequestError("Key name is not provided")
    if not private_key:
        raise RequiredParameterMissingError("Private key is not provided")

    # A certificate means a signed ssh key; pick the model accordingly.
    key_cls = SignedSSHKey if certificate else SSHKey
    key = key_cls.add(auth_context.owner, key_name, **params)

    # Set ownership.
    key.assign_to(auth_context.user)

    if key_tags:
        add_tags_to_resource(auth_context.owner, key, key_tags.items())

    # since its a new key machines fields should be an empty list
    clouds = Cloud.objects(owner=auth_context.owner, deleted=None)
    associated = Machine.objects(cloud__in=clouds,
                                 key_associations__keypair__exact=key)

    return {
        'id': key.id,
        'name': key.name,
        'machines': transform_key_machine_associations(associated, key),
        'isDefault': key.default,
    }
Esempio n. 10
0
def add_cloud_v_2(owner, title, provider, params):
    """Add cloud to owner.

    Instantiates the proper Cloud subclass for `provider`, optionally
    enables monitoring for bare-metal clouds, refreshes RBAC mappings and
    the owner's cached cloud count, and schedules machine polling.

    :param owner: the Owner the cloud is added to.
    :param title: display title for the new cloud.
    :param provider: provider key; must exist in cloud_models.CLOUDS.
    :param params: remaining provider-specific kwargs (mutated in place).
    :returns: dict with 'cloud_id' and, for bare-metal with monitoring,
        a 'monitoring' entry.
    :raises RequiredParameterMissingError: if provider is falsy.
    :raises BadRequestError: if provider is unknown.
    """

    # FIXME: Some of these should be explicit arguments, others shouldn't exist
    fail_on_error = params.pop('fail_on_error',
                               params.pop('remove_on_error', True))
    monitoring = params.pop('monitoring', False)
    params.pop('title', None)
    params.pop('provider', None)
    # Find proper Cloud subclass.
    if not provider:
        raise RequiredParameterMissingError("provider")
    log.info("Adding new cloud in provider '%s'", provider)
    if provider not in cloud_models.CLOUDS:
        raise BadRequestError("Invalid provider '%s'." % provider)
    cloud_cls = cloud_models.CLOUDS[provider]  # Class of Cloud model.

    # Add the cloud.
    cloud = cloud_cls.add(owner,
                          title,
                          fail_on_error=fail_on_error,
                          fail_on_invalid_params=False,
                          **params)
    ret = {'cloud_id': cloud.id}
    if provider == 'bare_metal' and monitoring:
        # Let's overload this a bit more by also combining monitoring.
        machine = Machine.objects.get(cloud=cloud)

        # Deploy over ssh only for unix hosts that have a key associated.
        ret['monitoring'] = enable_monitoring(
            owner,
            cloud.id,
            machine.machine_id,
            no_ssh=not (machine.os_type == 'unix'
                        and machine.key_associations))

    # SEC
    owner.mapper.update(cloud)

    log.info("Cloud with id '%s' added succesfully.", cloud.id)
    trigger_session_update(owner, ['clouds'])
    # Keep the owner's cached cloud count in sync.
    c_count = Cloud.objects(owner=owner, deleted=None).count()
    if owner.clouds_count != c_count:
        owner.clouds_count = c_count
        owner.save()

    cloud.polling_interval = 1800  # 30 min * 60 sec/min
    cloud.save()
    ListMachinesPollingSchedule.add(cloud=cloud)

    return ret
Esempio n. 11
0
def _machine_from_matchdict(request, deleted=False):
    """Find machine given either uuid or cloud-id/ext-id in request path.

    :param request: pyramid request whose matchdict contains either
        `machine_uuid`, or `cloud` together with `machine` (external id).
    :param deleted: when True, also match machines on soft-deleted clouds
        and machines in any state.
    :returns: the matching Machine document.
    :raises NotFoundError: if the cloud or machine cannot be found.
    """
    auth_context = auth_context_from_request(request)
    if 'cloud' in request.matchdict:
        try:
            if not deleted:
                cloud = Cloud.objects.get(owner=auth_context.owner,
                                          id=request.matchdict['cloud'],
                                          deleted=None)
            else:
                cloud = Cloud.objects.get(owner=auth_context.owner,
                                          id=request.matchdict['cloud'])
        except Cloud.DoesNotExist:
            raise NotFoundError('Cloud does not exist')
        try:
            if not deleted:
                machine = Machine.objects.get(
                    cloud=cloud,
                    machine_id=request.matchdict['machine'],
                    state__ne='terminated',
                )
            else:
                machine = Machine.objects.get(
                    cloud=cloud,
                    machine_id=request.matchdict['machine'])
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" %
                                request.matchdict['machine'])
        # used by logging_view_decorator
        request.environ['machine_uuid'] = machine.id
    else:
        clouds = Cloud.objects(owner=auth_context.owner, deleted=None)
        try:
            machine = Machine.objects.get(
                cloud__in=clouds,
                id=request.matchdict['machine_uuid'],
                state__ne='terminated'
            )
        except Machine.DoesNotExist:
            # FIX: this branch matches on 'machine_uuid'; formatting with
            # matchdict['machine'] raised KeyError instead of NotFoundError.
            raise NotFoundError("Machine %s doesn't exist" %
                                request.matchdict['machine_uuid'])
        # used by logging_view_decorator
        request.environ['machine_id'] = machine.machine_id
        request.environ['cloud_id'] = machine.cloud.id
    auth_context.check_perm('cloud', 'read', machine.cloud.id)
    return machine
Esempio n. 12
0
def trigger_network_polling_schedules():
    """Create a ListNetworksPollingSchedule for every network-capable cloud."""
    # FIX: the original used Python 2 print statements, which are syntax
    # errors on Python 3; converted to print() calls.
    clouds = Cloud.objects(deleted=None)

    print()
    print('Creating and storing in database ListNetworksPollingSchedule')
    print()

    failed = 0

    for cloud in clouds:
        # Skip clouds whose controller exposes no network support.
        if not hasattr(cloud.ctl, 'network'):
            continue
        try:
            ListNetworksPollingSchedule.add(cloud)
        except Exception:
            traceback.print_exc()
            failed += 1

    print(' ****** Failures: %d *********' % failed)
Esempio n. 13
0
def add_cloud_v_2(owner, title, provider, params):
    """Add cloud to owner"""

    # FIXME: Some of these should be explicit arguments, others shouldn't exist
    fail_on_error = params.pop('fail_on_error',
                               params.pop('remove_on_error', True))
    params.pop('title', None)
    params.pop('provider', None)

    # Find proper Cloud subclass.
    if not provider:
        raise RequiredParameterMissingError("provider")
    log.info("Adding new cloud in provider '%s'", provider)
    if provider not in cloud_models.CLOUDS:
        raise BadRequestError("Invalid provider '%s'." % provider)
    cloud_cls = cloud_models.CLOUDS[provider]  # Class of Cloud model.

    # Add the cloud.
    cloud = cloud_cls.add(owner, title, fail_on_error=fail_on_error,
                          fail_on_invalid_params=False, **params)
    ret = {
        'cloud_id': cloud.id,
        # `errors` is just an attribute, not a field.
        'errors': getattr(cloud, 'errors', []),
    }

    log.info("Cloud with id '%s' added succesfully.", cloud.id)

    # Keep the owner's cached cloud count in sync.
    c_count = Cloud.objects(owner=owner, deleted=None).count()
    if owner.clouds_count != c_count:
        owner.clouds_count = c_count
        owner.save()

    cloud.polling_interval = 1800  # 30 min * 60 sec/min
    cloud.save()

    # TODO: remove below, most probably doesn't make any difference?
    for schedule_cls in (ListMachinesPollingSchedule,
                         ListLocationsPollingSchedule,
                         ListSizesPollingSchedule,
                         ListImagesPollingSchedule):
        schedule_cls.add(cloud=cloud)

    return ret
Esempio n. 14
0
    def list_clouds(self):
        """Emit the owner's clouds to the client, then emit cached machine
        lists and schedule the periodic list_* tasks for each cloud.
        """
        if config.ACTIVATE_POLLER:
            self.update_poller()
        self.send('list_clouds', filter_list_clouds(self.auth_context))
        clouds = Cloud.objects(owner=self.owner, enabled=True, deleted=None)
        log.info(clouds)
        periodic_tasks = []
        if not config.ACTIVATE_POLLER:
            periodic_tasks.append(('list_machines', tasks.ListMachines()))
        else:
            # Poller active: serve machines seen within the last day
            # straight from the database cache.
            for cloud in clouds:
                after = datetime.datetime.utcnow() - datetime.timedelta(days=1)
                machines = Machine.objects(cloud=cloud, missing_since=None,
                                           last_seen__gt=after)
                machines = filter_list_machines(
                    self.auth_context, cloud_id=cloud.id,
                    machines=[machine.as_dict() for machine in machines]
                )
                if machines:
                    log.info("Emitting list_machines from poller's cache.")
                    self.send('list_machines',
                              {'cloud_id': cloud.id, 'machines': machines})

        periodic_tasks.extend([('list_images', tasks.ListImages()),
                               ('list_sizes', tasks.ListSizes()),
                               ('list_networks', tasks.ListNetworks()),
                               ('list_zones', tasks.ListZones()),
                               ('list_locations', tasks.ListLocations()),
                               ('list_projects', tasks.ListProjects())])
        for key, task in periodic_tasks:
            for cloud in clouds:
                # NOTE(review): smart_delay presumably returns a cached
                # result when fresh and None otherwise — confirm in tasks.
                cached = task.smart_delay(self.owner.id, cloud.id)
                if cached is not None:
                    log.info("Emitting %s from cache", key)
                    if key == 'list_machines':
                        # RBAC-filter cached machines; None means nothing
                        # visible to this user, so skip the emit entirely.
                        cached['machines'] = filter_list_machines(
                            self.auth_context, **cached
                        )
                        if cached['machines'] is None:
                            continue
                    self.send(key, cached)
Esempio n. 15
0
def trigger_location_polling_schedules():
    """Create a daily ListLocationsPollingSchedule for every live cloud."""
    # FIX: the original used Python 2 print statements, which are syntax
    # errors on Python 3; converted to print() calls. Also dropped a
    # redundant `continue` at the end of the except block.
    clouds = Cloud.objects(deleted=None)

    print()
    print('Creating and storing in database ListLocationsPollingSchedules')
    print()

    failed = 0

    for cloud in clouds:
        try:
            schedule = ListLocationsPollingSchedule.add(cloud)
            schedule.set_default_interval(60 * 60 * 24)  # once a day
            schedule.save()
        except Exception as exc:
            print('Error: %s' % exc)
            traceback.print_exc()
            failed += 1

    print(' ****** Failures: %d *********' % failed)
Esempio n. 16
0
def list_keys(owner):
    """List owner's keys
    :param owner:
    :return:
    """
    clouds = Cloud.objects(owner=owner, deleted=None)
    key_objects = []
    # FIXME: This must be taken care of in Keys.as_dict
    for key in Key.objects(owner=owner, deleted=None):
        associated = Machine.objects(cloud__in=clouds,
                                     key_associations__keypair__exact=key)
        key_objects.append({
            'id': key.id,
            'name': key.name,
            'isDefault': key.default,
            'machines': transform_key_machine_associations(associated, key),
            'tags': get_tags_for_resource(owner, key),
        })
    return key_objects
Esempio n. 17
0
    def load(self, machines=None):
        """Build the inventory: populate self.hosts and self.keys.

        :param machines: optional list of (cloud_id, machine_id) tuples;
            defaults to every machine on the owner's non-deleted clouds.
        """
        self.hosts = {}
        self.keys = {}
        if not machines:
            clouds = Cloud.objects(owner=self.owner, deleted=None)
            machines = [(machine.cloud.id, machine.machine_id)
                        for machine in Machine.objects(cloud__in=clouds)]
        for bid, mid in machines:
            try:
                name, ip_addr = self.find_machine_details(bid, mid)
                key_id, ssh_user, port = self.find_ssh_settings(bid, mid)
            except Exception as exc:
                # FIX: was a Python 2 `print exc` statement (syntax error on
                # Python 3); skip machines we can't resolve, best-effort.
                print(exc)
                continue
            ip_addr, port = dnat(self.owner, ip_addr, port)
            if key_id not in self.keys:
                keypair = SSHKey.objects.get(owner=self.owner,
                                             name=key_id,
                                             deleted=None)
                self.keys[key_id] = keypair.private
                if isinstance(keypair, SignedSSHKey):
                    # if signed ssh key, provide the key appending a -cert.pub
                    # on the name since this is how ssh will include it as
                    # an identify file
                    self.keys['%s-cert.pub' % key_id] = keypair.certificate
                    # pub key also needed for openssh 7.2
                    self.keys['%s.pub' % key_id] = keypair.public
            # Deduplicate host names by appending a numeric suffix.
            if name in self.hosts:
                num = 2
                while ('%s-%d' % (name, num)) in self.hosts:
                    num += 1
                name = '%s-%d' % (name, num)

            self.hosts[name] = {
                'ansible_ssh_host': ip_addr,
                'ansible_ssh_port': port,
                'ansible_ssh_user': ssh_user,
                'ansible_ssh_private_key_file': 'id_rsa/%s' % key_id,
            }
Esempio n. 18
0
def delete_cloud(owner, cloud_id):
    """Deletes cloud with given cloud_id."""
    log.info("Deleting cloud: %s", cloud_id)

    # Best effort: monitoring may already be off or the service unreachable.
    try:
        disable_monitoring_cloud(owner, cloud_id)
    except Exception as exc:
        log.warning("Couldn't disable monitoring before deleting cloud. "
                    "Error: %r", exc)

    try:
        cloud = Cloud.objects.get(owner=owner, id=cloud_id, deleted=None)
    except Cloud.DoesNotExist:
        raise NotFoundError('Cloud does not exist')
    cloud.ctl.delete()

    log.info("Successfully deleted cloud '%s'", cloud_id)
    trigger_session_update(owner, ['clouds'])

    # Keep the owner's cached cloud count in sync.
    remaining = Cloud.objects(owner=owner, deleted=None).count()
    if owner.clouds_count != remaining:
        owner.clouds_count = remaining
        owner.save()
Esempio n. 19
0
def add_cloud_v_2(owner, title, provider, params):
    """Add cloud to owner"""
    # FIXME: Some of these should be explicit arguments, others shouldn't exist
    fail_on_error = params.pop('fail_on_error',
                               params.pop('remove_on_error', True))
    params.pop('title', None)
    params.pop('provider', None)

    if not provider:
        raise RequiredParameterMissingError("provider")

    title = validate_cloud_title(title)
    log.info("Adding new cloud in provider '%s'", provider)
    # Find proper Cloud subclass.
    if provider not in cloud_models.CLOUDS:
        raise BadRequestError("Invalid provider '%s'." % provider)
    cloud_cls = cloud_models.CLOUDS[provider]  # Class of Cloud model.

    # Add the cloud.
    cloud = cloud_cls.add(owner, title, fail_on_error=fail_on_error,
                          fail_on_invalid_params=False, **params)
    ret = {
        'cloud_id': cloud.id,
        # `errors` is just an attribute, not a field.
        'errors': getattr(cloud, 'errors', []),
    }

    log.info("Cloud with id '%s' added succesfully.", cloud.id)

    # Keep the owner's cached cloud count in sync.
    cloud_count = Cloud.objects(owner=owner, deleted=None).count()
    if owner.clouds_count != cloud_count:
        owner.clouds_count = cloud_count
        owner.save()

    return ret
Esempio n. 20
0
    def list_machines(self):
        """Return list of machines for cloud

        A list of nodes is fetched from libcloud, the data is processed, stored
        on machine models, and a list of machine models is returned.

        Subclasses SHOULD NOT override or extend this method.

        There are instead a number of methods that are called from this method,
        to allow subclasses to modify the data according to the specific of
        their cloud type. These methods currently are:

            `self._list_machines__fetch_machines`
            `self._list_machines__machine_actions`
            `self._list_machines__postparse_machine`
            `self._list_machines__cost_machine`
            `self._list_machines__fetch_generic_machines`

        Subclasses that require special handling should override these, by
        default, dummy methods.

        """

        # Try to query list of machines from provider API.
        try:
            nodes = self._list_machines__fetch_machines()
            log.info("List nodes returned %d results for %s.",
                     len(nodes), self.cloud)
        except InvalidCredsError as exc:
            log.warning("Invalid creds on running list_nodes on %s: %s",
                        self.cloud, exc)
            raise CloudUnauthorizedError(msg=exc.message)
        except ssl.SSLError as exc:
            log.error("SSLError on running list_nodes on %s: %s",
                      self.cloud, exc)
            raise SSLError(exc=exc)
        except Exception as exc:
            log.exception("Error while running list_nodes on %s", self.cloud)
            raise CloudUnavailableError(exc=exc)

        machines = []
        now = datetime.datetime.utcnow()

        # Process each machine in returned list.
        # Store previously unseen machines separately.
        new_machines = []
        for node in nodes:

            # Fetch machine mongoengine model from db, or initialize one.
            try:
                machine = Machine.objects.get(cloud=self.cloud,
                                              machine_id=node.id)
            except Machine.DoesNotExist:
                machine = Machine(cloud=self.cloud, machine_id=node.id).save()
                new_machines.append(machine)

            # Update machine_model's last_seen fields.
            machine.last_seen = now
            machine.missing_since = None

            # Get misc libcloud metadata.
            # NOTE: providers expose image/size under different extra keys,
            # hence the fallback chains below.
            image_id = str(node.image or node.extra.get('imageId') or
                           node.extra.get('image_id') or
                           node.extra.get('image') or '')
            size = (node.size or node.extra.get('flavorId') or
                    node.extra.get('instancetype'))

            machine.name = node.name
            machine.image_id = image_id
            machine.size = size
            machine.state = config.STATES[node.state]
            machine.private_ips = node.private_ips
            machine.public_ips = node.public_ips

            # Set machine extra dict.
            # Make sure we don't meet any surprises when we try to json encode
            # later on in the HTTP response.
            extra = self._list_machines__get_machine_extra(machine, node)

            for key, val in extra.items():
                try:
                    json.dumps(val)
                except TypeError:
                    extra[key] = str(val)
            machine.extra = extra

            # Set machine hostname
            if machine.extra.get('dns_name'):
                machine.hostname = machine.extra['dns_name']
            else:
                # Otherwise use the first address without a ':' (i.e. skip
                # what look like IPv6 addresses), public before private.
                ips = machine.public_ips + machine.private_ips
                if not ips:
                    ips = []
                for ip in ips:
                    if ip and ':' not in ip:
                        machine.hostname = ip
                        break

            # Get machine tags from db
            tags = {tag.key: tag.value for tag in Tag.objects(
                owner=self.cloud.owner, resource=machine,
            ).only('key', 'value')}

            # Get machine creation date.
            try:
                created = self._list_machines__machine_creation_date(machine,
                                                                     node)
                if created:
                    machine.created = get_datetime(created)
            except Exception as exc:
                log.exception("Error finding creation date for %s in %s.",
                              self.cloud, machine)
            # TODO: Consider if we should fall back to using current date.
            # if not machine_model.created:
            #     machine_model.created = datetime.datetime.utcnow()

            # Update with available machine actions.
            try:
                self._list_machines__machine_actions(machine, node)
            except Exception as exc:
                log.exception("Error while finding machine actions "
                              "for machine %s:%s for %s",
                              machine.id, node.name, self.cloud)

            # Apply any cloud/provider specific post processing.
            try:
                self._list_machines__postparse_machine(machine, node)
            except Exception as exc:
                log.exception("Error while post parsing machine %s:%s for %s",
                              machine.id, node.name, self.cloud)

            # Apply any cloud/provider cost reporting.
            try:
                def parse_num(num):
                    # Coerce a cost value to float, tolerating None/garbage.
                    try:
                        return float(num or 0)
                    except (ValueError, TypeError):
                        log.warning("Can't parse %r as float.", num)
                        return 0

                month_days = calendar.monthrange(now.year, now.month)[1]

                # Prefer user-set cost tags unless absent or implausibly
                # large, then fall back to the provider-derived cost.
                cph = parse_num(tags.get('cost_per_hour'))
                cpm = parse_num(tags.get('cost_per_month'))
                if not (cph or cpm) or cph > 100 or cpm > 100 * 24 * 31:
                    cph, cpm = map(parse_num,
                                   self._list_machines__cost_machine(machine,
                                                                     node))
                # Derive whichever of hourly/monthly is missing.
                if not cph:
                    cph = float(cpm) / month_days / 24
                elif not cpm:
                    cpm = cph * 24 * month_days
                machine.cost.hourly = cph
                machine.cost.monthly = cpm

            except Exception as exc:
                log.exception("Error while calculating cost "
                              "for machine %s:%s for %s",
                              machine.id, node.name, self.cloud)
            if node.state.lower() == 'terminated':
                machine.cost.hourly = 0
                machine.cost.monthly = 0

            # Save all changes to machine model on the database.
            try:
                machine.save()
            except me.ValidationError as exc:
                log.error("Error adding %s: %s", machine.name, exc.to_dict())
                raise BadRequestError({"msg": exc.message,
                                       "errors": exc.to_dict()})
            except me.NotUniqueError as exc:
                log.error("Machine %s not unique error: %s", machine.name, exc)
                raise ConflictError("Machine with this name already exists")

            machines.append(machine)

        # Append generic-type machines, which aren't handled by libcloud.
        for machine in self._list_machines__fetch_generic_machines():
            machine.last_seen = now
            machine.missing_since = None
            machine.state = config.STATES[NodeState.UNKNOWN]
            for action in ('start', 'stop', 'reboot', 'destroy', 'rename',
                           'resume', 'suspend', 'undefine'):
                setattr(machine.actions, action, False)
            machine.actions.tag = True
            # allow reboot action for bare metal with key associated
            if machine.key_associations:
                machine.actions.reboot = True
            machine.save()
            machines.append(machine)

        # Set last_seen on machine models we didn't see for the first time now.
        Machine.objects(cloud=self.cloud,
                        id__nin=[m.id for m in machines],
                        missing_since=None).update(missing_since=now)

        # Update RBAC Mappings given the list of nodes seen for the first time.
        self.cloud.owner.mapper.update(new_machines)

        # Update machine counts on cloud and org.
        # FIXME: resolve circular import issues
        from mist.api.clouds.models import Cloud
        self.cloud.machine_count = len(machines)
        self.cloud.save()
        self.cloud.owner.total_machine_count = sum(
            cloud.machine_count for cloud in Cloud.objects(
                owner=self.cloud.owner, deleted=None
            ).only('machine_count')
        )
        self.cloud.owner.save()

        # Close libcloud connection
        try:
            self.disconnect()
        except Exception as exc:
            log.warning("Error while closing connection: %r", exc)

        return machines
Esempio n. 21
0
def check_monitoring(owner):
    """Return the monitored machines, enabled metrics, and user details.

    :param owner: the Owner/Organization whose monitoring state to report.
    :return: dict with the monitored machines (both legacy list and keyed
        mapping), rule/alert settings, custom metrics, and the builtin
        metrics matching the configured monitoring method.
    """
    custom_metrics = owner.get_metrics_dict()
    for metric in list(custom_metrics.values()):
        metric["machines"] = []

    monitored_machines = []
    monitored_machines_2 = {}

    clouds = Cloud.objects(owner=owner, deleted=None)
    machines = Machine.objects(cloud__in=clouds,
                               monitoring__hasmonitoring=True)

    for machine in machines:
        monitored_machines.append([machine.cloud.id, machine.machine_id])
        try:
            commands = machine.monitoring.get_commands()
        except Exception as exc:
            log.error(exc)
            commands = {}
        monitored_machines_2[machine.id] = {
            "cloud_id": machine.cloud.id,
            "machine_id": machine.machine_id,
            "installation_status":
                machine.monitoring.installation_status.as_dict(),
            "commands": commands,
        }
        for metric_id in machine.monitoring.metrics:
            # NOTE: don't rebind the `machines` queryset being iterated —
            # append to the metric's machine list directly.
            if metric_id in custom_metrics:
                custom_metrics[metric_id]["machines"].append(
                    (machine.cloud.id, machine.machine_id))

    ret = {
        "machines": monitored_machines,
        "monitored_machines": monitored_machines_2,
        "rules": owner.get_rules_dict(),
        "alerts_email": owner.alerts_email,
        "custom_metrics": custom_metrics,
    }
    if config.DEFAULT_MONITORING_METHOD.endswith("graphite"):
        ret.update({
            # Keep for backwards compatibility
            "builtin_metrics": config.GRAPHITE_BUILTIN_METRICS,
            "builtin_metrics_graphite": config.GRAPHITE_BUILTIN_METRICS,
            "builtin_metrics_influxdb": config.INFLUXDB_BUILTIN_METRICS,
        })
    elif config.DEFAULT_MONITORING_METHOD.endswith("influxdb"):
        ret.update({
            # Keep for backwards compatibility
            "builtin_metrics": config.INFLUXDB_BUILTIN_METRICS,
            "builtin_metrics_influxdb": config.INFLUXDB_BUILTIN_METRICS,
        })
    elif config.DEFAULT_MONITORING_METHOD.endswith("tsfdb"):
        ret.update({
            # Keep for backwards compatibility
            "builtin_metrics": {},
            # "builtin_metrics_tsfdb": config.FDB_BUILTIN_METRICS,
        })
    # Use .get(): "builtin_metrics" is absent when DEFAULT_MONITORING_METHOD
    # matches none of the branches above; also avoid shadowing builtin `id`.
    for key in ("rules", "builtin_metrics", "custom_metrics"):
        for _id in ret.get(key, {}):
            ret[key][_id]["id"] = _id
    return ret
Esempio n. 22
0
def list_clouds(owner, as_dict=True):
    """Return the owner's non-deleted clouds.

    :param owner: owner whose clouds to list.
    :param as_dict: when True (default), serialize each cloud via
        ``as_dict()``; otherwise return the raw queryset.
    """
    clouds = Cloud.objects(owner=owner, deleted=None)
    if not as_dict:
        return clouds
    return [c.as_dict() for c in clouds]
Esempio n. 23
0
def associate_key(request):
    """
    Associate a key to a machine
    Associates a key with a machine. If host is set it will also attempt to
    actually deploy it to the machine. To do that it requires another key
    (existing_key) that can connect to the machine.
    READ permission required on cloud.
    READ_PRIVATE permission required on key.
    ASSOCIATE_KEY permission required on machine.
    ---
    machine:
      in: path
      required: true
      type: string
    key:
      in: path
      required: true
      type: string
    port:
      default: 22
      type: integer
    user:
      description: The ssh user
      type: string
    """
    key_id = request.matchdict['key']
    cloud_id = request.matchdict.get('cloud')

    params = params_from_request(request)
    ssh_user = params.get('user', None)
    try:
        ssh_port = int(request.json_body.get('port', 22))
    except Exception:
        # Malformed/absent JSON body or a non-numeric port value:
        # fall back to the default SSH port. (Was a bare `except:`.)
        ssh_port = 22

    auth_context = auth_context_from_request(request)
    try:
        key = Key.objects.get(owner=auth_context.owner,
                              id=key_id,
                              deleted=None)
    except Key.DoesNotExist:
        raise NotFoundError('Key id does not exist')
    auth_context.check_perm('key', 'read_private', key.id)

    if cloud_id:
        # this is deprecated, keep it for backwards compatibility
        machine_id = request.matchdict['machine_uuid']
        try:
            Cloud.objects.get(owner=auth_context.owner,
                              id=cloud_id,
                              deleted=None)
        except Cloud.DoesNotExist:
            raise NotFoundError('Cloud does not exist')

        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    auth_context.check_perm("machine", "associate_key", machine.id)

    key.ctl.associate(machine, username=ssh_user, port=ssh_port)
    clouds = Cloud.objects(owner=auth_context.owner, deleted=None)
    machines = Machine.objects(cloud__in=clouds,
                               key_associations__keypair__exact=key)

    assoc_machines = transform_key_machine_associations(machines, key)
    return assoc_machines
Esempio n. 24
0
 def owner_query(self):
     """Build a query matching resources on any of this owner's clouds."""
     owned_clouds = Cloud.objects(owner=self.owner).only('id')
     return me.Q(cloud__in=owned_clouds)
Esempio n. 25
0
def add_cloud(request):
    """
    Tags: clouds
    ---
    Adds a new cloud and returns the cloud's id.
    ADD permission required on cloud.
    ---
    api_key:
      type: string
      description: Required for Clearcenter
    api_secret:
      type: string
    apikey:
      type: string
      description: Required for Ec2, Hostvirtual, Linode, \
      EquinixMetal, Rackspace, OnApp, SoftLayer, Vultr
    apisecret:
      type: string
      description: Required for Ec2
    apiurl:
      type: string
    auth_password:
      description: Optional for Docker
      type: string
    auth_url:
      type: string
      description: Required for OpenStack
    auth_user:
      description: Optional for Docker
      type: string
    authentication:
      description: Required for Docker
      enum:
      - tls
      - basic
    ca_cert_file:
      type: string
      description: Optional for Docker
    cert_file:
      type: string
      description: Optional for Docker
    certificate:
      type: string
      description: Required for Azure
    compute_endpoint:
      type: string
      description: Optional for OpenStack
    dns_enabled:
      type: boolean
    docker_host:
      description: Required for Docker
    docker_port:
      type: string
    domain:
      type: string
      description: Optional for OpenStack
    host:
      type: string
      description: Required for OnApp, Vcloud, vSphere
    images_location:
      type: string
      description: Required for KVM
    key:
      type: string
      description: Required for Azure_arm
    key_file:
      type: string
      description: Optional for Docker
    machine_hostname:
      type: string
      description: Required for KVM
    machine_key:
      type: string
      description: Id of the key. Required for KVM
    machine_port:
      type: string
    machine_user:
      type: string
      description: Required for KVM
    organization:
      type: string
      description: Required for Vcloud
    password:
      type: string
      description: Required for OpenStack, Vcloud, vSphere
    port:
      type: integer
      description: Required for Vcloud
    private_key:
      type: string
      description: Required for GCE
    project_id:
      type: string
      description: Required for GCE. Optional for EquinixMetal
    provider:
      description: The cloud provider.
      required: True
      enum:
      - vcloud
      - bare_metal
      - docker
      - libvirt
      - openstack
      - vsphere
      - ec2
      - rackspace
      - digitalocean
      - softlayer
      - gce
      - azure
      - azure_arm
      - linode
      - onapp
      - hostvirtual
      - vultr
      - aliyun_ecs
      required: true
      type: string
    region:
      type: string
      description: Required for Ec2, Alibaba, Rackspace. Optional for Openstack
    remove_on_error:
      type: string
    secret:
      type: string
      description: Required for Azure_arm
    show_all:
      type: boolean
      description: Show stopped containers. Required for Docker.
    ssh_port:
      type: integer
      description: Required for KVM
    subscription_id:
      type: string
      description: Required for Azure, Azure_arm
    tenant_id:
      type: string
      description: Required for Azure_arm
    tenant_name:
      type: string
      description: Required for OpenStack
    title:
      description: The human readable title of the cloud.
      type: string
      required: True
    token:
      type: string
      description: Required for Digitalocean
    username:
      type: string
      description: Required for Rackspace, OnApp, \
      SoftLayer, OpenStack, Vcloud, vSphere
    """
    auth_context = auth_context_from_request(request)
    cloud_tags, _ = auth_context.check_perm("cloud", "add", None)
    owner = auth_context.owner
    params = params_from_request(request)
    # Remove spaces from start/end of string fields that are often included
    # when pasting keys, preventing thus successful connection with the
    # cloud. (Was `type(...) in [str, str]` — a botched py2 conversion.)
    for key in list(params.keys()):
        if isinstance(params[key], str):
            params[key] = params[key].strip()

    # api_version = request.headers.get('Api-Version', 1)
    title = params.get('title', '')
    provider = params.get('provider', '')

    if not provider:
        raise RequiredParameterMissingError('provider')

    result = add_cloud_v_2(owner, title, provider, params)
    cloud_id = result['cloud_id']
    monitoring = result.get('monitoring')
    errors = result.get('errors')

    cloud = Cloud.objects.get(owner=owner, id=cloud_id)

    if cloud_tags:
        add_tags_to_resource(owner, cloud, list(cloud_tags.items()))

    # Set ownership.
    cloud.assign_to(auth_context.user)

    trigger_session_update(owner.id, ['clouds'])

    # SEC
    # Update the RBAC & User/Ownership mappings with the new Cloud and finally
    # trigger a session update by registering it as a chained task.
    if config.HAS_RBAC:
        owner.mapper.update(cloud,
                            callback=async_session_update,
                            args=(
                                owner.id,
                                ['clouds'],
                            ))

    c_count = Cloud.objects(owner=owner, deleted=None).count()
    ret = cloud.as_dict()
    ret['index'] = c_count - 1
    if errors:
        ret['errors'] = errors
    if monitoring:
        ret['monitoring'] = monitoring

    return ret
Esempio n. 26
0
    def list_clouds(self):
        """Send the cloud list, then stream cached per-cloud resources
        and emit cached results of the periodic listing tasks."""
        self.update_poller()
        self.send('list_clouds', filter_list_clouds(self.auth_context))
        clouds = Cloud.objects(owner=self.owner, enabled=True, deleted=None)

        # Fetch cached machines/locations/sizes/networks for every cloud
        # through the internal API and forward each payload to the client.
        for cloud in clouds:
            for section in ('machines', 'locations', 'sizes', 'networks'):
                self.internal_request(
                    'api/v1/clouds/%s/%s' % (cloud.id, section),
                    params={'cached': True},
                    # Bind the loop variables as defaults so each callback
                    # keeps its own cloud id and section name.
                    callback=lambda data, cloud_id=cloud.id, section=section:
                        self.send('list_%s' % section, {
                            'cloud_id': cloud_id,
                            section: data,
                        }),
                )

        periodic_tasks = [
            ('list_images', tasks.ListImages()),
            ('list_zones', tasks.ListZones()),
            ('list_resource_groups', tasks.ListResourceGroups()),
            ('list_storage_accounts', tasks.ListStorageAccounts()),
            ('list_projects', tasks.ListProjects()),
        ]
        for key, task in periodic_tasks:
            for cloud in clouds:
                # Avoid submitting new celery tasks, when it's certain that
                # they will exit immediately without performing any actions.
                if not maybe_submit_cloud_task(cloud, key):
                    continue
                cached = task.smart_delay(self.owner.id, cloud.id)
                if cached is None:
                    continue
                log.info("Emitting %s from cache", key)
                if key == 'list_machines':
                    cached['machines'] = filter_list_machines(
                        self.auth_context, **cached)
                    if cached['machines'] is None:
                        continue
                elif key == 'list_zones':
                    cached = filter_list_zones(
                        self.auth_context, cloud.id, cached['zones'])
                    if cached is None:
                        continue
                elif key == 'list_networks':
                    cached['networks'] = filter_list_networks(
                        self.auth_context, **cached)
                    if not (cached['networks']['public'] or
                            cached['networks']['private']):
                        continue
                self.send(key, cached)
Esempio n. 27
0
def list_clouds(owner):
    """Serialize every non-deleted cloud of `owner` to a dict."""
    clouds = Cloud.objects(owner=owner, deleted=None)
    return [cloud.as_dict() for cloud in clouds]
Esempio n. 28
0
 def count_mon_machines(self):
     """Count this owner's machines that have monitoring enabled."""
     # Imported locally to avoid circular imports at module load time.
     from mist.api.clouds.models import Cloud
     from mist.api.machines.models import Machine
     owned_clouds = Cloud.objects(owner=self, deleted=None)
     monitored = Machine.objects(cloud__in=owned_clouds,
                                 monitoring__hasmonitoring=True)
     return monitored.count()
Esempio n. 29
0
    def process_update(self, ch, method, properties, body):
        """Dispatch an AMQP update message to the client session.

        Routes list_* results, session 'update' notifications, and patch_*
        deltas to the appropriate handler/send call based on the routing key.
        """
        routing_key = method.routing_key
        try:
            result = json.loads(body)
        except (ValueError, TypeError):
            # Payload isn't JSON-encoded; pass it through as-is.
            # (Was a bare `except:`.)
            result = body
        log.info("Got %s", routing_key)
        if routing_key in set([
                'notify', 'probe', 'list_sizes', 'list_images',
                'list_networks', 'list_machines', 'list_zones',
                'list_locations', 'list_projects', 'ping',
                'list_resource_groups', 'list_storage_accounts'
        ]):
            if routing_key == 'list_machines':
                # probe newly discovered running machines
                machines = result['machines']
                cloud_id = result['cloud_id']
                filtered_machines = filter_list_machines(
                    self.auth_context, cloud_id, machines)
                if filtered_machines is not None:
                    self.send(routing_key, {
                        'cloud_id': cloud_id,
                        'machines': filtered_machines
                    })
                # update cloud machine count in multi-user setups
                cloud = Cloud.objects.get(owner=self.owner,
                                          id=cloud_id,
                                          deleted=None)
                for machine in machines:
                    bmid = (cloud_id, machine['machine_id'])
                    if bmid in self.running_machines:
                        # machine was running
                        if machine['state'] != 'running':
                            # machine no longer running
                            self.running_machines.remove(bmid)
                        continue
                    if machine['state'] != 'running':
                        # machine not running
                        continue
                    # machine just started running
                    self.running_machines.add(bmid)

                    # FIX: py3 filter() returns a lazy iterator which is
                    # always truthy, so the original `if not ips:` checks
                    # could never fire. Materialize with comprehensions.
                    # Skip IPv6 addresses (those containing ':').
                    ips = [ip for ip in machine.get('public_ips', [])
                           if ':' not in ip]
                    if not ips:
                        # if not public IPs, search for private IPs,
                        # otherwise continue iterating over the machines
                        ips = [ip for ip in machine.get('private_ips', [])
                               if ':' not in ip]
                        if not ips:
                            continue
            elif routing_key == 'list_zones':
                zones = result['zones']
                cloud_id = result['cloud_id']
                filtered_zones = filter_list_zones(self.auth_context, cloud_id,
                                                   zones)
                self.send(routing_key, filtered_zones)
            elif routing_key == 'list_networks':
                networks = result['networks']
                cloud_id = result['cloud_id']
                filtered_networks = filter_list_networks(
                    self.auth_context, cloud_id, networks)
                self.send(routing_key, {
                    'cloud_id': cloud_id,
                    'networks': filtered_networks
                })
            else:
                self.send(routing_key, result)

        elif routing_key == 'update':
            # A session section changed; re-emit each affected listing.
            self.owner.reload()
            sections = result
            if 'clouds' in sections:
                self.list_clouds()
            if 'keys' in sections:
                self.list_keys()
            if 'scripts' in sections:
                self.list_scripts()
            if 'schedules' in sections:
                self.list_schedules()
            if 'zones' in sections:
                task = tasks.ListZones()
                clouds = Cloud.objects(owner=self.owner,
                                       enabled=True,
                                       deleted=None)
                for cloud in clouds:
                    if cloud.dns_enabled:
                        task.smart_delay(self.owner.id, cloud.id)
            if 'templates' in sections:
                self.list_templates()
            if 'stacks' in sections:
                self.list_stacks()
            if 'tunnels' in sections:
                self.list_tunnels()
            if 'notifications' in sections:
                self.update_notifications()
            if 'monitoring' in sections:
                self.check_monitoring()
            if 'user' in sections:
                self.auth_context.user.reload()
                self.update_user()
            if 'org' in sections:
                self.auth_context.org.reload()
                self.update_org()

        elif routing_key == 'patch_notifications':
            # Only forward notifications addressed to this session's user.
            if result.get('user') == self.user.id:
                self.send('patch_notifications', result)

        elif routing_key == 'patch_machines':
            # Rewrite JSON-patch paths to the full cloud/machine namespace
            # and drop entries the user isn't allowed to see.
            cloud_id = result['cloud_id']
            patch = result['patch']
            machine_ids = []
            for line in patch:
                machine_id, line['path'] = line['path'][1:].split('-', 1)
                machine_ids.append(machine_id)
            if not self.auth_context.is_owner():
                allowed_machine_ids = filter_machine_ids(
                    self.auth_context, cloud_id, machine_ids)
            else:
                allowed_machine_ids = machine_ids
            patch = [
                line for line, m_id in zip(patch, machine_ids)
                if m_id in allowed_machine_ids
            ]
            for line in patch:
                line['path'] = '/clouds/%s/machines/%s' % (cloud_id,
                                                           line['path'])
            if patch:
                self.batch.extend(patch)

        elif routing_key in [
                'patch_locations', 'patch_sizes', 'patch_networks'
        ]:
            # Prefix patch paths with the owning cloud's namespace.
            cloud_id = result['cloud_id']
            patch = result['patch']
            for line in patch:
                _id = line['path'][1:]
                if routing_key == 'patch_locations':
                    line['path'] = '/clouds/%s/locations/%s' % (cloud_id, _id)
                elif routing_key == 'patch_sizes':
                    line['path'] = '/clouds/%s/sizes/%s' % (cloud_id, _id)
                elif routing_key == 'patch_networks':
                    line['path'] = '/clouds/%s/networks/%s' % (cloud_id, _id)
            if patch:
                self.batch.extend(patch)
Esempio n. 30
0
def add_cloud(request):
    """
    Add a new cloud
    Adds a new cloud to the user and returns the cloud_id
    ADD permission required on cloud.

    ---
    api_key:
      type: string
    api_secret:
      type: string
    apiurl:
      type: string
    docker_port:
      type: string
    machine_key:
      type: string
    machine_port:
      type: string
    machine_user:
      type: string
    provider:
      description: The id of the cloud provider.
      enum:
      - vcloud
      - bare_metal
      - docker
      - libvirt
      - openstack
      - vsphere
      - ec2
      - rackspace
      - nephoscale
      - digitalocean
      - softlayer
      - gce
      - azure
      - azure_arm
      - linode
      - onapp
      - hostvirtual
      - vultr
      required: true
      type: string
    remove_on_error:
      type: string
    tenant_name:
      type: string
    title:
      description: The human readable title of the cloud.
      required: true
      type: string
    """
    auth_context = auth_context_from_request(request)
    cloud_tags = auth_context.check_perm("cloud", "add", None)
    owner = auth_context.owner
    params = params_from_request(request)
    # Remove spaces from start/end of string fields that are often included
    # when pasting keys, preventing thus successful connection with the
    # cloud. (Was `type(...) in [unicode, str]` — `unicode` is a NameError
    # on Python 3; `str` covers all text there.)
    for key in params.keys():
        if isinstance(params[key], str):
            params[key] = params[key].strip()

    # api_version = request.headers.get('Api-Version', 1)
    title = params.get('title', '')
    provider = params.get('provider', '')

    if not provider:
        raise RequiredParameterMissingError('provider')

    ret = add_cloud_v_2(owner, title, provider, params)

    cloud_id = ret['cloud_id']
    monitoring = ret.get('monitoring')

    cloud = Cloud.objects.get(owner=owner, id=cloud_id)

    # If insights enabled on org, set poller with half hour period.
    if auth_context.org.insights_enabled:
        cloud.ctl.set_polling_interval(1800)

    if cloud_tags:
        add_tags_to_resource(owner, cloud, cloud_tags.items())

    c_count = Cloud.objects(owner=owner, deleted=None).count()
    ret = cloud.as_dict()
    ret['index'] = c_count - 1
    if monitoring:
        ret['monitoring'] = monitoring
    return ret