Code example #1
File: base.py Project: hb407033/mist.api
    def disable(self):
        self.cloud.enabled = False
        self.cloud.save()
        # FIXME: Circular dependency.
        from mist.api.machines.models import Machine
        Machine.objects(
            cloud=self.cloud,
            missing_since=None).update(missing_since=datetime.datetime.now())
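The bulk update in `disable()` uses MongoEngine's queryset-level `update()`, which applies a single `$set` to every matching document. Below is a minimal, self-contained sketch of the same pattern with a toy model; it is not the real mist.api `Machine` model and it assumes a MongoDB instance is reachable on localhost.

import datetime

from mongoengine import Document, StringField, DateTimeField, connect

connect('mist_sketch')  # assumption: a local MongoDB is running


class ToyMachine(Document):
    # Hypothetical stand-in for mist.api's Machine model.
    cloud = StringField(required=True)
    missing_since = DateTimeField()


def mark_cloud_machines_missing(cloud_id):
    # Only touch machines not already flagged, mirroring the
    # `missing_since=None` filter used by `disable()` above.
    return ToyMachine.objects(cloud=cloud_id, missing_since=None).update(
        missing_since=datetime.datetime.now())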
Code example #2
    def add(self, fail_on_error=True, fail_on_invalid_params=True, **kwargs):
        """Add new Cloud to the database

        This is only expected to be called by the `Cloud.add` classmethod to
        create a cloud. Fields `owner` and `title` are already populated in
        `self.cloud`. The `self.cloud` model is not yet saved.

        Params:
        fail_on_error: If True, then a connection to the cloud will be
            established and if it fails, a `CloudUnavailableError` or
            `CloudUnauthorizedError` will be raised and the cloud will be
            deleted.
        fail_on_invalid_params: If True, then invalid keys in `kwargs` will
            raise an Error.

        Subclasses SHOULD NOT override or extend this method.

        If a subclass has to perform special parsing of `kwargs`, it can
        override `self._add__preparse_kwargs`.

        """
        # Transform params with extra underscores for compatibility.
        rename_kwargs(kwargs, 'api_key', 'apikey')
        rename_kwargs(kwargs, 'api_secret', 'apisecret')

        # Preparse cloud-wide arguments.
        self.cloud.dns_enabled = kwargs.pop('dns_enabled', False) is True
        self.cloud.observation_logs_enabled = True
        self.cloud.polling_interval = kwargs.pop('polling_interval', 30 * 60)

        # Cloud specific kwargs preparsing.
        try:
            self._add__preparse_kwargs(kwargs)
        except MistError as exc:
            log.error("Error while adding cloud %s: %r", self.cloud, exc)
            raise
        except Exception as exc:
            log.exception("Error while preparsing kwargs on add %s",
                          self.cloud)
            raise InternalServerError(exc=exc)

        try:
            self.update(fail_on_error=fail_on_error,
                        fail_on_invalid_params=fail_on_invalid_params,
                        **kwargs)
        except (CloudUnavailableError, CloudUnauthorizedError) as exc:
            # FIXME: Move this to top of the file once Machine model is
            # migrated.  The import statement is currently here to avoid
            # circular import issues.
            from mist.api.machines.models import Machine
            # Remove any machines created from check_connection performing a
            # list_machines.
            Machine.objects(cloud=self.cloud).delete()
            # Propagate original error.
            raise

        # Add relevant polling schedules.
        self.add_polling_schedules()
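`add()` relies on a `rename_kwargs` helper (imported elsewhere in mist.api) to accept both the `api_key`/`api_secret` and `apikey`/`apisecret` spellings. Its real implementation is not shown here; the version below is only a plausible sketch of what it does, to make the snippet easier to follow.

def rename_kwargs(kwargs, old_key, new_key):
    # Hypothetical sketch: move old_key to new_key in place, without
    # clobbering a value that is already present under new_key.
    if old_key in kwargs and new_key not in kwargs:
        kwargs[new_key] = kwargs.pop(old_key)


params = {'api_key': 'ABC', 'api_secret': 'XYZ'}
rename_kwargs(params, 'api_key', 'apikey')
rename_kwargs(params, 'api_secret', 'apisecret')
assert params == {'apikey': 'ABC', 'apisecret': 'XYZ'}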
Code example #3
    def delete(self, expire=False):
        """Delete a Cloud.

        By default the corresponding mongodb document is not actually deleted,
        but rather marked as deleted.

        :param expire: if True, the document is expired from its collection.
        """
        self.cloud.deleted = datetime.datetime.utcnow()
        self.cloud.save()
        if expire:
            # FIXME: Circular dependency.
            from mist.api.machines.models import Machine
            Machine.objects(cloud=self.cloud).delete()
            self.cloud.delete()
Code example #4
File: methods.py Project: ghoul008/mist.api
def get_load(owner, start='', stop='', step='', uuids=None):
    """Get shortterm load for all monitored machines."""
    clouds = Cloud.objects(owner=owner, deleted=None).only('id')
    machines = Machine.objects(cloud__in=clouds,
                               monitoring__hasmonitoring=True)
    if uuids:
        machines = machines.filter(id__in=uuids)

    graphite_uuids = [machine.id for machine in machines
                      if machine.monitoring.method.endswith('-graphite')]
    influx_uuids = [machine.id for machine in machines
                    if machine.monitoring.method.endswith('-influxdb')]

    graphite_data = {}
    influx_data = {}
    if graphite_uuids:
        graphite_data = graphite_get_load(owner, start=start, stop=stop,
                                          step=step, uuids=graphite_uuids)
    if influx_uuids:
        # Transform "min" and "sec" to "m" and "s", respectively.
        _start, _stop, _step = [re.sub('in|ec', repl='', string=x) for x in (
            start.strip('-'), stop.strip('-'), step)]
        influx_data = InfluxMultiLoadHandler(influx_uuids).get_stats(
            metric='system.load1',
            start=_start, stop=_stop, step=_step,
        )

    if graphite_data or influx_data:
        return dict(list(graphite_data.items()) + list(influx_data.items()))
    else:
        raise NotFoundError('No machine has monitoring enabled')
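The `re.sub('in|ec', ...)` call above shortens human-friendly durations before handing them to the InfluxDB handler. A quick standalone check of that transformation:

import re

# '5min' -> '5m', '30sec' -> '30s'; values already using 'm'/'s'/'h' pass through.
for raw, expected in [('5min', '5m'), ('30sec', '30s'), ('10s', '10s'), ('1h', '1h')]:
    assert re.sub('in|ec', '', raw) == expected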
Code example #5
File: methods.py Project: lovelife100/mist.api
def list_keys(owner):
    """List owner's keys
    :param owner:
    :return:
    """
    keys = Key.objects(owner=owner, deleted=None)
    clouds = Cloud.objects(owner=owner, deleted=None)
    key_objects = []
    # FIXME: This must be taken care of in Keys.as_dict
    for key in keys:
        key_object = {}
        # FIXME: Need to optimize this! It's potentially invoked per ssh probe.
        # Can't we expose associations directly from Machine.key_associations?
        machines = Machine.objects(cloud__in=clouds,
                                   key_associations__keypair__exact=key)
        key_object["id"] = key.id
        key_object['name'] = key.name
        key_object['owned_by'] = key.owned_by.id if key.owned_by else ''
        key_object['created_by'] = key.created_by.id if key.created_by else ''
        key_object["isDefault"] = key.default
        key_object["machines"] = transform_key_machine_associations(
            machines, key)
        key_object['tags'] = get_tags_for_resource(owner, key)
        key_objects.append(key_object)
    return key_objects
Code example #6
def set_missing():
    """Declare machines, whose cloud has been marked as deleted, as missing"""
    failed = succeeded = 0
    clouds = Cloud.objects(deleted__ne=None)

    print()
    print('Searching through %d clouds' % clouds.count())
    print()

    for c in clouds:
        try:
            print('Updating machines of', c, end=' ')
            updated = Machine.objects(
                cloud=c, missing_since=None).update(missing_since=c.deleted)
        except Exception:
            print('[ERROR]')
            traceback.print_exc()
            failed += 1
        else:
            print('[OK:%s]' % updated)
            succeeded += 1

    print()
    print('Failed:', failed)
    print('Succeeded:', succeeded)
    print()
    print('Completed %s' % ('with errors!' if failed else 'successfully!'))
    print()
Code example #7
def _get_multimachine_stats(owner, metric, start='', stop='', step='',
                            uuids=None):
    if not uuids:
        uuids = [machine.id for machine in Machine.objects(
            cloud__in=Cloud.objects(owner=owner, deleted=None),
            monitoring__hasmonitoring=True
        )]
    if not uuids:
        raise NotFoundError("No machine has monitoring enabled.")
    try:
        data = get_multi_uuid(uuids, metric, start=start, stop=stop,
                              interval_str=step)
    except Exception as exc:
        log.error("Error getting %s: %r", metric, exc)
        raise ServiceUnavailableError()
    ret = {}
    for item in data:
        target = item['target'].split('.')
        if len(target) > 1:
            uuid = target[1]
        else:
            uuid = target[0]
        item['name'] = uuid
        ret[uuid] = item
    return ret
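`_get_multimachine_stats` pulls the machine UUID out of each Graphite `target` by splitting on dots and taking the second element when there is one. The helper below restates that logic on its own; the sample target strings are hypothetical, since the actual series naming depends on the Graphite setup.

def uuid_from_target(target):
    parts = target.split('.')
    return parts[1] if len(parts) > 1 else parts[0]


# Hypothetical series names, for illustration only.
assert uuid_from_target('bucky.1234abcd.load.shortterm') == '1234abcd'
assert uuid_from_target('1234abcd') == '1234abcd'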
Code example #8
File: controllers.py Project: dan-sullivan/mist.api
    def _list_vnfs(self, host=None):
        from mist.api.machines.models import Machine
        from mist.api.clouds.models import CloudLocation
        if not host:
            hosts = Machine.objects(
                cloud=self.cloud, parent=None, missing_since=None)
        else:
            hosts = [host]
        vnfs = []
        for host in hosts:  # TODO: asyncio
            driver = self.cloud.ctl.compute._get_host_driver(host)
            host_vnfs = driver.ex_list_vnfs()
            try:
                location = CloudLocation.objects.get(cloud=self.cloud,
                                                     name=host.name)
            except CloudLocation.DoesNotExist:
                host_name = host.name.replace('.', '-')
                try:
                    location = CloudLocation.objects.get(cloud=self.cloud,
                                                         external_id=host_name)
                except CloudLocation.DoesNotExist:
                    location = None
            except Exception as e:
                log.error(e)
                location = None
            for vnf in host_vnfs:
                vnf['location'] = location.id if location else None
            vnfs += host_vnfs
        return vnfs
Code example #9
def remove_string_field_type():
    c = MongoClient(MONGO_URI)
    db = c.get_database('mist2')
    db_machines = db['machines']

    machines = Machine.objects().only('id')

    print()
    print('Removing size field from %d migrated machines' %
          db_machines.count())
    print()

    failed = migrated = 0

    for machine in machines:
        try:
            print('Updating machine %s ...' % machine['id'], end=' ')
            db_machines.update_one({'_id': machine['id']},
                                   {'$unset': {
                                       'size': ''
                                   }})
        except Exception:
            traceback.print_exc()
            failed += 1
            continue
        else:
            print('OK')
            migrated += 1
            print('migrated: %d' % migrated)

    c.close()
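The `$unset` update used above removes the `size` field from each matched document without touching the rest. A minimal standalone illustration of the same operator, assuming a MongoDB instance on localhost and a throwaway `mist_sketch` database:

from pymongo import MongoClient

client = MongoClient('localhost', 27017)  # assumption: local MongoDB
coll = client['mist_sketch']['machines']

coll.insert_one({'_id': 'm-1', 'name': 'example', 'size': 'old-string-size'})
coll.update_one({'_id': 'm-1'}, {'$unset': {'size': ''}})
assert 'size' not in coll.find_one({'_id': 'm-1'})

client.close()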
Code example #10
File: views.py Project: dzaporozhets/mist-api
def disassociate_key(request):
    """
    Disassociate a key from a machine
    Disassociates a key from a machine. If host is set it will also attempt to
    actually remove it from the machine.
    READ permission required on cloud.
    DISASSOCIATE_KEY permission required on machine.
    ---
    key:
      in: path
      required: true
      type: string
    machine:
      in: path
      required: true
      type: string
    """
    key_id = request.matchdict['key']
    cloud_id = request.matchdict.get('cloud')
    auth_context = auth_context_from_request(request)

    if cloud_id:
        # This is deprecated; keep it for backwards compatibility.
        machine_id = request.matchdict['machine']
        try:
            Cloud.objects.get(owner=auth_context.owner,
                              id=cloud_id,
                              deleted=None)
        except Cloud.DoesNotExist:
            raise NotFoundError('Cloud does not exist')

        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    auth_context.check_perm("machine", "disassociate_key", machine.id)

    key = Key.objects.get(owner=auth_context.owner, id=key_id, deleted=None)
    key.ctl.disassociate(machine)
    clouds = Cloud.objects(owner=auth_context.owner, deleted=None)
    machines = Machine.objects(cloud__in=clouds,
                               key_associations__keypair__exact=key)

    assoc_machines = transform_key_machine_associations(machines, key)
    return assoc_machines
Code example #11
def _gen_config():
    """Generate traefik config from scratch for all machines"""
    cfg = {'frontends': {}, 'backends': {}}
    for machine in Machine.objects(monitoring__hasmonitoring=True, ):
        frontend, backend = _gen_machine_config(machine)
        cfg['frontends'][machine.id] = frontend
        cfg['backends'][machine.id] = backend
    return cfg
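`_gen_config` expects a `_gen_machine_config(machine)` helper that returns one (frontend, backend) pair per monitored machine. The stand-in below is purely illustrative: it only shows the dict shapes the loop consumes, while the real mist.api helper builds actual traefik routing rules for the machine's monitoring endpoint.

from collections import namedtuple

FakeMachine = namedtuple('FakeMachine', 'id')  # hypothetical minimal machine


def _gen_machine_config(machine):
    # Illustrative shapes only; not the real implementation.
    frontend = {'backend': machine.id, 'routes': {}}
    backend = {'servers': {}}
    return frontend, backend


frontend, backend = _gen_machine_config(FakeMachine(id='machine-uuid'))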
Code example #12
def get_load(owner, start="", stop="", step="", uuids=None):
    """Get shortterm load for all monitored machines."""
    clouds = Cloud.objects(owner=owner, deleted=None).only("id")
    machines = Machine.objects(cloud__in=clouds,
                               monitoring__hasmonitoring=True)
    if uuids:
        machines = machines.filter(id__in=uuids)

    graphite_uuids = [
        machine.id for machine in machines
        if machine.monitoring.method.endswith("-graphite")
    ]
    influx_uuids = [
        machine.id for machine in machines
        if machine.monitoring.method.endswith("-influxdb")
    ]
    fdb_uuids = [
        machine.id for machine in machines
        if machine.monitoring.method.endswith("-tsfdb")
    ]

    graphite_data = {}
    influx_data = {}
    fdb_data = {}

    if graphite_uuids:
        graphite_data = graphite_get_load(owner,
                                          start=start,
                                          stop=stop,
                                          step=step,
                                          uuids=graphite_uuids)
    if influx_uuids:

        # Transform "min" and "sec" to "m" and "s", respectively.
        _start, _stop, _step = [
            re.sub("in|ec", repl="", string=x)
            for x in (start.strip("-"), stop.strip("-"), step)
        ]
        metric = "system.load1"
        if step:
            metric = "MEAN(%s)" % metric
        influx_data = InfluxMultiLoadHandler(influx_uuids).get_stats(
            metric=metric,
            start=_start,
            stop=_stop,
            step=_step,
        )

    if fdb_uuids:
        fdb_data = fdb_get_load(owner, fdb_uuids, start, stop, step)

    if graphite_data or influx_data or fdb_data:
        return dict(
            list(graphite_data.items()) + list(influx_data.items()) +
            list(fdb_data.items()))
    else:
        raise NotFoundError("No machine has monitoring enabled")
Code example #13
    def _list_networks__fetch_networks(self):
        from mist.api.machines.models import Machine
        hosts = Machine.objects(cloud=self.cloud,
                                parent=None,
                                missing_since=None)
        loop = asyncio.get_event_loop()
        all_nets = loop.run_until_complete(
            self.list_networks_all_hosts(hosts, loop))
        return [net for host_nets in all_nets for net in host_nets]
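`_list_networks__fetch_networks` delegates the per-host work to `list_networks_all_hosts`, which is not shown here. A rough, self-contained sketch of the concurrent fan-out pattern it presumably uses, with a dummy coroutine standing in for the real per-host libvirt call:

import asyncio


async def list_networks_one_host(host):
    # Stand-in for the real per-host network listing.
    await asyncio.sleep(0)
    return ['net-a@%s' % host, 'net-b@%s' % host]


async def list_networks_all_hosts(hosts):
    return await asyncio.gather(*(list_networks_one_host(h) for h in hosts))


all_nets = asyncio.run(list_networks_all_hosts(['host-1', 'host-2']))
# Flatten the per-host lists, exactly as the controller method does.
nets = [net for host_nets in all_nets for net in host_nets]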
Code example #14
def push_metering_info(owner_id):
    """Collect and push new metering data to InfluxDB"""
    now = datetime.datetime.utcnow()
    metering = {}

    # Base InfluxDB URL.
    url = config.INFLUX['host']

    # Create database for storing metering data, if missing.
    db = requests.post('%s/query?q=CREATE DATABASE metering' % url)
    if not db.ok:
        raise Exception(db.content)

    # CPUs
    for machine in Machine.objects(owner=owner_id, last_seen__gte=now.date()):
        metering.setdefault(
            owner_id,
            dict.fromkeys(('cores', 'checks', 'datapoints'), 0)
        )
        try:
            if _skip_metering(machine):
                continue
            metering[owner_id]['cores'] += machine.cores or 0
        except Exception as exc:
            log.error('Failed upon cores metering of %s: %r', machine.id, exc)

    # Checks
    for rule in Rule.objects(owner_id=owner_id):
        try:
            metering[rule.owner_id]['checks'] += rule.total_check_count
        except Exception as exc:
            log.error('Failed upon checks metering of %s: %r', rule.id, exc)

    # Datapoints
    try:
        q = "SELECT MAX(counter) FROM datapoints "
        q += "WHERE owner = '%s' AND time >= now() - 30m" % owner_id
        q += " GROUP BY machine"
        result = requests.get('%s/query?db=metering&q=%s' % (url, q)).json()
        result = result['results'][0]['series']
        for series in result:
            metering[owner_id]['datapoints'] += series['values'][0][-1]
    except Exception as exc:
        log.error('Failed upon datapoints metering: %r', exc)

    # Assemble points.
    points = []
    for owner, counters in metering.items():
        value = ','.join(['%s=%s' % (k, v) for k, v in counters.items()])
        point = 'usage,owner=%s %s' % (owner, value)
        points.append(point)

    # Write metering data.
    data = '\n'.join(points)
    write = requests.post('%s/write?db=metering&precision=s' % url, data=data)
    if not write.ok:
        log.error('Failed to write metering data: %s', write.text)
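The points assembled at the end of `push_metering_info` follow InfluxDB's line protocol, i.e. `measurement,tag=value field1=v1,field2=v2`. A standalone check of the string being built (the owner id is made up):

counters = {'cores': 4, 'checks': 2, 'datapoints': 1200}
owner = 'example-owner-id'  # hypothetical owner id

value = ','.join('%s=%s' % (k, v) for k, v in counters.items())
point = 'usage,owner=%s %s' % (owner, value)
assert point == 'usage,owner=example-owner-id cores=4,checks=2,datapoints=1200'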
Code example #15
def check_monitoring(owner):
    """Return the monitored machines, enabled metrics, and user details."""

    custom_metrics = owner.get_metrics_dict()
    for metric in custom_metrics.values():
        metric['machines'] = []

    monitored_machines = []
    monitored_machines_2 = {}

    clouds = Cloud.objects(owner=owner, deleted=None)
    machines = Machine.objects(cloud__in=clouds,
                               monitoring__hasmonitoring=True)

    for machine in machines:
        monitored_machines.append([machine.cloud.id, machine.machine_id])
        try:
            commands = machine.monitoring.get_commands()
        except Exception as exc:
            log.error(exc)
            commands = {}
        monitored_machines_2[machine.id] = {
            'cloud_id': machine.cloud.id,
            'machine_id': machine.machine_id,
            'installation_status':
            machine.monitoring.installation_status.as_dict(),
            'commands': commands,
        }
        for metric_id in machine.monitoring.metrics:
            if metric_id in custom_metrics:
                metric_machines = custom_metrics[metric_id]['machines']
                metric_machines.append((machine.cloud.id, machine.machine_id))

    ret = {
        'machines': monitored_machines,
        'monitored_machines': monitored_machines_2,
        'rules': owner.get_rules_dict(),
        'alerts_email': owner.alerts_email,
        'custom_metrics': custom_metrics,
    }
    if config.DEFAULT_MONITORING_METHOD.endswith('graphite'):
        ret.update({
            # Keep for backwards compatibility
            'builtin_metrics': config.GRAPHITE_BUILTIN_METRICS,
            'builtin_metrics_graphite': config.GRAPHITE_BUILTIN_METRICS,
            'builtin_metrics_influxdb': config.INFLUXDB_BUILTIN_METRICS,
        })
    elif config.DEFAULT_MONITORING_METHOD.endswith('influxdb'):
        ret.update({
            # Keep for backwards compatibility
            'builtin_metrics': config.INFLUXDB_BUILTIN_METRICS,
            'builtin_metrics_influxdb': config.INFLUXDB_BUILTIN_METRICS,
        })
    for key in ('rules', 'builtin_metrics', 'custom_metrics'):
        for id in ret[key]:
            ret[key][id]['id'] = id
    return ret
Code example #16
File: base.py Project: dan-sullivan/mist.api
    def delete(self, expire=False):
        """Delete a Cloud.

        By default the corresponding mongodb document is not actually deleted,
        but rather marked as deleted.

        :param expire: if True, the document is expired from its collection.

        """
        if expire:
            # FIXME: Set reverse_delete_rule=me.CASCADE?
            from mist.api.machines.models import Machine
            Machine.objects(cloud=self.cloud).delete()
            self.cloud.delete()
        else:
            from mist.api.tasks import set_missing_since
            self.cloud.deleted = datetime.datetime.utcnow()
            self.cloud.save()
            set_missing_since.apply_async((self.cloud.id, ), countdown=30)
Code example #17
File: views.py Project: lovelife100/mist.api
def add_key(request):
    """
    Tags: keys
    ---
    Adds key.
    ADD permission required on key.
    ---
    name:
      description: The key's name
      required: true
      type: string
    priv:
      description: The private key
      required: true
      type: string
    certificate:
      description: The signed public key, when using signed ssh keys
      type: string
    """
    params = params_from_request(request)
    key_name = params.pop('name', None)
    private_key = params.get('priv', None)
    certificate = params.get('certificate', None)
    auth_context = auth_context_from_request(request)
    key_tags = auth_context.check_perm("key", "add", None)

    if not key_name:
        raise BadRequestError("Key name is not provided")
    if not private_key:
        raise RequiredParameterMissingError("Private key is not provided")

    if certificate:
        key = SignedSSHKey.add(auth_context.owner, key_name, **params)
    else:
        key = SSHKey.add(auth_context.owner, key_name, **params)

    # Set ownership.
    key.assign_to(auth_context.user)

    if key_tags:
        add_tags_to_resource(auth_context.owner, key, key_tags.items())
    # Since it's a new key, its machines list should be empty.

    clouds = Cloud.objects(owner=auth_context.owner, deleted=None)
    machines = Machine.objects(cloud__in=clouds,
                               key_associations__keypair__exact=key)

    assoc_machines = transform_key_machine_associations(machines, key)

    return {
        'id': key.id,
        'name': key.name,
        'machines': assoc_machines,
        'isDefault': key.default
    }
Code example #18
def get_load(request):
    """
    Tags: monitoring
    ---
    Request load data for all monitored machines
    ---
    start:
      in: query
      type: string
      default: now
      required: false
      description: time (e.g. '10s') since when to fetch stats
    stop:
      in: query
      type: string
      required: false
      description: time until when to fetch stats
    step:
      in: query
      type: string
      required: false
      description: step to fetch stats, used in aggregations
    request_id:
      in: query
      type: string
      required: false

    """
    auth_context = auth_context_from_request(request)
    cloud_ids = [
        cloud['id'] for cloud in filter_list_clouds(auth_context)
        if cloud['enabled']
    ]
    uuids = [
        machine.id for machine in Machine.objects(
            cloud__in=cloud_ids,
            monitoring__hasmonitoring=True,
        ).only('id')
    ]
    if not auth_context.is_owner():
        allowed_uuids = auth_context.get_allowed_resources(rtype='machines')
        uuids = set(uuids) & set(allowed_uuids)

    params = params_from_request(request)
    start = params.get('start', '')
    stop = params.get('stop', '')
    step = params.get('step', '')
    data = mist.api.monitoring.methods.get_load(auth_context.owner,
                                                start=start,
                                                stop=stop,
                                                step=step,
                                                uuids=uuids)
    data['request_id'] = params.get('request_id')
    return data
Code example #19
File: traefik.py Project: lovelife100/mist.api
def _gen_config():
    """Generate traefik config from scratch for all machines"""
    cfg = {'frontends': {}, 'backends': {}}
    for machine in Machine.objects(
            monitoring__hasmonitoring=True,
            monitoring__method__in=['telegraf-graphite', 'telegraf-influxdb'],
    ):
        frontend, backend = _gen_machine_config(machine)
        cfg['frontends'][machine.id] = frontend
        cfg['backends'][machine.id] = backend
    return cfg
Code example #20
File: base.py Project: hb407033/mist.api
    def list_cached_machines(self, timedelta=datetime.timedelta(days=1)):
        """Return list of machines from database

        Only returns machines that existed the last time we checked and that
        have been seen within the last `timedelta`.

        """
        return Machine.objects(
            cloud=self.cloud,
            missing_since=None,
            last_seen__gt=datetime.datetime.utcnow() - timedelta,
        )
Code example #21
def migrate_libvirt_clouds():
    c = MongoClient(MONGO_URI)
    db = c.get_database('mist2')
    db_clouds = db['clouds']

    clouds = LibvirtCloud.objects()

    failed = migrated = skipped = 0

    for cloud in clouds:
        try:
            machines = Machine.objects(cloud=cloud, missing_since=None)
            images_location = db_clouds.find_one(
                {'_id': cloud['id']}).get('images_location')
            if not images_location:
                skipped += 1
                continue
            print('Updating cloud ' + cloud['id'])
            for machine in machines:
                if machine.extra.get('tags', {}).get('type') == 'hypervisor':
                    updated_extra = {
                        'images_location': images_location,
                    }
                    machine.extra.update(updated_extra)
                    machine.save()
                    break

            db_clouds.update_one(
                {'_id': cloud['id']},
                {'$unset': {'host': '',
                            'username': '',
                            'port': '',
                            'key': '',
                            'images_location': ''}}
            )
            cloud.ctl.compute.list_machines()
        except Exception:
            traceback.print_exc()
            failed += 1
            continue
        else:
            print('OK')
            migrated += 1

    print('Clouds migrated: %d' % migrated)
    if skipped:
        print('Skipped: %d' % skipped)

    c.close()
Code example #22
File: methods.py Project: ghoul008/mist.api
def disable_monitoring_cloud(owner, cloud_id, no_ssh=False):
    """Disable monitoring for all machines of the specified Cloud."""
    try:
        cloud = Cloud.objects.get(owner=owner, id=cloud_id, deleted=None)
        machines = Machine.objects(
            cloud=cloud, monitoring__hasmonitoring=True).only('machine_id')
    except me.DoesNotExist:
        raise NotFoundError("Cloud doesn't exist")
    for machine in machines:
        try:
            disable_monitoring(owner, cloud_id, machine.machine_id,
                               no_ssh=no_ssh)
        except Exception as exc:
            log.error("Error while disabling monitoring for all machines of "
                      "Cloud %s (%s): %s", cloud.id, owner.id, exc)
Code example #23
    def list_clouds(self):
        if config.ACTIVATE_POLLER:
            self.update_poller()
        self.send('list_clouds', filter_list_clouds(self.auth_context))
        clouds = Cloud.objects(owner=self.owner, enabled=True, deleted=None)
        log.info(clouds)
        periodic_tasks = []
        if not config.ACTIVATE_POLLER:
            periodic_tasks.append(('list_machines', tasks.ListMachines()))
        else:
            for cloud in clouds:
                after = datetime.datetime.utcnow() - datetime.timedelta(days=1)
                machines = Machine.objects(cloud=cloud, missing_since=None,
                                           last_seen__gt=after)
                machines = filter_list_machines(
                    self.auth_context, cloud_id=cloud.id,
                    machines=[machine.as_dict() for machine in machines]
                )
                if machines:
                    log.info("Emitting list_machines from poller's cache.")
                    self.send('list_machines',
                              {'cloud_id': cloud.id, 'machines': machines})

        periodic_tasks.extend([('list_images', tasks.ListImages()),
                               ('list_sizes', tasks.ListSizes()),
                               ('list_networks', tasks.ListNetworks()),
                               ('list_zones', tasks.ListZones()),
                               ('list_locations', tasks.ListLocations()),
                               ('list_projects', tasks.ListProjects())])
        for key, task in periodic_tasks:
            for cloud in clouds:
                cached = task.smart_delay(self.owner.id, cloud.id)
                if cached is not None:
                    log.info("Emitting %s from cache", key)
                    if key == 'list_machines':
                        cached['machines'] = filter_list_machines(
                            self.auth_context, **cached
                        )
                        if cached['machines'] is None:
                            continue
                    self.send(key, cached)
Code example #24
File: methods.py Project: dzaporozhets/mist-api
def list_keys(owner):
    """List owner's keys
    :param owner:
    :return:
    """
    keys = Key.objects(owner=owner, deleted=None)
    clouds = Cloud.objects(owner=owner, deleted=None)
    key_objects = []
    # FIXME: This must be taken care of in Keys.as_dict
    for key in keys:
        key_object = {}
        machines = Machine.objects(cloud__in=clouds,
                                   key_associations__keypair__exact=key)
        key_object["id"] = key.id
        key_object['name'] = key.name
        key_object["isDefault"] = key.default
        key_object["machines"] = transform_key_machine_associations(
            machines, key)
        key_object['tags'] = get_tags_for_resource(owner, key)
        key_objects.append(key_object)
    return key_objects
Code example #25
    def add(self, fail_on_error=True, fail_on_invalid_params=True, **kwargs):
        """This is a hack to associate a key with the VM hosting this cloud"""
        super(LibvirtMainController, self).add(
            fail_on_error=fail_on_error,
            fail_on_invalid_params=fail_on_invalid_params,
            add=True, **kwargs
        )
        # FIXME: Don't use self.cloud.host as machine_id, this prevents us from
        # changing the cloud's host.
        # FIXME: Add type field to differentiate between actual vm's and the
        # host.

        try:
            machine = Machine.objects.get(cloud=self.cloud,
                                          machine_id=self.cloud.host)
        except me.DoesNotExist:
            machine = Machine.objects(cloud=self.cloud,
                                      machine_id=self.cloud.host).save()
        if self.cloud.key:
            machine.ctl.associate_key(self.cloud.key,
                                      username=self.cloud.username,
                                      port=self.cloud.port)
Code example #26
File: inventory.py Project: dzaporozhets/mist-api
    def load(self, machines=None):
        self.hosts = {}
        self.keys = {}
        if not machines:
            clouds = Cloud.objects(owner=self.owner, deleted=None)
            machines = [(machine.cloud.id, machine.machine_id)
                        for machine in Machine.objects(cloud__in=clouds)]
        for bid, mid in machines:
            try:
                name, ip_addr = self.find_machine_details(bid, mid)
                key_id, ssh_user, port = self.find_ssh_settings(bid, mid)
            except Exception as exc:
                print(exc)
                continue
            ip_addr, port = dnat(self.owner, ip_addr, port)
            if key_id not in self.keys:
                keypair = SSHKey.objects.get(owner=self.owner,
                                             name=key_id,
                                             deleted=None)
                self.keys[key_id] = keypair.private
                if isinstance(keypair, SignedSSHKey):
                    # If it's a signed ssh key, provide the key appending a
                    # -cert.pub suffix to the name, since this is how ssh will
                    # include it as an identity file
                    self.keys['%s-cert.pub' % key_id] = keypair.certificate
                    # pub key also needed for openssh 7.2
                    self.keys['%s.pub' % key_id] = keypair.public
            if name in self.hosts:
                num = 2
                while ('%s-%d' % (name, num)) in self.hosts:
                    num += 1
                name = '%s-%d' % (name, num)

            self.hosts[name] = {
                'ansible_ssh_host': ip_addr,
                'ansible_ssh_port': port,
                'ansible_ssh_user': ssh_user,
                'ansible_ssh_private_key_file': 'id_rsa/%s' % key_id,
            }
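The inventory loader de-duplicates host names by appending `-2`, `-3`, and so on until a free name is found. The same logic, extracted into a small standalone helper for clarity:

def dedup_name(name, existing):
    # Return `name` unchanged, or `name-2`, `name-3`, ... if already taken.
    if name not in existing:
        return name
    num = 2
    while '%s-%d' % (name, num) in existing:
        num += 1
    return '%s-%d' % (name, num)


hosts = {'web': {}, 'web-2': {}}
assert dedup_name('web', hosts) == 'web-3'
assert dedup_name('db', hosts) == 'db'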
Code example #27
File: views.py Project: hb407033/mist.api
def associate_key(request):
    """
    Associate a key to a machine
    Associates a key with a machine. If host is set it will also attempt to
    actually deploy it to the machine. To do that it requires another key
    (existing_key) that can connect to the machine.
    READ permission required on cloud.
    READ_PRIVATE permission required on key.
    ASSOCIATE_KEY permission required on machine.
    ---
    machine:
      in: path
      required: true
      type: string
    key:
      in: path
      required: true
      type: string
    port:
      default: 22
      type: integer
    user:
      description: The ssh user
      type: string
    """
    key_id = request.matchdict['key']
    cloud_id = request.matchdict.get('cloud')

    params = params_from_request(request)
    ssh_user = params.get('user', None)
    try:
        ssh_port = int(request.json_body.get('port', 22))
    except:
        ssh_port = 22

    auth_context = auth_context_from_request(request)
    try:
        key = Key.objects.get(owner=auth_context.owner,
                              id=key_id,
                              deleted=None)
    except Key.DoesNotExist:
        raise NotFoundError('Key id does not exist')
    auth_context.check_perm('key', 'read_private', key.id)

    if cloud_id:
        # This is deprecated; keep it for backwards compatibility.
        machine_id = request.matchdict['machine_uuid']
        try:
            Cloud.objects.get(owner=auth_context.owner,
                              id=cloud_id,
                              deleted=None)
        except Cloud.DoesNotExist:
            raise NotFoundError('Cloud does not exist')

        auth_context.check_perm("cloud", "read", cloud_id)
        try:
            machine = Machine.objects.get(cloud=cloud_id,
                                          machine_id=machine_id,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_uuid'] = machine.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_id)
    else:
        machine_uuid = request.matchdict['machine_uuid']
        try:
            machine = Machine.objects.get(id=machine_uuid,
                                          state__ne='terminated')
            # used by logging_view_decorator
            request.environ['machine_id'] = machine.machine_id
            request.environ['cloud_id'] = machine.cloud.id
        except Machine.DoesNotExist:
            raise NotFoundError("Machine %s doesn't exist" % machine_uuid)

        cloud_id = machine.cloud.id
        auth_context.check_perm("cloud", "read", cloud_id)

    auth_context.check_perm("machine", "associate_key", machine.id)

    key.ctl.associate(machine, username=ssh_user, port=ssh_port)
    clouds = Cloud.objects(owner=auth_context.owner, deleted=None)
    machines = Machine.objects(cloud__in=clouds,
                               key_associations__keypair__exact=key)

    assoc_machines = transform_key_machine_associations(machines, key)
    return assoc_machines
Code example #28
File: models.py Project: lovelife100/mist.api
    def count_mon_machines(self):
        from mist.api.clouds.models import Cloud
        from mist.api.machines.models import Machine
        clouds = Cloud.objects(owner=self, deleted=None)
        return Machine.objects(cloud__in=clouds,
                               monitoring__hasmonitoring=True).count()
Code example #29
    def list_machines(self):
        """Return list of machines for cloud

        A list of nodes is fetched from libcloud, the data is processed and
        stored on machine models, and a list of machine models is returned.

        Subclasses SHOULD NOT override or extend this method.

        There are instead a number of methods that are called from this method,
        to allow subclasses to modify the data according to the specifics of
        their cloud type. These methods currently are:

            `self._list_machines__fetch_machines`
            `self._list_machines__machine_actions`
            `self._list_machines__postparse_machine`
            `self._list_machines__cost_machine`
            `self._list_machines__fetch_generic_machines`

        Subclasses that require special handling should override these (by
        default dummy) methods.

        """

        # Try to query list of machines from provider API.
        try:
            nodes = self._list_machines__fetch_machines()
            log.info("List nodes returned %d results for %s.",
                     len(nodes), self.cloud)
        except InvalidCredsError as exc:
            log.warning("Invalid creds on running list_nodes on %s: %s",
                        self.cloud, exc)
            raise CloudUnauthorizedError(msg=str(exc))
        except ssl.SSLError as exc:
            log.error("SSLError on running list_nodes on %s: %s",
                      self.cloud, exc)
            raise SSLError(exc=exc)
        except Exception as exc:
            log.exception("Error while running list_nodes on %s", self.cloud)
            raise CloudUnavailableError(exc=exc)

        machines = []
        now = datetime.datetime.utcnow()

        # Process each machine in returned list.
        # Store previously unseen machines separately.
        new_machines = []
        for node in nodes:

            # Fetch machine mongoengine model from db, or initialize one.
            try:
                machine = Machine.objects.get(cloud=self.cloud,
                                              machine_id=node.id)
            except Machine.DoesNotExist:
                machine = Machine(cloud=self.cloud, machine_id=node.id).save()
                new_machines.append(machine)

            # Update machine_model's last_seen fields.
            machine.last_seen = now
            machine.missing_since = None

            # Get misc libcloud metadata.
            image_id = str(node.image or node.extra.get('imageId') or
                           node.extra.get('image_id') or
                           node.extra.get('image') or '')
            size = (node.size or node.extra.get('flavorId') or
                    node.extra.get('instancetype'))

            machine.name = node.name
            machine.image_id = image_id
            machine.size = size
            machine.state = config.STATES[node.state]
            machine.private_ips = node.private_ips
            machine.public_ips = node.public_ips

            # Set machine extra dict.
            # Make sure we don't hit any surprises when we try to JSON-encode
            # it later on in the HTTP response.
            extra = self._list_machines__get_machine_extra(machine, node)

            for key, val in extra.items():
                try:
                    json.dumps(val)
                except TypeError:
                    extra[key] = str(val)
            machine.extra = extra

            # Set machine hostname
            if machine.extra.get('dns_name'):
                machine.hostname = machine.extra['dns_name']
            else:
                ips = machine.public_ips + machine.private_ips
                if not ips:
                    ips = []
                for ip in ips:
                    if ip and ':' not in ip:
                        machine.hostname = ip
                        break

            # Get machine tags from db
            tags = {tag.key: tag.value for tag in Tag.objects(
                owner=self.cloud.owner, resource=machine,
            ).only('key', 'value')}

            # Get machine creation date.
            try:
                created = self._list_machines__machine_creation_date(machine,
                                                                     node)
                if created:
                    machine.created = get_datetime(created)
            except Exception as exc:
                log.exception("Error finding creation date for %s in %s.",
                              self.cloud, machine)
            # TODO: Consider if we should fall back to using current date.
            # if not machine_model.created:
            #     machine_model.created = datetime.datetime.utcnow()

            # Update with available machine actions.
            try:
                self._list_machines__machine_actions(machine, node)
            except Exception as exc:
                log.exception("Error while finding machine actions "
                              "for machine %s:%s for %s",
                              machine.id, node.name, self.cloud)

            # Apply any cloud/provider specific post processing.
            try:
                self._list_machines__postparse_machine(machine, node)
            except Exception as exc:
                log.exception("Error while post parsing machine %s:%s for %s",
                              machine.id, node.name, self.cloud)

            # Apply any cloud/provider cost reporting.
            try:
                def parse_num(num):
                    try:
                        return float(num or 0)
                    except (ValueError, TypeError):
                        log.warning("Can't parse %r as float.", num)
                        return 0

                month_days = calendar.monthrange(now.year, now.month)[1]

                cph = parse_num(tags.get('cost_per_hour'))
                cpm = parse_num(tags.get('cost_per_month'))
                if not (cph or cpm) or cph > 100 or cpm > 100 * 24 * 31:
                    cph, cpm = map(parse_num,
                                   self._list_machines__cost_machine(machine,
                                                                     node))
                if not cph:
                    cph = float(cpm) / month_days / 24
                elif not cpm:
                    cpm = cph * 24 * month_days
                machine.cost.hourly = cph
                machine.cost.monthly = cpm

            except Exception as exc:
                log.exception("Error while calculating cost "
                              "for machine %s:%s for %s",
                              machine.id, node.name, self.cloud)
            if node.state.lower() == 'terminated':
                machine.cost.hourly = 0
                machine.cost.monthly = 0

            # Save all changes to machine model on the database.
            try:
                machine.save()
            except me.ValidationError as exc:
                log.error("Error adding %s: %s", machine.name, exc.to_dict())
                raise BadRequestError({"msg": exc.message,
                                       "errors": exc.to_dict()})
            except me.NotUniqueError as exc:
                log.error("Machine %s not unique error: %s", machine.name, exc)
                raise ConflictError("Machine with this name already exists")

            machines.append(machine)

        # Append generic-type machines, which aren't handled by libcloud.
        for machine in self._list_machines__fetch_generic_machines():
            machine.last_seen = now
            machine.missing_since = None
            machine.state = config.STATES[NodeState.UNKNOWN]
            for action in ('start', 'stop', 'reboot', 'destroy', 'rename',
                           'resume', 'suspend', 'undefine'):
                setattr(machine.actions, action, False)
            machine.actions.tag = True
            # allow reboot action for bare metal with key associated
            if machine.key_associations:
                machine.actions.reboot = True
            machine.save()
            machines.append(machine)

        # Set missing_since on machine models that we did not see just now.
        Machine.objects(cloud=self.cloud,
                        id__nin=[m.id for m in machines],
                        missing_since=None).update(missing_since=now)

        # Update RBAC Mappings given the list of nodes seen for the first time.
        self.cloud.owner.mapper.update(new_machines)

        # Update machine counts on cloud and org.
        # FIXME: resolve circular import issues
        from mist.api.clouds.models import Cloud
        self.cloud.machine_count = len(machines)
        self.cloud.save()
        self.cloud.owner.total_machine_count = sum(
            cloud.machine_count for cloud in Cloud.objects(
                owner=self.cloud.owner, deleted=None
            ).only('machine_count')
        )
        self.cloud.owner.save()

        # Close libcloud connection
        try:
            self.disconnect()
        except Exception as exc:
            log.warning("Error while closing connection: %r", exc)

        return machines
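The cost block in `list_machines` converts between hourly and monthly prices using the actual number of days in the current month, in both directions. A standalone check of that arithmetic:

import calendar
import datetime

now = datetime.datetime.utcnow()
month_days = calendar.monthrange(now.year, now.month)[1]

cph = 0.05                    # cost per hour
cpm = cph * 24 * month_days   # derived cost per month, as in the code above
# Converting back the way the code does when only cpm is known:
assert abs(cpm / month_days / 24 - cph) < 1e-9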
Code example #30
File: models.py Project: lovelife100/mist.api
    def enabled(self):
        return bool(
            Machine.objects(id=self.machine_id, missing_since=None).count())