Example 1
    def list_networks(self, persist=True):
        """Return list of networks for cloud

        A list of networks is fetched from libcloud, data is processed, stored
        on network models, and a list of network models is returned.

        Subclasses SHOULD NOT override or extend this method.

        This method wraps `_list_networks` which contains the core
        implementation.

        """
        task_key = 'cloud:list_networks:%s' % self.cloud.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        first_run = not task.last_success

        async def _list_subnets_async(networks):
            loop = asyncio.get_event_loop()
            subnets = [
                loop.run_in_executor(None, network.ctl.list_subnets)
                for network in networks
            ]
            return await asyncio.gather(*subnets)

        with task.task_runner(persist=persist):
            # Get cached networks as dict
            cached_networks = {
                '%s-%s' % (n.id, n.network_id): n.as_dict()
                for n in self.list_cached_networks()
            }
            networks = self._list_networks()
            loop = asyncio.get_event_loop()
            loop.run_until_complete(_list_subnets_async(networks))

        # Build a dict of the new networks, keyed like the cached ones.
        new_networks = {
            '%s-%s' % (n.id, n.network_id): n.as_dict()
            for n in networks
        }
        if cached_networks or new_networks:
            # Publish patches to rabbitmq.
            patch = jsonpatch.JsonPatch.from_diff(cached_networks,
                                                  new_networks).patch
            if patch:
                if not first_run and self.cloud.observation_logs_enabled:
                    from mist.api.logs.methods import log_observations
                    log_observations(self.cloud.owner.id, self.cloud.id,
                                     'network', patch, cached_networks,
                                     new_networks)
                if amqp_owner_listening(self.cloud.owner.id):
                    amqp_publish_user(self.cloud.owner.id,
                                      routing_key='patch_networks',
                                      data={
                                          'cloud_id': self.cloud.id,
                                          'patch': patch
                                      })
        return networks
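
The `_list_subnets_async` helper above fans the blocking `list_subnets` calls out to a thread pool and awaits them together. A minimal self-contained sketch of the same pattern (the `fetch` stand-in and its names are made up for illustration):

import asyncio

def fetch(name):
    # Stand-in for a blocking call such as network.ctl.list_subnets().
    return 'subnets-of-%s' % name

async def fan_out(names):
    loop = asyncio.get_event_loop()
    # Schedule each blocking call on the default thread-pool executor...
    futures = [loop.run_in_executor(None, fetch, name) for name in names]
    # ...and wait for all of them to finish concurrently.
    return await asyncio.gather(*futures)

loop = asyncio.get_event_loop()
print(loop.run_until_complete(fan_out(['net-1', 'net-2', 'net-3'])))
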
Example 2
    def list_networks(self, persist=True):
        """Return list of networks for cloud

        A list of networks is fetched from libcloud, data is processed, stored
        on network models, and a list of network models is returned.

        Subclasses SHOULD NOT override or extend this method.

        This method wraps `_list_networks` which contains the core
        implementation.

        """
        task_key = 'cloud:list_networks:%s' % self.cloud.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        with task.task_runner(persist=persist):
            cached_networks = {
                '%s' % n.id: n.as_dict()
                for n in self.list_cached_networks()
            }

            networks = self._list_networks()

        if amqp_owner_listening(self.cloud.owner.id):
            # Initialize AMQP connection to reuse for multiple messages.
            amqp_conn = Connection(config.AMQP_URI)
            networks_dict = [n.as_dict() for n in networks]
            if cached_networks and networks_dict:
                # Publish patches to rabbitmq.
                new_networks = {'%s' % n['id']: n for n in networks_dict}
                patch = jsonpatch.JsonPatch.from_diff(cached_networks,
                                                      new_networks).patch
                if patch:
                    amqp_publish_user(self.cloud.owner.id,
                                      routing_key='patch_networks',
                                      connection=amqp_conn,
                                      data={
                                          'cloud_id': self.cloud.id,
                                          'patch': patch
                                      })
            else:
                # TODO: remove this block, once patches
                # are implemented in the UI
                amqp_publish_user(self.cloud.owner.id,
                                  routing_key='list_networks',
                                  connection=amqp_conn,
                                  data={
                                      'cloud_id': self.cloud.id,
                                      'networks': networks_dict
                                  })
        return networks
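
Both branches above hinge on `jsonpatch` computing an RFC 6902 diff between the cached and the fresh snapshots. A standalone sketch with made-up network data:

import jsonpatch

cached = {'n1': {'name': 'default', 'subnets': 1}}
fresh = {'n1': {'name': 'default', 'subnets': 2},
         'n2': {'name': 'private', 'subnets': 0}}

# .patch is a plain list of RFC 6902 operations, ready to serialize.
patch = jsonpatch.JsonPatch.from_diff(cached, fresh).patch
print(patch)
# e.g. [{'op': 'replace', 'path': '/n1/subnets', 'value': 2},
#       {'op': 'add', 'path': '/n2', 'value': {...}}]
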
Example 3
    def list_zones(self, persist=True):
        """Return list of zones for cloud

        A list of zones is fetched from libcloud, data is processed, stored
        on zone models, and a list of zone models is returned.

        Subclasses SHOULD NOT override or extend this method.

        This method wraps `_list_zones` which contains the core
        implementation.

        """
        task_key = 'cloud:list_zones:%s' % self.cloud.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        first_run = not task.last_success
        with task.task_runner(persist=persist):
            cached_zones = {
                '%s-%s' % (z.id, z.zone_id): z.as_dict()
                for z in self.list_cached_zones()
            }

            zones = self._list_zones()
            for zone in zones:
                self.list_records(zone)

        if amqp_owner_listening(self.cloud.owner.id):
            zones_dict = [z.as_dict() for z in zones]
            if cached_zones or zones_dict:
                # Publish patches to rabbitmq.
                new_zones = {
                    '%s-%s' % (z['id'], z['zone_id']): z
                    for z in zones_dict
                }
                patch = jsonpatch.JsonPatch.from_diff(cached_zones,
                                                      new_zones).patch
                if patch:
                    if not first_run and self.cloud.observation_logs_enabled:
                        from mist.api.logs.methods import log_observations
                        log_observations(self.cloud.owner.id, self.cloud.id,
                                         'zone', patch, cached_zones,
                                         new_zones)
                    amqp_publish_user(self.cloud.owner.id,
                                      routing_key='patch_zones',
                                      data={
                                          'cloud_id': self.cloud.id,
                                          'patch': patch
                                      })
        return zones
Example 4
    def list_volumes(self, persist=True):
        """Return list of volumes for cloud

        A list of volumes is fetched from libcloud, data is processed, stored
        on volume models, and a list of volume models is returned.

        Subclasses SHOULD NOT override or extend this method.

        This method wraps `_list_volumes` which contains the core
        implementation.

        """
        task_key = 'cloud:list_volumes:%s' % self.cloud.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        first_run = not task.last_success
        with task.task_runner(persist=persist):
            cached_volumes = {
                '%s-%s' % (v.id, v.external_id): v.as_dict()
                for v in self.list_cached_volumes()
            }

            volumes = self._list_volumes()

        volumes_dict = [v.as_dict() for v in volumes]
        if cached_volumes or volumes:
            # Publish patches to rabbitmq.
            new_volumes = {
                '%s-%s' % (v['id'], v['external_id']): v
                for v in volumes_dict
            }
            patch = jsonpatch.JsonPatch.from_diff(cached_volumes,
                                                  new_volumes).patch
            if patch:
                if not first_run and self.cloud.observation_logs_enabled:
                    from mist.api.logs.methods import log_observations
                    log_observations(self.cloud.owner.id, self.cloud.id,
                                     'volume', patch, cached_volumes,
                                     new_volumes)
                if amqp_owner_listening(self.cloud.owner.id):
                    amqp_publish_user(self.cloud.owner.id,
                                      routing_key='patch_volumes',
                                      data={
                                          'cloud_id': self.cloud.id,
                                          'patch': patch
                                      })
        return volumes
Example 5
    def add_machine(self, name, host='',
                    ssh_user='******', ssh_port=22, ssh_key=None,
                    os_type='unix', rdp_port=3389, fail_on_error=True):
        """Add machine to this dummy Cloud

        This is a special method that exists only on this Cloud subclass.
        """

        old_machines = [m.as_dict() for m in
                        self.cloud.ctl.compute.list_cached_machines()]

        # FIXME: Move ssh command to Machine controller once it is migrated.
        from mist.api.methods import ssh_command

        try:
            ssh_port = int(ssh_port)
        except (ValueError, TypeError):
            ssh_port = 22
        try:
            rdp_port = int(rdp_port)
        except (ValueError, TypeError):
            rdp_port = 3389
        if ssh_key:
            ssh_key = Key.objects.get(owner=self.cloud.owner, id=ssh_key,
                                      deleted=None)

        from mist.api.machines.models import Machine
        # Create and save machine entry to database.
        machine = Machine(
            cloud=self.cloud,
            name=name,
            machine_id=uuid.uuid4().hex,
            os_type=os_type,
            ssh_port=ssh_port,
            rdp_port=rdp_port,
            last_seen=datetime.datetime.utcnow()
        )
        if host:
            # Sanitize inputs.
            host = sanitize_host(host)
            check_host(host)
            machine.hostname = host

            if is_private_subnet(socket.gethostbyname(host)):
                machine.private_ips = [host]
            else:
                machine.public_ips = [host]
        machine.save(write_concern={'w': 1, 'fsync': True})

        # Attempt to connect.
        if os_type == 'unix' and ssh_key:
            if not ssh_user:
                ssh_user = '******'
            # Try to connect. If it works, it will create the association.
            try:
                if not host:
                    raise BadRequestError("You have specified an SSH key but "
                                          "machine hostname is empty.")
                to_tunnel(self.cloud.owner, host)  # May raise VPNTunnelError
                ssh_command(
                    self.cloud.owner, self.cloud.id, machine.id, host,
                    'uptime', key_id=ssh_key.id, username=ssh_user,
                    port=ssh_port
                )
            except MachineUnauthorizedError as exc:
                if fail_on_error:
                    machine.delete()
                raise CloudUnauthorizedError(exc)
            except ServiceUnavailableError:
                if fail_on_error:
                    machine.delete()
                raise MistError("Couldn't connect to host '%s'." % host)
            except:
                if fail_on_error:
                    machine.delete()
                raise

        if amqp_owner_listening(self.cloud.owner.id):
            new_machines = self.cloud.ctl.compute.list_cached_machines()
            self.cloud.ctl.compute.produce_and_publish_patch(
                old_machines, new_machines)

        return machine
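
`is_private_subnet` and `sanitize_host` are mist helpers. The public/private routing decision can be approximated with the standard library alone; a rough sketch, assuming the helper's semantics match `ipaddress`'s notion of private ranges:

import socket
import ipaddress

def looks_private(host):
    # Resolve a hostname and report whether it lands in a private range.
    ip = socket.gethostbyname(host)
    return ipaddress.ip_address(ip).is_private

# looks_private('10.0.0.5') -> True; looks_private('8.8.8.8') -> False
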
Example 6
    def send(self, users=None, dismiss=False):
        # FIXME Imported here due to circular dependency issues.
        from mist.api.notifications.models import InAppNotification
        from mist.api.notifications.models import UserNotificationPolicy

        # Get the list of `InAppNotification`s in the current context before
        # any update takes place.
        owner_old_ntfs = list(InAppNotification.objects(owner=self.ntf.owner))

        if not users:
            users = self.ntf.owner.members
        elif not isinstance(users, list):
            users = [users]

        # Save/update/dismiss notifications. Note that `dismissed_by` and
        # `old_dismissed_by` are referenced below even when not dismissing.
        dismissed_by = set(self.ntf.dismissed_by)
        old_dismissed_by = list(dismissed_by)
        if dismiss:
            dismissed_by |= set(user.id for user in users)
            self.ntf.dismissed_by = list(dismissed_by)

        # Is anyone listening?
        if not amqp_owner_listening(self.ntf.owner.id):
            return

        # Initialize AMQP connection to reuse for multiple messages.
        amqp_conn = Connection(config.AMQP_URI)

        # Re-fetch all notifications in order to calculate the diff between
        # the two lists.
        owner_new_ntfs = list(InAppNotification.objects(owner=self.ntf.owner))

        # Apply each user's notification policy on the above lists to get rid
        # of notifications users are not interested in.
        for user in users:
            user_old_ntfs, user_new_ntfs = [], []
            try:
                np = UserNotificationPolicy.objects.get(user_id=user.id)
            except UserNotificationPolicy.DoesNotExist:
                log.debug('No UserNotificationPolicy found for %s', user)
                user_old_ntfs = [
                    ntf.as_dict() for ntf in owner_old_ntfs if not (
                        self.ntf.id == ntf.id and user.id in old_dismissed_by)
                ]
                user_new_ntfs = [
                    ntf.as_dict() for ntf in owner_new_ntfs
                    if not (self.ntf.id == ntf.id and user.id in dismissed_by)
                ]
            else:
                user_old_ntfs = [
                    ntf.as_dict() for ntf in owner_old_ntfs
                    if not np.has_blocked(ntf) and not (
                        self.ntf.id == ntf.id and user.id in old_dismissed_by)
                ]
                user_new_ntfs = [
                    ntf.as_dict() for ntf in owner_new_ntfs
                    if not np.has_blocked(ntf)
                    and not (self.ntf.id == ntf.id and user.id in dismissed_by)
                ]
            # Save the notification, persisting any dismissal updates.
            self.ntf.save()

            # Calculate diff.
            patch = jsonpatch.JsonPatch.from_diff(user_old_ntfs,
                                                  user_new_ntfs).patch

            if patch:
                amqp_publish_user(self.ntf.owner.id,
                                  routing_key='patch_notifications',
                                  connection=amqp_conn,
                                  data={
                                      'user': user.id,
                                      'patch': patch
                                  })

        # Finally, try to close the AMQP connection.
        try:
            amqp_conn.close()
        except Exception as exc:
            log.exception(repr(exc))
Example 7
def tag_resources(request):
    """
    Tags: tags
    ---
    Batch operation for adding/removing tags from a list of resources.
    This API call modifies the tags of a large number of resources with a
    single request. For each resource a list of dicts is passed, each with
    a key, a value and, optionally, an op field. The op field must be
    either '+' or '-' and defines whether the tag is added to or removed
    from the resource. If no op value is given, '+' is assumed.
    ---
    tags:
      required: true
      type: array
    resource:
      required: true
      type: object
    """

    auth_context = auth_context_from_request(request)
    params = params_from_request(request)

    # FIXME: This implementation is far from OK. We need to re-code the way
    # tags are handled and make sure that RBAC is properly enforced on tags
    for resource in params:
        # list of dicts of key-value pairs
        resource_tags = resource.get('tags', [])
        # dict of resource info
        resource_data = resource.get('resource', {})

        if not resource_data:
            raise RequiredParameterMissingError("resource")
        if not resource_tags:
            raise RequiredParameterMissingError("tags")
        if not resource_data.get('type') or not resource_data.get('item_id'):
            raise BadRequestError('No type or item_id provided for some of '
                                  'the resources')

        # The UI sends this var only for machine, image, network and volume.
        cloud_id = resource_data.get('cloud_id')

        if cloud_id:
            auth_context.check_perm('cloud', 'read', cloud_id)
        elif resource_data['type'] in [
                'machine', 'image', 'network', 'volume'
        ]:
            raise RequiredParameterMissingError("cloud_id")
        else:
            resource_data.pop('cloud_id', None)

        query = {}
        rtype = resource_data['type']
        rid = resource_data['item_id']
        if rtype == 'machine':
            query['machine_id'] = rid
        else:
            query['id'] = rid

        if cloud_id:
            query['cloud'] = cloud_id

        try:
            resource_obj = get_resource_model(rtype).objects.get(**query)
        except me.DoesNotExist:
            # if the resource can not be found just go on and process the next
            continue

        # SEC require EDIT_TAGS permission on resource
        auth_context.check_perm(rtype, 'edit_tags', resource_obj.id)

        # normalized_resources.append(resource_data)
        query['rtype'] = rtype

        # split the tags into two lists: those that will be added and those
        # that will be removed
        tags_to_add = [(tag['key'], tag['value'])
                       for tag in resource_tags
                       if tag.get('op', '+') == '+']
        # also extract the keys from all the tags to be deleted
        tags_to_remove = [tag['key'] for tag in resource_tags
                          if tag.get('op', '+') == '-']

        # SEC only Org Owners may edit the secure tags
        tags = {tag[0]: tag[1] for tag in tags_to_add}
        if not modify_security_tags(auth_context, tags, resource_obj):
            auth_context._raise(rtype, 'edit_security_tags')

        old_tags = get_tags_for_resource(auth_context.owner, resource_obj)
        if tags_to_add:
            add_tags_to_resource(auth_context.owner, resource_obj, tags_to_add)
        if tags_to_remove:
            remove_tags_from_resource(auth_context.owner, resource_obj,
                                      tags_to_remove)

        if rtype in ['machine', 'network', 'volume', 'zone', 'record']:
            new_tags = get_tags_for_resource(auth_context.owner, resource_obj)
            try:
                external_id = getattr(resource_obj, rtype + '_id')
            except AttributeError:
                external_id = getattr(resource_obj, 'external_id')
            patch = jsonpatch.JsonPatch.from_diff(old_tags, new_tags).patch
            for item in patch:
                item['path'] = '/%s-%s/tags%s' % (resource_obj.id, external_id,
                                                  item['path'])
            if amqp_owner_listening(resource_obj.cloud.owner.id):
                amqp_publish_user(auth_context.owner.id,
                                  routing_key='patch_%ss' % rtype,
                                  data={
                                      'cloud_id': resource_obj.cloud.id,
                                      'patch': patch
                                  })
    return OK
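
The path rewriting near the end is what scopes a flat tag diff to a specific resource before publishing. A self-contained sketch with made-up ids:

import jsonpatch

old_tags = {'env': 'staging'}
new_tags = {'env': 'prod', 'team': 'sre'}
patch = jsonpatch.JsonPatch.from_diff(old_tags, new_tags).patch

# Prefix each operation with the '<id>-<external_id>' key the UI expects.
for item in patch:
    item['path'] = '/%s-%s/tags%s' % ('5a1b', 'vol-042', item['path'])
print(patch)
# e.g. [{'op': 'replace', 'path': '/5a1b-vol-042/tags/env', 'value': 'prod'},
#       {'op': 'add', 'path': '/5a1b-vol-042/tags/team', 'value': 'sre'}]
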
Example 8
    def list_machines(self, persist=True):
        """Return list of machines for cloud

        A list of nodes is fetched from libcloud, the data is processed, stored
        on machine models, and a list of machine models is returned.

        Subclasses SHOULD NOT override or extend this method.

        This method wraps `_list_machines` which contains the core
        implementation.

        """

        task_key = 'cloud:list_machines:%s' % self.cloud.id
        task = PeriodicTaskInfo.get_or_add(task_key)
        try:
            with task.task_runner(persist=persist):
                old_machines = {
                    '%s-%s' % (m.id, m.machine_id): m.as_dict()
                    for m in self.list_cached_machines()
                }
                machines = self._list_machines()
        except PeriodicTaskThresholdExceeded:
            self.cloud.disable()
            raise

        # Initialize AMQP connection to reuse for multiple messages.
        amqp_conn = Connection(config.AMQP_URI)

        if amqp_owner_listening(self.cloud.owner.id):
            if not config.MACHINE_PATCHES:
                amqp_publish_user(
                    self.cloud.owner.id,
                    routing_key='list_machines',
                    connection=amqp_conn,
                    data={
                        'cloud_id': self.cloud.id,
                        'machines':
                        [machine.as_dict() for machine in machines]
                    })
            else:
                # Publish patches to rabbitmq.
                new_machines = {
                    '%s-%s' % (m.id, m.machine_id): m.as_dict()
                    for m in machines
                }
                # Exclude last seen and probe fields from patch.
                for md in old_machines, new_machines:
                    for m in md.values():
                        m.pop('last_seen', None)
                        m.pop('probe', None)
                patch = jsonpatch.JsonPatch.from_diff(old_machines,
                                                      new_machines).patch
                if patch:
                    amqp_publish_user(self.cloud.owner.id,
                                      routing_key='patch_machines',
                                      connection=amqp_conn,
                                      data={
                                          'cloud_id': self.cloud.id,
                                          'patch': patch
                                      })

        # Push historic information for inventory and cost reporting.
        for machine in machines:
            data = {
                'owner_id': self.cloud.owner.id,
                'machine_id': machine.id,
                'cost_per_month': machine.cost.monthly
            }
            amqp_publish(exchange='machines_inventory',
                         routing_key='',
                         auto_delete=False,
                         data=data,
                         connection=amqp_conn)

        return machines
Example 9
def list_machines(schedule_id):
    """Perform list machines. Cloud controller stores results in mongodb."""

    # Fetch schedule and cloud from database.
    # FIXME: resolve circular deps error
    from mist.api.poller.models import ListMachinesPollingSchedule
    sched = ListMachinesPollingSchedule.objects.get(id=schedule_id)
    cloud = sched.cloud
    now = datetime.datetime.now()

    # Check if this cloud should be autodisabled.
    if sched.last_success:
        two_days = datetime.timedelta(days=2)
        if now - sched.last_success > two_days and sched.failure_count > 50:
            autodisable_cloud(sched.cloud)
            return
    elif sched.failure_count > 100:
        autodisable_cloud(sched.cloud)
        return

    # Find last run. If too recent, abort.
    if sched.last_success and sched.last_failure:
        last_run = max(sched.last_success, sched.last_failure)
    else:
        last_run = sched.last_success or sched.last_failure
    if last_run:
        if now - last_run < sched.interval.timedelta:
            log.warning("Running too soon for cloud %s, aborting!", cloud)
            return

    # Is another instance of this task already running?
    if sched.last_attempt_started:
        # Another attempt started recently, abort.
        if now - sched.last_attempt_started < datetime.timedelta(seconds=60):
            log.warning("Another instance of this task started recently, "
                        "aborting.")
            return
        # The previous attempt has been running for too long or has died.
        log.warning("Another instance of this task seems to have started, "
                    "but it's been quite a while, will ignore it and run "
                    "normally.")
    sched.last_attempt_started = now
    sched.save()

    try:
        # Run list_machines.
        machines = cloud.ctl.compute.list_machines()
    except Exception as exc:
        # Store failure.
        log.warning("Failed to list_machines for cloud %s: %r", cloud, exc)
        sched.last_failure = datetime.datetime.now()
        sched.failure_count += 1
        sched.last_attempt_started = None
        sched.save()
        raise
    else:
        # Store success.
        log.info("Succeeded to list_machines for cloud %s", cloud)
        sched.last_success = datetime.datetime.now()
        sched.failure_count = 0
        sched.last_attempt_started = None
        sched.save()

    # Publish results to rabbitmq (for backwards compatibility).
    if amqp_owner_listening(cloud.owner.id):
        amqp_publish_user(cloud.owner.id, routing_key='list_machines',
                          data={'cloud_id': cloud.id,
                                'machines': [machine.as_dict()
                                             for machine in machines]})

    # Push historic information for inventory and cost reporting.
    for machine in machines:
        data = {'owner_id': machine.cloud.owner.id,
                'machine_id': machine.id,
                'cost_per_month': machine.cost.monthly}
        log.info("Will push to elastic: %s", data)
        amqp_publish(exchange='machines_inventory', routing_key='',
                     auto_delete=False, data=data)
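
The last-run bookkeeping above reduces to a simple guard: skip the task unless the most recent run, successful or not, is older than the polling interval. A minimal sketch (the interval value is made up):

import datetime

INTERVAL = datetime.timedelta(seconds=30)

def should_run(now, last_success, last_failure):
    runs = [t for t in (last_success, last_failure) if t]
    last_run = max(runs) if runs else None
    # First run ever, or the interval has elapsed since the last run.
    return last_run is None or now - last_run >= INTERVAL

now = datetime.datetime.now()
print(should_run(now, now - datetime.timedelta(seconds=5), None))   # False
print(should_run(now, None, now - datetime.timedelta(minutes=2)))   # True
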
Example 10
    def add_machine(self, host, ssh_user='******', ssh_port=22, ssh_key=None,
                    **kwargs):
        try:
            ssh_port = int(ssh_port)
        except (ValueError, TypeError):
            ssh_port = 22

        if not ssh_key:
            raise RequiredParameterMissingError('machine_key')

        try:
            ssh_key = Key.objects.get(owner=self.cloud.owner, id=ssh_key,
                                      deleted=None)
        except Key.DoesNotExist:
            raise NotFoundError("Key does not exist.")

        images_location = kwargs.get('images_location',
                                     '/var/lib/libvirt/images')
        extra = {
            'images_location': images_location,
            'tags': {'type': 'hypervisor'},
            'username': ssh_user
        }

        from mist.api.machines.models import Machine
        # Create and save machine entry to database.
        # first check if the host has already been added to the cloud
        try:
            machine = Machine.objects.get(cloud=self.cloud,
                                          machine_id=host.replace('.', '-'))
            machine.name = kwargs.get('name') or host
            machine.ssh_port = ssh_port
            machine.extra = extra
            machine.last_seen = datetime.datetime.utcnow()
            machine.missing_since = None
        except me.DoesNotExist:
            machine = Machine(
                cloud=self.cloud,
                name=kwargs.get('name') or host,
                hostname=host,
                machine_id=host.replace('.', '-'),
                ssh_port=ssh_port,
                extra=extra,
                state=NodeState.RUNNING,
                last_seen=datetime.datetime.utcnow(),
            )

        # Sanitize inputs.
        host = sanitize_host(host)
        check_host(host)
        machine.hostname = host

        if is_private_subnet(socket.gethostbyname(host)):
            machine.private_ips = [host]
        else:
            machine.public_ips = [host]

        machine.save(write_concern={'w': 1, 'fsync': True})

        # associate key and attempt to connect
        try:
            machine.ctl.associate_key(ssh_key,
                                      username=ssh_user,
                                      port=ssh_port)
        except MachineUnauthorizedError as exc:
            log.error("Could not connect to host %s.", host)
            machine.delete()
            raise CloudUnauthorizedError(exc)
        except ServiceUnavailableError:
            log.error("Could not connect to host %s.", host)
            machine.delete()
            raise MistError("Couldn't connect to host '%s'." % host)

        if amqp_owner_listening(self.cloud.owner.id):
            # Exclude the host just added from the cached list, so that it
            # shows up as an addition in the published patch.
            old_machines = [
                m.as_dict()
                for m in self.cloud.ctl.compute.list_cached_machines()
                if m.id != machine.id
            ]
            new_machines = self.cloud.ctl.compute.list_machines()
            self.cloud.ctl.compute.produce_and_publish_patch(
                old_machines, new_machines)

        return machine
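
The try/except around `Machine.objects.get` is mongoengine's usual get-or-create idiom: refresh the document if it already exists, instantiate it otherwise, then save once. A minimal sketch with a hypothetical `Host` model (assumes a local mongod):

import mongoengine as me

me.connect('demo')  # hypothetical database name

class Host(me.Document):
    # Hypothetical minimal model, standing in for Machine.
    host_id = me.StringField(unique=True)
    name = me.StringField()

def upsert_host(host_id, name):
    try:
        doc = Host.objects.get(host_id=host_id)  # refresh the existing entry
        doc.name = name
    except Host.DoesNotExist:
        doc = Host(host_id=host_id, name=name)   # first time this host is seen
    doc.save()
    return doc
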
Example 11
    def add(self, fail_on_error=True, fail_on_invalid_params=False, **kwargs):
        from mist.api.machines.models import Machine
        if not kwargs.get('hosts'):
            raise RequiredParameterMissingError('hosts')
        try:
            self.cloud.save()
        except me.ValidationError as exc:
            raise BadRequestError({'msg': str(exc),
                                   'errors': exc.to_dict()})
        except me.NotUniqueError:
            raise CloudExistsError("Cloud with name %s already exists"
                                   % self.cloud.title)
        total_errors = {}

        for _host in kwargs['hosts']:
            self._add__preparse_kwargs(_host)
            errors = {}
            for key in list(_host.keys()):
                if key not in ('host', 'alias', 'username', 'port', 'key',
                               'images_location'):
                    error = "Invalid parameter %s=%r." % (key, _host[key])
                    if fail_on_invalid_params:
                        self.cloud.delete()
                        raise BadRequestError(error)
                    else:
                        log.warning(error)
                        _host.pop(key)

            for key in ('host', 'key'):
                if key not in _host or not _host.get(key):
                    error = "Required parameter missing: %s" % key
                    errors[key] = error
                    if fail_on_error:
                        self.cloud.delete()
                        raise RequiredParameterMissingError(key)
                    else:
                        log.warning(error)
                        total_errors.update({key: error})

            if not errors:
                try:
                    ssh_port = int(_host.get('ssh_port', 22))
                except (ValueError, TypeError):
                    ssh_port = 22

                images_location = _host.get('images_location',
                                            '/var/lib/libvirt/images')
                extra = {
                    'images_location': images_location,
                    'tags': {'type': 'hypervisor'},
                    'username': _host.get('username')
                }
                # Create and save machine entry to database.
                machine = Machine(
                    cloud=self.cloud,
                    machine_id=_host.get('host').replace('.', '-'),
                    name=_host.get('alias') or _host.get('host'),
                    ssh_port=ssh_port,
                    last_seen=datetime.datetime.utcnow(),
                    hostname=_host.get('host'),
                    state=NodeState.RUNNING,
                    extra=extra
                )
                # Sanitize inputs.
                host = sanitize_host(_host.get('host'))
                check_host(_host.get('host'))
                machine.hostname = host

                if is_private_subnet(socket.gethostbyname(_host.get('host'))):
                    machine.private_ips = [_host.get('host')]
                else:
                    machine.public_ips = [_host.get('host')]

                try:
                    machine.save(write_concern={'w': 1, 'fsync': True})
                except me.NotUniqueError:
                    error = ('Duplicate machine entry. Maybe the same '
                             'host has been added twice?')
                    if fail_on_error:
                        self.cloud.delete()
                        raise MistError(error)
                    else:
                        total_errors.update({_host.get('host'): error})
                        continue

                # associate key and attempt to connect
                try:
                    machine.ctl.associate_key(_host.get('key'),
                                              username=_host.get('username'),
                                              port=ssh_port)
                except MachineUnauthorizedError as exc:
                    log.error("Could not connect to host %s.",
                              _host.get('host'))
                    machine.delete()
                    if fail_on_error:
                        self.cloud.delete()
                        raise CloudUnauthorizedError(exc)
                except ServiceUnavailableError:
                    log.error("Could not connect to host %s.",
                              _host.get('host'))
                    machine.delete()
                    if fail_on_error:
                        self.cloud.delete()
                        raise MistError("Couldn't connect to host '%s'."
                                        % _host.get('host'))

        # check if host was added successfully
        # if not, delete the cloud and raise
        if Machine.objects(cloud=self.cloud):
            if amqp_owner_listening(self.cloud.owner.id):
                old_machines = [m.as_dict() for m in
                                self.cloud.ctl.compute.list_cached_machines()]
                new_machines = self.cloud.ctl.compute.list_machines()
                self.cloud.ctl.compute.produce_and_publish_patch(
                    old_machines, new_machines)

            self.cloud.errors = total_errors

        else:
            self.cloud.delete()
            raise BadRequestError(total_errors)