Example no. 1
    def add_to_aggregate(self,
                         context,
                         aggregate,
                         host,
                         subordinate_info=None):
        """Add a compute host to an aggregate."""
        if not pool_states.is_hv_pool(aggregate['metadata']):
            return

        invalid = {
            pool_states.CHANGING: 'setup in progress',
            pool_states.DISMISSED: 'aggregate deleted',
            pool_states.ERROR: 'aggregate in error'
        }

        if (aggregate['metadata'][pool_states.KEY] in invalid.keys()):
            raise exception.InvalidAggregateAction(
                action='add host',
                aggregate_id=aggregate['id'],
                reason=aggregate['metadata'][pool_states.KEY])

        if (aggregate['metadata'][pool_states.KEY] == pool_states.CREATED):
            aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING})
        if len(aggregate['hosts']) == 1:
            # this is the first host of the pool -> make it main
            self._init_pool(aggregate['id'], aggregate['name'])
            # save metadata so that we can find the main again
            metadata = {
                'main_compute': host,
                host: self._host_uuid,
                pool_states.KEY: pool_states.ACTIVE
            }
            aggregate.update_metadata(metadata)
        else:
            # the pool is already up and running, we need to figure out
            # whether we can serve the request from this host or not.
            main_compute = aggregate['metadata']['main_compute']
            if main_compute == CONF.host and main_compute != host:
                # this is the main ->  do a pool-join
                # To this aim, nova compute on the subordinate has to go down.
                # NOTE: it is assumed that ONLY nova compute is running now
                self._join_subordinate(aggregate['id'], host,
                                       subordinate_info.get('compute_uuid'),
                                       subordinate_info.get('url'),
                                       subordinate_info.get('user'),
                                       subordinate_info.get('passwd'))
                metadata = {
                    host: subordinate_info.get('xenhost_uuid'),
                }
                aggregate.update_metadata(metadata)
            elif main_compute and main_compute != host:
                # send rpc cast to main, asking to add the following
                # host with specified credentials.
                subordinate_info = self._create_subordinate_info()

                self.compute_rpcapi.add_aggregate_host(context, aggregate,
                                                       host, main_compute,
                                                       subordinate_info)
Example no. 2
    def add_to_aggregate(self, context, aggregate, host, slave_info=None):
        """Add a compute host to an aggregate."""
        if not pool_states.is_hv_pool(aggregate.metadata):
            return

        if CONF.xenserver.independent_compute:
            raise exception.NotSupportedWithOption(
                operation='adding to a XenServer pool',
                option='CONF.xenserver.independent_compute')

        invalid = {
            pool_states.CHANGING: _('setup in progress'),
            pool_states.DISMISSED: _('aggregate deleted'),
            pool_states.ERROR: _('aggregate in error')
        }

        if (aggregate.metadata[pool_states.KEY] in invalid.keys()):
            raise exception.InvalidAggregateActionAdd(
                aggregate_id=aggregate.id,
                reason=invalid[aggregate.metadata[pool_states.KEY]])

        if (aggregate.metadata[pool_states.KEY] == pool_states.CREATED):
            aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING})
        if len(aggregate.hosts) == 1:
            # this is the first host of the pool -> make it master
            self._init_pool(aggregate.id, aggregate.name)
            # save metadata so that we can find the master again
            metadata = {
                'master_compute': host,
                host: self._host_uuid,
                pool_states.KEY: pool_states.ACTIVE
            }
            aggregate.update_metadata(metadata)
        else:
            # the pool is already up and running, we need to figure out
            # whether we can serve the request from this host or not.
            master_compute = aggregate.metadata['master_compute']
            if master_compute == CONF.host and master_compute != host:
                # this is the master ->  do a pool-join
                # To this aim, nova compute on the slave has to go down.
                # NOTE: it is assumed that ONLY nova compute is running now
                self._join_slave(aggregate.id, host,
                                 slave_info.get('compute_uuid'),
                                 slave_info.get('url'), slave_info.get('user'),
                                 slave_info.get('passwd'))
                metadata = {
                    host: slave_info.get('xenhost_uuid'),
                }
                aggregate.update_metadata(metadata)
            elif master_compute and master_compute != host:
                # send rpc cast to master, asking to add the following
                # host with specified credentials.
                slave_info = self._create_slave_info()

                self.compute_rpcapi.add_aggregate_host(context, host,
                                                       aggregate,
                                                       master_compute,
                                                       slave_info)
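A note on the slave_info / subordinate_info argument used above: the examples only ever read it with .get(), so its expected shape can be inferred from the call sites alone. Below is a hypothetical sketch of such a payload; build_slave_info is illustrative and not the actual Nova helper (_create_slave_info), and the per-key comments are assumptions except where the examples themselves show how the key is used.

from typing import Dict


def build_slave_info(url: str, user: str, passwd: str,
                     compute_uuid: str, xenhost_uuid: str) -> Dict[str, str]:
    """Illustrative payload carrying the keys the examples read from slave_info."""
    return {
        'url': url,                    # assumed: XenAPI URL for reaching the joining host
        'user': user,                  # assumed: XenAPI username on that host
        'passwd': passwd,              # assumed: XenAPI password on that host
        'compute_uuid': compute_uuid,  # forwarded to _join_slave / _eject_slave
        'xenhost_uuid': xenhost_uuid,  # stored in the aggregate metadata under the host name
    }

The master-side consumer is exactly what example 2 shows: _join_slave(aggregate.id, host, info['compute_uuid'], info['url'], info['user'], info['passwd']), followed by recording xenhost_uuid in the aggregate metadata.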
Example no. 3
    def remove_from_aggregate(self,
                              context,
                              aggregate,
                              host,
                              subordinate_info=None):
        """Remove a compute host from an aggregate."""
        subordinate_info = subordinate_info or dict()
        if not pool_states.is_hv_pool(aggregate['metadata']):
            return

        invalid = {
            pool_states.CREATED: 'no hosts to remove',
            pool_states.CHANGING: 'setup in progress',
            pool_states.DISMISSED: 'aggregate deleted',
        }
        if aggregate['metadata'][pool_states.KEY] in invalid.keys():
            raise exception.InvalidAggregateAction(
                action='remove host',
                aggregate_id=aggregate['id'],
                reason=invalid[aggregate['metadata'][pool_states.KEY]])

        main_compute = aggregate['metadata']['main_compute']
        if main_compute == CONF.host and main_compute != host:
            # this is the main -> instruct it to eject a host from the pool
            host_uuid = aggregate['metadata'][host]
            self._eject_subordinate(aggregate['id'],
                                    subordinate_info.get('compute_uuid'),
                                    host_uuid)
            aggregate.update_metadata({host: None})
        elif main_compute == host:
            # Remove main from its own pool -> destroy pool only if the
            # main is on its own, otherwise raise fault. Destroying a
            # pool made only by main is fictional
            if len(aggregate['hosts']) > 1:
                # NOTE: this could be avoided by doing a main
                # re-election, but this is simpler for now.
                raise exception.InvalidAggregateAction(
                    aggregate_id=aggregate['id'],
                    action='remove_from_aggregate',
                    reason=_('Unable to eject %s '
                             'from the pool; pool not empty') % host)
            self._clear_pool(aggregate['id'])
            aggregate.update_metadata({'main_compute': None, host: None})
        elif main_compute and main_compute != host:
            # A main exists -> forward pool-eject request to main
            subordinate_info = self._create_subordinate_info()

            self.compute_rpcapi.remove_aggregate_host(context, aggregate['id'],
                                                      host, main_compute,
                                                      subordinate_info)
        else:
            # this shouldn't have happened
            raise exception.AggregateError(
                aggregate_id=aggregate['id'],
                action='remove_from_aggregate',
                reason=_('Unable to eject %s '
                         'from the pool; No main found') % host)
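Taken together, the invalid maps in the add and remove paths above define a small lifecycle for the pool_states.KEY entry in the aggregate metadata. The sketch below is a hedged summary of those checks: the state names are the ones used in the examples, while the string values and the helper functions are illustrative.

# Lifecycle implied by the examples:
#   CREATED --first host added--> CHANGING --pool initialised--> ACTIVE
# DISMISSED marks a deleted aggregate, ERROR a failed one, and CHANGING a
# change that is still in flight.
CREATED, CHANGING, ACTIVE, ERROR, DISMISSED = (
    'created', 'changing', 'active', 'error', 'dismissed')  # values assumed


def can_add_host(state: str) -> bool:
    """add_to_aggregate rejects CHANGING, DISMISSED and ERROR."""
    return state not in (CHANGING, DISMISSED, ERROR)


def can_remove_host(state: str) -> bool:
    """remove_from_aggregate rejects CREATED, CHANGING and DISMISSED."""
    return state not in (CREATED, CHANGING, DISMISSED)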
Example no. 4
    def add_to_aggregate(self, context, aggregate, host, **kwargs):
        """Add a compute host to an aggregate."""
        if not pool_states.is_hv_pool(context, aggregate.id):
            return

        invalid = {
            pool_states.CHANGING: 'setup in progress',
            pool_states.DISMISSED: 'aggregate deleted',
            pool_states.ERROR: 'aggregate in error'
        }

        if (db.aggregate_metadata_get(context, aggregate.id)[pool_states.KEY]
                in invalid.keys()):
            raise exception.InvalidAggregateAction(
                action='add host',
                aggregate_id=aggregate.id,
                reason=invalid[db.aggregate_metadata_get(
                    context, aggregate.id)[pool_states.KEY]])

        if (db.aggregate_metadata_get(
                context,
                aggregate.id)[pool_states.KEY] == pool_states.CREATED):
            db.aggregate_metadata_add(context, aggregate.id,
                                      {pool_states.KEY: pool_states.CHANGING})
        if len(aggregate.hosts) == 1:
            # this is the first host of the pool -> make it master
            self._init_pool(aggregate.id, aggregate.name)
            # save metadata so that we can find the master again
            metadata = {
                'master_compute': host,
                host: self._host_uuid,
                pool_states.KEY: pool_states.ACTIVE
            }
            db.aggregate_metadata_add(context, aggregate.id, metadata)
        else:
            # the pool is already up and running, we need to figure out
            # whether we can serve the request from this host or not.
            master_compute = db.aggregate_metadata_get(
                context, aggregate.id)['master_compute']
            if master_compute == FLAGS.host and master_compute != host:
                # this is the master ->  do a pool-join
                # To this aim, nova compute on the slave has to go down.
                # NOTE: it is assumed that ONLY nova compute is running now
                self._join_slave(aggregate.id, host,
                                 kwargs.get('compute_uuid'), kwargs.get('url'),
                                 kwargs.get('user'), kwargs.get('passwd'))
                metadata = {
                    host: kwargs.get('xenhost_uuid'),
                }
                db.aggregate_metadata_add(context, aggregate.id, metadata)
            elif master_compute and master_compute != host:
                # send rpc cast to master, asking to add the following
                # host with specified credentials.
                forward_request(context, "add_aggregate_host", master_compute,
                                aggregate.id, host, self._host_addr,
                                self._host_uuid)
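Example 4 above, like examples 5, 7 and 8 further down, is an older revision that delegates to a module-level forward_request instead of compute_rpcapi. The sketch below is purely illustrative and only mirrors the call sites; the rpc_cast callable and the payload layout are assumptions, not the historical Nova implementation.

def forward_request(context, operation, master_compute, aggregate_id,
                    slave_compute, slave_address, slave_uuid, rpc_cast=None):
    """Illustrative: cast an add/remove aggregate-host request to the master."""
    payload = {
        'aggregate_id': aggregate_id,
        'host': slave_compute,      # host being added to / removed from the pool
        'address': slave_address,   # address the master can use to reach that host
        'host_uuid': slave_uuid,    # XenServer host UUID of the sender
    }
    if rpc_cast is not None:
        # e.g. an asynchronous cast to nova-compute running on master_compute
        rpc_cast(context, master_compute, operation, payload)
    return payload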
Example no. 5
    def remove_from_aggregate(self, context, aggregate, host, **kwargs):
        """Remove a compute host from an aggregate."""
        if not pool_states.is_hv_pool(context, aggregate.id):
            return

        invalid = {
            pool_states.CREATED: 'no hosts to remove',
            pool_states.CHANGING: 'setup in progress',
            pool_states.DISMISSED: 'aggregate deleted',
        }
        if (db.aggregate_metadata_get(context, aggregate.id)[pool_states.KEY]
                in invalid.keys()):
            raise exception.InvalidAggregateAction(
                action='remove host',
                aggregate_id=aggregate.id,
                reason=invalid[db.aggregate_metadata_get(
                    context, aggregate.id)[pool_states.KEY]])

        master_compute = db.aggregate_metadata_get(
            context, aggregate.id)['master_compute']
        if master_compute == FLAGS.host and master_compute != host:
            # this is the master -> instruct it to eject a host from the pool
            host_uuid = db.aggregate_metadata_get(context, aggregate.id)[host]
            self._eject_slave(aggregate.id, kwargs.get('compute_uuid'),
                              host_uuid)
            db.aggregate_metadata_delete(context, aggregate.id, host)
        elif master_compute == host:
            # Remove master from its own pool -> destroy pool only if the
            # master is on its own, otherwise raise fault. Destroying a
            # pool made only by master is fictional
            if len(aggregate.hosts) > 1:
                # NOTE: this could be avoided by doing a master
                # re-election, but this is simpler for now.
                raise exception.InvalidAggregateAction(
                    aggregate_id=aggregate.id,
                    action='remove_from_aggregate',
                    reason=_('Unable to eject %(host)s '
                             'from the pool; pool not empty') % locals())
            self._clear_pool(aggregate.id)
            for key in ['master_compute', host]:
                db.aggregate_metadata_delete(context, aggregate.id, key)
        elif master_compute and master_compute != host:
            # A master exists -> forward pool-eject request to master
            forward_request(context, "remove_aggregate_host", master_compute,
                            aggregate.id, host, self._host_addr,
                            self._host_uuid)
        else:
            # this shouldn't have happened
            raise exception.AggregateError(
                aggregate_id=aggregate.id,
                action='remove_from_aggregate',
                reason=_('Unable to eject %(host)s '
                         'from the pool; No master found') % locals())
Example no. 6
    def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
        """Remove a compute host from an aggregate."""
        slave_info = slave_info or dict()
        if not pool_states.is_hv_pool(aggregate['metadetails']):
            return

        invalid = {pool_states.CREATED: 'no hosts to remove',
                   pool_states.CHANGING: 'setup in progress',
                   pool_states.DISMISSED: 'aggregate deleted', }
        if aggregate['metadetails'][pool_states.KEY] in invalid.keys():
            raise exception.InvalidAggregateAction(
                    action='remove host',
                    aggregate_id=aggregate['id'],
                    reason=invalid[aggregate['metadetails'][pool_states.KEY]])

        master_compute = aggregate['metadetails']['master_compute']
        if master_compute == CONF.host and master_compute != host:
            # this is the master -> instruct it to eject a host from the pool
            host_uuid = aggregate['metadetails'][host]
            self._eject_slave(aggregate['id'],
                              slave_info.get('compute_uuid'), host_uuid)
            self._virtapi.aggregate_metadata_delete(context, aggregate,
                                                    host)
        elif master_compute == host:
            # Remove master from its own pool -> destroy pool only if the
            # master is on its own, otherwise raise fault. Destroying a
            # pool made only by master is fictional
            if len(aggregate['hosts']) > 1:
                # NOTE: this could be avoided by doing a master
                # re-election, but this is simpler for now.
                raise exception.InvalidAggregateAction(
                                    aggregate_id=aggregate['id'],
                                    action='remove_from_aggregate',
                                    reason=_('Unable to eject %(host)s '
                                             'from the pool; pool not empty')
                                             % locals())
            self._clear_pool(aggregate['id'])
            for key in ['master_compute', host]:
                self._virtapi.aggregate_metadata_delete(context,
                        aggregate, key)
        elif master_compute and master_compute != host:
            # A master exists -> forward pool-eject request to master
            slave_info = self._create_slave_info()

            self.compute_rpcapi.remove_aggregate_host(
                context, aggregate['id'], host, master_compute, slave_info)
        else:
            # this shouldn't have happened
            raise exception.AggregateError(aggregate_id=aggregate['id'],
                                           action='remove_from_aggregate',
                                           reason=_('Unable to eject %(host)s '
                                           'from the pool; No master found')
                                           % locals())
Example no. 7
    def remove_from_aggregate(self, context, aggregate, host, **kwargs):
        """Remove a compute host from an aggregate."""
        if not pool_states.is_hv_pool(context, aggregate.id):
            return

        invalid = {pool_states.CREATED: 'no hosts to remove',
                   pool_states.CHANGING: 'setup in progress',
                   pool_states.DISMISSED: 'aggregate deleted', }
        if (db.aggregate_metadata_get(context, aggregate.id)[pool_states.KEY]
                in invalid.keys()):
            raise exception.InvalidAggregateAction(
                    action='remove host',
                    aggregate_id=aggregate.id,
                    reason=invalid[db.aggregate_metadata_get(context,
                            aggregate.id)[pool_states.KEY]])

        master_compute = db.aggregate_metadata_get(context,
                aggregate.id)['master_compute']
        if master_compute == FLAGS.host and master_compute != host:
            # this is the master -> instruct it to eject a host from the pool
            host_uuid = db.aggregate_metadata_get(context, aggregate.id)[host]
            self._eject_slave(aggregate.id,
                              kwargs.get('compute_uuid'), host_uuid)
            db.aggregate_metadata_delete(context, aggregate.id, host)
        elif master_compute == host:
            # Remove master from its own pool -> destroy pool only if the
            # master is on its own, otherwise raise fault. Destroying a
            # pool made only by master is fictional
            if len(aggregate.hosts) > 1:
                # NOTE: this could be avoided by doing a master
                # re-election, but this is simpler for now.
                raise exception.InvalidAggregateAction(
                                    aggregate_id=aggregate.id,
                                    action='remove_from_aggregate',
                                    reason=_('Unable to eject %(host)s '
                                             'from the pool; pool not empty')
                                             % locals())
            self._clear_pool(aggregate.id)
            for key in ['master_compute', host]:
                db.aggregate_metadata_delete(context, aggregate.id, key)
        elif master_compute and master_compute != host:
            # A master exists -> forward pool-eject request to master
            forward_request(context, "remove_aggregate_host", master_compute,
                            aggregate.id, host,
                            self._host_addr, self._host_uuid)
        else:
            # this shouldn't have happened
            raise exception.AggregateError(aggregate_id=aggregate.id,
                                           action='remove_from_aggregate',
                                           reason=_('Unable to eject %(host)s '
                                           'from the pool; No master found')
                                           % locals())
Example no. 8
    def add_to_aggregate(self, context, aggregate, host, **kwargs):
        """Add a compute host to an aggregate."""
        if not pool_states.is_hv_pool(context, aggregate.id):
            return

        invalid = {pool_states.CHANGING: 'setup in progress',
                   pool_states.DISMISSED: 'aggregate deleted',
                   pool_states.ERROR: 'aggregate in error'}

        if (db.aggregate_metadata_get(context, aggregate.id)[pool_states.KEY]
                in invalid.keys()):
            raise exception.InvalidAggregateAction(
                    action='add host',
                    aggregate_id=aggregate.id,
                    reason=invalid[db.aggregate_metadata_get(context,
                            aggregate.id)
                    [pool_states.KEY]])

        if (db.aggregate_metadata_get(context, aggregate.id)[pool_states.KEY]
                == pool_states.CREATED):
            db.aggregate_metadata_add(context, aggregate.id,
                    {pool_states.KEY: pool_states.CHANGING})
        if len(aggregate.hosts) == 1:
            # this is the first host of the pool -> make it master
            self._init_pool(aggregate.id, aggregate.name)
            # save metadata so that we can find the master again
            metadata = {'master_compute': host,
                        host: self._host_uuid,
                        pool_states.KEY: pool_states.ACTIVE}
            db.aggregate_metadata_add(context, aggregate.id, metadata)
        else:
            # the pool is already up and running, we need to figure out
            # whether we can serve the request from this host or not.
            master_compute = db.aggregate_metadata_get(context,
                    aggregate.id)['master_compute']
            if master_compute == FLAGS.host and master_compute != host:
                # this is the master ->  do a pool-join
                # To this aim, nova compute on the slave has to go down.
                # NOTE: it is assumed that ONLY nova compute is running now
                self._join_slave(aggregate.id, host,
                                 kwargs.get('compute_uuid'),
                                 kwargs.get('url'), kwargs.get('user'),
                                 kwargs.get('passwd'))
                metadata = {host: kwargs.get('xenhost_uuid'), }
                db.aggregate_metadata_add(context, aggregate.id, metadata)
            elif master_compute and master_compute != host:
                # send rpc cast to master, asking to add the following
                # host with specified credentials.
                forward_request(context, "add_aggregate_host", master_compute,
                                aggregate.id, host,
                                self._host_addr, self._host_uuid)
Example no. 9
File: pool.py Project: syotani/nova
    def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
        """Remove a compute host from an aggregate."""
        slave_info = slave_info or dict()
        if not pool_states.is_hv_pool(aggregate["metadata"]):
            return

        invalid = {
            pool_states.CREATED: "no hosts to remove",
            pool_states.CHANGING: "setup in progress",
            pool_states.DISMISSED: "aggregate deleted",
        }
        if aggregate["metadata"][pool_states.KEY] in invalid.keys():
            raise exception.InvalidAggregateAction(
                action="remove host",
                aggregate_id=aggregate["id"],
                reason=invalid[aggregate["metadata"][pool_states.KEY]],
            )

        master_compute = aggregate["metadata"]["master_compute"]
        if master_compute == CONF.host and master_compute != host:
            # this is the master -> instruct it to eject a host from the pool
            host_uuid = aggregate["metadata"][host]
            self._eject_slave(aggregate["id"], slave_info.get("compute_uuid"), host_uuid)
            aggregate.update_metadata({host: None})
        elif master_compute == host:
            # Remove master from its own pool -> destroy pool only if the
            # master is on its own, otherwise raise fault. Destroying a
            # pool made only by master is fictional
            if len(aggregate["hosts"]) > 1:
                # NOTE: this could be avoided by doing a master
                # re-election, but this is simpler for now.
                raise exception.InvalidAggregateAction(
                    aggregate_id=aggregate["id"],
                    action="remove_from_aggregate",
                    reason=_("Unable to eject %s " "from the pool; pool not empty") % host,
                )
            self._clear_pool(aggregate["id"])
            aggregate.update_metadata({"master_compute": None, host: None})
        elif master_compute and master_compute != host:
            # A master exists -> forward pool-eject request to master
            slave_info = self._create_slave_info()

            self.compute_rpcapi.remove_aggregate_host(context, aggregate["id"], host, master_compute, slave_info)
        else:
            # this shouldn't have happened
            raise exception.AggregateError(
                aggregate_id=aggregate["id"],
                action="remove_from_aggregate",
                reason=_("Unable to eject %s " "from the pool; No master found") % host,
            )
Example no. 10
    def add_to_aggregate(self, context, aggregate, host, slave_info=None):
        """Add a compute host to an aggregate."""
        if not pool_states.is_hv_pool(aggregate['metadetails']):
            return

        invalid = {pool_states.CHANGING: 'setup in progress',
                   pool_states.DISMISSED: 'aggregate deleted',
                   pool_states.ERROR: 'aggregate in error'}

        if (aggregate['metadetails'][pool_states.KEY] in invalid.keys()):
            raise exception.InvalidAggregateAction(
                    action='add host',
                    aggregate_id=aggregate['id'],
                    reason=aggregate['metadetails'][pool_states.KEY])

        if (aggregate['metadetails'][pool_states.KEY] == pool_states.CREATED):
            self._virtapi.aggregate_metadata_add(context, aggregate,
                                                 {pool_states.KEY:
                                                      pool_states.CHANGING})
        if len(aggregate['hosts']) == 1:
            # this is the first host of the pool -> make it master
            self._init_pool(aggregate['id'], aggregate['name'])
            # save metadata so that we can find the master again
            metadata = {'master_compute': host,
                        host: self._host_uuid,
                        pool_states.KEY: pool_states.ACTIVE}
            self._virtapi.aggregate_metadata_add(context, aggregate,
                                                 metadata)
        else:
            # the pool is already up and running, we need to figure out
            # whether we can serve the request from this host or not.
            master_compute = aggregate['metadetails']['master_compute']
            if master_compute == CONF.host and master_compute != host:
                # this is the master ->  do a pool-join
                # To this aim, nova compute on the slave has to go down.
                # NOTE: it is assumed that ONLY nova compute is running now
                self._join_slave(aggregate['id'], host,
                                 slave_info.get('compute_uuid'),
                                 slave_info.get('url'), slave_info.get('user'),
                                 slave_info.get('passwd'))
                metadata = {host: slave_info.get('xenhost_uuid'), }
                self._virtapi.aggregate_metadata_add(context, aggregate,
                                                     metadata)
            elif master_compute and master_compute != host:
                # send rpc cast to master, asking to add the following
                # host with specified credentials.
                slave_info = self._create_slave_info()

                self.compute_rpcapi.add_aggregate_host(
                    context, aggregate, host, master_compute, slave_info)
Example no. 11
    def add_to_aggregate(self, context, aggregate, host, slave_info=None):
        """Add a compute host to an aggregate."""
        if not pool_states.is_hv_pool(aggregate.metadata):
            return

        if CONF.xenserver.independent_compute:
            raise exception.NotSupportedWithOption(
                operation='adding to a XenServer pool',
                option='CONF.xenserver.independent_compute')

        invalid = {pool_states.CHANGING: _('setup in progress'),
                   pool_states.DISMISSED: _('aggregate deleted'),
                   pool_states.ERROR: _('aggregate in error')}

        if (aggregate.metadata[pool_states.KEY] in invalid.keys()):
            raise exception.InvalidAggregateActionAdd(
                    aggregate_id=aggregate.id,
                    reason=invalid[aggregate.metadata[pool_states.KEY]])

        if (aggregate.metadata[pool_states.KEY] == pool_states.CREATED):
            aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING})
        if len(aggregate.hosts) == 1:
            # this is the first host of the pool -> make it master
            self._init_pool(aggregate.id, aggregate.name)
            # save metadata so that we can find the master again
            metadata = {'master_compute': host,
                        host: self._host_uuid,
                        pool_states.KEY: pool_states.ACTIVE}
            aggregate.update_metadata(metadata)
        else:
            # the pool is already up and running, we need to figure out
            # whether we can serve the request from this host or not.
            master_compute = aggregate.metadata['master_compute']
            if master_compute == CONF.host and master_compute != host:
                # this is the master ->  do a pool-join
                # To this aim, nova compute on the slave has to go down.
                # NOTE: it is assumed that ONLY nova compute is running now
                self._join_slave(aggregate.id, host,
                                 slave_info.get('compute_uuid'),
                                 slave_info.get('url'), slave_info.get('user'),
                                 slave_info.get('passwd'))
                metadata = {host: slave_info.get('xenhost_uuid'), }
                aggregate.update_metadata(metadata)
            elif master_compute and master_compute != host:
                # send rpc cast to master, asking to add the following
                # host with specified credentials.
                slave_info = self._create_slave_info()

                self.compute_rpcapi.add_aggregate_host(
                    context, host, aggregate, master_compute, slave_info)
Example no. 12
File: pool.py Project: syotani/nova
    def add_to_aggregate(self, context, aggregate, host, slave_info=None):
        """Add a compute host to an aggregate."""
        if not pool_states.is_hv_pool(aggregate["metadata"]):
            return

        invalid = {
            pool_states.CHANGING: "setup in progress",
            pool_states.DISMISSED: "aggregate deleted",
            pool_states.ERROR: "aggregate in error",
        }

        if aggregate["metadata"][pool_states.KEY] in invalid.keys():
            raise exception.InvalidAggregateAction(
                action="add host", aggregate_id=aggregate["id"], reason=aggregate["metadata"][pool_states.KEY]
            )

        if aggregate["metadata"][pool_states.KEY] == pool_states.CREATED:
            aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING})
        if len(aggregate["hosts"]) == 1:
            # this is the first host of the pool -> make it master
            self._init_pool(aggregate["id"], aggregate["name"])
            # save metadata so that we can find the master again
            metadata = {"master_compute": host, host: self._host_uuid, pool_states.KEY: pool_states.ACTIVE}
            aggregate.update_metadata(metadata)
        else:
            # the pool is already up and running, we need to figure out
            # whether we can serve the request from this host or not.
            master_compute = aggregate["metadata"]["master_compute"]
            if master_compute == CONF.host and master_compute != host:
                # this is the master ->  do a pool-join
                # To this aim, nova compute on the slave has to go down.
                # NOTE: it is assumed that ONLY nova compute is running now
                self._join_slave(
                    aggregate["id"],
                    host,
                    slave_info.get("compute_uuid"),
                    slave_info.get("url"),
                    slave_info.get("user"),
                    slave_info.get("passwd"),
                )
                metadata = {host: slave_info.get("xenhost_uuid")}
                aggregate.update_metadata(metadata)
            elif master_compute and master_compute != host:
                # send rpc cast to master, asking to add the following
                # host with specified credentials.
                slave_info = self._create_slave_info()

                self.compute_rpcapi.add_aggregate_host(context, aggregate, host, master_compute, slave_info)
Example no. 13
File: pool.py Project: hadib/nova
    def _is_hv_pool(self, context, aggregate_id):
        return pool_states.is_hv_pool(context, aggregate_id)
Example no. 14
    def _is_hv_pool(self, context, aggregate_id):
        return pool_states.is_hv_pool(
            self._virtapi.aggregate_metadata_get(context, aggregate_id))
Example no. 15
    def _is_hv_pool(self, context, aggregate_id):
        return pool_states.is_hv_pool(
            self._virtapi.aggregate_metadata_get(context, aggregate_id))
Example no. 16
    def _is_hv_pool(self, context, aggregate_id):
        return pool_states.is_hv_pool(context, aggregate_id)
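Examples 13-16 show the two signatures of the pool check used across these revisions: the older one passes (context, aggregate_id) straight through to pool_states.is_hv_pool, while the newer one first fetches the aggregate metadata and lets the helper inspect it. A minimal sketch of the metadata-based helper follows, assuming the pool is marked by a dedicated metadata flag; the flag name is an assumption.

# Sketch of the metadata-based pool_states helper assumed by examples 14 and 15.
POOL_FLAG = 'hypervisor_pool'   # assumed name of the metadata key marking a hypervisor pool


def is_hv_pool(metadata):
    """Return True if the aggregate metadata marks it as a hypervisor pool."""
    return POOL_FLAG in metadata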