Example #1
    def test_memcached_driver(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        hostkey = str("%s:%s" % (self._topic, self._host))
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=self.down_time)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        self.useFixture(test.TimeOverride())
        timeutils.advance_time_seconds(self.down_time + 1)
        self.servicegroup_api._driver._report_state(serv)
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        serv.stop()
        timeutils.advance_time_seconds(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
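Note on the example above: the memcached servicegroup driver treats a service as up while its last heartbeat is younger than the configured down time, which is why the test advances the clock past down_time before re-checking. A minimal, self-contained sketch of that threshold logic (the names last_heartbeat and is_service_up are illustrative, not the driver's actual internals):

import datetime

def is_service_up(last_heartbeat, down_time, now=None):
    # A service counts as up while its last heartbeat is at most
    # `down_time` seconds old; anything older is reported as down.
    now = now or datetime.datetime.utcnow()
    return (now - last_heartbeat) <= datetime.timedelta(seconds=down_time)

# A heartbeat 10 seconds old is up with down_time=15 and down with down_time=5.
beat = datetime.datetime.utcnow() - datetime.timedelta(seconds=10)
assert is_service_up(beat, down_time=15)
assert not is_service_up(beat, down_time=5)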
Example #2
    def test_get_all(self):
        host1 = self._host + '_1'
        host2 = self._host + '_2'

        serv1 = self.useFixture(
            ServiceFixture(host1, self._binary, self._topic)).serv
        serv1.start()

        serv2 = self.useFixture(
            ServiceFixture(host2, self._binary, self._topic)).serv
        serv2.start()

        service_ref1 = db.service_get_by_args(self._ctx,
                                              host1,
                                              self._binary)
        service_ref2 = db.service_get_by_args(self._ctx,
                                              host2,
                                              self._binary)

        services = self.servicegroup_api.get_all(self._topic)

        self.assertTrue(service_ref1['host'] in services)
        self.assertTrue(service_ref2['host'] in services)

        service_id = self.servicegroup_api.get_one(self._topic)
        self.assertTrue(service_id in services)
Example #3
    def test_get_all(self):
        host1 = self._host + '_1'
        host2 = self._host + '_2'

        serv1 = service.Service(host1,
                                self._binary,
                                self._topic,
                                'nova.tests.test_service.FakeManager',
                                1, 1)
        serv1.start()

        serv2 = service.Service(host2,
                                self._binary,
                                self._topic,
                                'nova.tests.test_service.FakeManager',
                                1, 1)
        serv2.start()

        service_ref1 = db.service_get_by_args(self._ctx,
                                              host1,
                                              self._binary)
        service_ref2 = db.service_get_by_args(self._ctx,
                                              host2,
                                              self._binary)

        services = self.servicegroup_api.get_all(self._topic)

        self.assertTrue(service_ref1['host'] in services)
        self.assertTrue(service_ref2['host'] in services)

        service_id = self.servicegroup_api.get_one(self._topic)
        self.assertTrue(service_id in services)
Example #4
    def test_get_all(self):
        host1 = self._host + '_1'
        host2 = self._host + '_2'

        serv1 = self.useFixture(
            ServiceFixture(host1, self._binary, self._topic)).serv
        serv1.start()

        serv2 = self.useFixture(
            ServiceFixture(host2, self._binary, self._topic)).serv
        serv2.start()

        service_ref1 = db.service_get_by_args(self._ctx,
                                              host1,
                                              self._binary)
        service_ref2 = db.service_get_by_args(self._ctx,
                                              host2,
                                              self._binary)

        services = self.servicegroup_api.get_all(self._topic)

        self.assertIn(service_ref1['host'], services)
        self.assertIn(service_ref2['host'], services)

        service_id = self.servicegroup_api.get_one(self._topic)
        self.assertIn(service_id, services)
Example #5
    def _test_service_check_create_race(self, ex):
        self.manager_mock = self.mox.CreateMock(FakeManager)
        self.mox.StubOutWithMock(sys.modules[__name__], 'FakeManager',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(self.manager_mock, 'init_host')
        self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
        self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')

        FakeManager(host=self.host).AndReturn(self.manager_mock)

        # init_host is called before any service record is created
        self.manager_mock.init_host()

        db.service_get_by_args(mox.IgnoreArg(), self.host, self.binary
                               ).AndRaise(exception.NotFound)
        db.service_create(mox.IgnoreArg(), mox.IgnoreArg()
                          ).AndRaise(ex)

        class TestException(Exception):
            pass

        db.service_get_by_args(mox.IgnoreArg(), self.host, self.binary
                               ).AndRaise(TestException)

        self.mox.ReplayAll()

        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.unit.test_service.FakeManager')
        self.assertRaises(TestException, serv.start)
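The race test above (and the variant in the next example) drives a fixed sequence: the first service_get_by_args misses, service_create collides with another writer, and the code falls back to a second lookup. A rough sketch of that control flow using unittest.mock instead of mox, written against a hypothetical ensure_service_ref helper rather than Nova's real Service.start:

from unittest import mock

class DuplicateEntry(Exception):
    """Stand-in for the DB duplicate-key error raised on the create race."""

def ensure_service_ref(db, ctxt, host, binary):
    # Look up the service record, create it if missing, and re-read it
    # if another process won the race and created it first.
    try:
        return db.service_get_by_args(ctxt, host, binary)
    except LookupError:
        try:
            return db.service_create(ctxt, {'host': host, 'binary': binary})
        except DuplicateEntry:
            return db.service_get_by_args(ctxt, host, binary)

db = mock.Mock()
db.service_get_by_args.side_effect = [LookupError, {'id': 1}]
db.service_create.side_effect = DuplicateEntry
assert ensure_service_ref(db, None, 'host1', 'nova-compute') == {'id': 1}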
Example #6
    def _test_service_check_create_race(self, ex):
        self.manager_mock = self.mox.CreateMock(FakeManager)
        self.mox.StubOutWithMock(sys.modules[__name__],
                                 'FakeManager',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(self.manager_mock, 'init_host')
        self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
        self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')

        FakeManager(host=self.host).AndReturn(self.manager_mock)

        # init_host is called before any service record is created
        self.manager_mock.init_host()

        db.service_get_by_args(mox.IgnoreArg(), self.host,
                               self.binary).AndRaise(exception.NotFound)
        db.service_create(mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(ex)

        class TestException(Exception):
            pass

        db.service_get_by_args(mox.IgnoreArg(), self.host,
                               self.binary).AndRaise(TestException)

        self.mox.ReplayAll()

        serv = service.Service(self.host, self.binary, self.topic,
                               'nova.tests.test_service.FakeManager')
        self.assertRaises(TestException, serv.start)
Example #7
    def _service_start_mocks(self):
        service_create = {"host": self.host,
                          "binary": self.binary,
                          "topic": self.topic,
                          "report_count": 0}
        service_ref = {"host": self.host,
                       "binary": self.binary,
                       "topic": self.topic,
                       "report_count": 0,
                       "id": 1}

        db.service_get_by_args(mox.IgnoreArg(),
                               self.host,
                               self.binary).AndRaise(exception.NotFound())
        db.service_create(mox.IgnoreArg(),
                          service_create).AndReturn(service_ref)
        return service_ref
Example #8
    def test_memcached_driver(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        hostkey = str("%s:%s" % (self._topic, self._host))
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=self.down_time)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        serv.stop()
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
Example #9
 def test_doesnt_report_disabled_hosts_as_up(self):
     """Ensures driver doesn't find hosts before they are enabled"""
     compute1 = self.start_service('compute', host='host1')
     compute2 = self.start_service('compute', host='host2')
     s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
     s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
     db.service_update(self.context, s1['id'], {'disabled': True})
     db.service_update(self.context, s2['id'], {'disabled': True})
     hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
     self.assertEqual(0, len(hosts))
     compute1.kill()
     compute2.kill()
Example #10
 def test_doesnt_report_disabled_hosts_as_up(self):
     """Ensures driver doesn't find hosts before they are enabled"""
     compute1 = self.start_service('compute', host='host1')
     compute2 = self.start_service('compute', host='host2')
     s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
     s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
     db.service_update(self.context, s1['id'], {'disabled': True})
     db.service_update(self.context, s2['id'], {'disabled': True})
     hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
     self.assertEqual(0, len(hosts))
     compute1.kill()
     compute2.kill()
Example #11
 def test_doesnt_report_disabled_hosts_as_up(self):
     """Ensures driver doesn't find hosts before they are enabled"""
     compute1 = self.start_service("compute", host="host1")
     compute2 = self.start_service("compute", host="host2")
     s1 = db.service_get_by_args(self.context, "host1", "nova-compute")
     s2 = db.service_get_by_args(self.context, "host2", "nova-compute")
     db.service_update(self.context, s1["id"], {"disabled": True})
     db.service_update(self.context, s2["id"], {"disabled": True})
     hosts = self.scheduler.driver.hosts_up(self.context, "compute")
     self.assertEqual(0, len(hosts))
     compute1.kill()
     compute2.kill()
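The hosts_up tests above and below all hinge on the same rule: a host whose service record is marked disabled must not be reported as up, even while its process is still running. A toy illustration of that filter (not the scheduler driver's actual implementation):

services = [{'host': 'host1', 'disabled': True},
            {'host': 'host2', 'disabled': False}]
hosts_up = [s['host'] for s in services if not s['disabled']]
assert hosts_up == ['host2']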
Example #12
    def test_DB_driver(self):
        serv = self.useFixture(ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx, self._host, self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx, self._host, self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        serv.stop()
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx, self._host, self._binary)
        self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
Example #13
    def test_get_all(self):
        host1 = self._host + '_1'
        host2 = self._host + '_2'
        host3 = self._host + '_3'

        serv1 = self.useFixture(
            ServiceFixture(host1, self._binary, self._topic)).serv
        serv1.start()

        serv2 = self.useFixture(
            ServiceFixture(host2, self._binary, self._topic)).serv
        serv2.start()

        serv3 = self.useFixture(
            ServiceFixture(host3, self._binary, self._topic)).serv
        serv3.start()

        service_ref1 = db.service_get_by_args(self._ctx,
                                              host1,
                                              self._binary)
        service_ref2 = db.service_get_by_args(self._ctx,
                                              host2,
                                              self._binary)
        service_ref3 = db.service_get_by_args(self._ctx,
                                              host3,
                                              self._binary)

        host1key = str("%s:%s" % (self._topic, host1))
        host2key = str("%s:%s" % (self._topic, host2))
        host3key = str("%s:%s" % (self._topic, host3))
        self.servicegroup_api._driver.mc.set(host1key,
                                             timeutils.utcnow(),
                                             time=self.down_time)
        self.servicegroup_api._driver.mc.set(host2key,
                                             timeutils.utcnow(),
                                             time=self.down_time)
        self.servicegroup_api._driver.mc.set(host3key,
                                             timeutils.utcnow(),
                                             time=-1)

        services = self.servicegroup_api.get_all(self._topic)

        self.assertTrue(host1 in services)
        self.assertTrue(host2 in services)
        self.assertFalse(host3 in services)

        service_id = self.servicegroup_api.get_one(self._topic)
        self.assertTrue(service_id in services)
Example #14
    def _service_start_mocks(self):
        service_create = {'host': self.host,
                          'binary': self.binary,
                          'topic': self.topic,
                          'report_count': 0}
        service_ref = {'host': self.host,
                       'binary': self.binary,
                       'topic': self.topic,
                       'report_count': 0,
                       'id': 1}

        db.service_get_by_args(mox.IgnoreArg(),
                self.host, self.binary).AndRaise(exception.NotFound())
        db.service_create(mox.IgnoreArg(),
                service_create).AndReturn(service_ref)
        return service_ref
Example #15
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        elevated = context.elevated()

        volume_ref = db.volume_get(context, volume_id)
        availability_zone = volume_ref.get('availability_zone')

        zone, host = None, None
        if availability_zone:
            zone, _x, host = availability_zone.partition(':')
        if host and context.is_admin:
            service = db.service_get_by_args(elevated, host, 'nova-volume')
            if not self.service_is_up(service):
                raise exception.WillNotSchedule(host=host)
            driver.cast_to_volume_host(context, host, 'create_volume',
                    volume_id=volume_id, **_kwargs)
            return None

        results = db.service_get_all_volume_sorted(elevated)
        if zone:
            results = [(service, gigs) for (service, gigs) in results
                       if service['availability_zone'] == zone]
        for result in results:
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                msg = _("All hosts have too many gigabytes")
                raise exception.NoValidHost(reason=msg)
            if self.service_is_up(service):
                driver.cast_to_volume_host(context, service['host'],
                        'create_volume', volume_id=volume_id, **_kwargs)
                return None
        msg = _("Is the appropriate service running?")
        raise exception.NoValidHost(reason=msg)
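Several schedulers in this listing split an availability zone of the form 'zone:host' with str.partition(':'), which always returns a 3-tuple even when the separator is missing, so host simply stays empty for a zone-only value:

zone, _sep, host = 'nova:host1'.partition(':')
assert (zone, host) == ('nova', 'host1')

zone, _sep, host = 'nova'.partition(':')
assert (zone, host) == ('nova', '')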
Example #16
    def update(self, req, id, body):
        """Enable/Disable scheduling for a service."""
        context = req.environ["nova.context"]
        authorize(context)

        if id == "enable":
            disabled = False
        elif id == "disable":
            disabled = True
        else:
            raise webob.exc.HTTPNotFound("Unknown action")

        try:
            host = body["host"]
            service = body["service"]
        except (TypeError, KeyError):
            raise webob.exc.HTTPUnprocessableEntity()

        try:
            svc = db.service_get_by_args(context, host, service)
            if not svc:
                raise webob.exc.HTTPNotFound("Unknown service")

            db.service_update(context, svc["id"], {"disabled": disabled})
        except exception.ServiceNotFound:
            raise webob.exc.HTTPNotFound("service not found")

        return {"host": host, "service": service, "disabled": disabled}
Example #17
    def update(self, req, id, body):
        """Enable/Disable scheduling for a service."""
        context = req.environ['nova.context']
        authorize(context)

        if id == "enable":
            disabled = False
        elif id == "disable":
            disabled = True
        else:
            raise webob.exc.HTTPNotFound("Unknown action")

        try:
            host = body['host']
            binary = body['binary']
        except (TypeError, KeyError):
            raise webob.exc.HTTPUnprocessableEntity()

        try:
            svc = db.service_get_by_args(context, host, binary)
            if not svc:
                raise webob.exc.HTTPNotFound('Unknown service')

            db.service_update(context, svc['id'], {'disabled': disabled})
        except exception.ServiceNotFound:
            raise webob.exc.HTTPNotFound("service not found")

        status = id + 'd'
        return {'service': {'host': host, 'binary': binary, 'status': status}}
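Both update() controllers above read the action from the URL id ('enable' or 'disable') and the target from the JSON body; the second variant also builds the returned status by appending 'd' to the id. Illustrative request and response shapes only, with field names taken from the snippets rather than from a full API reference:

request_body = {'host': 'host1', 'binary': 'nova-compute'}    # sent with id='disable'
expected_response = {'service': {'host': 'host1',
                                 'binary': 'nova-compute',
                                 'status': 'disabled'}}       # id 'disable' + 'd'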
Example #18
    def _schedule_instance(self, context, instance_opts, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        elevated = context.elevated()

        availability_zone = instance_opts.get('availability_zone')

        zone, host = FLAGS.default_schedule_zone, None
        if availability_zone:
            zone, _x, host = availability_zone.partition(':')

        if host and context.is_admin:
            service = db.service_get_by_args(elevated, host, 'nova-compute')
            if not self.service_is_up(service):
                raise exception.WillNotSchedule(host=host)
            return host

        results = db.service_get_all_compute_sorted(elevated)
        if zone:
            results = [(service, cores) for (service, cores) in results
                       if service['availability_zone'] == zone]
        for result in results:
            (service, instance_cores) = result
            if instance_cores + instance_opts['vcpus'] > FLAGS.max_cores:
                msg = _("Not enough allocatable CPU cores remaining")
                raise exception.NoValidHost(reason=msg)
            if self.service_is_up(service):
                return service['host']
        msg = _("Is the appropriate service running?")
        raise exception.NoValidHost(reason=msg)
Example #19
    def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        instance_ref = db.instance_get(context, instance_id)

        if (instance_ref['availability_zone']
                and ':' in instance_ref['availability_zone']
                and context.is_admin):

            zone, _x, host = instance_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-compute')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s is not alive") % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.instance_update(context, instance_id, {
                'host': host,
                'scheduled_at': now
            })
            return host

        results = db.service_get_all_compute_sorted(context)

        for result in results:
            (service, instance_cores) = result

            compute_ref = db.service_get_all_compute_by_host(
                context, service['host'])[0]
            compute_node_ref = compute_ref['compute_node'][0]

            if (instance_ref['vcpus'] + instance_cores >
                    compute_node_ref['vcpus'] * FLAGS.max_cores):
                raise driver.NoValidHost(_("All hosts have too many cores"))

            LOG.debug(
                _("requested instance cores = %s + used compute node cores = %s < total compute node cores = %s * max cores = %s"
                  ) % (instance_ref['vcpus'], instance_cores,
                       compute_node_ref['vcpus'], FLAGS.max_cores))

            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = datetime.datetime.utcnow()
                db.instance_update(context, instance_id, {
                    'host': service['host'],
                    'scheduled_at': now
                })

                LOG.debug(
                    _("instance = %s scheduled to host = %s") %
                    (instance_id, service['host']))

                return service['host']

        raise driver.NoValidHost(
            _("Scheduler was unable to locate a host"
              " for this request. Is the appropriate"
              " service running?"))
Example #20
    def _schedule_instance(self, context, instance_opts, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        elevated = context.elevated()

        availability_zone = instance_opts.get('availability_zone')

        zone, host = FLAGS.default_schedule_zone, None
        if availability_zone:
            zone, _x, host = availability_zone.partition(':')

        if host and context.is_admin:
            service = db.service_get_by_args(elevated, host, 'nova-compute')
            if not self.service_is_up(service):
                raise exception.WillNotSchedule(host=host)
            return host

        results = db.service_get_all_compute_sorted(elevated)
        if zone:
            results = [(service, cores) for (service, cores) in results
                       if service['availability_zone'] == zone]
        for result in results:
            (service, instance_cores) = result
            if instance_cores + instance_opts['vcpus'] > FLAGS.max_cores:
                msg = _("Not enough allocatable CPU cores remaining")
                raise exception.NoValidHost(reason=msg)
            if self.service_is_up(service):
                return service['host']
        msg = _("Is the appropriate service running?")
        raise exception.NoValidHost(reason=msg)
Example #21
    def update(self, req, id, body):
        """Enable/Disable scheduling for a service"""
        context = req.environ['nova.context']
        authorize(context)

        if id == "enable":
            disabled = False
        elif id == "disable":
            disabled = True
        else:
            raise webob.exc.HTTPNotFound("Unknown action")

        try:
            host = body['host']
            service = body['service']
        except (TypeError, KeyError):
            raise webob.exc.HTTPUnprocessableEntity()

        try:
            svc = db.service_get_by_args(context, host, service)
            if not svc:
                raise webob.exc.HTTPNotFound('Unknown service')

            db.service_update(context, svc['id'], {'disabled': disabled})
        except exception.ServiceNotFound:
            raise webob.exc.HTTPNotFound("service not found")

        return {'host': host, 'service': service, 'disabled': disabled}
Example #22
    def start(self):
        manager_class = utils.import_class(self.manager_class_name)
        self.manager = manager_class(host=self.host, *self.saved_args, **self.saved_kwargs)
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt, self.host, self.binary)
            self.service_id = service_ref["id"]
        except exception.NotFound:
            self._create_service_ref(ctxt)

        conn1 = rpc.Connection.instance(new=True)
        conn2 = rpc.Connection.instance(new=True)
        if self.report_interval:
            consumer_all = rpc.AdapterConsumer(connection=conn1, topic=self.topic, proxy=self)
            consumer_node = rpc.AdapterConsumer(connection=conn2, topic="%s.%s" % (self.topic, self.host), proxy=self)

            self.timers.append(consumer_all.attach_to_eventlet())
            self.timers.append(consumer_node.attach_to_eventlet())

            pulse = utils.LoopingCall(self.report_state)
            pulse.start(interval=self.report_interval, now=False)
            self.timers.append(pulse)

        if self.periodic_interval:
            periodic = utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval, now=False)
            self.timers.append(periodic)
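The start() methods in this listing schedule their heartbeat and periodic work with utils.LoopingCall, which repeatedly invokes a callable at a fixed interval. A minimal standard-library stand-in for that idea (this is not Nova's LoopingCall implementation):

import threading

def looping_call(func, interval, stop_event):
    # Run `func` every `interval` seconds until `stop_event` is set.
    def _run():
        while not stop_event.wait(interval):
            func()
    worker = threading.Thread(target=_run, daemon=True)
    worker.start()
    return worker

stop = threading.Event()
looping_call(lambda: print('pulse'), interval=0.5, stop_event=stop)
stop.set()  # stops the loop; the services above cancel their timers on stop()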
Example #23
 def test_doesnt_report_disabled_hosts_as_up(self):
     """Ensures driver doesn't find hosts before they are enabled"""
     # NOTE(vish): constructing service without create method
     #             because we are going to use it without queue
     compute1 = service.Service("host1", "nova-compute", "compute", FLAGS.compute_manager)
     compute1.start()
     compute2 = service.Service("host2", "nova-compute", "compute", FLAGS.compute_manager)
     compute2.start()
     s1 = db.service_get_by_args(self.context, "host1", "nova-compute")
     s2 = db.service_get_by_args(self.context, "host2", "nova-compute")
     db.service_update(self.context, s1["id"], {"disabled": True})
     db.service_update(self.context, s2["id"], {"disabled": True})
     hosts = self.scheduler.driver.hosts_up(self.context, "compute")
     self.assertEqual(0, len(hosts))
     compute1.kill()
     compute2.kill()
Example #24
    def test_DB_driver(self):
        serv = self.useFixture(ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx, self._host, self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        self.useFixture(test.TimeOverride())
        timeutils.advance_time_seconds(self.down_time + 1)
        self.servicegroup_api._driver._report_state(serv)
        service_ref = db.service_get_by_args(self._ctx, self._host, self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        serv.stop()
        timeutils.advance_time_seconds(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx, self._host, self._binary)
        self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
Example #25
    def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        instance_ref = db.instance_get(context, instance_id)
        if (instance_ref['availability_zone']
            and ':' in instance_ref['availability_zone']
            and context.is_admin):
            zone, _x, host = instance_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-compute')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s is not alive") % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.instance_update(context, instance_id, {'host': host,
                                                      'scheduled_at': now})
            return host
        results = db.service_get_all_compute_sorted(context)
        for result in results:
            (service, instance_cores) = result
            if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
                raise driver.NoValidHost(_("All hosts have too many cores"))
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = datetime.datetime.utcnow()
                db.instance_update(context,
                                   instance_id,
                                   {'host': service['host'],
                                    'scheduled_at': now})
                return service['host']
        raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                                   " for this request. Is the appropriate"
                                   " service running?"))
Example #26
    def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""
        instance_ref = db.instance_get(context, instance_id)
        if (instance_ref['availability_zone']
                and ':' in instance_ref['availability_zone']
                and context.is_admin):
            zone, _x, host = instance_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-compute')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s is not alive") % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.instance_update(context, instance_id, {
                'host': host,
                'scheduled_at': now
            })
            return host
        results = db.service_get_all_compute_sorted(context)
        for result in results:
            (service, instance_cores) = result
            if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
                raise driver.NoValidHost(_("All hosts have too many cores"))
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = datetime.datetime.utcnow()
                db.instance_update(context, instance_id, {
                    'host': service['host'],
                    'scheduled_at': now
                })
                return service['host']
        raise driver.NoValidHost(_("No hosts found"))
Example #27
    def _service_start_mocks(self):
        service_create = {'host': self.host,
                          'binary': self.binary,
                          'topic': self.topic,
                          'report_count': 0}
        service_ref = {'host': self.host,
                       'binary': self.binary,
                       'topic': self.topic,
                       'report_count': 0,
                       'id': 1}

        db.service_get_by_args(mox.IgnoreArg(),
                self.host, self.binary).AndRaise(exception.NotFound())
        db.service_create(mox.IgnoreArg(),
                service_create).AndReturn(service_ref)
        return service_ref
Example #28
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        volume_ref = db.volume_get(context, volume_id)
        if (volume_ref['availability_zone']
            and ':' in volume_ref['availability_zone']
            and context.is_admin):
            zone, _x, host = volume_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s not available") % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.volume_update(context, volume_id, {'host': host,
                                                  'scheduled_at': now})
            return host
        results = db.service_get_all_volume_sorted(context)
        for result in results:
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                raise driver.NoValidHost(_("All hosts have too many "
                                           "gigabytes"))
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = datetime.datetime.utcnow()
                db.volume_update(context,
                                 volume_id,
                                 {'host': service['host'],
                                  'scheduled_at': now})
                return service['host']
        raise driver.NoValidHost(_("No hosts found"))
Example #29
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        elevated = context.elevated()

        volume_ref = db.volume_get(context, volume_id)
        availability_zone = volume_ref.get('availability_zone')

        zone, host = None, None
        if availability_zone:
            zone, _x, host = availability_zone.partition(':')
        if host and context.is_admin:
            service = db.service_get_by_args(elevated, host, 'nova-volume')
            if not utils.service_is_up(service):
                raise exception.WillNotSchedule(host=host)
            driver.cast_to_volume_host(context, host, 'create_volume',
                    volume_id=volume_id, **_kwargs)
            return None

        results = db.service_get_all_volume_sorted(elevated)
        if zone:
            results = [(service, gigs) for (service, gigs) in results
                       if service['availability_zone'] == zone]
        for result in results:
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                msg = _("Not enough allocatable volume gigabytes remaining")
                raise exception.NoValidHost(reason=msg)
            if utils.service_is_up(service) and not service['disabled']:
                driver.cast_to_volume_host(context, service['host'],
                        'create_volume', volume_id=volume_id, **_kwargs)
                return None
        msg = _("Is the appropriate service running?")
        raise exception.NoValidHost(reason=msg)
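The capacity guard in the volume schedulers above is a plain threshold: if the gigabytes already allocated on a host plus the requested size exceed FLAGS.max_gigabytes, the request fails with NoValidHost. A worked example of that arithmetic (max_gigabytes=100 is an assumed value, not Nova's default):

max_gigabytes = 100                            # stand-in for FLAGS.max_gigabytes
used, requested = 90, 15
assert used + requested > max_gigabytes        # 105 > 100 -> NoValidHost
used, requested = 90, 5
assert not (used + requested > max_gigabytes)  # 95 <= 100 -> host is acceptable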
Example #30
    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        volume_ref = db.volume_get(context, volume_id)
        if (volume_ref['availability_zone']
                and ':' in volume_ref['availability_zone']
                and context.is_admin):
            zone, _x, host = volume_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s not available") % host)

            # TODO(vish): this probably belongs in the manager, if we
            #             can generalize this somehow
            now = datetime.datetime.utcnow()
            db.volume_update(context, volume_id, {
                'host': host,
                'scheduled_at': now
            })
            return host

        results = db.service_get_all_volume_sorted(context)

        for result in results:
            (service, volume_gigabytes) = result

            compute_ref = db.service_get_all_compute_by_host(
                context, service['host'])[0]
            compute_node_ref = compute_ref['compute_node'][0]

            if volume_ref['size'] + volume_gigabytes > compute_node_ref[
                    'local_gb']:
                raise driver.NoValidHost(
                    _("All hosts have too many "
                      "gigabytes"))

            LOG.debug(
                _("requested volume GBs = %s + used compute node GBs = %s < total compute node GBs = %s"
                  ) % (volume_ref['size'], volume_gigabytes,
                       compute_node_ref['local_gb']))

            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
                now = datetime.datetime.utcnow()
                db.volume_update(context, volume_id, {
                    'host': service['host'],
                    'scheduled_at': now
                })

                LOG.debug(
                    _("volume = %s scheduled to host = %s") %
                    (volume_id, service['host']))

                return service['host']
        raise driver.NoValidHost(
            _("Scheduler was unable to locate a host"
              " for this request. Is the appropriate"
              " service running?"))
Example #31
    def test_service_is_up(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        fake_now = 1000
        down_time = 5
        self.flags(service_down_time=down_time)
        self.mox.StubOutWithMock(timeutils, 'utcnow_ts')
        self.servicegroup_api = servicegroup.API()
        hostkey = str("%s:%s" % (self._topic, self._host))

        # Up (equal)
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertTrue(result)

        self.mox.ResetAll()
        # Up
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertTrue(result)

        self.mox.ResetAll()
        # Down
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertFalse(result)

        self.mox.ResetAll()
        # Down
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertFalse(result)

        self.mox.ResetAll()
Example #32
    def test_service_is_up(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        fake_now = 1000
        down_time = 15
        self.flags(service_down_time=down_time)
        self.mox.StubOutWithMock(timeutils, 'utcnow_ts')
        self.servicegroup_api = servicegroup.API()
        hostkey = str("%s:%s" % (self._topic, self._host))

        # Up (equal)
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertTrue(result)

        self.mox.ResetAll()
        # Up
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertTrue(result)

        self.mox.ResetAll()
        # Down
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertFalse(result)

        self.mox.ResetAll()
        # Down
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertFalse(result)

        self.mox.ResetAll()
Example #33
 def test_will_schedule_on_disabled_host_if_specified(self):
     compute1 = self.start_service("compute", host="host1")
     s1 = db.service_get_by_args(self.context, "host1", "nova-compute")
     db.service_update(self.context, s1["id"], {"disabled": True})
     instance_id2 = self._create_instance(availability_zone="nova:host1")
     host = self.scheduler.driver.schedule_run_instance(self.context, instance_id2)
     self.assertEqual("host1", host)
     db.instance_destroy(self.context, instance_id2)
     compute1.kill()
Example #34
    def start(self):
        vcs_string = version.version_string_with_vcs()
        logging.audit(_('Starting %(topic)s node (version %(vcs_string)s)'), {
            'topic': self.topic,
            'vcs_string': vcs_string
        })
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt, self.host, self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        if 'nova-compute' == self.binary:
            self.manager.update_available_resource(ctxt)

        self.conn = rpc.create_connection(new=True)
        logging.debug("Creating Consumer connection for Service %s" %
                      self.topic)

        # Share this same connection for these Consumers
        consumer_all = rpc.create_consumer(self.conn,
                                           self.topic,
                                           self,
                                           fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        consumer_node = rpc.create_consumer(self.conn,
                                            node_topic,
                                            self,
                                            fanout=False)

        fanout = rpc.create_consumer(self.conn, self.topic, self, fanout=True)

        consumers = [consumer_all, consumer_node, fanout]
        consumer_set = rpc.create_consumer_set(self.conn, consumers)

        # Wait forever, processing these consumers
        def _wait():
            try:
                consumer_set.wait()
            finally:
                consumer_set.close()

        self.consumer_set_thread = greenthread.spawn(_wait)

        if self.report_interval:
            pulse = utils.LoopingCall(self.report_state)
            pulse.start(interval=self.report_interval, now=False)
            self.timers.append(pulse)

        if self.periodic_interval:
            periodic = utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval, now=False)
            self.timers.append(periodic)
Example #35
 def test_doesnt_report_disabled_hosts_as_up(self):
     """Ensures driver doesn't find hosts before they are enabled"""
     # NOTE(vish): constructing service without create method
     #             because we are going to use it without queue
     compute1 = service.Service('host1', 'nova-compute', 'compute',
                                FLAGS.compute_manager)
     compute1.start()
     compute2 = service.Service('host2', 'nova-compute', 'compute',
                                FLAGS.compute_manager)
     compute2.start()
     s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
     s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
     db.service_update(self.context, s1['id'], {'disabled': True})
     db.service_update(self.context, s2['id'], {'disabled': True})
     hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
     self.assertEqual(0, len(hosts))
     compute1.kill()
     compute2.kill()
Example #36
    def test_DB_driver(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx, self._host,
                                             self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx, self._host,
                                             self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        serv.stop()
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx, self._host,
                                             self._binary)
        self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
Example #37
    def test_DB_driver(self):
        serv = service.Service(self._host, self._binary, self._topic,
                               'nova.tests.test_service.FakeManager', 1, 1)
        serv.start()
        service_ref = db.service_get_by_args(self._ctx, self._host,
                                             self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx, self._host,
                                             self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        serv.stop()
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx, self._host,
                                             self._binary)
        self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
Example #38
    def start(self):
        vcs_string = version.version_string_with_vcs()
        logging.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
                      {'topic': self.topic, 'vcs_string': vcs_string})
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt,
                                                 self.host,
                                                 self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        if 'nova-compute' == self.binary:
            self.manager.update_available_resource(ctxt)

        self.conn = rpc.Connection.instance(new=True)
        logging.debug("Creating Consumer connection for Service %s" %
                      self.topic)

        # Share this same connection for these Consumers
        consumer_all = rpc.TopicAdapterConsumer(
                connection=self.conn,
                topic=self.topic,
                proxy=self)
        consumer_node = rpc.TopicAdapterConsumer(
                connection=self.conn,
                topic='%s.%s' % (self.topic, self.host),
                proxy=self)
        fanout = rpc.FanoutAdapterConsumer(
                connection=self.conn,
                topic=self.topic,
                proxy=self)
        consumer_set = rpc.ConsumerSet(
                connection=self.conn,
                consumer_list=[consumer_all, consumer_node, fanout])

        # Wait forever, processing these consumers
        def _wait():
            try:
                consumer_set.wait()
            finally:
                consumer_set.close()

        self.consumer_set_thread = greenthread.spawn(_wait)

        if self.report_interval:
            pulse = utils.LoopingCall(self.report_state)
            pulse.start(interval=self.report_interval, now=False)
            self.timers.append(pulse)

        if self.periodic_interval:
            periodic = utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval, now=False)
            self.timers.append(periodic)
Example #39
    def start(self):
        vcs_string = version.version_string_with_vcs()
        LOG.audit(_('Starting %(topic)s node (version %(vcs_string)s)'), {
            'topic': self.topic,
            'vcs_string': vcs_string
        })
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt, self.host, self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        if self.backdoor_port is not None:
            self.manager.backdoor_port = self.backdoor_port

        self.conn = rpc.create_connection(new=True)
        LOG.debug(
            _("Creating Consumer connection for Service %s") % self.topic)

        self.manager.pre_start_hook(rpc_connection=self.conn)

        rpc_dispatcher = self.manager.create_rpc_dispatcher()

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

        self.manager.post_start_hook()

        LOG.debug(
            _("Join ServiceGroup membership for this service %s") % self.topic)
        # Add service to the ServiceGroup membership group.
        pulse = self.servicegroup_api.join(self.host, self.topic, self)
        if pulse:
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
Example #40
    def start(self):
        verstr = version.version_string_with_package()
        LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
                  {'topic': self.topic, 'version': verstr})
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt,
                                                 self.host,
                                                 self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        if self.backdoor_port is not None:
            self.manager.backdoor_port = self.backdoor_port

        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)

        self.manager.pre_start_hook(rpc_connection=self.conn)

        rpc_dispatcher = self.manager.create_rpc_dispatcher()

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

        self.manager.post_start_hook()

        LOG.debug(_("Join ServiceGroup membership for this service %s")
                  % self.topic)
        # Add service to the ServiceGroup membership group.
        pulse = self.servicegroup_api.join(self.host, self.topic, self)
        if pulse:
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
Example #41
 def disable(self, host, service):
     """Disable scheduling for a service."""
     ctxt = context.get_admin_context()
     try:
         svc = db.service_get_by_args(ctxt, host, service)
         db.service_update(ctxt, svc['id'], {'disabled': True})
     except exception.NotFound as ex:
         print _("error: %s") % ex
         return(2)
     print _("Service %(service)s on host %(host)s disabled.") % locals()
Example #42
 def test_will_schedule_on_disabled_host_if_specified(self):
     compute1 = self.start_service('compute', host='host1')
     s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
     db.service_update(self.context, s1['id'], {'disabled': True})
     instance_id2 = self._create_instance(availability_zone='nova:host1')
     host = self.scheduler.driver.schedule_run_instance(self.context,
                                                        instance_id2)
     self.assertEqual('host1', host)
     db.instance_destroy(self.context, instance_id2)
     compute1.kill()
Example #43
    def test_will_schedule_on_disabled_host_if_specified(self):
        compute1 = self.start_service('compute', host='host1')
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        db.service_update(self.context, s1['id'], {'disabled': True})
        instance_id2 = self._create_instance(availability_zone='nova:host1')
        host = self.scheduler.driver.schedule_run_instance(
            self.context, instance_id2)
        self.assertEqual('host1', host)
        db.instance_destroy(self.context, instance_id2)
        compute1.kill()
Exemplo n.º 44
0
    def enable(self, host, service):
        """Enable scheduling for a service."""
        ctxt = context.get_admin_context()
        try:
            svc = db.service_get_by_args(ctxt, host, service)
            db.service_update(ctxt, svc["id"], {"disabled": False})
        except exception.NotFound as ex:
            print _("error: %s") % ex
            return 2
        print _("Service %(service)s on host %(host)s enabled.") % locals()
Exemplo n.º 45
0
    def _check_host_enforcement(self, context, availability_zone):
        if availability_zone and ":" in availability_zone and context.is_admin:
            zone, _x, host = availability_zone.partition(":")
            service = db.service_get_by_args(context.elevated(), host, "nova-volume")
            if service["disabled"] or not utils.service_is_up(service):
                raise exception.WillNotSchedule(host=host)

            return host
        else:
            return None
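
The host-enforcement snippets parse an availability zone of the form 'zone:host' to decide whether an admin forced a specific host. A plain-Python sketch of just that parsing step (hypothetical helper, no nova imports required):

def parse_forced_host(availability_zone):
    """Split 'zone:host' into (zone, host); host is None when not forced."""
    if availability_zone and ':' in availability_zone:
        zone, _sep, host = availability_zone.partition(':')
        return zone, host
    return availability_zone, None

# parse_forced_host('nova:host1')  ->  ('nova', 'host1')
# parse_forced_host('nova')        ->  ('nova', None)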
Exemplo n.º 46
0
    def test_get_all(self):
        host1 = self._host + '_1'
        host2 = self._host + '_2'
        host3 = self._host + '_3'

        serv1 = self.useFixture(
            ServiceFixture(host1, self._binary, self._topic)).serv
        serv1.start()

        serv2 = self.useFixture(
            ServiceFixture(host2, self._binary, self._topic)).serv
        serv2.start()

        serv3 = self.useFixture(
            ServiceFixture(host3, self._binary, self._topic)).serv
        serv3.start()

        service_ref1 = db.service_get_by_args(self._ctx, host1, self._binary)
        service_ref2 = db.service_get_by_args(self._ctx, host2, self._binary)
        service_ref3 = db.service_get_by_args(self._ctx, host3, self._binary)

        host1key = str("%s:%s" % (self._topic, host1))
        host2key = str("%s:%s" % (self._topic, host2))
        host3key = str("%s:%s" % (self._topic, host3))
        self.servicegroup_api._driver.mc.set(host1key,
                                             timeutils.utcnow(),
                                             time=self.down_time)
        self.servicegroup_api._driver.mc.set(host2key,
                                             timeutils.utcnow(),
                                             time=self.down_time)
        self.servicegroup_api._driver.mc.set(host3key,
                                             timeutils.utcnow(),
                                             time=-1)

        services = self.servicegroup_api.get_all(self._topic)

        self.assertTrue(host1 in services)
        self.assertTrue(host2 in services)
        self.assertFalse(host3 in services)

        service_id = self.servicegroup_api.get_one(self._topic)
        self.assertTrue(service_id in services)
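
The memcached driver tests key each heartbeat by 'topic:host' and give it a TTL of down_time seconds, so a key that expires (or is written with a non-positive TTL, as for host3 above) makes the service look down. A minimal sketch of that write, assuming a python-memcache style client and a utcnow callable (helper names are hypothetical):

def heartbeat_key(topic, host):
    """Build the 'topic:host' key used by the memcached driver examples."""
    return str("%s:%s" % (topic, host))


def touch_heartbeat(mc, topic, host, down_time, utcnow):
    """Write or refresh the heartbeat with a TTL of down_time seconds."""
    return mc.set(heartbeat_key(topic, host), utcnow(), time=down_time)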
Exemplo n.º 47
0
    def test_get_all(self):
        host1 = self._host + "_1"
        host2 = self._host + "_2"

        serv1 = self.useFixture(ServiceFixture(host1, self._binary, self._topic)).serv
        serv1.start()

        serv2 = self.useFixture(ServiceFixture(host2, self._binary, self._topic)).serv
        serv2.start()

        service_ref1 = db.service_get_by_args(self._ctx, host1, self._binary)
        service_ref2 = db.service_get_by_args(self._ctx, host2, self._binary)

        services = self.servicegroup_api.get_all(self._topic)

        self.assertTrue(service_ref1["host"] in services)
        self.assertTrue(service_ref2["host"] in services)

        service_id = self.servicegroup_api.get_one(self._topic)
        self.assertTrue(service_id in services)
Exemplo n.º 48
0
    def enable(self, host, service):
        """Enable scheduling for a service."""
        ctxt = context.get_admin_context()
        try:
            svc = db.service_get_by_args(ctxt, host, service)
            db.service_update(ctxt, svc['id'], {'disabled': False})
        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return 2
        print((_("Service %(service)s on host %(host)s enabled.") %
               {'service': service, 'host': host}))
Exemplo n.º 49
0
    def test_DB_driver(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx, self._host,
                                             self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        self.useFixture(test.TimeOverride())
        timeutils.advance_time_seconds(self.down_time + 1)
        self.servicegroup_api._driver._report_state(serv)
        service_ref = db.service_get_by_args(self._ctx, self._host,
                                             self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        serv.stop()
        timeutils.advance_time_seconds(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx, self._host,
                                             self._binary)
        self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
Exemplo n.º 50
0
    def start(self):
        vcs_string = version.version_string_with_vcs()
        LOG.audit(_('Starting %(topic)s node (version %(vcs_string)s)'), {
            'topic': self.topic,
            'vcs_string': vcs_string
        })
        utils.cleanup_file_locks()
        rpc.register_opts(FLAGS)
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt, self.host, self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        if 'nova-compute' == self.binary:
            self.manager.update_available_resource(ctxt)

        self.conn = rpc.create_connection(new=True)
        LOG.debug(
            _("Creating Consumer connection for Service %s") % self.topic)

        rpc_dispatcher = self.manager.create_rpc_dispatcher()

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

        if self.report_interval:
            pulse = utils.LoopingCall(self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
Exemplo n.º 51
0
    def _check_host_enforcement(self, context, availability_zone):
        if (availability_zone and ':' in availability_zone
                and context.is_admin):
            zone, _x, host = availability_zone.partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise exception.WillNotSchedule(host=host)

            return host
        else:
            return None
Exemplo n.º 52
0
    def enable(self, host, service):
        """Enable scheduling for a service."""
        ctxt = context.get_admin_context()
        try:
            svc = db.service_get_by_args(ctxt, host, service)
            db.service_update(ctxt, svc['id'], {'disabled': False})
        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return 2
        print((_("Service %(service)s on host %(host)s enabled.") % {
            'service': service,
            'host': host
        }))
Exemplo n.º 53
0
    def test_memcached_driver(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx, self._host,
                                             self._binary)
        hostkey = str("%s:%s" % (self._topic, self._host))
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=self.down_time)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx, self._host,
                                             self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        serv.stop()
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx, self._host,
                                             self._binary)
        self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
Exemplo n.º 54
0
    def test_report_state(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        db.service_get_by_args(self._ctx, self._host, self._binary)
        self.servicegroup_api = servicegroup.API()

        # updating model_disconnected
        serv.model_disconnected = True
        self.servicegroup_api._driver._report_state(serv)
        self.assertFalse(serv.model_disconnected)

        # handling exception
        serv.model_disconnected = True
        self.servicegroup_api._driver.mc = None
        self.servicegroup_api._driver._report_state(serv)
        self.assertTrue(serv.model_disconnected)

        delattr(serv, 'model_disconnected')
        self.servicegroup_api._driver.mc = None
        self.servicegroup_api._driver._report_state(serv)
        self.assertTrue(serv.model_disconnected)
Exemplo n.º 55
0
    def test_wont_sechedule_if_specified_host_is_down(self):
        compute1 = self.start_service('compute', host='host1')
        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
        # Backdate the heartbeat far enough that the service counts as down.
        now = utils.utcnow()
        delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
        past = now - delta
        db.service_update(self.context, s1['id'], {'updated_at': past})
        instance_id2 = self._create_instance(availability_zone='nova:host1')
        self.assertRaises(driver.WillNotSchedule,
                          self.scheduler.driver.schedule_run_instance,
                          self.context, instance_id2)
        db.instance_destroy(self.context, instance_id2)
        compute1.kill()
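
This test backdates updated_at by more than FLAGS.service_down_time so the scheduler treats the forced host as down. A rough standalone sketch of that liveness rule (the real utils.service_is_up may differ in detail):

import datetime


def service_is_up_sketch(service, service_down_time, utcnow):
    """Treat a service as up only if its last heartbeat (updated_at, falling
    back to created_at) is newer than service_down_time seconds."""
    last_heartbeat = service['updated_at'] or service['created_at']
    return (utcnow() - last_heartbeat) <= datetime.timedelta(
        seconds=service_down_time)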
Exemplo n.º 56
0
    def test_get_all(self):
        host1 = self._host + '_1'
        host2 = self._host + '_2'

        serv1 = service.Service(host1, self._binary, self._topic,
                                'nova.tests.test_service.FakeManager', 1, 1)
        serv1.start()

        serv2 = service.Service(host2, self._binary, self._topic,
                                'nova.tests.test_service.FakeManager', 1, 1)
        serv2.start()

        service_ref1 = db.service_get_by_args(self._ctx, host1, self._binary)
        service_ref2 = db.service_get_by_args(self._ctx, host2, self._binary)

        services = self.servicegroup_api.get_all(self._topic)

        self.assertTrue(service_ref1['host'] in services)
        self.assertTrue(service_ref2['host'] in services)

        service_id = self.servicegroup_api.get_one(self._topic)
        self.assertTrue(service_id in services)
Exemplo n.º 57
0
    def schedule_create_volume(self, context, volume_id, snapshot_id,
                               image_id):
        """Picks a host that is up and has the fewest volumes."""
        deprecated.warn(
            _('nova-volume functionality is deprecated in Folsom '
              'and will be removed in Grizzly.  Volumes are now handled '
              'by Cinder'))
        elevated = context.elevated()

        volume_ref = db.volume_get(context, volume_id)
        availability_zone = volume_ref.get('availability_zone')

        zone, host = None, None
        if availability_zone:
            zone, _x, host = availability_zone.partition(':')
        if host and context.is_admin:
            service = db.service_get_by_args(elevated, host, 'nova-volume')
            if not utils.service_is_up(service):
                raise exception.WillNotSchedule(host=host)
            driver.cast_to_volume_host(context,
                                       host,
                                       'create_volume',
                                       volume_id=volume_id,
                                       snapshot_id=snapshot_id,
                                       image_id=image_id)
            return None

        results = db.service_get_all_volume_sorted(elevated)
        if zone:
            results = [(service, gigs) for (service, gigs) in results
                       if service['availability_zone'] == zone]
        for result in results:
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                msg = _("Not enough allocatable volume gigabytes remaining")
                raise exception.NoValidHost(reason=msg)
            if utils.service_is_up(service) and not service['disabled']:
                driver.cast_to_volume_host(context,
                                           service['host'],
                                           'create_volume',
                                           volume_id=volume_id,
                                           snapshot_id=snapshot_id,
                                           image_id=image_id)
                return None
        msg = _("Is the appropriate service running?")
        raise exception.NoValidHost(reason=msg)
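
The fallback path above walks hosts sorted ascending by allocated gigabytes and casts to the first one that is up, enabled, and still under the gigabyte cap; because the list is sorted, the first host over the cap means every later host is too. A simplified standalone sketch of just the selection step (plain LookupError stands in for NoValidHost; names are hypothetical):

def pick_volume_host(sorted_results, requested_gb, max_gigabytes, is_up):
    """Pick a host from (service, allocated_gb) pairs sorted ascending by
    allocated gigabytes, honouring the max_gigabytes cap."""
    for service, allocated_gb in sorted_results:
        if allocated_gb + requested_gb > max_gigabytes:
            raise LookupError(
                "Not enough allocatable volume gigabytes remaining")
        if is_up(service) and not service['disabled']:
            return service['host']
    raise LookupError("Is the appropriate service running?")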
Exemplo n.º 58
0
    def start(self):
        vcs_string = version.version_string_with_vcs()
        logging.audit(_('Starting %(topic)s node (version %(vcs_string)s)'), {
            'topic': self.topic,
            'vcs_string': vcs_string
        })
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt, self.host, self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        if 'nova-compute' == self.binary:
            self.manager.update_available_resource(ctxt)

        conn1 = rpc.Connection.instance(new=True)
        conn2 = rpc.Connection.instance(new=True)
        conn3 = rpc.Connection.instance(new=True)
        if self.report_interval:
            consumer_all = rpc.TopicAdapterConsumer(connection=conn1,
                                                    topic=self.topic,
                                                    proxy=self)
            consumer_node = rpc.TopicAdapterConsumer(connection=conn2,
                                                     topic='%s.%s' %
                                                     (self.topic, self.host),
                                                     proxy=self)
            fanout = rpc.FanoutAdapterConsumer(connection=conn3,
                                               topic=self.topic,
                                               proxy=self)

            self.timers.append(consumer_all.attach_to_eventlet())
            self.timers.append(consumer_node.attach_to_eventlet())
            self.timers.append(fanout.attach_to_eventlet())

            pulse = utils.LoopingCall(self.report_state)
            pulse.start(interval=self.report_interval, now=False)
            self.timers.append(pulse)

        if self.periodic_interval:
            periodic = utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval, now=False)
            self.timers.append(periodic)
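
All of the start() variants share the same lookup-or-create step for the service row. A minimal sketch of that step, assuming the nova db and exception modules used in these snippets (create_ref is a stand-in for the service's own _create_service_ref):

from nova import db
from nova import exception


def lookup_or_create_service(ctxt, host, binary, create_ref):
    """Return the id of the existing (host, binary) service row, or fall back
    to the caller-supplied create_ref(ctxt) when no row exists yet."""
    try:
        return db.service_get_by_args(ctxt, host, binary)['id']
    except exception.NotFound:
        return create_ref(ctxt)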