Example #1
    def setUp(self):

        self.notifier = FakeProvisionerNotifier()
        self.context_client = FakeContextClient()

        self.store = yield self.setup_store()
        self.site_drivers = {'fake-site1': FakeNodeDriver()}

        yield self._start_container()
        yield self.spawn_procs()

        pId = yield self.procRegistry.get("provisioner")
        self.client = ProvisionerClient(pid=pId)
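
The setUp above is a generator (it yields Deferreds), so in its original test
class it is presumably decorated with @defer.inlineCallbacks; the decorator was
evidently trimmed when the snippet was extracted (Examples #5 and #6 below show
it explicitly). A minimal, self-contained sketch of that pattern, with
illustrative names that are not from the provisioner code:

from twisted.internet import defer
from twisted.trial import unittest

class InlineCallbacksSetUpExample(unittest.TestCase):

    @defer.inlineCallbacks
    def setUp(self):
        # each yield suspends setUp until the Deferred fires; trial waits
        # on the Deferred that inlineCallbacks returns before running tests
        self.connection = yield self.connect()

    def connect(self):
        # stands in for a real asynchronous setup step
        return defer.succeed("connected")

    def test_connected(self):
        self.assertEqual(self.connection, "connected")
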
Example #2
    def setUp(self):

        self.notifier = FakeProvisionerNotifier()
        self.context_client = FakeContextClient()

        self.store = ProvisionerStore()
        self.site_drivers = {'fake-site1': FakeNodeDriver()}

        yield self._start_container()
        yield self.spawn_procs()

        self.fakecore = TerminateAllFakeCore()
        self.patch(self.provisioner, "core", self.fakecore)

        pId = yield self.procRegistry.get("provisioner")
        self.client = ProvisionerClient(pid=pId)
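
self.patch here is the stock twisted.trial.unittest.TestCase.patch helper: it
swaps an attribute on an object for the duration of the test and restores the
original automatically during cleanup, which is how the fake core is installed
without any extra tearDown bookkeeping. A standalone sketch, independent of
the provisioner classes:

from twisted.trial import unittest

class FakeCore(object):
    pass

class Provisioner(object):
    def __init__(self):
        self.core = "real core"

class PatchExample(unittest.TestCase):
    def test_patch_swaps_attribute(self):
        provisioner = Provisioner()
        fake = FakeCore()
        # patch() performs the setattr and registers an automatic
        # restore that runs when this test finishes
        self.patch(provisioner, "core", fake)
        self.assertIdentical(provisioner.core, fake)
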
Example #3
    def slc_init(self):
        interval = float(
            self.spawn_args.get("interval_seconds", DEFAULT_QUERY_INTERVAL))

        self.client = ProvisionerClient(self)

        log.debug('Starting provisioner query loop - %s second interval',
                  interval)
        self.loop = LoopingCall(self.query)
        self.loop.start(interval)
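
LoopingCall is twisted.internet.task.LoopingCall: start(interval) invokes the
callable immediately and then every interval seconds, and returns a Deferred
that fires once the loop is stopped. A standalone sketch of the same
query-loop shape (the query body is illustrative):

from twisted.internet import reactor, task

def query():
    # stands in for the service's query method
    print("polling provisioner...")

loop = task.LoopingCall(query)
# start(interval, now=True) is the default: query() runs immediately,
# then every 5 seconds; pass now=False to wait a full interval first
loop_deferred = loop.start(5.0)

# stop the loop after ~12 seconds, then shut down once its Deferred fires
reactor.callLater(12, loop.stop)
loop_deferred.addCallback(lambda _: reactor.stop())
reactor.run()
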
Example #4
    def setUp(self):

        # The @itv decorator is gone. This test could probably go away entirely,
        # but I've found it personally useful. Unconditionally skipping for now,
        # until we know what to do with it.
        raise unittest.SkipTest("developer-only Nimbus integration test")

        # skip this test if IaaS credentials are unavailable
        maybe_skip_test()

        self.notifier = FakeProvisionerNotifier()
        self.context_client = get_context_client()

        self.store = yield self.setup_store()
        self.site_drivers = provisioner.get_site_drivers(
            get_nimbus_test_sites())

        yield self._start_container()
        yield self.spawn_procs()

        pId = yield self.procRegistry.get("provisioner")
        self.client = ProvisionerClient(pid=pId)
Example #5
class ProvisionerServiceTerminateAllTest(BaseProvisionerServiceTests):
    """Tests that use a fake ProvisionerCore to test the Deferred RPC
    polling mechanism of terminate_all
    """
    @defer.inlineCallbacks
    def setUp(self):

        self.notifier = FakeProvisionerNotifier()
        self.context_client = FakeContextClient()

        self.store = ProvisionerStore()
        self.site_drivers = {'fake-site1': FakeNodeDriver()}

        yield self._start_container()
        yield self.spawn_procs()

        self.fakecore = TerminateAllFakeCore()
        self.patch(self.provisioner, "core", self.fakecore)

        pId = yield self.procRegistry.get("provisioner")
        self.client = ProvisionerClient(pid=pId)

    @defer.inlineCallbacks
    def tearDown(self):
        yield self._shutdown_processes()
        yield self._stop_container()

    @defer.inlineCallbacks
    def test_terminate_all_deferred(self):
        """Check the specific behavior with terminate_all_deferred.
        """

        service_deferred = defer.Deferred()
        self.fakecore.deferred = service_deferred
        client_deferred = self.client.terminate_all(rpcwait=True, poll=0.1)
        yield procutils.asleep(0.3)

        # the first time the core fires its Deferred, check_terminate_all still
        # says there are instances, so the client should not return yet
        self.fakecore.all_terminated = False
        self.fakecore.deferred = defer.Deferred()  # set up the next Deferred
        service_deferred.callback(None)
        service_deferred = self.fakecore.deferred
        yield procutils.asleep(0.3)
        self.assertFalse(client_deferred.called)
        self.assertEqual(self.fakecore.check_terminate_all_count, 1)

        # now we flip all_terminated to True; the client should return
        # on the next cycle
        self.fakecore.all_terminated = True
        service_deferred.callback(None)
        yield client_deferred

    @defer.inlineCallbacks
    def test_terminate_all_deferred_error_retry(self):

        service_deferred = defer.Deferred()
        self.fakecore.deferred = service_deferred

        client_deferred = self.client.terminate_all(rpcwait=True,
                                                    poll=0.01,
                                                    retries=3)
        yield procutils.asleep(0.1)
        for i in range(3):
            self.assertEqual(self.fakecore.terminate_all_count, i + 1)

            self.fakecore.deferred = defer.Deferred()
            service_deferred.errback(Exception("went bad #%d" % (i + 1)))
            service_deferred = self.fakecore.deferred
            yield procutils.asleep(0.2)
            self.assertFalse(client_deferred.called)
            self.assertEqual(self.fakecore.terminate_all_count, i + 2)

        # this last errback should cause client_deferred to errback itself
        self.fakecore.deferred = defer.Deferred()
        service_deferred.errback(Exception("went bad for the last time"))
        yield procutils.asleep(0.03)
        try:
            yield client_deferred
        except Exception as e:
            log.exception(
                "Expected error, couldn't terminate all after retries: %s", e)
        else:
            # the original snippet is cut off here; to be a complete test the
            # else branch should fail, since an error was expected
            self.fail("terminate_all should have errbacked after retries")
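
The client code under test is not shown on this page, but the assertions imply
its shape: terminate_all(rpcwait=True, poll=..., retries=...) keeps issuing
termination rounds, polls check_terminate_all between rounds, and only errbacks
after the retry budget is exhausted. A hedged sketch of that loop; the names
and structure are inferred from the tests, not taken from the project:

from twisted.internet import defer, reactor, task

@defer.inlineCallbacks
def terminate_all_rpcwait(core, poll=1.0, retries=5):
    """Repeat termination rounds until a check confirms no instances
    remain, tolerating up to `retries` consecutive errors."""
    errors = 0
    while True:
        try:
            yield core.terminate_all()            # one termination round
            done = yield core.check_terminate_all()
            if done:
                defer.returnValue(None)
            errors = 0                            # a clean round resets the budget
        except Exception:
            errors += 1
            if errors > retries:
                raise                             # propagate once retries run out
        yield task.deferLater(reactor, poll, lambda: None)
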
Example #6
class ProvisionerServiceTest(BaseProvisionerServiceTests):
    """Integration tests that use fake context broker and IaaS driver fixtures
    """
    @defer.inlineCallbacks
    def setUp(self):

        self.notifier = FakeProvisionerNotifier()
        self.context_client = FakeContextClient()

        self.store = yield self.setup_store()
        self.site_drivers = {'fake-site1': FakeNodeDriver()}

        yield self._start_container()
        yield self.spawn_procs()

        pId = yield self.procRegistry.get("provisioner")
        self.client = ProvisionerClient(pid=pId)

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.shutdown_procs()
        yield self.teardown_store()
        yield self._stop_container()

    def setup_store(self):
        return defer.succeed(ProvisionerStore())

    def teardown_store(self):
        return defer.succeed(None)

    @defer.inlineCallbacks
    def test_provision_bad_dt(self):
        client = self.client
        notifier = self.notifier

        worker_node_count = 3
        deployable_type = 'this-doesnt-exist'
        nodes = {
            'head-node':
            FakeLaunchItem(1, 'fake-site1', 'small', None),
            'worker-node':
            FakeLaunchItem(worker_node_count, 'fake-site1', 'small', None)
        }

        launch_id = _new_id()

        node_ids = [
            node_id for node in nodes.itervalues()
            for node_id in node.instance_ids
        ]
        self.assertEqual(len(node_ids), worker_node_count + 1)

        yield client.provision(launch_id, deployable_type, nodes,
                               ('subscriber', ))

        ok = yield notifier.wait_for_state(states.FAILED, node_ids)
        self.assertTrue(ok)
        self.assertTrue(notifier.assure_record_count(1))

        yield self.assertStoreNodeRecords(states.FAILED, *node_ids)
        yield self.assertStoreLaunchRecord(states.FAILED, launch_id)

    @defer.inlineCallbacks
    def test_provision_broker_error(self):
        client = self.client
        notifier = self.notifier

        worker_node_count = 3
        deployable_type = 'base-cluster'
        nodes = {
            'head-node':
            FakeLaunchItem(1, 'fake-site1', 'small', None),
            'worker-node':
            FakeLaunchItem(worker_node_count, 'fake-site1', 'small', None)
        }

        launch_id = _new_id()

        node_ids = [
            node_id for node in nodes.itervalues()
            for node_id in node.instance_ids
        ]
        self.assertEqual(len(node_ids), worker_node_count + 1)

        self.context_client.create_error = BrokerError("fake failure")

        yield client.provision(launch_id, deployable_type, nodes,
                               ('subscriber', ))

        ok = yield notifier.wait_for_state(states.FAILED, node_ids)
        self.assertTrue(ok)
        self.assertTrue(notifier.assure_record_count(1))

        yield self.assertStoreNodeRecords(states.FAILED, *node_ids)
        yield self.assertStoreLaunchRecord(states.FAILED, launch_id)

    @defer.inlineCallbacks
    def test_dump_state(self):
        running_launch, running_nodes = make_launch_and_nodes(
            _new_id(), 10, states.RUNNING)
        yield self.store.put_launch(running_launch)
        yield self.store.put_nodes(running_nodes)

        pending_launch, pending_nodes = make_launch_and_nodes(
            _new_id(), 3, states.PENDING)
        yield self.store.put_launch(pending_launch)
        yield self.store.put_nodes(pending_nodes)

        running_node_ids = [node['node_id'] for node in running_nodes]
        pending_node_ids = [node['node_id'] for node in pending_nodes]
        all_node_ids = running_node_ids + pending_node_ids

        yield self.client.dump_state(running_node_ids)
        ok = yield self.notifier.wait_for_state(states.RUNNING,
                                                nodes=running_node_ids)
        self.assertTrue(ok)
        self.assertEqual(len(self.notifier.nodes), len(running_nodes))

        yield self.client.dump_state(pending_node_ids)
        ok = yield self.notifier.wait_for_state(states.PENDING,
                                                nodes=pending_node_ids)
        self.assertTrue(ok)
        self.assertEqual(len(self.notifier.nodes), len(all_node_ids))

        # we should not have gotten any duplicate records yet
        self.assertTrue(self.notifier.assure_record_count(1))

        # empty dump request should dump nothing
        yield self.client.dump_state([])
        self.assertTrue(self.notifier.assure_record_count(1))

    @defer.inlineCallbacks
    def test_dump_state_unknown_node(self):
        node_ids = ["09ddd3f8-a5a5-4196-ac13-eab4d4b0c777"]
        subscribers = ["hello1_subscriber"]
        yield self.client.dump_state(node_ids, force_subscribe=subscribers[0])
        ok = yield self.notifier.wait_for_state(states.FAILED, nodes=node_ids)
        self.assertTrue(ok)
        self.assertEqual(len(self.notifier.nodes), len(node_ids))
        for node_id in node_ids:
            ok = yield self.notifier.assure_subscribers(node_id, subscribers)
            self.assertTrue(ok)

    @defer.inlineCallbacks
    def test_terminate(self):
        launch_id = _new_id()
        running_launch, running_nodes = make_launch_and_nodes(
            launch_id, 10, states.RUNNING, site="fake-site1")
        yield self.store.put_launch(running_launch)
        yield self.store.put_nodes(running_nodes)

        node_ids = [node['node_id'] for node in running_nodes]

        # terminate half of the nodes, then the launch as a whole
        first_five = node_ids[:5]
        yield self.client.terminate_nodes(first_five)
        ok = yield self.notifier.wait_for_state(states.TERMINATED,
                                                nodes=first_five)
        self.assertTrue(ok)
        self.assertEqual(set(first_five), set(self.notifier.nodes))

        yield self.client.terminate_launches((launch_id, ))
        ok = yield self.notifier.wait_for_state(states.TERMINATED,
                                                nodes=node_ids)
        self.assertTrue(ok)
        self.assertEqual(set(node_ids), set(self.notifier.nodes))
        # there should be a TERMINATING and a TERMINATED record for each node
        self.assertTrue(self.notifier.assure_record_count(2))

        self.assertEqual(len(self.site_drivers['fake-site1'].destroyed),
                         len(node_ids))

    @defer.inlineCallbacks
    def test_terminate_all(self):
        # create a ton of launches
        launch_specs = [(30, 3, states.RUNNING), (50, 1, states.TERMINATED),
                        (80, 1, states.RUNNING)]

        to_be_terminated_node_ids = []

        for launchcount, nodecount, state in launch_specs:
            for i in range(launchcount):
                launch_id = _new_id()
                launch, nodes = make_launch_and_nodes(launch_id,
                                                      nodecount,
                                                      state,
                                                      site="fake-site1")
                yield self.store.put_launch(launch)
                yield self.store.put_nodes(nodes)

                if state < states.TERMINATED:
                    to_be_terminated_node_ids.extend(node["node_id"]
                                                     for node in nodes)

        log.debug("Expecting %d nodes to be terminated",
                  len(to_be_terminated_node_ids))

        yield self.client.terminate_all(rpcwait=True)
        yield self.assertStoreNodeRecords(states.TERMINATED,
                                          *to_be_terminated_node_ids)

        ok = self.notifier.assure_state(states.TERMINATED,
                                        nodes=to_be_terminated_node_ids)
        self.assertTrue(ok)
        self.assertEqual(set(to_be_terminated_node_ids),
                         set(self.notifier.nodes))

        self.assertEqual(len(self.site_drivers['fake-site1'].destroyed),
                         len(to_be_terminated_node_ids))

    @defer.inlineCallbacks
    def test_query(self):
        # the default is non-RPC, so the result should be None
        res = yield self.client.query()
        self.assertEqual(res, None)

        # in the RPC case the query returns True
        res = yield self.client.query(rpc=True)
        self.assertEqual(res, True)
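
FakeProvisionerNotifier itself is also absent from this page; the assertions
suggest it records every state update per node and can wait, as a Deferred,
until a set of nodes reaches a given state. A minimal sketch of such a test
double (an illustration of the idea, not the project's class):

from twisted.internet import defer, reactor, task

class MiniFakeNotifier(object):
    """Records node state updates and lets tests wait on them."""

    def __init__(self):
        self.nodes = {}  # node_id -> list of states seen

    def send_record(self, record):
        # assumed record shape: a dict carrying node_id and state
        node_id = record["node_id"]
        self.nodes.setdefault(node_id, []).append(record["state"])

    @defer.inlineCallbacks
    def wait_for_state(self, state, nodes, timeout=5.0, poll=0.1):
        """Poll until every node in `nodes` has reported `state`;
        returns False on timeout instead of hanging forever."""
        waited = 0.0
        while waited < timeout:
            if all(state in self.nodes.get(n, ()) for n in nodes):
                defer.returnValue(True)
            yield task.deferLater(reactor, poll, lambda: None)
            waited += poll
        defer.returnValue(False)
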
Example #7
    def slc_init(self):

        scoped_name = self.get_scoped_name("system", self.svc_name)
        self.scoped_name = scoped_name

        queue_name_work = self.spawn_args.get("queue_name_work")
        if queue_name_work:
            self.queue_name_work = self.get_scoped_name(
                "system", queue_name_work)

            extradict = {"queue_name_work": self.queue_name_work}
            cei_events.event(self.svc_name, "init_begin", extra=extradict)
            yield self._make_queue(queue_name_work)

            queuestat_client = QueueStatClient(self)
            yield queuestat_client.watch_queue(self.queue_name_work,
                                               self.scoped_name, 'sensor_info')
            cei_events.event(self.svc_name, "queue_watched")

        else:
            self.worker_queue_receiver = None
            self.queue_name_work = None
            extradict = None
            cei_events.event(self.svc_name, "init_begin", extra=extradict)

        engineclass = "epu.decisionengine.impls.NpreservingEngine"
        if self.spawn_args.has_key("engine_class"):
            engineclass = self.spawn_args["engine_class"]
            log.info("Using configured decision engine: %s" % engineclass)
        else:
            log.info("Using default decision engine: %s" % engineclass)

        if self.spawn_args.has_key("engine_conf"):
            engine_conf = self.spawn_args["engine_conf"]
            if isinstance(engine_conf, str):
                engine_conf = json.loads(engine_conf)
        else:
            engine_conf = None

        if self.spawn_args.has_key("cassandra"):
            cass = self.spawn_args["cassandra"]
            host = cass['hostname']
            username = cass['username']
            password = cass['password']
            port = cass['port']
            keyspace = cass['keyspace']

            store = CassandraControllerStore(self.svc_name, host, port,
                                             username, password, keyspace,
                                             CoreInstance, SensorItem)
            store.initialize()
            store.activate()
        elif 'store' in self.spawn_args:
            store = self.spawn_args['store']
        else:
            store = ControllerStore()

        self.core = ControllerCore(ProvisionerClient(self),
                                   engineclass,
                                   scoped_name,
                                   conf=engine_conf,
                                   store=store)

        # run state recovery and engine initialization

        # this one needs to run before any messages start arriving. It pulls
        # information from persistence and refreshes local caches.
        yield self.core.run_recovery()

        # temporarily doing this later due to a potential bug in ioncore where
        # queues may not be bound before slc_init runs. This means that if the
        # provisioner is quick to reply to dump_state, some messages may be
        # missed.
        reactor.callLater(1, self._delayed_init)
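
Deferring the rest of startup with reactor.callLater, as above, is a common
Twisted workaround: give the reactor a beat so queue bindings settle before
any replies can arrive. A generic, runnable sketch of the pattern (the body
of the delayed step is illustrative):

from twisted.internet import reactor

def delayed_init():
    # by now the reactor has been running for a second, so bindings made
    # during startup should be in place and replies will not be missed
    print("finishing initialization")
    reactor.stop()

# schedule the remainder of startup one second after the reactor starts
reactor.callLater(1, delayed_init)
reactor.run()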