    def tearDown(self):

        super(TestAvailableJobs, self).tearDown()

        ObjectCache.clear()

        connection.use_debug_cursor = False
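The tearDown above resets Django's use_debug_cursor, which implies a setUp that switched it on to record SQL. A minimal sketch of that counterpart, assuming the same TestAvailableJobs base class; it is an illustration, not code from this listing:

    # Hypothetical counterpart to the tearDown above (an assumption): enable
    # Django's debug cursor so executed SQL is recorded on connection.queries,
    # and clear the ObjectCache so each test starts cold.
    def setUp(self):
        super(TestAvailableJobs, self).setUp()

        ObjectCache.clear()

        connection.use_debug_cursor = True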
Example #2
    def spider_api(self):
        ObjectCache.clear()

        from chroma_api.urls import api

        xs = filter(lambda x: x[0] != "action", api._registry.items())
        xs = filter(lambda x: "get" in x[1]._meta.list_allowed_methods, xs)

        for _, resource in xs:
            list_uri = resource.get_resource_uri()
            response = self.api_client.get(list_uri, data={"limit": 0})
            self.assertEqual(
                response.status_code, 200, "%s: %s %s" %
                (list_uri, response.status_code, self.deserialize(response)))
            if "get" in resource._meta.detail_allowed_methods:
                objects = self.deserialize(response)["objects"]

                for object in objects:
                    # Deal with the newer bulk data format, if it is in that format.
                    if ("resource_uri" not in object) and (
                            "traceback" in object) and ("error" in object):
                        del object["traceback"]
                        del object["error"]
                        self.assertEqual(len(object), 1)
                        object = object.values()[0]

                    response = self.api_client.get(object["resource_uri"])
                    self.assertEqual(
                        response.status_code,
                        200,
                        "resource_url: %s, %s %s %s" %
                        (object["resource_uri"], response.status_code,
                         self.deserialize(response), object),
                    )
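The bulk-format branch above is easier to follow with a concrete payload. The dict below is invented (only resource_uri, error and traceback come from the code above); the unwrap mirrors the deletes and assertion in spider_api:

# Illustrative only: a made-up detail object in the newer bulk format.
bulk_object = {
    "error": None,
    "traceback": None,
    "host": {"resource_uri": "/api/host/1/", "fqdn": "example.invalid"},
}

# Drop the bookkeeping keys; exactly one key remains, and its value is the
# real object carrying the resource_uri that gets fetched next.
del bulk_object["traceback"]
del bulk_object["error"]
assert len(bulk_object) == 1
inner = list(bulk_object.values())[0]
assert inner["resource_uri"] == "/api/host/1/"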
Example #3
    def spider_api(self):
        ObjectCache.clear()

        from chroma_api.urls import api
        for name, resource in api._registry.items():
            if 'get' in resource._meta.list_allowed_methods:
                list_uri = resource.get_resource_uri()
                response = self.api_client.get(list_uri, data={'limit': 0})
                self.assertEqual(
                    response.status_code, 200,
                    "%s: %s %s" % (list_uri, response.status_code,
                                   self.deserialize(response)))
                if 'get' in resource._meta.detail_allowed_methods:
                    objects = self.deserialize(response)['objects']

                    for object in objects:
                        # Deal with the newer bulk data format, if it is in that format.
                        if ('resource_uri' not in object) and (
                                'traceback' in object) and ('error' in object):
                            del object['traceback']
                            del object['error']
                            self.assertEqual(len(object), 1)
                            object = object.values()[0]

                        response = self.api_client.get(object['resource_uri'])
                        self.assertEqual(
                            response.status_code, 200,
                            "resource_url: %s, %s %s %s" %
                            (object['resource_uri'], response.status_code,
                             self.deserialize(response), object))
Example #4
    def tearDown(self):
        from chroma_api.authentication import CsrfAuthentication
        CsrfAuthentication.is_authenticated = self.old_is_authenticated

        # Restore the patched JobSchedulerClient methods
        from chroma_core.services.job_scheduler import job_scheduler_client
        job_scheduler_client.JobSchedulerClient.available_transitions = self.old_available_transitions
        job_scheduler_client.JobSchedulerClient.available_jobs = self.old_available_jobs

        ObjectCache.clear()
Example #5
    def _measure_scaling(self,
                         create_n,
                         measured_resource,
                         scaled_resource=None):
        """

        :param create_n: Function to create N of scaled_resource
        :param measured_resource: The resource we will measure the query load for
        :param scaled_resource: The object which is actually being scaled with N
        :return: Instance of Order1, OrderN, OrderBad
        """
        if scaled_resource is None:
            scaled_resource = measured_resource

        query_counts = {}
        samples = [5, 6, 7, 8]

        for n in samples:
            ObjectCache.clear()
            create_n(n)
            # Queries get reset at the start of a request
            self.assertEqual(scaled_resource._meta.queryset.count(), n)
            with CaptureQueriesContext(connection) as queries:
                response = self.api_client.get(
                    "/api/%s/" % measured_resource._meta.resource_name,
                    data={"limit": 0})
                self.assertEqual(
                    response.status_code, 200, "%s:%s" %
                    (response.content, measured_resource._meta.resource_name))
                query_count = len(queries)

            self.assertEqual(len(self.deserialize(response)["objects"]),
                             measured_resource._meta.queryset.count())
            query_counts[n] = query_count

        # Ignore samples[0]; it exists only to absorb setup overhead from the first API call

        # gradient between samples[1] and samples[2]
        grad1 = (query_counts[samples[2]] -
                 query_counts[samples[1]]) / (samples[2] - samples[1])
        # gradient between samples[2] and samples[3]
        grad2 = (query_counts[samples[3]] -
                 query_counts[samples[2]]) / (samples[3] - samples[2])

        if grad1 == 0 and grad2 == 0:
            # Hoorah, O(1)
            return Order1(query_counts[samples[3]])
        elif grad1 > 0 and grad1 == grad2:
            # O(N)
            return OrderN(grad1)
        else:
            # Worse than O(N)
            return OrderBad()
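Order1, OrderN and OrderBad are returned above but not defined in this listing. The sketch below shows one plausible shape for them, assuming they simply record the measured cost; only the class names come from the code above:

# Assumed result classes for _measure_scaling; the attributes are invented.
class Order1(object):
    """Constant query count: the same number of queries regardless of N."""

    def __init__(self, queries):
        self.queries = queries


class OrderN(object):
    """Linear query count: roughly `per_object` extra queries per object."""

    def __init__(self, per_object):
        self.per_object = per_object


class OrderBad(object):
    """Query count grows faster than linearly with N."""


# A caller might then assert on the classification, e.g.:
#     self.assertIsInstance(self._measure_scaling(create_n, SomeResource), Order1)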
Example #6
    def tearDown(self):

        super(TestAvailableTransitions, self).tearDown()

        ObjectCache.clear()
Example #7
def _passthrough_create_filesystem(target_data):
    ObjectCache.clear()
    return JobScheduler().create_filesystem(target_data)
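A sketch of how such a passthrough is typically wired up, assuming JobSchedulerClient exposes a matching create_filesystem method and that the mock package is available; this is an illustration, not code from this listing:

# Hypothetical wiring (an assumption): route the RPC-style client call straight
# into a local JobScheduler so no job_scheduler service needs to be running.
import mock

from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient


def _install_create_filesystem_passthrough():
    original = JobSchedulerClient.create_filesystem
    JobSchedulerClient.create_filesystem = mock.Mock(
        side_effect=_passthrough_create_filesystem)
    return original  # keep this so a tearDown can restore the real method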
Example #8
    def tearDown(self):

        super(TestAvailableJobs, self).tearDown()

        ObjectCache.clear()
    def setUp(self):
        super(JobTestCase, self).setUp()

        from chroma_core.services.http_agent import HttpAgentRpc
        from chroma_core.services.http_agent import Service as HttpAgentService

        # FIXME: have to do this before every test because otherwise
        # one test will get all the setup of StoragePluginClass records,
        # the in-memory instance of storage_plugin_manager will expect
        # them to still be there but they'll have been cleaned
        # out of the database.  Setting up this stuff should be done
        # as part of the initial DB setup before any test is started
        # so that it's part of the baseline that's rolled back to
        # after each test.
        import chroma_core.lib.storage_plugin.manager

        chroma_core.lib.storage_plugin.manager.storage_plugin_manager = (
            chroma_core.lib.storage_plugin.manager.StoragePluginManager())

        # Intercept attempts to call out to lustre servers
        import chroma_core.services.job_scheduler.agent_rpc

        self.old_agent_rpc = chroma_core.services.job_scheduler.agent_rpc.AgentRpc
        self.old_agent_ssh = chroma_core.services.job_scheduler.agent_rpc.AgentSsh
        MockAgentRpc.mock_servers = self.mock_servers
        MockAgentSsh.mock_servers = self.mock_servers

        chroma_core.services.job_scheduler.agent_rpc.AgentRpc = MockAgentRpc
        chroma_core.services.job_scheduler.agent_rpc.AgentSsh = MockAgentSsh

        # Any RPCs that are going to get called need to be explicitly overridden to
        # turn into local calls -- this is a catch-all to prevent any RPC classes
        # from trying to do network comms during unit tests
        ServiceRpcInterface._call = mock.Mock(side_effect=NotImplementedError)
        ServiceQueue.put = mock.Mock()
        ServiceQueue.purge = mock.Mock()

        # Create an instance for the purposes of the test
        from chroma_core.services.plugin_runner.resource_manager import ResourceManager

        resource_manager = ResourceManager()
        from chroma_core.services.plugin_runner import AgentPluginHandlerCollection

        def patch_daemon_rpc(rpc_class, test_daemon):
            # Patch AgentDaemonRpc to call our instance instead of trying to do an RPC
            def rpc_local(fn_name, *args, **kwargs):
                # Run the response through a serialize/deserialize cycle to
                # give it that special RPC flavor.
                retval = json.loads(
                    json.dumps(getattr(test_daemon, fn_name)(*args, **kwargs)))
                log.info("patch_daemon_rpc: %s(%s %s) -> %s" %
                         (fn_name, args, kwargs, retval))
                return retval

            rpc_class._call = mock.Mock(side_effect=rpc_local)

        aphc = AgentPluginHandlerCollection(resource_manager)

        patch_daemon_rpc(AgentDaemonRpcInterface, aphc)

        aphc.update_host_resources = mock.Mock(
            side_effect=parse_synthentic_device_info)

        patch_daemon_rpc(HttpAgentRpc, HttpAgentService())

        from chroma_core.services.job_scheduler.dep_cache import DepCache
        from chroma_core.services.job_scheduler.job_scheduler import JobScheduler, RunJobThread
        from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerRpc
        from chroma_core.services.job_scheduler.job_scheduler_notify import NotificationQueue

        ObjectCache.clear()
        self.job_scheduler = JobScheduler()
        patch_daemon_rpc(JobSchedulerRpc, self.job_scheduler)

        # self.job_scheduler.progress.put = mock.Mock(side_effect = lambda msg: self.job_scheduler.progress._handle(msg))
        # self.job_scheduler.progress.advance = mock.Mock(side_effect = lambda msg: self.job_scheduler.progress._handle(msg))

        from chroma_core.services.job_scheduler import QueueHandler

        job_scheduler_queue_handler = QueueHandler(self.job_scheduler)

        def job_scheduler_queue_immediate(body):
            log.info("job_scheduler_queue_immediate: %s" % body)
            job_scheduler_queue_handler.on_message(body)

        NotificationQueue.put = mock.Mock(
            side_effect=job_scheduler_queue_immediate)

        import chroma_core.services.job_scheduler.job_scheduler

        chroma_core.services.job_scheduler.job_scheduler._disable_database = mock.Mock()

        def _spawn_job(job):
            log.debug("functional spawn job")
            thread = RunJobThread(self.job_scheduler.progress,
                                  self.job_scheduler._db_quota, job,
                                  job.get_steps())
            self.job_scheduler._run_threads[job.id] = thread
            thread._run()

        self.job_scheduler._spawn_job = mock.Mock(side_effect=_spawn_job)

        def run_next():
            while True:
                runnable_jobs = self.job_scheduler._job_collection.ready_jobs

                log.info(
                    "run_next: %d runnable jobs of (%d pending, %d tasked)" % (
                        len(runnable_jobs),
                        len(self.job_scheduler._job_collection.pending_jobs),
                        len(self.job_scheduler._job_collection.tasked_jobs),
                    ))

                if not runnable_jobs:
                    break

                dep_cache = DepCache()
                ok_jobs, cancel_jobs = self.job_scheduler._check_jobs(
                    runnable_jobs, dep_cache)
                self.job_scheduler._job_collection.update_many(
                    ok_jobs, "tasked")
                for job in cancel_jobs:
                    self.job_scheduler._complete_job(job, False, True)
                for job in ok_jobs:
                    self.job_scheduler._spawn_job(job)

                self.drain_progress(skip_advance=True)

        JobScheduler._run_next = mock.Mock(side_effect=run_next)

        #
        # def complete_job(job, errored = False, cancelled = False):
        #     ObjectCache.clear()
        #     self.job_scheduler._complete_job(job, errored, cancelled)

        # JobScheduler.complete_job = mock.Mock(side_effect=complete_job)

        # Patch host removal because we use a _test_lun function that generates Volumes
        # with no corresponding StorageResourceRecords, so the real implementation wouldn't
        # remove them
        def fake_remove_host_resources(host_id):
            from chroma_core.models.host import Volume, VolumeNode

            for vn in VolumeNode.objects.filter(host__id=host_id):
                vn.mark_deleted()
            for volume in Volume.objects.all():
                if volume.volumenode_set.count() == 0:
                    volume.mark_deleted()

        AgentDaemonRpcInterface.remove_host_resources = mock.Mock(
            side_effect=fake_remove_host_resources)
    def tearDown(self):
        super(TestClientManagementJobs, self).tearDown()

        # clear out the singleton to avoid polluting other tests
        ObjectCache.clear()
Example #11
    def tearDown(self):
        JobSchedulerClient.command_run_jobs = self.old_command_run_jobs

        ObjectCache.clear()
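Example #11 restores command_run_jobs, which implies a setUp that saved and stubbed it. A minimal sketch of that counterpart; the class name and return value are made up, and imports of mock and JobSchedulerClient as in the examples above are assumed:

    # Hypothetical counterpart to the tearDown above (an assumption): remember
    # the real client method, then stub it so tests never schedule real jobs.
    def setUp(self):
        super(TestSomething, self).setUp()

        self.old_command_run_jobs = JobSchedulerClient.command_run_jobs
        # Pretend every request created command #1 without running anything.
        JobSchedulerClient.command_run_jobs = mock.Mock(return_value=1)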