def setUp(self):
    # make sure there are no messages left in the worker queue
    # from previous runs:
    self.cleanup()
    #
    self.worker_processes = WorkerProcesses()
    self.testrun_id = None
Example #2
def setUp(self):
    self.queue = ROUTING_KEY
    try:
        if not DEBUG:
            delete_queue("localhost", self.queue)
    except:
        pass
    #
    if not DEBUG:
        self.worker_processes = WorkerProcesses()
        self.testrun_id = None
        self.testrun_id2 = None
Example #3
    def setUp(self):
        self.queue = ROUTING_KEY

        # make sure there are no messages left in the worker queue
        # from previous runs:
        try:
            if not DEBUG:
                delete_queue("localhost", self.queue)
        except:
            pass
        #
        if not DEBUG:
            self.worker_processes = WorkerProcesses()
            self.testrun_id = None
Example #4
def setUp(self):
    self.queue = ROUTING_KEY
    try:
        if not DEBUG:
            delete_queue("localhost", self.queue)
    except:
        pass
    #
    if not DEBUG:
        self.worker_processes = WorkerProcesses()
        self.testrun_id = None
        self.testrun_id2 = None
Example #5
    def setUp(self):
        self.queue = ROUTING_KEY

        # make sure there are no messages left in the worker queue
        # from previous runs:
        try:
            if not DEBUG:
                delete_queue("localhost", self.queue)
        except:
            pass
        #
        if not DEBUG:
            self.worker_processes = WorkerProcesses()
            self.testrun_id = None
Example #6
class TestPropertiesDistribution(unittest.TestCase):

    def cleanup(self):
        try:
            for queue in ALL_QUEUES:
                delete_queue("localhost", queue)
                os.system("rm -f /tmp/%s" % queue)
        except:
            pass
        
    def run_task(self, routing_key):
        """
        sends 'echo $PPID >> /tmp/routing_key' command to the routing_key and makes sure it is executed
        by checking that the file exists. Returns the $PPID value read from the file
        """
        self.assertFalse(os.path.isfile("/tmp/%s" % routing_key))
        taskrunner1 = taskrunner_factory(
                             routing_key = routing_key, 
                             execution_timeout = 2,
                             testrun_id = self.testrun_id,
                             config_file = self._distributor_config_filename())
       
        taskrunner1.add_task(["echo", "$PPID", ">>", "/tmp/%s" % routing_key])
        taskrunner1.run()
        self.assertTrue(os.path.isfile("/tmp/%s" % routing_key))
        f = open('/tmp/%s' % routing_key, 'r')
        read_data = f.read()
        f.close()
        os.system("rm -f /tmp/%s" % routing_key)
        return read_data

    def setUp(self):
        # make sure there are no messages left in the worker queue
        # from previous runs:
        self.cleanup()
        #
        self.worker_processes = WorkerProcesses()
        self.testrun_id = None
          
    def tearDown(self):
        self.worker_processes.terminate()
        self.cleanup()
        if self.testrun_id:
            delete_queue("localhost", testrun_queue_name(self.testrun_id))

    def test_worker_consumes_from_all_queues(self):
        # Starts 1 worker with group1, device1, hwid1
        # Checks that worker consumes from all queues
        #
        self.worker_processes.start(1, _config_path("group1_device1_hwid1.ini"))

        self.testrun_id = 111      
        
        self.run_task("group1")
        self.run_task("group1.device1")
        self.run_task("group1.device1.hwid1")

    def test_worker_consumes_only_from_right_queues(self):
        # Starts 1 worker with group1, device1, hwid1
        # Checks that worker consumes only from right queues
        #

        # Start another worker to make sure "false" queues exist
        worker1 = self.worker_processes.start(1, _config_path("group1_device1_hwid1.ini"))


        # Start the actual worker we want to test
        worker2 = self.worker_processes.start(1, _config_path("group1.ini"))
        

        self.testrun_id = 111

        # This can go to one of the workers
        task_pid = int(self.run_task("group1"))
        self.assertTrue(task_pid == worker1[0] or task_pid == worker2[0])

        # These should go to worker1
        task_pid = int(self.run_task("group1.device1"))
        self.assertEquals(task_pid, worker1[0])

        task_pid = int(self.run_task("group1.device1.hwid1"))
        self.assertEquals(task_pid, worker1[0])

        # Make sure tasks sent to queues the workers don't follow do not get executed
        self.assertRaises(OtsQueueDoesNotExistError, self.run_task, "group2")
        self.assertRaises(OtsQueueDoesNotExistError, self.run_task, "group1.device2")
        self.assertRaises(OtsQueueDoesNotExistError, self.run_task, "group1.device1.hwid2")
        self.assertRaises(OtsQueueDoesNotExistError, self.run_task, "group1.device2.hwid1")



    def test_workers_consume_only_from_right_queues_with_multiple_workers(self):

        worker1 = self.worker_processes.start(1, _config_path("group1_device1_hwid1.ini"))
        worker2 = self.worker_processes.start(1, _config_path("group1_device1_hwid2.ini"))
        worker3 = self.worker_processes.start(1, _config_path("group1_device2_hwid1.ini"))
        worker4 = self.worker_processes.start(1, _config_path("group1.ini"))
        worker5 = self.worker_processes.start(1, _config_path("group2_device1_hwid1.ini"))

        time.sleep(2)


        self.testrun_id = 111

        # This can go to workers 1,2,3,4
        expected_pids = (worker1[0], worker2[0], worker3[0], worker4[0])
        task_pid = int(self.run_task("group1"))
        self.assertTrue(task_pid in expected_pids)

        # This should go to worker1
        task_pid = int(self.run_task("group1.device1.hwid1"))
        self.assertEquals(task_pid, worker1[0])

        # This should go to worker2
        task_pid = int(self.run_task("group1.device1.hwid2"))
        self.assertEquals(task_pid, worker2[0])

        # This should go to worker3
        task_pid = int(self.run_task("group1.device2"))
        self.assertEquals(task_pid, worker3[0])

        # This should go to worker3
        task_pid = int(self.run_task("group1.device2.hwid1"))
        self.assertEquals(task_pid, worker3[0])

        # This should go to worker5
        task_pid = int(self.run_task("group2"))
        self.assertEquals(task_pid, worker5[0])


    ###############################
    # HELPERS
    ###############################

    @staticmethod
    def _distributor_config_filename():
        distributor_dirname = os.path.dirname(
                              os.path.abspath(__file__))
        distributor_config_filename = os.path.join(distributor_dirname,
                                                  "test_config.ini")
        if not os.path.exists(distributor_config_filename):
            raise Exception("%s not found" % distributor_config_filename)
        return distributor_config_filename
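
The `delete_queue` and `testrun_queue_name` helpers used throughout these examples are imported from the OTS test utilities and are not shown here. A minimal sketch of what they might look like, assuming py-amqplib (the same library `send_quit` uses in the next example) and assuming the per-testrun queue is simply named after the testrun id; both are assumptions, not the project's actual implementation:

# Illustrative sketch only -- the real OTS helpers may differ.
from amqplib import client_0_8 as amqp


def testrun_queue_name(testrun_id):
    # Assumed naming convention for the per-testrun response queue.
    return "testrun_%s" % testrun_id


def delete_queue(host, queue):
    # Best-effort queue removal; the broker may raise a channel error if the
    # queue does not exist, which is why the setUp methods wrap this call in
    # try/except.
    conn = amqp.Connection(host=host, userid="guest", password="guest",
                           virtual_host="/", insist=False)
    try:
        channel = conn.channel()
        channel.queue_delete(queue=queue)
    finally:
        conn.close()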
Example #7
class TestStateBehaviour(unittest.TestCase):
    def setUp(self):
        self.queue = ROUTING_KEY
        try:
            if not DEBUG:
                delete_queue("localhost", self.queue)
        except:
            pass
        #
        if not DEBUG:
            self.worker_processes = WorkerProcesses()
            self.testrun_id = None
            self.testrun_id2 = None

    def tearDown(self):
        if not DEBUG:
            self.worker_processes.terminate()
        if self.queue:
            delete_queue("localhost", self.queue)
        if self.testrun_id:
            delete_queue("localhost", testrun_queue_name(self.testrun_id))
        if self.testrun_id2:
            delete_queue("localhost", testrun_queue_name(self.testrun_id2))

    def test_failing_task(self):
        if not DEBUG:
            self.worker_processes.start()
        self.testrun_id = 111
        taskrunner = taskrunner_factory(
            routing_key=ROUTING_KEY,
            execution_timeout=10,
            testrun_id=self.testrun_id,
            config_file=_distributor_config_filename())

        command = ["command_error_mock", "localhost", str(self.testrun_id)]
        taskrunner.add_task(command)

        self.is_exception_raised = False

        def cb_handler(signal, dto, **kwargs):
            if isinstance(dto, Exception):
                self.is_exception_raised = True

        DTO_SIGNAL.connect(cb_handler)

        taskrunner.run()

        self.assertTrue(self.is_exception_raised)
        self.send_quit()
        time.sleep(1)

        self.assertFalse(all(self.worker_processes.exitcodes))

    def test_worker_alive_after_failing_task(self):
        if not DEBUG:
            self.worker_processes.start()
        self.testrun_id = 111
        taskrunner1 = taskrunner_factory(
            routing_key=ROUTING_KEY,
            execution_timeout=10,
            testrun_id=self.testrun_id,
            config_file=_distributor_config_filename())

        command = ["command_error_mock", "localhost", str(self.testrun_id)]
        taskrunner1.add_task(command)

        self.is_exception_raised = False

        def cb_handler(signal, dto, **kwargs):
            if isinstance(dto, Exception):
                self.is_exception_raised = True

        DTO_SIGNAL.connect(cb_handler)

        taskrunner1.run()

        self.assertTrue(self.is_exception_raised)

        self.is_exception_raised = False

        # Trigger another task to make sure worker is still alive
        taskrunner2 = taskrunner_factory(
            routing_key=ROUTING_KEY,
            execution_timeout=10,
            testrun_id=self.testrun_id,
            config_file=_distributor_config_filename())

        command = ["echo", "foo"]
        taskrunner2.add_task(command)
        taskrunner2.run()
        self.assertFalse(self.is_exception_raised)

        self.send_quit()
        time.sleep(1)

        self.assertFalse(all(self.worker_processes.exitcodes))

    def test_worker_alive_after_server_timeout(self):
        if not DEBUG:
            self.worker_processes.start()
        self.testrun_id = 111
        self.testrun_id2 = 112
        taskrunner1 = taskrunner_factory(
            routing_key=ROUTING_KEY,
            execution_timeout=10,
            testrun_id=self.testrun_id,
            config_file=_distributor_config_filename())

        command = ["sleep", "5"]
        taskrunner1.add_task(command)

        self.is_exception_raised = False

        def cb_handler(signal, dto, **kwargs):
            if isinstance(dto, Exception):
                self.is_exception_raised = True

        DTO_SIGNAL.connect(cb_handler)

        # Overwrite the server-side timeout handler with one that times out
        from ots.server.distributor.timeout import Timeout
        taskrunner1.timeout_handler = Timeout(1, 1, 1)

        self.assertRaises(OtsExecutionTimeoutError, taskrunner1.run)

        #        self.assertTrue(self.is_exception_raised)

        self.is_exception_raised = False

        time.sleep(10)  # Give worker time to reconnect

        # Trigger another task to make sure worker is still alive
        taskrunner2 = taskrunner_factory(
            routing_key=ROUTING_KEY,
            execution_timeout=10,
            testrun_id=self.testrun_id2,
            config_file=_distributor_config_filename())
        taskrunner2.add_task(["echo", "foo"])
        taskrunner2.run()
        self.assertFalse(self.is_exception_raised)

        self.send_quit()
        time.sleep(1)

        self.assertFalse(all(self.worker_processes.exitcodes))

    def test_worker_alive_after_server_timeout_failing_task(self):
        if not DEBUG:
            self.worker_processes.start()
        self.testrun_id = 111
        self.testrun_id2 = 112
        taskrunner1 = taskrunner_factory(
            routing_key=ROUTING_KEY,
            execution_timeout=10,
            testrun_id=self.testrun_id,
            config_file=_distributor_config_filename())

        command = [
            "sleep", "5", ";", "command_error_mock", "localhost",
            str(self.testrun_id)
        ]
        taskrunner1.add_task(command)

        self.is_exception_raised = False

        def cb_handler(signal, dto, **kwargs):
            if isinstance(dto, Exception):
                self.is_exception_raised = True

        DTO_SIGNAL.connect(cb_handler)

        # Overwrite the server-side timeout handler with one that times out
        from ots.server.distributor.timeout import Timeout
        taskrunner1.timeout_handler = Timeout(1, 1, 1)

        self.assertRaises(OtsExecutionTimeoutError, taskrunner1.run)

        #        self.assertTrue(self.is_exception_raised)

        self.is_exception_raised = False

        time.sleep(10)  # Give worker time to reconnect

        # Trigger another task to make sure worker is still alive
        taskrunner2 = taskrunner_factory(
            routing_key=ROUTING_KEY,
            execution_timeout=10,
            testrun_id=self.testrun_id2,
            config_file=_distributor_config_filename())
        taskrunner2.add_task(["echo", "foo"])
        taskrunner2.run()
        self.assertFalse(self.is_exception_raised)

        self.send_quit()
        time.sleep(1)

        self.assertFalse(all(self.worker_processes.exitcodes))

    def send_quit(self):
        cmd_msg = CommandMessage(["quit"], self.queue, 111)
        message = pack_message(cmd_msg)

        conn = amqp.Connection(host="localhost",
                               userid="guest",
                               password="******",
                               virtual_host="/",
                               insist=False)
        channel = conn.channel()
        channel.basic_publish(message,
                              exchange=ROUTING_KEY,
                              routing_key=ROUTING_KEY)
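
The `send_quit` helper above publishes a packed `CommandMessage` straight to the worker exchange. If you need to confirm from the test side that such a message actually reached the queue, the same amqplib API offers `basic_get`; a minimal sketch, assuming the worker queue carries the same name as `ROUTING_KEY` (as the `delete_queue` calls in `setUp` suggest):

# Illustrative sketch only: peek at the worker queue after send_quit().
from amqplib import client_0_8 as amqp


def peek_queue(queue, host="localhost"):
    # Returns the next pending message, or None if the queue is empty.
    conn = amqp.Connection(host=host, userid="guest", password="guest",
                           virtual_host="/", insist=False)
    try:
        channel = conn.channel()
        return channel.basic_get(queue=queue, no_ack=True)
    finally:
        conn.close()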
Example #8
class TestDeviceDistribution(unittest.TestCase):
    def setUp(self):
        self.queue = ROUTING_KEY

        # make sure there are no messages left in the worker queue
        # from previous runs:
        try:
            if not DEBUG:
                delete_queue("localhost", self.queue)
        except:
            pass
        #
        if not DEBUG:
            self.worker_processes = WorkerProcesses()
            self.testrun_id = None

    def tearDown(self):
        if not DEBUG:
            self.worker_processes.terminate()

        self._remove_zip_file(TEST_DEFINITION_ZIP)
        self._remove_files_created_by_remote_commands()

        if self.queue:
            delete_queue("localhost", self.queue)
        if self.testrun_id:
            delete_queue("localhost", testrun_queue_name(self.testrun_id))

    ###################
    # Tests
    ###################

    def test_one_task_one_worker(self):
        """
        The simplest configuration...
        Check that the results come back OK from the Worker 
        """

        self.test_definition_file_received = False
        self.results_file_received = False

        if not DEBUG:
            self.worker_processes.start()
        self.testrun_id = 111
        taskrunner = taskrunner_factory(routing_key=ROUTING_KEY,
                                        execution_timeout=10,
                                        testrun_id=self.testrun_id,
                                        config_file=server_config_filename())

        #Create a zip file with a test definition
        zipfile_name = os.path.join(TESTS_MODULE_DIRNAME, "data",
                                    "test_definition_1.xml")
        steps = ["mkdir foo", "mkdir bar", "mkdir baz"]
        self._create_zip_test_definition_file(zipfile_name, steps)

        #Add a Task
        command = ["ots_mock", '"%s"' % (zipfile_name), "%s" % self.testrun_id]
        taskrunner.add_task(command)
        #
        command_quit = ["quit"]
        taskrunner.add_task(command_quit)

        # Callback to handle results
        def cb_handler(signal, dto, **kwargs):
            self.cb_called = True
            if isinstance(dto, Results):
                filename = dto.data.name
                if filename == "test_definition.xml":
                    self.test_definition_file_received = True
                    self.assertEquals(
                        EXPECTED.replace(' ', '').replace('\n', ''),
                        dto.data.read().replace(' ', '').replace('\n', ''))
                elif filename == "dummy_results_file.xml":
                    self.results_file_received = True
                    expected = self._dummy_results_xml(filename)
                    self.assertEquals(expected, dto.data.read())

        DTO_SIGNAL.connect(cb_handler)

        #Run...
        time_before_run = time.time()
        time.sleep(1)
        taskrunner.run()
        time.sleep(1)
        time_after_run = time.time()

        #Check the results
        if not DEBUG:
            foo = os.path.join(EXECUTION_DIRNAME, "foo")
            foo_time = os.path.getctime(foo)
            bar = os.path.join(EXECUTION_DIRNAME, "bar")
            bar_time = os.path.getctime(bar)
            baz = os.path.join(EXECUTION_DIRNAME, "baz")
            baz_time = os.path.getctime(baz)
            self.assertTrue(time_before_run < foo_time <= bar_time <= baz_time
                            <= time_after_run)
            self.assertTrue(self.results_file_received)
            self.assertTrue(self.test_definition_file_received)
            #
            self.assertFalse(all(self.worker_processes.exitcodes))

    def test_two_tasks_one_worker(self):
        if not DEBUG:
            self.worker_processes.start()
        self.testrun_id = 111
        taskrunner = taskrunner_factory(routing_key=ROUTING_KEY,
                                        execution_timeout=10,
                                        testrun_id=self.testrun_id,
                                        config_file=server_config_filename())
        #
        zipfile_1_name = os.path.join(TESTS_MODULE_DIRNAME, "data",
                                      "test_definition_1.xml")
        steps = ["mkdir foo"]
        self._create_zip_test_definition_file(zipfile_1_name, steps)
        command_1 = [
            "ots_mock",
            '"%s"' % (zipfile_1_name),
            "%s" % self.testrun_id
        ]
        taskrunner.add_task(command_1)
        #
        zipfile_2_name = os.path.join(TESTS_MODULE_DIRNAME, "data",
                                      "test_definition_2.xml")
        steps = ["mkdir bar"]
        self._create_zip_test_definition_file(zipfile_2_name, steps)
        command_2 = [
            "ots_mock",
            '"%s"' % (zipfile_2_name),
            "%s" % self.testrun_id
        ]
        taskrunner.add_task(command_2)
        #
        command_quit = ["quit"]
        taskrunner.add_task(command_quit)

        time_before_run = time.time()
        time.sleep(1)
        taskrunner.run()
        time.sleep(1)
        time_after_run = time.time()

        if not DEBUG:
            foo = os.path.join(EXECUTION_DIRNAME, "foo")
            foo_time = os.path.getctime(foo)
            bar = os.path.join(EXECUTION_DIRNAME, "bar")
            bar_time = os.path.getctime(bar)
            self.assertTrue(time_before_run < foo_time)
            self.assertTrue(foo_time <= bar_time)
            self.assertTrue(bar_time <= time_after_run)
            #
            self.assertFalse(all(self.worker_processes.exitcodes))

    def test_two_tasks_two_workers(self):
        if not DEBUG:
            self.worker_processes.start(2)
        self.testrun_id = 111
        taskrunner = taskrunner_factory(routing_key=ROUTING_KEY,
                                        execution_timeout=10,
                                        testrun_id=self.testrun_id,
                                        config_file=server_config_filename())
        #
        zipfile_1_name = os.path.join(TESTS_MODULE_DIRNAME, "data",
                                      "test_definition_1.xml")
        steps = ["mkdir foo", "sleep 2"]
        self._create_zip_test_definition_file(zipfile_1_name, steps)
        command_1 = [
            "ots_mock",
            '"%s"' % (zipfile_1_name),
            "%s" % self.testrun_id
        ]
        taskrunner.add_task(command_1)
        #
        zipfile_2_name = os.path.join(TESTS_MODULE_DIRNAME, "data",
                                      "test_definition_2.xml")
        steps = ["mkdir bar", "sleep 2"]
        self._create_zip_test_definition_file(zipfile_2_name, steps)
        command_2 = [
            "ots_mock",
            '"%s"' % (zipfile_2_name),
            "%s" % self.testrun_id
        ]
        taskrunner.add_task(command_2)
        #

        time_before_run = time.time()
        time.sleep(1)
        taskrunner.run()
        time.sleep(1)
        time_after_run = time.time()

        if not DEBUG:
            foo = os.path.join(EXECUTION_DIRNAME, "foo")
            foo_time = os.path.getctime(foo)
            bar = os.path.join(EXECUTION_DIRNAME, "bar")
            bar_time = os.path.getctime(bar)

            self.assertTrue(abs(foo_time - bar_time) < 0.1)
            self.assertTrue(time_before_run < foo_time <= time_after_run)
            self.assertTrue(time_before_run < bar_time <= time_after_run)

    #################################
    # HELPERS
    #################################

    def _create_zip_test_definition_file(self, zip_filename, steps):
        zip_filename = os.path.join(TESTS_MODULE_DIRNAME, "data", zip_filename)
        file = zipfile.ZipFile(zip_filename, "w")
        cases = ""
        for step in steps:
            case = CASE_TEMPLATE.replace("$NAME", step)
            case = case.replace("$STEP", step)
            cases += case
        tdf = TDF_TEMPLATE.replace("$CASES", cases)
        file.writestr(TEST_DEFINITION_XML, tdf)
        file.close()
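
`_create_zip_test_definition_file` fills in two module-level templates, `CASE_TEMPLATE` and `TDF_TEMPLATE`, which are defined elsewhere in the test module. Purely to illustrate the `$NAME`/`$STEP`/`$CASES` placeholders the helper substitutes (the project's real templates are fuller test-definition XML), they could look roughly like this:

# Hypothetical placeholder templates -- not the project's actual XML.
CASE_TEMPLATE = """
      <case name="$NAME">
        <step>$STEP</step>
      </case>
"""

TDF_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<testdefinition version="1.0">
  <suite name="dummy suite">
    <set name="dummy set">
      $CASES
    </set>
  </suite>
</testdefinition>
"""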
Example #9
class TestStateBehaviour(unittest.TestCase):

    def setUp(self):
        self.queue = ROUTING_KEY
        try:
            if not DEBUG:
                delete_queue("localhost", self.queue)
        except:
            pass
        #
        if not DEBUG:
            self.worker_processes = WorkerProcesses()
            self.testrun_id = None
            self.testrun_id2 = None
          
    def tearDown(self):
        if not DEBUG:
            self.worker_processes.terminate()
        if self.queue:
            delete_queue("localhost", self.queue)
        if self.testrun_id:
            delete_queue("localhost", testrun_queue_name(self.testrun_id))
        if self.testrun_id2:
            delete_queue("localhost", testrun_queue_name(self.testrun_id2))

    def test_failing_task(self):
        if not DEBUG:
            self.worker_processes.start()
        self.testrun_id = 111
        taskrunner = taskrunner_factory(
                             routing_key = ROUTING_KEY,
                             execution_timeout = 10,
                             testrun_id = self.testrun_id,
                             config_file = _distributor_config_filename())

        command = ["command_error_mock", "localhost", str(self.testrun_id)]
        taskrunner.add_task(command)
        
        self.is_exception_raised = False

        def cb_handler(signal, dto, **kwargs):
            if isinstance(dto, Exception):
                self.is_exception_raised = True

        DTO_SIGNAL.connect(cb_handler)
        
        taskrunner.run()
        
        self.assertTrue(self.is_exception_raised)
        self.send_quit()
        time.sleep(1)

        self.assertFalse(all(self.worker_processes.exitcodes))

    def test_worker_alive_after_failing_task(self):
        if not DEBUG:
            self.worker_processes.start()
        self.testrun_id = 111
        taskrunner1 = taskrunner_factory(
                             routing_key = ROUTING_KEY,
                             execution_timeout = 10,
                             testrun_id = self.testrun_id,
                             config_file = _distributor_config_filename())

        command = ["command_error_mock", "localhost", str(self.testrun_id)]
        taskrunner1.add_task(command)
        
        self.is_exception_raised = False

        def cb_handler(signal, dto, **kwargs):
            if isinstance(dto, Exception):
                self.is_exception_raised = True

        DTO_SIGNAL.connect(cb_handler)
        
        taskrunner1.run()
        
        self.assertTrue(self.is_exception_raised)
        
        self.is_exception_raised = False

        # Trigger another task to make sure worker is still alive
        taskrunner2 = taskrunner_factory(
                             routing_key = ROUTING_KEY,
                             execution_timeout = 10,
                             testrun_id = self.testrun_id,
                             config_file = _distributor_config_filename())

        command = ["echo", "foo"]
        taskrunner2.add_task(command)
        taskrunner2.run()
        self.assertFalse(self.is_exception_raised)

        self.send_quit()
        time.sleep(1)

        self.assertFalse(all(self.worker_processes.exitcodes))

    def test_worker_alive_after_server_timeout(self):
        if not DEBUG:
            self.worker_processes.start()
        self.testrun_id = 111
        self.testrun_id2 = 112
        taskrunner1 = taskrunner_factory(
                             routing_key = ROUTING_KEY,
                             execution_timeout = 10,
                             testrun_id = self.testrun_id,
                             config_file = _distributor_config_filename())

        command = ["sleep", "5"]
        taskrunner1.add_task(command)

        self.is_exception_raised = False

        def cb_handler(signal, dto, **kwargs):
            if isinstance(dto, Exception):
                self.is_exception_raised = True

        DTO_SIGNAL.connect(cb_handler)

        # Overwrite the server-side timeout handler with one that times out
        from ots.server.distributor.timeout import Timeout
        taskrunner1.timeout_handler = Timeout(1, 1, 1)

        self.assertRaises(OtsExecutionTimeoutError, taskrunner1.run)
        
#        self.assertTrue(self.is_exception_raised)
        
        self.is_exception_raised = False

        time.sleep(10) # Give worker time to reconnect

        # Trigger another task to make sure worker is still alive
        taskrunner2 = taskrunner_factory(
                             routing_key = ROUTING_KEY,
                             execution_timeout = 10,
                             testrun_id = self.testrun_id2,
                             config_file = _distributor_config_filename())
        taskrunner2.add_task(["echo", "foo"])
        taskrunner2.run()
        self.assertFalse(self.is_exception_raised)

        self.send_quit()
        time.sleep(1)

        self.assertFalse(all(self.worker_processes.exitcodes))

    def test_worker_alive_after_server_timeout_failing_task(self):
        if not DEBUG:
            self.worker_processes.start()
        self.testrun_id = 111
        self.testrun_id2 = 112
        taskrunner1 = taskrunner_factory(
                             routing_key = ROUTING_KEY,
                             execution_timeout = 10,
                             testrun_id = self.testrun_id,
                             config_file = _distributor_config_filename())

        command = ["sleep", "5",";",
                   "command_error_mock", "localhost", str(self.testrun_id)]
        taskrunner1.add_task(command)

        self.is_exception_raised = False

        def cb_handler(signal, dto, **kwargs):
            if isinstance(dto, Exception):
                self.is_exception_raised = True

        DTO_SIGNAL.connect(cb_handler)

        # Overwrite the server-side timeout handler with one that times out
        from ots.server.distributor.timeout import Timeout
        taskrunner1.timeout_handler = Timeout(1, 1, 1)

        self.assertRaises(OtsExecutionTimeoutError, taskrunner1.run)
        
#        self.assertTrue(self.is_exception_raised)
        
        self.is_exception_raised = False

        time.sleep(10) # Give worker time to reconnect


        # Trigger another task to make sure worker is still alive
        taskrunner2 = taskrunner_factory(
                             routing_key = ROUTING_KEY,
                             execution_timeout = 10,
                             testrun_id = self.testrun_id2,
                             config_file = _distributor_config_filename())
        taskrunner2.add_task(["echo", "foo"])
        taskrunner2.run()
        self.assertFalse(self.is_exception_raised)

        self.send_quit()
        time.sleep(1)

        self.assertFalse(all(self.worker_processes.exitcodes))


    def send_quit(self):
        cmd_msg = CommandMessage(["quit"],
                                 self.queue,
                                 111)
        message = pack_message(cmd_msg)

        conn = amqp.Connection(host = "localhost", 
                        userid = "guest",
                        password = "******",
                        virtual_host = "/", 
                        insist = False)
        channel = conn.channel()
        channel.basic_publish(message, 
                              exchange = ROUTING_KEY,
                              routing_key = ROUTING_KEY)
Example #10
class TestDeviceDistribution(unittest.TestCase):
    def setUp(self):
        self.queue = ROUTING_KEY

        # make sure there are no messages left in the worker queue
        # from previous runs:
        try:
            if not DEBUG:
                delete_queue("localhost", self.queue)
        except:
            pass
        #
        if not DEBUG:
            self.worker_processes = WorkerProcesses()
            self.testrun_id = None

    def tearDown(self):
        if not DEBUG:
            self.worker_processes.terminate()

        self._remove_zip_file(TEST_DEFINITION_ZIP)
        self._remove_files_created_by_remote_commands()

        if self.queue:
            delete_queue("localhost", self.queue)
        if self.testrun_id:
            delete_queue("localhost", testrun_queue_name(self.testrun_id))

    ###################
    # Tests
    ###################

    def test_one_task_one_worker(self):
        """
        The simplest configuration...
        Check that the results come back OK from the Worker 
        """

        self.test_definition_file_received = False
        self.results_file_received = False

        if not DEBUG:
            self.worker_processes.start()
        self.testrun_id = 111
        taskrunner = taskrunner_factory(
            routing_key=ROUTING_KEY,
            execution_timeout=10,
            testrun_id=self.testrun_id,
            config_file=server_config_filename(),
        )

        # Create a zip file with a test definition
        zipfile_name = os.path.join(TESTS_MODULE_DIRNAME, "data", "test_definition_1.xml")
        steps = ["mkdir foo", "mkdir bar", "mkdir baz"]
        self._create_zip_test_definition_file(zipfile_name, steps)

        # Add a Task
        command = ["ots_mock", '"%s"' % (zipfile_name), "%s" % self.testrun_id]
        taskrunner.add_task(command)
        #
        command_quit = ["quit"]
        taskrunner.add_task(command_quit)

        # Callback to handle results
        def cb_handler(signal, dto, **kwargs):
            self.cb_called = True
            if isinstance(dto, Results):
                filename = dto.data.name
                if filename == "test_definition.xml":
                    self.test_definition_file_received = True
                    self.assertEquals(
                        EXPECTED.replace(" ", "").replace("\n", ""), dto.data.read().replace(" ", "").replace("\n", "")
                    )
                elif filename == "dummy_results_file.xml":
                    self.results_file_received = True
                    expected = self._dummy_results_xml(filename)
                    self.assertEquals(expected, dto.data.read())

        DTO_SIGNAL.connect(cb_handler)

        # Run...
        time_before_run = time.time()
        time.sleep(1)
        taskrunner.run()
        time.sleep(1)
        time_after_run = time.time()

        # Check the results
        if not DEBUG:
            foo = os.path.join(EXECUTION_DIRNAME, "foo")
            foo_time = os.path.getctime(foo)
            bar = os.path.join(EXECUTION_DIRNAME, "bar")
            bar_time = os.path.getctime(bar)
            baz = os.path.join(EXECUTION_DIRNAME, "baz")
            baz_time = os.path.getctime(baz)
            self.assertTrue(time_before_run < foo_time <= bar_time <= baz_time <= time_after_run)
            self.assertTrue(self.results_file_received)
            self.assertTrue(self.test_definition_file_received)
            #
            self.assertFalse(all(self.worker_processes.exitcodes))

    def test_two_tasks_one_worker(self):
        if not DEBUG:
            self.worker_processes.start()
        self.testrun_id = 111
        taskrunner = taskrunner_factory(
            routing_key=ROUTING_KEY,
            execution_timeout=10,
            testrun_id=self.testrun_id,
            config_file=server_config_filename(),
        )
        #
        zipfile_1_name = os.path.join(TESTS_MODULE_DIRNAME, "data", "test_definition_1.xml")
        steps = ["mkdir foo"]
        self._create_zip_test_definition_file(zipfile_1_name, steps)
        command_1 = ["ots_mock", '"%s"' % (zipfile_1_name), "%s" % self.testrun_id]
        taskrunner.add_task(command_1)
        #
        zipfile_2_name = os.path.join(TESTS_MODULE_DIRNAME, "data", "test_definition_2.xml")
        steps = ["mkdir bar"]
        self._create_zip_test_definition_file(zipfile_2_name, steps)
        command_2 = ["ots_mock", '"%s"' % (zipfile_2_name), "%s" % self.testrun_id]
        taskrunner.add_task(command_2)
        #
        command_quit = ["quit"]
        taskrunner.add_task(command_quit)

        time_before_run = time.time()
        time.sleep(1)
        taskrunner.run()
        time.sleep(1)
        time_after_run = time.time()

        if not DEBUG:
            foo = os.path.join(EXECUTION_DIRNAME, "foo")
            foo_time = os.path.getctime(foo)
            bar = os.path.join(EXECUTION_DIRNAME, "bar")
            bar_time = os.path.getctime(bar)
            self.assertTrue(time_before_run < foo_time)
            self.assertTrue(foo_time <= bar_time)
            self.assertTrue(bar_time <= time_after_run)
            #
            self.assertFalse(all(self.worker_processes.exitcodes))

    def test_two_tasks_two_workers(self):
        if not DEBUG:
            self.worker_processes.start(2)
        self.testrun_id = 111
        taskrunner = taskrunner_factory(
            routing_key=ROUTING_KEY,
            execution_timeout=10,
            testrun_id=self.testrun_id,
            config_file=server_config_filename(),
        )
        #
        zipfile_1_name = os.path.join(TESTS_MODULE_DIRNAME, "data", "test_definition_1.xml")
        steps = ["mkdir foo", "sleep 2"]
        self._create_zip_test_definition_file(zipfile_1_name, steps)
        command_1 = ["ots_mock", '"%s"' % (zipfile_1_name), "%s" % self.testrun_id]
        taskrunner.add_task(command_1)
        #
        zipfile_2_name = os.path.join(TESTS_MODULE_DIRNAME, "data", "test_definition_2.xml")
        steps = ["mkdir bar", "sleep 2"]
        self._create_zip_test_definition_file(zipfile_2_name, steps)
        command_2 = ["ots_mock", '"%s"' % (zipfile_2_name), "%s" % self.testrun_id]
        taskrunner.add_task(command_2)
        #

        time_before_run = time.time()
        time.sleep(1)
        taskrunner.run()
        time.sleep(1)
        time_after_run = time.time()

        if not DEBUG:
            foo = os.path.join(EXECUTION_DIRNAME, "foo")
            foo_time = os.path.getctime(foo)
            bar = os.path.join(EXECUTION_DIRNAME, "bar")
            bar_time = os.path.getctime(bar)

            self.assertTrue(abs(foo_time - bar_time) < 0.1)
            self.assertTrue(time_before_run < foo_time <= time_after_run)
            self.assertTrue(time_before_run < bar_time <= time_after_run)

    #################################
    # HELPERS
    #################################

    def _create_zip_test_definition_file(self, zip_filename, steps):
        zip_filename = os.path.join(TESTS_MODULE_DIRNAME, "data", zip_filename)
        file = zipfile.ZipFile(zip_filename, "w")
        cases = ""
        for step in steps:
            case = CASE_TEMPLATE.replace("$NAME", step)
            case = case.replace("$STEP", step)
            cases += case
        tdf = TDF_TEMPLATE.replace("$CASES", cases)
        file.writestr(TEST_DEFINITION_XML, tdf)
        file.close()

    def _remove_zip_file(self, filename):
        zip_file_name = os.path.join(TESTS_MODULE_DIRNAME, "data", filename)
        if os.path.exists(zip_file_name):
            os.unlink(zip_file_name)

    def _remove_files_created_by_remote_commands(self):
        foo = os.path.join(EXECUTION_DIRNAME, "foo")
        if os.path.exists(foo):
            os.rmdir(foo)
        bar = os.path.join(EXECUTION_DIRNAME, "bar")
        if os.path.exists(bar):
            os.rmdir(bar)
        baz = os.path.join(EXECUTION_DIRNAME, "baz")
        if os.path.exists(baz):
            os.rmdir(baz)

    @staticmethod
    def _worker_config_filename():
        module = os.path.dirname(os.path.abspath(ots.worker.__file__))
        worker_config_filename = os.path.join(module, "config.ini")
        if not os.path.exists(worker_config_filename):
            raise Exception("%s not found" % (worker_config_filename))
        return worker_config_filename

    @staticmethod
    def _dummy_results_xml(filename):
        dirname = os.path.dirname(os.path.abspath(ots.worker.tests.__file__))
        fqname = os.path.join(dirname, "data", filename)
        return open(fqname, "r").read()

    @staticmethod
    def _dummy_test_definition_xml_fqname(filename):
        distributor_dirname = os.path.dirname(os.path.abspath(ots.server.distributor.__file__))
        return os.path.join(distributor_dirname, "tests", "data", filename)
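
Every example above also leans on `WorkerProcesses`: `start()` launches one or more worker processes and returns their PIDs (the queue-distribution tests compare these against the `$PPID` written by the tasks), `terminate()` stops them, and `exitcodes` is inspected after a `quit` task. A minimal sketch of such a helper built on `multiprocessing`, assuming a worker entry point that takes a config filename; the real OTS implementation differs in detail:

# Illustrative sketch of a WorkerProcesses-style helper; the real OTS class
# differs. _run_worker is a stand-in for starting an actual OTS worker.
import multiprocessing


def _run_worker(config_filename):
    # Placeholder: in the real tests this would configure an OTS worker from
    # config_filename and consume tasks until a "quit" command arrives.
    pass


class WorkerProcesses(object):

    def __init__(self):
        self._processes = []

    def start(self, count=1, config_filename=None):
        """Start `count` workers and return their process ids."""
        pids = []
        for _ in range(count):
            process = multiprocessing.Process(target=_run_worker,
                                              args=(config_filename,))
            process.start()
            self._processes.append(process)
            pids.append(process.pid)
        return pids

    def terminate(self):
        for process in self._processes:
            if process.is_alive():
                process.terminate()
            process.join()

    @property
    def exitcodes(self):
        # The tests assert `not all(exitcodes)`, i.e. at least one clean
        # (zero) exit after sending "quit".
        return [process.exitcode for process in self._processes]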