Beispiel #1
0
 def test_job_safe(self):
     """A pipeline of only safe actions must run to completion without raising."""
     self.assertIsNotNone(self.fakejob.timeout)
     fake_pipeline = TestTimeout.FakePipeline(job=self.fakejob)
     first_action = TestTimeout.SafeAction()
     fake_pipeline.add_action(first_action)
     fake_pipeline.add_action(TestTimeout.SafeAction())
     final_action = FinalizeAction()
     final_action.parameters['namespace'] = 'common'
     fake_pipeline.add_action(final_action)
     self.fakejob.device = TestTimeout.FakeDevice()
     self.fakejob.pipeline = fake_pipeline
     # Any failure surfaces as an exception raised by run().
     self.fakejob.run()
Beispiel #2
0
 def test_job_safe(self):
     """Safe actions only: the job should finish cleanly."""
     self.assertIsNotNone(self.fakejob.timeout)
     job_pipeline = TestTimeout.FakePipeline(job=self.fakejob)
     safe_one = TestTimeout.SafeAction()
     job_pipeline.add_action(safe_one)
     job_pipeline.add_action(TestTimeout.SafeAction())
     finalizer = FinalizeAction()
     finalizer.parameters['namespace'] = 'common'
     job_pipeline.add_action(finalizer)
     self.fakejob.pipeline = job_pipeline
     self.fakejob.device = TestTimeout.FakeDevice()
     # run() raising would fail this test; no assertRaises is wanted here.
     self.fakejob.run()
Beispiel #3
0
 def test_job_timeout(self):
     """A LongAction in the pipeline must make run() raise JobError."""
     self.assertIsNotNone(self.fakejob.timeout)
     test_pipeline = TestTimeout.FakePipeline(job=self.fakejob)
     test_pipeline.add_action(TestTimeout.LongAction())
     test_pipeline.add_action(TestTimeout.SafeAction())
     closing_action = FinalizeAction()
     closing_action.parameters['namespace'] = 'common'
     test_pipeline.add_action(closing_action)
     self.fakejob.device = TestTimeout.FakeDevice()
     self.fakejob.pipeline = test_pipeline
     with self.assertRaises(JobError):
         self.fakejob.run()
Beispiel #4
0
 def test_job_timeout(self):
     """Expect JobError when a long-running action exceeds the job timeout."""
     self.assertIsNotNone(self.fakejob.timeout)
     built = TestTimeout.FakePipeline(job=self.fakejob)
     slow_action = TestTimeout.LongAction()
     built.add_action(slow_action)
     built.add_action(TestTimeout.SafeAction())
     end_action = FinalizeAction()
     end_action.parameters['namespace'] = 'common'
     built.add_action(end_action)
     self.fakejob.pipeline = built
     self.fakejob.device = TestTimeout.FakeDevice()
     with self.assertRaises(JobError):
         self.fakejob.run()
Beispiel #5
0
 def test_long_job_safe(self):
     """An 8-second job timeout covers several short (2s) actions.

     The same SafeAction instance is added twice — presumably intentional,
     so its 2-second action timeout is exercised more than once within the
     8-second job timeout; TODO confirm against the original test intent.
     """
     self.fakejob.timeout.duration = 8
     self.assertIsNotNone(self.fakejob.timeout)
     pipeline = TestTimeout.FakePipeline(job=self.fakejob)
     self.fakejob.pipeline = pipeline
     action = TestTimeout.SafeAction()
     action.timeout.duration = 2
     pipeline.add_action(action)
     pipeline.add_action(action)
     pipeline.add_action(TestTimeout.FakeSafeAction())
     pipeline.add_action(TestTimeout.FakeSafeAction())
     finalize = FinalizeAction()
     finalize.parameters['namespace'] = 'common'
     pipeline.add_action(finalize)
     # Fix: dropped the redundant second `self.fakejob.pipeline = pipeline`
     # assignment — the pipeline was already attached right after creation.
     self.fakejob.device = TestTimeout.FakeDevice()
     self.fakejob.run()
Beispiel #6
0
 def test_long_job_safe(self):
     """Job-level timeout of 8 seconds must not trip for a set of 2s actions.

     NOTE(review): the same SafeAction object is deliberately enqueued twice;
     confirm this matches the timing the test means to exercise.
     """
     self.fakejob.timeout.duration = 8
     self.assertIsNotNone(self.fakejob.timeout)
     pipeline = TestTimeout.FakePipeline(job=self.fakejob)
     self.fakejob.pipeline = pipeline
     action = TestTimeout.SafeAction()
     action.timeout.duration = 2
     pipeline.add_action(action)
     pipeline.add_action(action)
     pipeline.add_action(TestTimeout.FakeSafeAction())
     pipeline.add_action(TestTimeout.FakeSafeAction())
     finalize = FinalizeAction()
     finalize.parameters['namespace'] = 'common'
     pipeline.add_action(finalize)
     # Fix: removed the duplicate `self.fakejob.pipeline = pipeline` that
     # re-assigned the already-attached pipeline at the end of the test.
     self.fakejob.device = TestTimeout.FakeDevice()
     self.fakejob.run()
Beispiel #7
0
    def parse(self, content, device, job_id, logger, dispatcher_config,
              env_dut=None):
        """Parse a YAML job definition and build a CCMJob with its pipeline.

        :param content: YAML job definition text.
        :param device: device configuration used when selecting action classes.
        :param job_id: identifier given to the new job.
        :param logger: logger handed to the CCMJob.
        :param dispatcher_config: optional YAML string of dispatcher settings.
        :param env_dut: optional DUT environment, stored in job parameters.
        :return: the populated CCMJob.
        :raises JobError: for unknown action names or a failed compatibility check.
        """
        # SafeLoader with monkeypatched compose/construct hooks — presumably to
        # record YAML line numbers ('yaml_line') during composition; the keys
        # are stripped again below. TODO confirm against compose_node.
        self.loader = yaml.SafeLoader(content)
        self.loader.compose_node = self.compose_node
        self.loader.construct_mapping = self.construct_mapping
        data = self.loader.get_single_data()
        job = CCMJob(job_id, data, logger)
        test_counts = {}  # per-namespace running count of test actions
        job.device = device
        job.parameters['env_dut'] = env_dut
        # Load the dispatcher config
        job.parameters['dispatcher'] = {}
        if dispatcher_config is not None:
            config = yaml.safe_load(dispatcher_config)
            if isinstance(config, dict):
                job.parameters['dispatcher'] = config

        def deploy_local_source( src_path ):
            """Mirror a source artefact under the dispatcher's local web root.

            The destination is src_path with the 'source/' path component
            removed, e.g.
            src_path = '/var/www/lava/source/qemu/kvm-debian-wheezy.img.gz'
            Repositories and 'latest' links are symlinked; plain files copied.
            """
            debug=True
            def print_debug(msg=''):
                # With debug on, append to a scratch file instead of stdout.
                if debug:
                    print(msg, file=open('/tmp/deploy_local_source.txt', 'a'))
                else:
                    print(msg)
            import os
            import shutil
            import random
            # Drop every 'source/' segment from the path to get the target.
            dst_path = ''.join(src_path.split('source/'))
            is_repo = False
            latest_path = None  # NOTE(review): assigned but never read
            if os.path.isdir(src_path):
                # A directory with 'repodata' anywhere below it is treated as
                # a package repository and symlinked rather than copied.
                for root, dirs, files in os.walk(src_path):
                    if 'repodata' in dirs:
                        is_repo = True
                        break

            if 'latest' in src_path:
                # Resolve the 'latest' symlink so both the link and its real
                # target get mirrored on the destination side.
                path_list = src_path.split('latest')
                src_link_path = path_list[0]+'latest'
                src_real_path = os.path.realpath(src_link_path)
                dst_link_path = ''.join(src_link_path.split('source'))
                dst_real_path = ''.join(src_real_path.split('source'))
                index = src_path.split('/').index('latest')
                link_dir = '/'+src_path.split('/')[index+1]
            else:
                src_real_path = os.path.dirname(src_path)
                dst_real_path = os.path.dirname(dst_path)
                link_dir = '/'+os.path.basename(src_path)

            os.makedirs(dst_real_path, exist_ok=True)

            if 'latest' in src_path:
                # Recreate the relative 'latest' symlink on the destination,
                # replacing it only when it points at a stale target.
                link_cmd = "ln -rs %s %s" % (dst_real_path, dst_link_path)
                print_debug(link_cmd)
                if os.path.exists(dst_link_path):
                    print_debug('dst_real_path:'+dst_real_path + '\ndst_link_path:'+dst_link_path)
                    if os.path.realpath(dst_real_path) != os.path.realpath(dst_link_path):
                        os.remove(dst_link_path)
                        os.system(link_cmd)
                else:
                    os.system(link_cmd)
            if is_repo and not os.path.exists(dst_real_path+link_dir):
                # Repositories are symlinked wholesale instead of copied.
                link_cmd = "ln -rs %s %s" % (src_real_path+link_dir, dst_real_path+link_dir)
                os.system(link_cmd)
            else:
                # Plain files: copy via a randomised temporary name first —
                # presumably to avoid a concurrent deploy seeing a partial
                # copy; TODO confirm the intended race semantics.
                dir = os.path.dirname(dst_path)
                os.makedirs(dir, exist_ok=True)
                if not os.path.exists(dst_path):
                    end = str(random.random()).replace('.','')
                    shutil.copy(src_path, dst_path+end)
                    if not os.path.exists(dst_path):
                        shutil.move(dst_path+end, dst_path)
                    else:
                        os.remove(dst_path+end)
                print_debug('src_path :'+src_path + '\ndst_path:'+dst_path)

        # replace source http server as dispatcher server.
        import re
        def redefine_def_url( param ):
            # Recursively rewrite any string parameter that points at the
            # central source server so it targets this dispatcher instead,
            # mirroring the artefact locally first via deploy_local_source().
            if type(param) is dict:
                for i in param.keys():
                    param[i] = redefine_def_url(param[i])
            elif type(param) is list:
                # NOTE(review): rebinding the loop variable does not write the
                # result back into the list — replaced *strings* inside lists
                # are lost (nested dicts still mutate in place). Verify intent.
                for j in param:
                    j = redefine_def_url(j)
            elif type(param) is str:
                # Pick the dispatcher HTTP port: 18080 when dispatcher_ip
                # already carries a port component, 8080 otherwise.
                tmp = dispatcher_ip.split(':')
                if len(tmp) == 2:
                    tmp_ip = tmp[0]+':18080'
                else:
                    tmp_ip = tmp[0]+':8080'
                if 'http://pek-lava.wrs.com' in param:
                    src_path = re.sub('http:.*8080', '/var/www/lava/source', param)
                    deploy_local_source(src_path)
                    param = re.sub('http:.*8080', 'http://'+tmp_ip, param)
            return param

        # NOTE(review): raises KeyError if the dispatcher config carried no
        # 'dispatcher_ip' — confirm callers always supply it.
        dispatcher_ip =job.parameters['dispatcher']['dispatcher_ip']
        redefine_def_url(job.parameters)

        level_tuple = Protocol.select_all(job.parameters)
        # sort the list of protocol objects by the protocol class level.
        job.protocols = [item[0](job.parameters, job_id) for item in sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
        pipeline = Pipeline(job=job)
        self._timeouts(data, job)

        # deploy and boot classes can populate the pipeline differently depending
        # on the test action type they are linked with (via namespacing).
        # This code builds an information dict for each namespace which is then
        # passed as a parameter to each Action class to use.
        test_info = {}
        test_actions = ([action for action in data['actions'] if 'test' in action])
        for test_action in test_actions:
            test_parameters = test_action['test']
            test_type = LavaTest.select(device, test_parameters)
            namespace = test_parameters.get('namespace', 'common')
            connection_namespace = test_parameters.get('connection-namespace', namespace)
            if namespace in test_info:
                test_info[namespace].append({'class': test_type, 'parameters': test_parameters})
            else:
                test_info.update({namespace: [{'class': test_type, 'parameters': test_parameters}]})
            if namespace != connection_namespace:
                # NOTE(review): update() replaces any existing list for
                # connection_namespace rather than appending — confirm intended.
                test_info.update({connection_namespace: [{'class': test_type, 'parameters': test_parameters}]})

        # FIXME: also read permissable overrides from device config and set from job data
        # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
        for action_data in data['actions']:
            # Strip the line-tracking key injected by the patched loader.
            action_data.pop('yaml_line', None)
            for name in action_data:
                # Set a default namespace if needed
                namespace = action_data[name].setdefault('namespace', 'common')
                test_counts.setdefault(namespace, 1)

                if name == 'deploy' or name == 'boot' or name == 'test':
                    parse_action(action_data, name, device, pipeline,
                                 test_info, test_counts[namespace])
                    if name == 'test':
                        test_counts[namespace] += 1
                elif name == 'repeat':
                    count = action_data[name]['count']  # first list entry must be the count dict
                    repeats = action_data[name]['actions']
                    for c_iter in range(count):
                        for repeating in repeats:  # block of YAML to repeat
                            for repeat_action in repeating:  # name of the action for this block
                                if repeat_action == 'yaml_line':
                                    continue
                                repeating[repeat_action]['repeat-count'] = c_iter
                                namespace = repeating[repeat_action].setdefault('namespace', 'common')
                                test_counts.setdefault(namespace, 1)
                                parse_action(repeating, repeat_action, device,
                                             pipeline, test_info, test_counts[namespace])
                                if repeat_action == 'test':
                                    test_counts[namespace] += 1

                elif name == 'command':
                    action = CommandAction()
                    action.parameters = action_data[name]
                    pipeline.add_action(action)

                else:
                    raise JobError("Unknown action name '%s'" % name)

        # there's always going to need to be a finalize_process action
        finalize = FinalizeAction()
        pipeline.add_action(finalize)
        finalize.populate(None)
        job.pipeline = pipeline
        if 'compatibility' in data:
            try:
                job_c = int(job.compatibility)
                data_c = int(data['compatibility'])
            except ValueError as exc:
                raise JobError('invalid compatibility value: %s' % exc)
            if job_c < data_c:
                raise JobError('Dispatcher unable to meet job compatibility requirement. %d > %d' % (job_c, data_c))
        return job
Beispiel #8
0
    def parse(self, content, device, job_id, logger, dispatcher_config, env_dut=None):
        """Parse a YAML job definition and build a Job with its pipeline.

        :param content: YAML job definition text.
        :param device: device configuration used when selecting action classes.
        :param job_id: identifier given to the new job.
        :param logger: logger handed to the Job.
        :param dispatcher_config: optional YAML string of dispatcher settings.
        :param env_dut: optional DUT environment, stored in job parameters.
        :return: the populated Job.
        :raises JobError: for unknown action names or a failed compatibility check.
        """
        data = yaml.safe_load(content)
        job = Job(job_id, data, logger)
        # Per-namespace running count of test actions that need an overlay.
        test_counts = {}
        job.device = device
        job.parameters["env_dut"] = env_dut
        # Load the dispatcher config
        job.parameters["dispatcher"] = {}
        if dispatcher_config is not None:
            config = yaml.safe_load(dispatcher_config)
            if isinstance(config, dict):
                job.parameters["dispatcher"] = config

        level_tuple = Protocol.select_all(job.parameters)
        # sort the list of protocol objects by the protocol class level.
        job.protocols = [
            item[0](job.parameters, job_id)
            for item in sorted(level_tuple, key=lambda level_tuple: level_tuple[1])
        ]
        pipeline = Pipeline(job=job)
        self._timeouts(data, job)

        # deploy and boot classes can populate the pipeline differently depending
        # on the test action type they are linked with (via namespacing).
        # This code builds an information dict for each namespace which is then
        # passed as a parameter to each Action class to use.
        test_actions = [action for action in data["actions"] if "test" in action]
        for test_action in test_actions:
            test_parameters = test_action["test"]
            test_type = LavaTest.select(device, test_parameters)
            namespace = test_parameters.get("namespace", "common")
            connection_namespace = test_parameters.get(
                "connection-namespace", namespace
            )
            if namespace in job.test_info:
                job.test_info[namespace].append(
                    {"class": test_type, "parameters": test_parameters}
                )
            else:
                job.test_info.update(
                    {namespace: [{"class": test_type, "parameters": test_parameters}]}
                )
            if namespace != connection_namespace:
                # NOTE(review): update() replaces any existing list for
                # connection_namespace rather than appending — confirm intended.
                job.test_info.update(
                    {
                        connection_namespace: [
                            {"class": test_type, "parameters": test_parameters}
                        ]
                    }
                )

        # FIXME: also read permissable overrides from device config and set from job data
        # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
        for action_data in data["actions"]:
            for name in action_data:
                # Set a default namespace if needed
                namespace = action_data[name].setdefault("namespace", "common")
                test_counts.setdefault(namespace, 1)

                if name == "deploy" or name == "boot" or name == "test":
                    action = parse_action(
                        action_data,
                        name,
                        device,
                        pipeline,
                        job.test_info,
                        test_counts[namespace],
                    )
                    # Only overlay-based test actions advance the counter.
                    if name == "test" and action.needs_overlay():
                        test_counts[namespace] += 1
                elif name == "repeat":
                    count = action_data[name][
                        "count"
                    ]  # first list entry must be the count dict
                    repeats = action_data[name]["actions"]
                    for c_iter in range(count):
                        for repeating in repeats:  # block of YAML to repeat
                            for (
                                repeat_action
                            ) in repeating:  # name of the action for this block
                                repeating[repeat_action]["repeat-count"] = c_iter
                                namespace = repeating[repeat_action].setdefault(
                                    "namespace", "common"
                                )
                                test_counts.setdefault(namespace, 1)
                                action = parse_action(
                                    repeating,
                                    repeat_action,
                                    device,
                                    pipeline,
                                    job.test_info,
                                    test_counts[namespace],
                                )
                                if repeat_action == "test" and action.needs_overlay():
                                    test_counts[namespace] += 1

                elif name == "command":
                    action = CommandAction()
                    action.parameters = action_data[name]
                    pipeline.add_action(action)

                else:
                    raise JobError("Unknown action name '%s'" % name)

        # there's always going to need to be a finalize_process action
        finalize = FinalizeAction()
        pipeline.add_action(finalize)
        finalize.populate(None)
        job.pipeline = pipeline
        if "compatibility" in data:
            try:
                job_c = int(job.compatibility)
                data_c = int(data["compatibility"])
            except ValueError as exc:
                raise JobError("invalid compatibility value: %s" % exc)
            if job_c < data_c:
                raise JobError(
                    "Dispatcher unable to meet job compatibility requirement. %d > %d"
                    % (job_c, data_c)
                )
        return job
Beispiel #9
0
    def parse(self, content, device, job_id, logger, dispatcher_config,
              env_dut=None):
        """Parse a YAML job definition and build a Job with its pipeline.

        :param content: YAML job definition text.
        :param device: device configuration used when selecting action classes.
        :param job_id: identifier given to the new job.
        :param logger: logger handed to the Job.
        :param dispatcher_config: optional YAML string of dispatcher settings.
        :param env_dut: optional DUT environment, stored in job parameters.
        :return: the populated Job.
        :raises JobError: for unknown action names or a failed compatibility check.
        """
        # SafeLoader with monkeypatched compose/construct hooks — presumably to
        # record YAML line numbers ('yaml_line'), which are stripped again
        # below. TODO confirm against compose_node/construct_mapping.
        self.loader = yaml.SafeLoader(content)
        self.loader.compose_node = self.compose_node
        self.loader.construct_mapping = self.construct_mapping
        data = self.loader.get_single_data()
        job = Job(job_id, data, logger)
        test_counts = {}  # per-namespace running count of test actions
        job.device = device
        job.parameters['env_dut'] = env_dut
        # Load the dispatcher config
        job.parameters['dispatcher'] = {}
        if dispatcher_config is not None:
            config = yaml.safe_load(dispatcher_config)
            if isinstance(config, dict):
                job.parameters['dispatcher'] = config

        level_tuple = Protocol.select_all(job.parameters)
        # sort the list of protocol objects by the protocol class level.
        job.protocols = [item[0](job.parameters, job_id) for item in sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
        pipeline = Pipeline(job=job)
        self._timeouts(data, job)

        # deploy and boot classes can populate the pipeline differently depending
        # on the test action type they are linked with (via namespacing).
        # This code builds an information dict for each namespace which is then
        # passed as a parameter to each Action class to use.
        test_info = {}
        test_actions = ([action for action in data['actions'] if 'test' in action])
        for test_action in test_actions:
            test_parameters = test_action['test']
            test_type = LavaTest.select(device, test_parameters)
            namespace = test_parameters.get('namespace', 'common')
            connection_namespace = test_parameters.get('connection-namespace', namespace)
            if namespace in test_info:
                test_info[namespace].append({'class': test_type, 'parameters': test_parameters})
            else:
                test_info.update({namespace: [{'class': test_type, 'parameters': test_parameters}]})
            if namespace != connection_namespace:
                # NOTE(review): update() replaces any existing list for
                # connection_namespace rather than appending — confirm intended.
                test_info.update({connection_namespace: [{'class': test_type, 'parameters': test_parameters}]})

        # FIXME: also read permissable overrides from device config and set from job data
        # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
        for action_data in data['actions']:
            # Strip the line-tracking key injected by the patched loader.
            action_data.pop('yaml_line', None)
            for name in action_data:
                # Set a default namespace if needed
                namespace = action_data[name].setdefault('namespace', 'common')
                test_counts.setdefault(namespace, 1)

                if name == 'deploy' or name == 'boot' or name == 'test':
                    parse_action(action_data, name, device, pipeline,
                                 test_info, test_counts[namespace])
                    if name == 'test':
                        test_counts[namespace] += 1
                elif name == 'repeat':
                    count = action_data[name]['count']  # first list entry must be the count dict
                    repeats = action_data[name]['actions']
                    for c_iter in range(count):
                        for repeating in repeats:  # block of YAML to repeat
                            for repeat_action in repeating:  # name of the action for this block
                                if repeat_action == 'yaml_line':
                                    continue
                                repeating[repeat_action]['repeat-count'] = c_iter
                                namespace = repeating[repeat_action].setdefault('namespace', 'common')
                                test_counts.setdefault(namespace, 1)
                                parse_action(repeating, repeat_action, device,
                                             pipeline, test_info, test_counts[namespace])
                                if repeat_action == 'test':
                                    test_counts[namespace] += 1

                elif name == 'command':
                    action = CommandAction()
                    action.parameters = action_data[name]
                    pipeline.add_action(action)

                else:
                    raise JobError("Unknown action name '%s'" % name)

        # there's always going to need to be a finalize_process action
        finalize = FinalizeAction()
        pipeline.add_action(finalize)
        finalize.populate(None)
        job.pipeline = pipeline
        if 'compatibility' in data:
            try:
                job_c = int(job.compatibility)
                data_c = int(data['compatibility'])
            except ValueError as exc:
                raise JobError('invalid compatibility value: %s' % exc)
            if job_c < data_c:
                raise JobError('Dispatcher unable to meet job compatibility requirement. %d > %d' % (job_c, data_c))
        return job