Example #1
    def test_run__skip_from_init(self):
        """ A flow can receive during init a list of tasks to skip """

        # instantiate a flow with two tasks
        flow_config = FlowConfig({
            "description": "Run two tasks",
            "steps": {
                1: {
                    "task": "pass_name"
                },
                2: {
                    "task": "name_response",
                    "options": {
                        "response": "^^pass_name.name"
                    },
                },
            },
        })
        flow = FlowCoordinator(self.project_config,
                               flow_config,
                               skip=["name_response"])
        flow.run(self.org_config)

        # the number of results should be 1 instead of 2
        self.assertEqual(1, len(flow.results))
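
Here pass_name and name_response are test fixture tasks. A minimal sketch (class and module layout assumed, not taken from this suite) of what a task like pass_name could look like: it publishes a value through return_values, which a later step reads back with the ^^pass_name.name option syntax used above.

from cumulusci.core.tasks import BaseTask


class PassName(BaseTask):
    """Hypothetical fixture task that exposes a name via return_values."""

    def _run_task(self):
        # A later step can reference this value as ^^pass_name.name
        self.return_values = {"name": "supername"}
        return self.return_values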
Example #2
    def test_run__no_steps(self):
        """ A flow with no tasks will have no results. """
        flow_config = FlowConfig({"description": "Run no tasks", "steps": {}})
        flow = FlowCoordinator(self.project_config, flow_config)
        flow.run(self.org_config)

        self.assertEqual([], flow.steps)
        self.assertEqual([], flow.results)
    def test_run__task_raises_exception_fail(self):
        """ A flow aborts when a task raises an exception """

        flow_config = FlowConfig(
            {"description": "Run a task", "steps": {1: {"task": "raise_exception"}}}
        )
        flow = FlowCoordinator(self.project_config, flow_config)
        with self.assertRaises(Exception):
            flow.run(self.org_config)
Example #5
    def test_run__skip_conditional_step(self):
        """ A step with a falsy "when" expression is skipped """
        flow_config = FlowConfig(
            {"steps": {1: {"task": "pass_name", "when": "False"}}}
        )
        flow = FlowCoordinator(self.project_config, flow_config)
        flow.run(self.org_config)
        assert len(flow.results) == 0

    def test_run__nested_flow(self):
        """ Flows can run inside other flows """
        flow_config = FlowConfig(
            {
                "description": "Run a task and a flow",
                "steps": {1: {"task": "pass_name"}, 2: {"flow": "nested_flow"}},
            }
        )
        flow = FlowCoordinator(self.project_config, flow_config)
        flow.run(self.org_config)
        self.assertEqual(2, len(flow.steps))
        self.assertEqual(flow.results[0].return_values, flow.results[1].return_values)

    def test_run__skip_flow_None(self):
        """ A step whose task is "None" is skipped entirely """
        flow_config = FlowConfig(
            {
                "description": "A flow that skips its only step",
                "steps": {1: {"task": "None"}},
            }
        )
        callbacks = mock.Mock()
        flow = FlowCoordinator(
            self.project_config, flow_config, name="skip", callbacks=callbacks
        )
        flow.run(self.org_config)
        callbacks.pre_task.assert_not_called()
    def test_run__one_task(self):
        """ A flow with one task will execute the task """
        flow_config = FlowConfig(
            {"description": "Run one task", "steps": {1: {"task": "pass_name"}}}
        )
        flow = FlowCoordinator(self.project_config, flow_config)
        self.assertEqual(1, len(flow.steps))

        flow.run(self.org_config)

        self.assertTrue(
            any(flow_config.description in s for s in self.flow_log["info"])
        )
        self.assertEqual({"name": "supername"}, flow.results[0].return_values)
    def test_run__prints_org_id(self):
        """ A flow with an org prints the org ID """

        flow_config = FlowConfig(
            {
                "description": "Run two tasks",
                "steps": {1: {"task": "pass_name"}, 2: {"task": "sfdc_task"}},
            }
        )
        flow = FlowCoordinator(self.project_config, flow_config)
        flow.run(self.org_config)

        org_id_logs = [s for s in self.flow_log["info"] if ORG_ID in s]

        self.assertEqual(1, len(org_id_logs))
    def test_run__task_raises_exception_ignore(self):
        """ A flow continues when a task configured with ignore_failure raises an exception """

        flow_config = FlowConfig(
            {
                "description": "Run a task",
                "steps": {
                    1: {"task": "raise_exception", "ignore_failure": True},
                    2: {"task": "pass_name"},
                },
            }
        )
        flow = FlowCoordinator(self.project_config, flow_config)
        flow.run(self.org_config)
        self.assertEqual(2, len(flow.results))
        self.assertIsNotNone(flow.results[0].exception)
    def test_run__nested_option_backrefs(self):
        """ Option backrefs can reach into the results of a nested flow """
        flow_config = FlowConfig(
            {
                "description": "Run two tasks",
                "steps": {
                    1: {"flow": "nested_flow"},
                    2: {
                        "task": "name_response",
                        "options": {"response": "^^nested_flow.pass_name.name"},
                    },
                },
            }
        )

        flow = FlowCoordinator(self.project_config, flow_config)
        flow.run(self.org_config)

        self.assertEqual("supername", flow.results[-1].result)
Example #12
    def test_run__option_backref_not_found(self):
        """ An option backref to an unknown task raises NameError """
        # instantiate a flow with two tasks
        flow_config = FlowConfig({
            "description": "Run two tasks",
            "steps": {
                1: {
                    "task": "pass_name"
                },
                2: {
                    "task": "name_response",
                    "options": {
                        "response": "^^bogus.name"
                    },
                },
            },
        })

        flow = FlowCoordinator(self.project_config, flow_config)
        with self.assertRaises(NameError):
            flow.run(self.org_config)
    def test_run__option_backrefs(self):
        """ A flow's options reach into return values from other tasks. """

        # instantiate a flow with two tasks
        flow_config = FlowConfig(
            {
                "description": "Run two tasks",
                "steps": {
                    1: {"task": "pass_name"},
                    2: {
                        "task": "name_response",
                        "options": {"response": "^^pass_name.name"},
                    },
                },
            }
        )

        flow = FlowCoordinator(self.project_config, flow_config)
        flow.run(self.org_config)
        # the flow results for the second task should be 'name'
        self.assertEqual("supername", flow.results[1].result)
    def test_run__skip_from_init(self):
        """ A flow can receive during init a list of tasks to skip """

        # instantiate a flow with two tasks
        flow_config = FlowConfig(
            {
                "description": "Run two tasks",
                "steps": {
                    1: {"task": "pass_name"},
                    2: {
                        "task": "name_response",
                        "options": {"response": "^^pass_name.name"},
                    },
                },
            }
        )
        flow = FlowCoordinator(self.project_config, flow_config, skip=["name_response"])
        flow.run(self.org_config)

        # the number of results should be 1 instead of 2
        self.assertEqual(1, len(flow.results))
Example #15
    def test_run__nested_flow_2(self):
        """ Flows can run inside other flows and call other flows """
        self.project_config.config["flows"]["test"] = {
            "description": "Run a task and a flow",
            "steps": {1: {"task": "pass_name"}, 2: {"flow": "nested_flow_2"}},
        }
        flow_config = self.project_config.get_flow("test")
        flow = FlowCoordinator(self.project_config, flow_config)
        flow.run(self.org_config)
        self.assertEqual(3, len(flow.steps))
        self.assertEqual(flow.results[0].return_values, flow.results[1].return_values)
        self.assertEqual(flow.results[1].return_values, flow.results[2].return_values)
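
The nested_flow and nested_flow_2 names above are fixture flows defined elsewhere in the test project config. A sketch of definitions that would be consistent with the three-step expansion asserted above (assumed for illustration, mirroring the flows["test"] assignment in the test):

# Assumed fixture definitions; the real test setup may differ.
self.project_config.config["flows"]["nested_flow"] = {
    "description": "A flow that runs inside another flow",
    "steps": {1: {"task": "pass_name"}},
}
self.project_config.config["flows"]["nested_flow_2"] = {
    "description": "A flow that runs another flow",
    "steps": {1: {"task": "pass_name"}, 2: {"flow": "nested_flow"}},
}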
Example #16
class BuildFlow(models.Model):
    build = models.ForeignKey("build.Build",
                              related_name="flows",
                              on_delete=models.CASCADE)
    rebuild = models.ForeignKey(
        "build.Rebuild",
        related_name="flows",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
    )
    status = models.CharField(max_length=16,
                              choices=BUILD_FLOW_STATUSES,
                              default="queued")
    flow = models.CharField(max_length=255, null=True, blank=True)
    log = models.TextField(null=True, blank=True)
    exception = models.TextField(null=True, blank=True)
    traceback = models.TextField(null=True, blank=True)
    error_message = models.TextField(null=True, blank=True)
    time_queue = models.DateTimeField(auto_now_add=True)
    time_start = models.DateTimeField(null=True, blank=True)
    time_end = models.DateTimeField(null=True, blank=True)
    tests_total = models.IntegerField(null=True, blank=True)
    tests_pass = models.IntegerField(null=True, blank=True)
    tests_fail = models.IntegerField(null=True, blank=True)
    asset_hash = models.CharField(max_length=64,
                                  unique=True,
                                  default=generate_hash)

    def __str__(self):
        return "{}: {} - {} - {}".format(self.build.id, self.build.repo,
                                         self.build.commit, self.flow)

    def get_absolute_url(self):
        return reverse("build_detail", kwargs={"build_id": str(
            self.build.id)}) + "#flow-{}".format(self.flow)

    def get_log_html(self):
        if self.log:
            return format_log(self.log)

    def run(self, project_config, org_config, root_dir):
        self.root_dir = root_dir
        # Record the start
        set_build_info(self, status="running", time_start=timezone.now())

        # Update github status
        if settings.GITHUB_STATUS_UPDATES_ENABLED:
            set_github_status.delay(self.build_id)

        # Set up logger
        self.logger = init_logger(self)

        try:
            # Run the flow
            self.run_flow(project_config, org_config)

            # Determine build commit status
            self.set_commit_status()

            # Load test results
            self.load_test_results()

            # Record result
            exception = None
            status = "success"

        except FAIL_EXCEPTIONS as e:
            self.logger.error(traceback.format_exc())
            exception = e
            self.load_test_results()
            status = "fail"

        except Exception as e:
            self.logger.error(traceback.format_exc())
            exception = e
            status = "error"

        kwargs = {"status": status, "time_end": timezone.now()}
        if exception:
            kwargs["error_message"] = str(exception)
            kwargs["exception"] = exception.__class__.__name__
            kwargs["traceback"] = "".join(
                traceback.format_tb(exception.__traceback__))
        set_build_info(self, **kwargs)

    def run_flow(self, project_config, org_config):
        # Add the repo root to syspath to allow for custom tasks and flows in
        # the repo
        sys.path.append(project_config.repo_root)

        flow_config = project_config.get_flow(self.flow)

        callbacks = None
        if settings.METACI_FLOW_CALLBACK_ENABLED:
            from metaci.build.flows import MetaCIFlowCallback

            callbacks = MetaCIFlowCallback(buildflow_id=self.pk)

        # Create the flow and handle initialization exceptions
        self.flow_instance = FlowCoordinator(project_config,
                                             flow_config,
                                             name=self.flow,
                                             callbacks=callbacks)

        # Run the flow
        return self.flow_instance.run(org_config)

    def set_commit_status(self):
        if self.build.plan.commit_status_template:
            template = jinja2_env.from_string(
                self.build.plan.commit_status_template)
            message = template.render(results=self.flow_instance.results)
            self.build.commit_status = message
            self.build.save()

    def record_result(self):
        self.status = "success"
        self.time_end = timezone.now()
        self.save()

    def load_test_results(self):
        has_results = False

        root_dir_robot_path = "{}/output.xml".format(self.root_dir)
        # Load robotframework's output.xml if found
        if os.path.isfile("output.xml"):
            has_results = True
            import_robot_test_results(self, "output.xml")

        elif os.path.isfile(root_dir_robot_path):
            # FIXME: Not sure why robot stopped writing into the cwd
            # (build temp dir) but this should handle it so long as
            # only one build runs at a time
            has_results = True
            try:
                import_robot_test_results(self, root_dir_robot_path)
            finally:
                os.remove(root_dir_robot_path)

        # Load JUnit
        results = []
        if self.build.plan.junit_path:
            for filename in iglob(self.build.plan.junit_path):
                results.extend(self.load_junit(filename))
            if not results:
                self.logger.warning("No results found at JUnit path {}".format(
                    self.build.plan.junit_path))
        if results:
            has_results = True
            import_test_results(self, results, "JUnit")

        # Load from test_results.json
        results = []
        try:
            results_filename = "test_results.json"
            with open(results_filename, "r") as f:
                results.extend(json.load(f))
            for result in results:
                result["SourceFile"] = results_filename
        except IOError:
            try:
                results_filename = "test_results.xml"
                results.extend(self.load_junit(results_filename))
            except IOError:
                pass

        if results:
            has_results = True
            import_test_results(self, results, "Apex")

        if has_results:
            self.tests_total = self.test_results.count()
            self.tests_pass = self.test_results.filter(outcome="Pass").count()
            self.tests_fail = self.test_results.filter(
                outcome__in=["Fail", "CompileFail"]).count()
            self.save()

    def load_junit(self, filename):
        results = []
        tree = elementtree_parse_file(filename)
        testsuite = tree.getroot()
        for testcase in testsuite.iter("testcase"):
            result = {
                "ClassName": testcase.attrib["classname"],
                "Method": testcase.attrib["name"],
                "Outcome": "Pass",
                "StackTrace": "",
                "Message": "",
                "Stats": {
                    "duration": testcase.get("time")
                },
                "SourceFile": filename,
            }
            for element in testcase.iter():
                if element.tag not in ["failure", "error"]:
                    continue
                result["Outcome"] = "Fail"
                if element.text:
                    result["StackTrace"] += element.text + "\n"
                message = element.get("type", "")
                if element.get("message"):
                    message += ": " + element.get("message", "")
                    result["Message"] += message + "\n"
            results.append(result)
        return results
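
load_junit expects JUnit-style XML: a testsuite root containing testcase elements with classname, name, and time attributes, optionally wrapping failure or error children. A minimal sketch of such a file (contents invented for illustration):

SAMPLE_JUNIT = """\
<testsuite>
  <testcase classname="MyTests" name="test_passes" time="0.12"/>
  <testcase classname="MyTests" name="test_fails" time="0.34">
    <failure type="AssertionError" message="expected 1, got 2">stack trace text</failure>
  </testcase>
</testsuite>
"""

Given this input, the first testcase would import with Outcome "Pass"; the second would get Outcome "Fail", the failure element's text appended to StackTrace, and "AssertionError: expected 1, got 2" appended to Message.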
Example #17
class BuildFlow(models.Model):
    build = models.ForeignKey('build.Build',
                              related_name='flows',
                              on_delete=models.CASCADE)
    rebuild = models.ForeignKey('build.Rebuild',
                                related_name='flows',
                                null=True,
                                blank=True,
                                on_delete=models.CASCADE)
    status = models.CharField(max_length=16,
                              choices=BUILD_FLOW_STATUSES,
                              default='queued')
    flow = models.CharField(max_length=255, null=True, blank=True)
    log = models.TextField(null=True, blank=True)
    exception = models.TextField(null=True, blank=True)
    traceback = models.TextField(null=True, blank=True)
    error_message = models.TextField(null=True, blank=True)
    time_queue = models.DateTimeField(auto_now_add=True)
    time_start = models.DateTimeField(null=True, blank=True)
    time_end = models.DateTimeField(null=True, blank=True)
    tests_total = models.IntegerField(null=True, blank=True)
    tests_pass = models.IntegerField(null=True, blank=True)
    tests_fail = models.IntegerField(null=True, blank=True)
    asset_hash = models.CharField(max_length=64,
                                  unique=True,
                                  default=generate_hash)

    def __str__(self):
        return '{}: {} - {} - {}'.format(self.build.id, self.build.repo,
                                         self.build.commit, self.flow)

    def get_absolute_url(self):
        return reverse('build_detail', kwargs={'build_id': str(
            self.build.id)}) + '#flow-{}'.format(self.flow)

    def get_log_html(self):
        if self.log:
            return format_log(self.log)

    def run(self, project_config, org_config, root_dir):
        self.root_dir = root_dir
        # Record the start
        set_build_info(self, status='running', time_start=timezone.now())

        # Update github status
        if settings.GITHUB_STATUS_UPDATES_ENABLED:
            set_github_status.delay(self.build_id)

        # Set up logger
        self.logger = init_logger(self)

        try:
            # Run the flow
            result = self.run_flow(project_config, org_config)

            # Load test results
            self.load_test_results()

            # Record result
            exception = None
            status = 'success'

        except FAIL_EXCEPTIONS as e:
            exception = e
            self.load_test_results()
            status = 'fail'

        except Exception as e:
            exception = e
            status = 'error'

        kwargs = {
            'status': status,
            'time_end': timezone.now(),
        }
        if exception:
            if status == 'error':
                self.logger.error(str(exception))
            kwargs['error_message'] = str(exception)
            kwargs['exception'] = exception.__class__.__name__
            kwargs['traceback'] = ''.join(
                traceback.format_tb(exception.__traceback__))
        set_build_info(self, **kwargs)

    def run_flow(self, project_config, org_config):
        # Add the repo root to syspath to allow for custom tasks and flows in
        # the repo
        sys.path.append(project_config.repo_root)

        flow = getattr(project_config, 'flows__{}'.format(self.flow))
        if not flow:
            raise FlowNotFoundError('Flow not found: {}'.format(self.flow))
        flow_config = FlowConfig(flow)

        callbacks = None
        if settings.METACI_FLOW_CALLBACK_ENABLED:
            from metaci.build.flows import MetaCIFlowCallback
            callbacks = MetaCIFlowCallback(buildflow_id=self.pk)

        # Create the flow and handle initialization exceptions
        self.flow_instance = FlowCoordinator(project_config,
                                             flow_config,
                                             name=self.flow,
                                             callbacks=callbacks)

        # Run the flow
        return self.flow_instance.run(org_config)

    def record_result(self):
        self.status = 'success'
        self.time_end = timezone.now()
        self.save()

    def load_test_results(self):
        has_results = False

        root_dir_robot_path = '{}/output.xml'.format(self.root_dir)
        # Load robotframework's output.xml if found
        if os.path.isfile('output.xml'):
            has_results = True
            import_robot_test_results(self, 'output.xml')

        elif os.path.isfile(root_dir_robot_path):
            # FIXME: Not sure why robot stopped writing into the cwd
            # (build temp dir) but this should handle it so long as
            # only one build runs at a time
            has_results = True
            try:
                import_robot_test_results(self, root_dir_robot_path)
            finally:
                os.remove(root_dir_robot_path)

        # Load JUnit
        results = []
        if self.build.plan.junit_path:
            for filename in iglob(self.build.plan.junit_path):
                results.extend(self.load_junit(filename))
            if not results:
                self.logger.warning('No results found at JUnit path {}'.format(
                    self.build.plan.junit_path))
        if results:
            has_results = True
            import_test_results(self, results, 'JUnit')

        # Load from test_results.json
        results = []
        try:
            results_filename = 'test_results.json'
            with open(results_filename, 'r') as f:
                results.extend(json.load(f))
            for result in results:
                result['SourceFile'] = results_filename
        except IOError as e:
            try:
                results_filename = 'test_results.xml'
                results.extend(self.load_junit(results_filename))
            except IOError as e:
                pass

        if results:
            has_results = True
            import_test_results(self, results, 'Apex')

        if has_results:
            self.tests_total = self.test_results.count()
            self.tests_pass = self.test_results.filter(outcome='Pass').count()
            self.tests_fail = self.test_results.filter(
                outcome__in=['Fail', 'CompileFail']).count()
            self.save()

    def load_junit(self, filename):
        results = []
        tree = elementtree_parse_file(filename)
        testsuite = tree.getroot()
        for testcase in testsuite.iter('testcase'):
            result = {
                'ClassName': testcase.attrib['classname'],
                'Method': testcase.attrib['name'],
                'Outcome': 'Pass',
                'StackTrace': '',
                'Message': '',
                'Stats': {
                    'duration': testcase.get('time'),
                },
                'SourceFile': filename,
            }
            for element in testcase.iter():
                if element.tag not in ['failure', 'error']:
                    continue
                result['Outcome'] = 'Fail'
                if element.text:
                    result['StackTrace'] += element.text + '\n'
                message = element.get('type', '')
                if element.get('message'):
                    message += ': ' + element.get('message', '')
                    result['Message'] += message + '\n'
            results.append(result)
        return results