Example #1
    def test_start_pipeline(self, openshift, run_name_in_input, input_data):

        new_input_data = deepcopy(input_data)

        if new_input_data:
            new_input_data['metadata']['name'] = run_name_in_input

        p_run = PipelineRun(os=openshift,
                            pipeline_run_name=PIPELINE_RUN_NAME,
                            pipeline_run_data=new_input_data)

        responses.add(
            responses.POST,
            f'https://openshift.testing/apis/tekton.dev/v1beta1/namespaces/{TEST_OCP_NAMESPACE}/pipelineruns',  # noqa E501
            json={},
        )

        if new_input_data:
            if run_name_in_input == PIPELINE_RUN_NAME:
                p_run.start_pipeline_run()
                assert len(responses.calls) == 1
                req_body = json.loads(responses.calls[0].request.body)
                assert req_body['metadata']['name'] == run_name_in_input
            else:
                msg = f"Pipeline run name provided '{PIPELINE_RUN_NAME}' is different " \
                      f"than in input data '{run_name_in_input}'"
                with pytest.raises(OsbsException, match=msg):
                    p_run.start_pipeline_run()
                assert len(responses.calls) == 0
        else:
            match_exception = "No input data provided for pipeline run to start"
            with pytest.raises(OsbsException, match=match_exception):
                p_run.start_pipeline_run()
            assert len(responses.calls) == 0
Example #2
    def create_source_container_pipeline_run(self,
                                             component=None,
                                             koji_task_id=None,
                                             target=None,
                                             **kwargs):
        """
        Take input args and create a source container pipeline run

        :return: instance of PipelineRun
        """
        error_messages = []
        # most likely can be removed, source build should get component name
        # from binary build OSBS2 TBD
        if not component:
            error_messages.append(
                "required argument 'component' can't be empty")
        if error_messages:
            raise OsbsValidationException(", ".join(error_messages))

        pipeline_run_name, pipeline_run_data = self._get_source_container_pipeline_data(
        )

        build_json_store = self.os_conf.get_build_json_store()
        user_params = SourceContainerUserParams.make_params(
            build_json_dir=build_json_store,
            build_conf=self.os_conf,
            component=component,
            koji_target=target,
            koji_task_id=koji_task_id,
            pipeline_run_name=pipeline_run_name,
            **kwargs)

        self._set_source_container_pipeline_data(pipeline_run_name,
                                                 pipeline_run_data,
                                                 user_params)

        logger.info("creating source container image pipeline run: %s",
                    pipeline_run_name)

        pipeline_run = PipelineRun(self.os, pipeline_run_name,
                                   pipeline_run_data)

        try:
            logger.info("pipeline run created: %s",
                        pipeline_run.start_pipeline_run())
        except OsbsResponseException:
            logger.error("failed to create pipeline run %s", pipeline_run_name)
            raise

        return pipeline_run
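
A minimal usage sketch (not part of the source): osbs stands in for a configured OSBS API instance, and every argument value is a placeholder.

# Hypothetical call; all values below are illustrative only.
pipeline_run = osbs.create_source_container_pipeline_run(
    component='example-component',  # required, per the validation above
    koji_task_id=12345,             # placeholder Koji task id
    target='example-target',        # placeholder Koji target
)
print(pipeline_run.get_info())      # get_info() as shown in Example #23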
Example #3
    def test_retries_disabled(self, osbs_binary):  # noqa
        pipeline_run_name = 'test-pipeline'
        prun = PipelineRun(os=osbs_binary.os,
                           pipeline_run_name=pipeline_run_name,
                           pipeline_run_data={})
        get_info_url = f"/apis/tekton.dev/v1beta1/namespaces/{TEST_OCP_NAMESPACE}/" \
                       f"pipelineruns/{pipeline_run_name}"

        (flexmock(osbs_binary.os._con).should_receive('get').with_args(
            get_info_url, headers={}, verify_ssl=True,
            retries_enabled=False).and_return(Mock_Start_Pipeline()))

        with osbs_binary.retries_disabled():
            response_list = prun.get_info()
            assert response_list is not None

        pipeline_run_name = 'test-pipeline2'
        prun = PipelineRun(os=osbs_binary.os,
                           pipeline_run_name=pipeline_run_name,
                           pipeline_run_data={})
        get_info_url = f"/apis/tekton.dev/v1beta1/namespaces/{TEST_OCP_NAMESPACE}/" \
                       f"pipelineruns/{pipeline_run_name}"

        (flexmock(osbs_binary.os._con).should_receive('get').with_args(
            get_info_url, headers={}, verify_ssl=True,
            retries_enabled=True).and_return(Mock_Start_Pipeline()))

        # Verify that retries are re-enabled after the context manager exits
        response_list = prun.get_info()
        assert response_list is not None
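
For context, a retries_disabled context manager with the behavior exercised here could be sketched as follows; this is only a guess at its shape, based on the retries_enabled flag in the mocked get calls, not the actual osbs-client implementation.

from contextlib import contextmanager

@contextmanager
def retries_disabled(self):
    # Hypothetical sketch: temporarily disable retries on the underlying
    # connection, restoring the previous setting when the block exits.
    previous = self.os.retries_enabled  # assumed attribute
    self.os.retries_enabled = False
    try:
        yield
    finally:
        self.os.retries_enabled = previous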
Example #4
    def test_remove_pipeline(self, openshift):
        p_run = PipelineRun(os=openshift, pipeline_run_name=PIPELINE_RUN_NAME)
        response_json = {'kind': 'Status',
                         'apiVersion': 'v1',
                         'metadata': {},
                         'status': 'Success',
                         'details': {'name': PIPELINE_RUN_NAME, 'group': 'tekton.dev',
                                     'kind': 'pipelineruns',
                                     'uid': '16a9ad64-89f1-4612-baec-ded3e8a6df26'}
                         }
        responses.add(
            responses.DELETE,
            f'https://openshift.testing/apis/tekton.dev/v1beta1/namespaces/'
            f'{TEST_OCP_NAMESPACE}/pipelineruns/{PIPELINE_RUN_NAME}',
            json=response_json
        )
        result_response = p_run.remove_pipeline_run()
        assert len(responses.calls) == 1
        assert result_response == response_json
Example #5
    def test_start_pipeline(self, openshift,
                            expected_request_body_pipeline_run,
                            run_name_in_input, input_data, labels):

        expected_request_body = deepcopy(expected_request_body_pipeline_run)
        new_input_data = deepcopy(input_data)

        if new_input_data:
            new_input_data['metadata']['name'] = run_name_in_input
            if labels:
                new_input_data['metadata']['labels'] = labels
                expected_request_body['metadata']['labels'] = labels

        p_run = PipelineRun(os=openshift,
                            pipeline_run_name=PIPELINE_RUN_NAME,
                            pipeline_run_data=new_input_data)

        responses.add(
            responses.POST,
            f'https://openshift.testing/apis/tekton.dev/v1beta1/namespaces/{TEST_OCP_NAMESPACE}/pipelineruns',  # noqa E501
            match=[responses.json_params_matcher(expected_request_body)],
            json={},
        )

        if new_input_data:
            if run_name_in_input == PIPELINE_RUN_NAME:
                p_run.start_pipeline_run()
                assert len(responses.calls) == 1
            else:
                msg = f"Pipeline run name provided '{PIPELINE_RUN_NAME}' is different " \
                      f"than in input data '{run_name_in_input}'"
                with pytest.raises(OsbsException, match=msg):
                    p_run.start_pipeline_run()
                assert len(responses.calls) == 0
        else:
            match_exception = "No input data provided for pipeline run to start"
            with pytest.raises(OsbsException, match=match_exception):
                p_run.start_pipeline_run()
            assert len(responses.calls) == 0
Example #6
    def get_build_results(self, build_name) -> Dict[str, Any]:
        """Fetch the pipelineResults for this build.

        Converts the results array to a dict of {name: <JSON-decoded value>} and filters out
        results with null values.
        """
        pipeline_run = PipelineRun(self.os, build_name)
        pipeline_results = pipeline_run.pipeline_results

        def load_result(result: Dict[str, str]) -> Tuple[str, Any]:
            name = result['name']
            raw_value = result['value']
            try:
                value = json.loads(raw_value)
            except json.JSONDecodeError:
                raise OsbsValidationException(
                    f'{name} value is not valid JSON: {raw_value!r}')
            return name, value

        return {
            name: value
            for name, value in map(load_result, pipeline_results)
            if value is not None
        }
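
To make the conversion concrete, the same transformation can be run over a made-up results array (names and values are illustrative, not real pipelineResults):

import json

pipeline_results = [
    {'name': 'repositories', 'value': '["registry.example/repo:latest"]'},
    {'name': 'koji-build-id', 'value': '12345'},
    {'name': 'skipped', 'value': 'null'},  # null results are filtered out
]

decoded = ((r['name'], json.loads(r['value'])) for r in pipeline_results)
print({name: value for name, value in decoded if value is not None})
# {'repositories': ['registry.example/repo:latest'], 'koji-build-id': 12345}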
Example #7
    def cancel_build(self, build_name):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.cancel_pipeline_run()
Example #8
    def update_annotations_on_build(self, build_name, annotations):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.update_annotations(annotations)
Example #9
    def get_build_annotations(self, build_name):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.annotations
Example #10
    def get_build_labels(self, build_name):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.labels
Example #11
    def wait_for_build_to_finish(self, build_name):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.wait_for_finish()
Example #12
    def build_was_cancelled(self, build_name):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.was_cancelled()
Example #13
    def test_get_build_name(self, osbs_binary):
        pipeline_run_name = 'test_pipeline'
        pipeline_run = PipelineRun(os={}, pipeline_run_name=pipeline_run_name)

        assert pipeline_run_name == osbs_binary.get_build_name(pipeline_run)
Example #14
    def build_not_finished(self, build_name):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.has_not_finished()
Example #15
    def get_build_error_message(self, build_name):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.get_error_message()
Example #16
def test_print_output(tmpdir, capsys):
    """Test print_output function

    Tests:
      * that STDOUT is correct
      * that the exported JSON metadata is correct
    """
    flexmock(time).should_receive('sleep').and_return(None)
    ppln_run = flexmock(PipelineRun(flexmock(), 'test_ppln'))
    (ppln_run.should_receive('get_info').and_return({
        'metadata': {
            'annotations': {
                # annotations are JSON
                'repositories':
                """{
                        "primary": ["test1"],
                        "floating": ["test2a", "test2b"],
                        "unique": ["test3"]
                    }""",
            }
        }
    }))
    ppln_run.should_receive('has_succeeded').and_return(True)
    ppln_run.should_receive('status_reason').and_return('complete')
    ppln_run.should_receive('has_not_finished').and_return(False)
    (ppln_run.should_receive('get_logs').and_return([
        '2021-11-25 23:17:49,886 platform:- - atomic_reactor.inner - INFO - YOLO 1',
        '2021-11-25 23:17:50,000 platform:- - smth - USER_WARNING - {"message": "user warning"}',
        '2021-11-25 23:17:59,123 platform:- - atomic_reactor.inner - INFO - YOLO 2',
    ]))

    export_metadata_file = os.path.join(tmpdir, 'metadata.json')
    print_output(ppln_run, export_metadata_file=export_metadata_file)

    captured = capsys.readouterr()
    expected_stdout = dedent("""\
        Pipeline run created (test_ppln), watching logs (feel free to interrupt)
        '2021-11-25 23:17:49,886 platform:- - atomic_reactor.inner - INFO - YOLO 1'
        '2021-11-25 23:17:59,123 platform:- - atomic_reactor.inner - INFO - YOLO 2'

        pipeline run test_ppln is complete
        primary repositories:
        \ttest1
        floating repositories:
        \ttest2a
        \ttest2b
        unique repositories:
        \ttest3

        user warnings:
        \tuser warning
        """)
    assert captured.out == expected_stdout

    expected_metadata = {
        'pipeline_run': {
            'name': 'test_ppln',
            'status': 'complete'
        },
        'results': {
            'error_msg': '',
            'repositories': {
                'floating': ['test2a', 'test2b'],
                'primary': ['test1'],
                'unique': ['test3']
            },
            'user_warnings': ['user warning']
        },
    }

    with open(export_metadata_file, 'r') as f:
        metadata = json.load(f)
    assert metadata == expected_metadata
Example #17
    def update_labels_on_build(self, build_name, labels):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.update_labels(labels)
Example #18
    def create_binary_container_pipeline_run(
            self,
            git_uri=_REQUIRED_PARAM,
            git_ref=_REQUIRED_PARAM,
            git_branch=_REQUIRED_PARAM,
            component=None,
            flatpak=None,
            git_commit_depth=None,
            isolated=None,
            koji_task_id=None,
            target=None,
            operator_csv_modifications_url=None,
            **kwargs):

        required_params = {
            "git_uri": git_uri,
            "git_ref": git_ref,
            "git_branch": git_branch
        }
        missing_params = []
        for param_name, param_arg in required_params.items():
            if param_arg is _REQUIRED_PARAM or not param_arg:
                missing_params.append(param_name)
        if missing_params:
            raise OsbsException('required parameter {} missing'.format(
                ", ".join(missing_params)))

        if operator_csv_modifications_url and not isolated:
            raise OsbsException(
                'Only isolated build can update operator CSV metadata')

        repo_info = utils.get_repo_info(git_uri,
                                        git_ref,
                                        git_branch=git_branch,
                                        depth=git_commit_depth)

        self._checks_for_flatpak(flatpak, repo_info)

        req_labels = self._check_labels(repo_info)

        user_params = self.get_user_params(
            base_image=repo_info.base_image,
            component=component,
            flatpak=flatpak,
            isolated=isolated,
            koji_target=target,
            koji_task_id=koji_task_id,
            req_labels=req_labels,
            repo_info=repo_info,
            operator_csv_modifications_url=operator_csv_modifications_url,
            **kwargs)

        self._checks_for_isolated(user_params)

        pipeline_run_name, pipeline_run_data = self._get_binary_container_pipeline_data(
            user_params)

        user_params.pipeline_run_name = pipeline_run_name

        self._set_binary_container_pipeline_data(pipeline_run_name,
                                                 pipeline_run_data,
                                                 user_params)

        logger.info("creating binary container image pipeline run: %s",
                    pipeline_run_name)

        pipeline_run = PipelineRun(self.os, pipeline_run_name,
                                   pipeline_run_data)

        try:
            logger.info("pipeline run created: %s",
                        pipeline_run.start_pipeline_run())
        except OsbsResponseException:
            logger.error("failed to create pipeline run %s", pipeline_run_name)
            raise

        return pipeline_run
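
A hedged usage sketch (not part of the source): osbs stands in for a configured OSBS API instance, and the git coordinates are placeholders.

# Hypothetical call; every argument value below is illustrative only.
pipeline_run = osbs.create_binary_container_pipeline_run(
    git_uri='https://git.example.com/containers/example.git',
    git_ref='0123456789abcdef0123456789abcdef01234567',  # placeholder commit
    git_branch='main',
    koji_task_id=67890,       # placeholder
    target='example-target',  # placeholder
)
pipeline_run.wait_for_finish()  # wait_for_finish() as shown in Example #11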
Example #19
    def build_has_any_cancelled_tasks(self, build_name):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.any_task_was_cancelled()
Example #20
def pipeline_run(openshift):
    return PipelineRun(os=openshift,
                       pipeline_run_name=PIPELINE_RUN_NAME,
                       pipeline_run_data=PIPELINE_RUN_DATA)
Example #21
def pipeline_run(osbs_for_capture):
    return PipelineRun(os=osbs_for_capture.os,
                       pipeline_run_name=PIPELINE_RUN_NAME,
                       pipeline_run_data=PIPELINE_RUN_DATA)
Example #22
    def get_build_logs(self, build_name, follow=False, wait=False):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.get_logs(follow=follow, wait=wait)
Example #23
    def get_build(self, build_name):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.get_info()
Example #24
    def get_build_reason(self, build_name):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.status_reason
Example #25
    def build_has_succeeded(self, build_name):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.has_succeeded()
Example #26
def test_print_output_failure(tmpdir, capsys, get_logs_failed,
                              build_not_finished):
    """Test print_output function when build failed

    Tests:
      * if STDOUT is correct when build failed
      * if JSON exported metadata are correct
    """
    flexmock(time).should_receive('sleep').and_return(None)
    ppln_run = flexmock(PipelineRun(flexmock(), 'test_ppln'))
    ppln_run.should_receive('has_succeeded').and_return(False)
    ppln_run.should_receive('status_reason').and_return('failed')
    ppln_run.should_receive('get_error_message').and_return('Build failed ...')

    log_entries = [
        '2021-11-25 23:17:49,886 platform:- - atomic_reactor.inner - INFO - YOLO 1',
        '2021-11-25 23:17:50,000 platform:- - smth - USER_WARNING - {"message": "user warning"}',
        '2021-11-25 23:17:59,123 platform:- - atomic_reactor.inner - ERROR - YOLO 2',
    ]

    def get_logs():
        for log in log_entries:
            yield log
        if get_logs_failed:
            raise Exception("error reading logs")

    ppln_run.should_receive('get_logs').and_return(get_logs())
    ppln_run.should_receive('has_not_finished').and_return(build_not_finished)

    if get_logs_failed and build_not_finished:
        ppln_run.should_receive('cancel_pipeline_run').once()
    else:
        ppln_run.should_receive('cancel_pipeline_run').never()

    export_metadata_file = os.path.join(tmpdir, 'metadata.json')
    print_output(ppln_run, export_metadata_file=export_metadata_file)

    captured = capsys.readouterr()
    expected_stdout = dedent("""\
        Pipeline run created (test_ppln), watching logs (feel free to interrupt)
        '2021-11-25 23:17:49,886 platform:- - atomic_reactor.inner - INFO - YOLO 1'
        '2021-11-25 23:17:59,123 platform:- - atomic_reactor.inner - ERROR - YOLO 2'

        pipeline run test_ppln is failed

        user warnings:
        \tuser warning

        Build failed ...
        """)
    assert captured.out == expected_stdout

    expected_metadata = {
        'pipeline_run': {
            'name': 'test_ppln',
            'status': 'failed'
        },
        'results': {
            'error_msg': 'Build failed ...',
            'repositories': {},
            'user_warnings': ['user warning']
        },
    }

    with open(export_metadata_file, 'r') as f:
        metadata = json.load(f)
    assert metadata == expected_metadata
Example #27
    def remove_build(self, build_name):
        pipeline_run = PipelineRun(self.os, build_name)
        return pipeline_run.remove_pipeline_run()