Example #1
def _do_install_dependency(logger, project, dependency, upgrade,
                           force_reinstall, target_dir, log_file):
    batch = isinstance(dependency, collections.Iterable)

    pip_command_line = list()
    pip_command_line.extend(PIP_EXEC_STANZA)
    pip_command_line.append("install")
    pip_command_line.extend(
        build_pip_install_options(
            project.get_property("install_dependencies_index_url"),
            project.get_property("install_dependencies_extra_index_url"),
            upgrade,
            project.get_property("install_dependencies_insecure_installation"),
            force_reinstall, target_dir, project.get_property("verbose"),
            project.get_property("install_dependencies_trusted_host")))
    pip_command_line.extend(as_pip_install_target(dependency))
    logger.debug("Invoking pip: %s", pip_command_line)
    exit_code = execute_command(pip_command_line,
                                log_file,
                                env=os.environ,
                                shell=False)

    if exit_code != 0:
        if batch:
            dependency_name = " batch dependencies."
        else:
            dependency_name = " dependency '%s'." % dependency.name

        if project.get_property("verbose"):
            print_file_content(log_file)
            raise BuildFailedException("Unable to install%s" % dependency_name)
        else:
            raise BuildFailedException(
                "Unable to install%s See %s for details.", dependency_name,
                log_file)
Example #2
def run_unit_tests(project, logger):
    sys.path.append(project.expand_path("$dir_source_main_python"))
    test_dir = project.expand_path("$dir_source_unittest_python")
    sys.path.append(test_dir)

    pyfix_unittest_file_suffix = project.get_property("pyfix_unittest_file_suffix")
    if pyfix_unittest_file_suffix is not None:
        logger.warn("pyfix_unittest_file_suffix is deprecated, please use pyfix_unittest_module_glob")
        module_glob = "*{0}".format(pyfix_unittest_file_suffix)
        if module_glob.endswith(".py"):
            module_glob = module_glob[:-3]
        project.set_property("pyfix_unittest_module_glob", module_glob)
    else:
        module_glob = project.get_property("pyfix_unittest_module_glob")

    logger.info("Executing pyfix unittest Python modules in %s", test_dir)
    logger.debug("Including files matching '%s.py'", module_glob)

    try:
        result = execute_tests_matching(logger, test_dir, module_glob)
        if result.number_of_tests_executed == 0:
            logger.warn("No pyfix executed")
        else:
            logger.info("Executed %d pyfix unittests", result.number_of_tests_executed)

        write_report(project, result)

        if not result.success:
            raise BuildFailedException("%d pyfix unittests failed", result.number_of_failures)

        logger.info("All pyfix unittests passed")
    except ImportError as e:
        logger.error("Error importing pyfix unittest: %s", e)
        raise BuildFailedException("Unable to execute unit tests.")
Example #3
def pdoc_compile_docs(project, logger, reactor):
    logger.info("Generating PDoc documentation")

    if not project.get_property("pdoc_module_name"):
        raise BuildFailedException("'pdoc_module_name' must be specified")

    pdoc_command_args = project.get_property("pdoc_command_args", [])
    pdoc_output_dir = project.expand_path("$pdoc_output_dir")

    command_and_arguments = ["pdoc"] + pdoc_command_args
    if "--html" in pdoc_command_args:
        command_and_arguments += ["--html-dir", pdoc_output_dir]
    command_and_arguments += [project.get_property("pdoc_module_name")]

    source_directory = project.expand_path("$pdoc_source")
    environment = {
        "PYTHONPATH": source_directory,
        "PATH": reactor.pybuilder_venv.environ["PATH"]
    }

    report_file = project.expand_path("$dir_reports", "pdoc.err")
    logger.debug("Executing PDoc as: %s", command_and_arguments)
    return_code = reactor.pybuilder_venv.execute_command(
        command_and_arguments,
        outfile_name=project.expand_path("$dir_reports", "pdoc"),
        error_file_name=report_file,
        env=environment,
        cwd=pdoc_output_dir)

    if return_code:
        error_str = "PDoc failed! See %s for full details:\n%s" % (
            report_file, tail_log(report_file))
        logger.error(error_str)
        raise BuildFailedException(error_str)
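Example #4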
def install_dependency(logger, project, dependency):
    url = getattr(dependency, "url", None)
    logger.info("Installing dependency '%s'%s", dependency.name,
                " from %s" % url if url else "")
    log_file = project.expand_path("$dir_install_logs", dependency.name)
    log_file = re.sub(r'<|>|=', '_', log_file)

    if sys.platform.startswith("win"):
        # we can't use quotes on windows
        pip_dependency = as_pip_argument(dependency)
    else:
        # on linux we need quotes because version pinning (>=) would be an IO redirect
        pip_dependency = "'{0}'".format(as_pip_argument(dependency))
    pip_command_line = "pip install {0}{1}".format(
        build_pip_install_options(project, dependency.name), pip_dependency)
    exit_code = execute_command(pip_command_line, log_file, shell=True)
    if exit_code != 0:
        if project.get_property("verbose"):
            print_file_content(log_file)
            raise BuildFailedException("Unable to install dependency '%s'.",
                                       dependency.name)
        else:
            raise BuildFailedException(
                "Unable to install dependency '%s'. See %s for details.",
                getattr(dependency, "name", dependency), log_file)
Example #5
def _do_install_dependency(logger, project, dependency, upgrade, eager_upgrade,
                           force_reinstall, constraint_file, target_dir, log_file):
    batch = isinstance(dependency, collections.Iterable)

    exit_code = pip_utils.pip_install(
        install_targets=dependency,
        index_url=project.get_property("install_dependencies_index_url"),
        extra_index_url=project.get_property("install_dependencies_extra_index_url"),
        upgrade=upgrade,
        insecure_installs=project.get_property("install_dependencies_insecure_installation"),
        force_reinstall=force_reinstall,
        target_dir=target_dir,
        verbose=project.get_property("verbose"),
        trusted_host=project.get_property("install_dependencies_trusted_host"),
        constraint_file=constraint_file,
        eager_upgrade=eager_upgrade,
        logger=logger,
        outfile_name=log_file)

    if exit_code != 0:
        if batch:
            dependency_name = " batch dependencies."
        else:
            dependency_name = " dependency '%s'." % dependency.name

        if project.get_property("verbose"):
            print_file_content(log_file)
            raise BuildFailedException("Unable to install%s" % dependency_name)
        else:
            raise BuildFailedException("Unable to install%s See %s for details.",
                                       dependency_name,
                                       log_file)
Example #6
def before_prepare(project):
    coverage_report = project.get_property('coverage_report', None)
    pytest_args = project.get_property('pytest', None)
    
    # Ensure coverage_report and pytest are mutually exclusive
    if coverage_report and pytest_args:
        raise BuildFailedException(
            'Both coverage_report and pytest property are given. They are mutually exclusive. Remove one.'
        )
        
    # coverage_report property
    if coverage_report:
        if coverage_report == 'travis':
            args = (
                '--cov {source_main} --cov-report term-missing {source_unittest}'
                .format(
                    source_main=project.expand_path('$dir_source_main_python'),
                    source_unittest=project.expand_path('$dir_source_unittest_python')
                )
            )
        else:
            raise BuildFailedException(
                'Invalid value for coverage_report: {}. '
                'Expected "travis".'
                .format(coverage_report)
            )
    else:
        # pytest property 
        if project.has_property('pytest'):
            args = pytest_args
        else:
            args = project.expand_path('$dir_source_unittest_python')
    
    project.set_property('pybuilder_pytest_args', args)
Example #7
def run_tests(project, logger, reactor, execution_prefix, execution_name):
    logger.info("Running %s", execution_name)
    test_dir = _register_test_and_source_path_and_return_test_dir(
        project, sys.path, execution_prefix)

    file_suffix = project.get_property("%s_file_suffix" % execution_prefix)
    if file_suffix is not None:
        logger.warn(
            "%(prefix)s_file_suffix is deprecated, please use %(prefix)s_module_glob"
            % {"prefix": execution_prefix})
        module_glob = "*{0}".format(file_suffix)
        if module_glob.endswith(".py"):
            module_glob = module_glob[:-3]
        project.set_property("%s_module_glob" % execution_prefix, module_glob)
    else:
        module_glob = project.get_property("%s_module_glob" % execution_prefix)

    logger.info("Executing %s from Python modules in %s", execution_name,
                test_dir)
    logger.debug("Including files matching '%s'", module_glob)

    try:
        test_method_prefix = project.get_property("%s_test_method_prefix" %
                                                  execution_prefix)
        runner_generator = project.get_property("%s_runner" % execution_prefix)
        result, console_out = execute_tests_matching(
            reactor.tools, runner_generator, logger, test_dir, module_glob,
            test_method_prefix, project.get_property("remote_debug"))

        if result.testsRun == 0:
            logger.warn("No %s executed.", execution_name)
        else:
            logger.info("Executed %d %s", result.testsRun, execution_name)

        write_report(execution_prefix, project, logger, result, console_out)

        break_build = project.get_property("%s_breaks_build" %
                                           execution_prefix)
        if not result.wasSuccessful():
            msg = "There were %d error(s) and %d failure(s) in %s" % (len(
                result.errors), len(result.failures), execution_name)
            if break_build:
                raise BuildFailedException(msg)
            else:
                logger.warn(msg)
        logger.info("All %s passed.", execution_name)
    except ImportError as e:
        import traceback

        _, _, import_error_traceback = sys.exc_info()
        file_with_error, error_line, _, statement_causing_error = traceback.extract_tb(
            import_error_traceback)[-1]
        logger.error(
            "Import error in test file {0}, due to statement '{1}' on line {2}"
            .format(file_with_error, statement_causing_error, error_line))
        logger.error("Error importing %s: %s", execution_prefix, e)
        raise BuildFailedException("Unable to execute %s." % execution_name)
Example #8
def twine_upload(project, logger):
    import plumbum as pb
    
    repo = _get_repo()
    
    # If current commit has no tag, fail
    commit = repo.commit()
    for tag in repo.tags:
        if tag.commit == commit:
            break
    else:
        raise BuildFailedException(
            'Current commit has no tag. '
            'To publish, it should have a tag named "{version}".'
        )
    
    # If tag is not a version tag or is different from project.version, fail
    try:
        if project.version != _version_from_tag(tag):
            raise BuildFailedException(
                'Version tag ({}) of current commit does not equal project.version ({}).'
                .format(tag.name, project.version)
            )
    except ValueError:
        raise BuildFailedException(
            'Current commit has tag ({}). '
            'To release, it should have a tag named "{version}".'
            .format(tag.name)
        )
        
    # If version < newest ancestor version, warn
    ancestors = list(repo.commit().iter_parents())
    versions = []
    for tag in repo.tags:
        if tag.commit in ancestors:
            try:
                versions.append(_version_from_tag(tag))
            except ValueError:
                pass
    newest_ancestor_version = max(versions, default=Version('0.0.0'))
             
    if project.version < newest_ancestor_version:
        logger.warn(
            'project.version ({}) is less than that of an ancestor commit ({})'
            .format(project.version, newest_ancestor_version)
        )
        
    # Upload
    logger.info('Uploading to PyPI')
    distributions = tuple(glob(project.expand_path('$dir_dist/dist/*')))
    repository = project.get_property('distutils_upload_repository')
    pb.local['twine'][
        ('upload', '--repository', repository, '--repository-url', repository)
        + distributions] & pb.FG
Example #9
def set_description(project, logger, reactor):
    if project.get_property("distutils_readme_description"):
        description = None
        if project.get_property("distutils_readme_file_convert"):
            try:
                reactor.pybuilder_venv.verify_can_execute(
                    ["pandoc", "--version"], "pandoc", "distutils")
                description = doc_convert(project, logger)
            except (MissingPrerequisiteException, ImportError):
                logger.warn(
                    "Was unable to find pandoc or pypandoc and did not convert the documentation"
                )
        else:
            with io.open(project.expand_path("$distutils_readme_file"),
                         "rt",
                         encoding=project.get_property(
                             "distutils_readme_file_encoding")) as f:
                description = f.read()

        if description:
            if (not hasattr(project, "summary") or project.summary is None
                    or project.get_property("distutils_summary_overwrite")):
                setattr(project, "summary",
                        description.splitlines()[0].strip())

            if (not hasattr(project, "description")
                    or project.description is None or
                    project.get_property("distutils_description_overwrite")):
                setattr(project, "description", description)

    warn = False
    if len(project.summary) >= 512:
        logger.warn(
            "Project summary SHOULD be shorter than 512 characters per PEP-426"
        )
        warn = True

    if "\n" in project.summary or "\r" in project.summary:
        logger.warn(
            "Project summary SHOULD NOT contain new-line characters per PEP-426"
        )
        warn = True

    if len(project.summary) >= 2048:
        raise BuildFailedException(
            "Project summary MUST NOT be shorter than 2048 characters per PEP-426"
        )

    if warn and project.get_property("distutils_fail_on_warnings"):
        raise BuildFailedException(
            "Distutil plugin warnings caused a build failure. Please see warnings above."
        )
Example #10
def run_unit_tests(project, logger):
    test_dir = _register_test_and_source_path_and_return_test_dir(
        project, sys.path)

    unittest_file_suffix = project.get_property("unittest_file_suffix")
    if unittest_file_suffix is not None:
        logger.warn(
            "unittest_file_suffix is deprecated, please use unittest_module_glob"
        )
        module_glob = "*{0}".format(unittest_file_suffix)
        if module_glob.endswith(".py"):
            WITHOUT_DOT_PY = slice(0, -3)
            module_glob = module_glob[WITHOUT_DOT_PY]
        project.set_property("unittest_module_glob", module_glob)
    else:
        module_glob = project.get_property("unittest_module_glob")

    logger.info("Executing unittest Python modules in %s", test_dir)
    logger.debug("Including files matching '%s'", module_glob)

    try:
        test_method_prefix = project.get_property(
            "unittest_test_method_prefix")
        result, console_out = execute_tests_matching(logger, test_dir,
                                                     module_glob,
                                                     test_method_prefix)

        if result.testsRun == 0:
            logger.warn("No unittests executed.")
        else:
            logger.info("Executed %d unittests", result.testsRun)

        write_report("unittest", project, logger, result, console_out)

        if not result.wasSuccessful():
            raise BuildFailedException(
                "There were %d test error(s) and %d failure(s)" %
                (len(result.errors), len(result.failures)))
        logger.info("All unittests passed.")
    except ImportError as e:
        import traceback
        _, _, import_error_traceback = sys.exc_info()
        file_with_error, error_line, _, statement_causing_error = traceback.extract_tb(
            import_error_traceback)[-1]
        logger.error(
            "Import error in unittest file {0}, due to statement '{1}' on line {2}"
            .format(file_with_error, statement_causing_error, error_line))
        logger.error("Error importing unittests: %s", e)
        raise BuildFailedException("Unable to execute unit tests.")
Example #11
def run_cram_tests(project, logger):
    logger.info("Running Cram command line tests")

    command_and_arguments = _cram_command_for(project)
    command_and_arguments.extend(_find_files(project))
    report_file = _report_file(project)

    env = os.environ.copy()
    source_dir = project.expand_path("$dir_source_main_python")
    _prepend_path(env, "PYTHONPATH", source_dir)
    script_dir = project.expand_path('$dir_source_main_scripts')
    _prepend_path(env, "PATH", script_dir)

    return_code = execute_command(command_and_arguments,
                                  report_file,
                                  env=env,
                                  error_file_name=report_file)

    report = read_file(report_file)
    result = report[-1][2:].strip()

    if return_code != 0:
        logger.error("Cram tests failed!")
        if project.get_property("verbose"):
            for line in report:
                logger.error(line.rstrip())
        else:
            logger.error(result)

        logger.error("See: '{0}' for details".format(report_file))
        raise BuildFailedException("Cram tests failed!")

    logger.info("Cram tests were fine")
    logger.info(result)
Example #12
def analyze(project, logger):
    root_directory = os.getcwd()
    source_modules = discover_files_matching(project.get_property("dir_source_main_python"),
                                             "*.py")
    errors = []

    logger.info("Executing jedi linter on project sources.")

    try:
        for path in source_modules:
            for error in jedi.Script(path=path)._analysis():
                errors.append(error)
    except Exception as e:
        logger.error("Jedi crashed: {0}".format(e))
        import traceback
        logger.debug(traceback.format_exc())

    number_of_errors = len(errors)
    output = logger.info if number_of_errors == 0 else logger.warn
    output("Jedi linter found {0} errors.".format(number_of_errors))
    if project.get_property("jedi_linter_verbose") or project.get_property("verbose"):
        for error in errors:
            logger.warn(error)

    if project.get_property("jedi_linter_break_build") and number_of_errors > 0:
        raise BuildFailedException("Jedi linter found errors")

    os.chdir(root_directory)  # jedi chdirs into directories, so undo it
Example #13
def positive_test(project, logger):
    print("Running a postive test")
    command = ExternalCommandBuilder('hydra', project)
    command.use_argument('positive-test')
    result = command.run_on_production_source_files(logger)
    if result.exit_code:
        raise BuildFailedException("Exit code is set")
Example #14
def _add_extras_require(project, logger):
    indent_size = 4
    encoding = 'utf-8'

    setup_script = Path(project.expand_path('$dir_dist', 'setup.py'))
    logger.info("Adding 'extras_require' to setup.py")
    setup = setup_script.read_text(encoding=encoding).rstrip()
    if setup[-1] != ')':
        raise BuildFailedException('This setup.py seems to be wrong?')

    # Get the requirements-dev.txt file line by line, ready for insertion.
    requirements_dev = '\n'.join(
        ' ' * 4 * indent_size + "'" + x.strip() + "',"
        for x in (Path(__file__).parent /
                  'requirements-build.txt').read_text().split('\n') if x)

    # TODO: find a nicer way to embed this!
    new_setup = (setup[:-1].rstrip() + f'''
        extras_require={{
            'hdfs': ['hdfs>=2.0.0'],
            'pandas': ['pandas>=0.23.2'],
            'performance': ['matplotlib>=1.5.3'],
            'streaming': ['tornado>=4.3'],
            'test': [
{requirements_dev}
            ]
        }},
    )
''')

    setup_script.write_text(new_setup, encoding=encoding)
Example #15
def analyze(project, logger):
    """ Applies the flake8 script to the sources of the given project. """
    logger.info("Executing flake8 on project sources.")

    verbose = project.get_property("verbose")
    project.set_property_if_unset("flake8_verbose_output", verbose)

    command = ExternalCommandBuilder('flake8', project)
    command.use_argument('--ignore={0}').formatted_with_truthy_property('flake8_ignore')
    command.use_argument('--max-line-length={0}').formatted_with_property('flake8_max_line_length')
    command.use_argument('--exclude={0}').formatted_with_truthy_property('flake8_exclude_patterns')

    include_test_sources = project.get_property("flake8_include_test_sources")
    include_scripts = project.get_property("flake8_include_scripts")

    result = command.run_on_production_source_files(logger,
                                                    include_test_sources=include_test_sources,
                                                    include_scripts=include_scripts)

    count_of_warnings = len(result.report_lines)
    count_of_errors = len(result.error_report_lines)

    if count_of_errors > 0:
        logger.error('Errors while running flake8, see {0}'.format(result.error_report_file))

    if count_of_warnings > 0:
        if project.get_property("flake8_break_build"):
            error_message = "flake8 found {0} warning(s)".format(count_of_warnings)
            raise BuildFailedException(error_message)
        else:
            logger.warn("flake8 found %d warning(s).", count_of_warnings)
Example #16
def pdoc_compile_docs(project, logger, reactor):
    logger.info("Generating pdoc documentation")

    if not project.get_property("pdoc_module_name"):
        raise BuildFailedException("'pdoc_module_name' must be specified")

    pdoc_command_args = project.get_property("pdoc_command_args", [])
    pdoc_output_dir = project.expand_path("$pdoc_output_dir")

    command_and_arguments = ["pdoc"] + pdoc_command_args
    if "--html" in pdoc_command_args:
        command_and_arguments += ["--html-dir", pdoc_output_dir]
    command_and_arguments += [project.get_property("pdoc_module_name")]

    source_directory = project.expand_path("$pdoc_source")
    environment = {
        "PYTHONPATH": source_directory,
        "PATH": reactor.pybuilder_venv.environ["PATH"]
    }

    logger.debug("Executing pdoc as: %s", command_and_arguments)
    reactor.pybuilder_venv.execute_command(command_and_arguments,
                                           outfile_name=project.expand_path(
                                               "$dir_reports", "pdoc"),
                                           env=environment,
                                           cwd=pdoc_output_dir)
Example #17
def build_entry_points_string(project):
    console_scripts = project.get_property('distutils_console_scripts')
    entry_points = project.get_property('distutils_entry_points')
    if console_scripts is not None and entry_points is not None:
        raise BuildFailedException("'distutils_console_scripts' cannot be combined with 'distutils_entry_points'")

    if entry_points is None:
        entry_points = dict()

    if console_scripts is not None:
        entry_points['console_scripts'] = console_scripts

    if len(entry_points) == 0:
        return '{}'

    indent = 8
    result = "{\n"

    for k in sorted(entry_points.keys()):
        result += " " * (indent + 4)
        result += "'%s': %s" % (k, build_string_from_array(as_list(entry_points[k]), indent + 8)) + ",\n"

    result = result[:-2] + "\n"
    result += (" " * indent) + "}"

    return result
Example #18
def run_command(phase, project, logger):
    command_line = project.get_property('%s_command' % phase)

    if not command_line:
        return

    process_handle = Popen(command_line, stdout=PIPE, stderr=PIPE, shell=True)
    stdout, stderr = process_handle.communicate()
    stdout = stdout.decode(sys.stdout.encoding or 'utf-8')
    stderr = stderr.decode(sys.stderr.encoding or 'utf-8')
    process_return_code = process_handle.returncode

    _write_command_report(project, stdout, stderr, command_line, phase,
                          process_return_code)

    if project.get_property('%s_propagate_stdout' % phase) and stdout:
        _log_quoted_output(logger, '', stdout, phase)

    if project.get_property('%s_propagate_stderr' % phase) and stderr:
        _log_quoted_output(logger, 'error', stderr, phase)

    if process_return_code != 0:
        raise BuildFailedException(
            'exec plugin command {0} for {1} exited with nonzero code {2}'.
            format(command_line, phase, process_return_code))
Example #19
def execute_distutils(project, logger, distutils_commands, clean=False):
    reports_dir = project.expand_path("$dir_reports", "distutils")
    if not os.path.exists(reports_dir):
        os.mkdir(reports_dir)

    setup_script = project.expand_path("$dir_dist", "setup.py")

    for command in distutils_commands:
        logger.debug("Executing distutils command %s", command)
        if is_string(command):
            output_file_path = os.path.join(reports_dir,
                                            command.replace("/", ""))
        else:
            output_file_path = os.path.join(
                reports_dir, "__".join(command).replace("/", ""))
        with open(output_file_path, "w") as output_file:
            commands = [sys.executable, setup_script]
            if project.get_property("verbose"):
                commands.append("-v")
            if clean:
                commands.extend(["clean", "--all"])
            if is_string(command):
                commands.extend(command.split())
            else:
                commands.extend(command)
            return_code = _run_process_and_wait(
                commands, project.expand_path("$dir_dist"), output_file)
            if return_code != 0:
                raise BuildFailedException(
                    "Error while executing setup command %s, see %s for details"
                    % (command, output_file_path))
Example #20
def execute_distutils(project, logger, distutils_commands, clean=False):
    reports_dir = _prepare_reports_dir(project)
    setup_script = project.expand_path("$dir_dist", "setup.py")

    for command in distutils_commands:
        logger.debug("Executing distutils command %s", command)
        if is_string(command):
            out_file = os.path.join(reports_dir, safe_log_file_name(command))
        else:
            out_file = os.path.join(reports_dir, safe_log_file_name("__".join(command)))
        with open(out_file, "w") as out_f:
            commands = [sys.executable, setup_script]
            if project.get_property("verbose"):
                commands.append("-v")
            if clean:
                commands.extend(["clean", "--all"])
            if is_string(command):
                commands.extend(command.split())
            else:
                commands.extend(command)
            return_code = _run_process_and_wait(commands, project.expand_path("$dir_dist"), out_f)
            if return_code != 0:
                raise BuildFailedException(
                    "Error while executing setup command %s. See %s for full details:\n%s",
                    command, out_file, tail_log(out_file))
Example #21
def analyze(project, logger):
    """ Applies the frosted script to the sources of the given project. """
    logger.info("Executing frosted on project sources.")

    verbose = project.get_property("verbose")
    project.set_property_if_unset("frosted_verbose_output", verbose)

    command = ExternalCommandBuilder('frosted', project)
    for ignored_error_code in project.get_property('frosted_ignore', []):
        command.use_argument('--ignore={0}'.format(ignored_error_code))

    include_test_sources = project.get_property("frosted_include_test_sources")
    include_scripts = project.get_property("frosted_include_scripts")

    result = command.run_on_production_source_files(
        logger,
        include_test_sources=include_test_sources,
        include_scripts=include_scripts)

    count_of_warnings = len(result.report_lines)
    count_of_errors = len(result.error_report_lines)

    if count_of_errors > 0:
        logger.error('Errors while running frosted, see {0}'.format(
            result.error_report_file))

    if count_of_warnings > 0:
        if project.get_property("frosted_break_build"):
            error_message = "frosted found {0} warning(s)".format(
                count_of_warnings)
            raise BuildFailedException(error_message)
        else:
            logger.warn("frosted found %d warning(s).", count_of_warnings)
Example #22
def run_setup_commands(project, logger, commands):
    reports_dir = project.expand_path("$dir_reports/distutils")
    if not os.path.exists(reports_dir):
        os.mkdir(reports_dir)

    setup_script = project.expand_path("$dir_dist/setup.py")

    for command in commands:
        logger.debug("Executing distutils command %s", command)

        output_file_path = os.path.join(reports_dir, command.replace("/", ""))

        with open(output_file_path, "w") as output_file:
            commandexec = [sys.executable, setup_script]
            commandexec.extend(command.split())
            working_dir = project.expand_path("$dir_dist")
            process = subprocess.Popen(commandexec,
                                       cwd=working_dir,
                                       stdout=output_file,
                                       stderr=output_file,
                                       shell=False)
            return_code = process.wait()
            if return_code != 0:
                raise BuildFailedException(
                    "Error while executing setup command %s, see %s for details"
                    % (command, output_file_path))
Example #23
def run_coverage(project, logger, reactor, execution_prefix, execution_name, target_task, shortest_plan=False):
    logger.info("Collecting coverage information")

    if project.get_property("%s_fork" % execution_prefix) is not None:
        logger.warn(
            "%s_fork is deprecated, coverage always runs in its own fork", execution_prefix)

    if project.get_property("%s_reload_modules" % execution_prefix) is not None:
        logger.warn(
            "%s_reload_modules is deprecated - modules are no longer reloaded", execution_prefix)

    if project.get_property("%s_branch_threshold_warn" % execution_prefix) == 0:
        logger.warn("%s_branch_threshold_warn is 0 and branch coverage will not be checked", execution_prefix)

    if project.get_property("%s_branch_partial_threshold_warn" % execution_prefix) == 0:
        logger.warn("%s_branch_partial_threshold_warn is 0 and partial branch coverage will not be checked",
                    execution_prefix)

    logger.debug("Forking process to do %s analysis", execution_name)
    exit_code, _ = fork_process(logger,
                                target=do_coverage,
                                args=(
                                    project, logger, reactor, execution_prefix, execution_name,
                                    target_task, shortest_plan))
    if exit_code and project.get_property("%s_break_build" % execution_prefix):
        raise BuildFailedException(
            "Forked %s process indicated failure with error code %d" % (execution_name, exit_code))
Example #24
def run_sonar_analysis():
    import subprocess
    result = subprocess.run(
        'sonar-scanner -X',
        shell=True)  # sonar-scanner must be available on the PATH (e.g. linked into /bin)
    if result.returncode != 0:
        raise BuildFailedException("Sonar analysis failed.")
Example #25
def version_from_git_tag(project, logger):
    """ Set project version according git tags"""
    # get git info
    tags, last_commit, repo_is_dirty = _get_repo_info(
        project.get_property('semver_git_tag_repo_dir')
        if project.get_property('semver_git_tag_repo_dir')
        else project.basedir)
    # get last tag which satisfies SemVer
    last_semver_tag = None
    semver_regex = semver._REGEX  # pylint: disable=protected-access
    tag_list = []
    for tag in reversed(tags):
        tag_list.append(tag.name)
    logger.debug("All git tags: %s." % ','.join(tag_list))
    for tag in reversed(tags):
        match = semver_regex.match(tag.name)
        if match:
            if ((not last_semver_tag)
                    or (semver.compare(tag.name, last_semver_tag.name) == 1)):
                last_semver_tag = tag
    if not last_semver_tag:
        logger.warn("No SemVer git tag found. "
                    "Consider removing plugin pybuilder_semver_git_tag.")
        return
    else:
        logger.info("Found SemVer tag: %s" % last_semver_tag.name)
    # get last commit for HEAD
    # if dirty or last commit isn't equal last tag commit
    # - increase version and add .dev
    if last_commit != last_semver_tag.commit or repo_is_dirty:
        if repo_is_dirty:
            logger.debug("Repo is marked as dirty - use dev version.")
        else:
            logger.debug("Last tag %s has commit %s, "
                         "but last commit is %s - use dev version." %
                         (last_semver_tag.name, str(
                             last_semver_tag.commit), str(last_commit)))
        increase_part = project.get_property('semver_git_tag_increment_part')
        if increase_part == 'major':
            project.version = _add_dev(semver.bump_major(last_semver_tag.name))
        elif increase_part == 'minor':
            project.version = _add_dev(semver.bump_minor(last_semver_tag.name))
        elif increase_part == 'patch':
            project.version = _add_dev(semver.bump_patch(last_semver_tag.name))
        else:
            raise BuildFailedException(
                "Incorrect value for `semver_git_tag_increment_part` property. "
                "Has to be in (`major`, `minor`, `patch`), but %s passed." %
                project.get_property('semver_git_tag_increment_part'))
    # if not dirty and last commit is equal last tag commit
    # - it's release tag
    else:
        project.version = last_semver_tag.name
    # DISTRIBUTION_PROPERTY is also affected
    project.set_property(
        DISTRIBUTION_PROPERTY,
        "$dir_target/dist/{0}-{1}".format(project.name, project.version))
    logger.info(
        "Project version was changed to: %s, dist_version: %s, %s: %s" %
        (project.version, project.dist_version, DISTRIBUTION_PROPERTY,
         project.get_property(DISTRIBUTION_PROPERTY)))
Example #26
def _get_repo(repo_path):
    try:
        repo = git.Repo(repo_path)
    except (git.InvalidGitRepositoryError, git.NoSuchPathError):
        raise BuildFailedException(
            "Directory `%s` isn't git repository root." % repo_path)
    return repo
Example #27
def build_binary_distribution(project, logger):
    reports_dir = project.expand_path("$dir_reports/distutils")
    if not os.path.exists(reports_dir):
        os.mkdir(reports_dir)

    setup_script = project.expand_path("$dir_dist/setup.py")

    logger.info("Building binary distribution in %s",
                project.expand_path("$dir_dist"))

    commands = as_list(project.get_property("distutils_commands"))

    for command in commands:
        logger.debug("Executing distutils command %s", command)
        output_file_path = os.path.join(reports_dir, command.replace("/", ""))
        with open(output_file_path, "w") as output_file:
            commands = [sys.executable, setup_script]
            commands.extend(command.split())
            process = subprocess.Popen(commands,
                                       cwd=project.expand_path("$dir_dist"),
                                       stdout=output_file,
                                       stderr=output_file,
                                       shell=False)
            return_code = process.wait()
            if return_code != 0:
                raise BuildFailedException(
                    "Error while executing setup command %s, see %s for details" % (command, output_file_path))
Example #28
def _execute_twine(project, logger, command, work_dir, out_file):
    logger.debug("Executing Twine %s", command)
    with open(out_file, "w") as out_f:
        commands = [sys.executable, "-m", "twine"] + command
        return_code = _run_process_and_wait(commands, work_dir, out_f)
        if return_code != 0:
            raise BuildFailedException(
                "Error while executing Twine %s. See %s for full details:\n%s", command, out_file, tail_log(out_file))
Example #29
def write_report_and_ensure_all_tests_passed(self):
    self.project.write_report("integrationtest.json",
                              render_report(self.test_report))
    self.logger.info("Executed %d integration tests.", self.tests_executed)
    if self.tests_failed:
        raise BuildFailedException(
            "%d of %d integration tests failed." %
            (self.tests_failed, self.tests_executed))
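Example #30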
def get_artifact_manager(project: Project) -> ArtifactManager:
    manager_id = project.get_property(ARTIFACT_MANAGER, "S3")
    global artifact_managers
    manager = artifact_managers.get(manager_id)
    if not manager:
        raise BuildFailedException(
            f"Failed to find appropriate artifact manager for {manager_id}")
    return manager