Example #1
0
def fail_on_eslint(*args):
    """
    Test stub: make the diff-quality run over eslint reports fail, since
    that is what is going to fail when we pass in a percentage ("p")
    requirement. Any non-eslint command succeeds silently.
    """
    command = args[0]
    if "eslint" not in command:
        return
    raise BuildFailure('Subprocess return code: 1')
Example #2
0
File: docs.py Project: ponty/paved
def showhtml():
    """Open your web browser and display the generated html documentation."""
    import webbrowser

    # Resolve the documentation root (logic mirrored from paver itself).
    docroot = path(options.get('docroot', 'docs'))
    if not docroot.exists():
        raise BuildFailure("Sphinx documentation root (%s) does not exist."
                           % docroot)

    # The rendered HTML lives under <builddir>/html.
    html_dir = docroot / options.get("builddir", ".build") / 'html'
    if not html_dir.exists():
        raise BuildFailure("Sphinx build directory (%s) does not exist."
                           % html_dir)

    webbrowser.open(html_dir / 'index.html')
Example #3
0
def run_pylint(options):
    """
    Run pylint on system code. When violations limit is passed in,
    fail the task if too many violations are found.

    :param options: paver options; recognizes `limit` (max violations,
        -1 disables the check), `errors` (errors-only mode) and `system`
        (comma-separated systems, defaulting to ALL_SYSTEMS).
    :raises BuildFailure: when the violation count exceeds the limit.
    """
    num_violations = 0
    violations_limit = int(getattr(options, 'limit', -1))
    errors = getattr(options, 'errors', False)
    # BUG FIX: the old `getattr(options, 'system', '').split(',') or
    # ALL_SYSTEMS` could never fall back to ALL_SYSTEMS, because
    # ''.split(',') yields [''], which is truthy. Check for an empty
    # option explicitly instead.
    system_opt = getattr(options, 'system', '')
    systems = system_opt.split(',') if system_opt else ALL_SYSTEMS

    # Make sure the metrics subdirectory exists
    Env.METRICS_DIR.makedirs_p()

    for system in systems:
        # Directory to put the pylint report in.
        # This makes the folder if it doesn't already exist.
        report_dir = (Env.REPORT_DIR / system).makedirs_p()

        flags = []
        if errors:
            flags.append("--errors-only")

        apps_list = ' '.join(top_python_dirs(system))

        pythonpath_prefix = (
            "PYTHONPATH={system}/djangoapps:common/djangoapps:common/lib".
            format(system=system))

        sh("{pythonpath_prefix} pylint {flags} --msg-template={msg_template} {apps} | "
           "tee {report_dir}/pylint.report".format(
               pythonpath_prefix=pythonpath_prefix,
               flags=" ".join(flags),
               msg_template=
               '"{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"',
               apps=apps_list,
               report_dir=report_dir))

        num_violations += _count_pylint_violations(
            "{report_dir}/pylint.report".format(report_dir=report_dir))

    # Print number of violations to log
    violations_count_str = "Number of pylint violations: " + str(
        num_violations)
    print(violations_count_str)

    # Also write the number of violations to a file
    with open(Env.METRICS_DIR / "pylint", "w") as f:
        f.write(violations_count_str)

    # Fail if the number of violations is greater than the limit
    if num_violations > violations_limit > -1:
        raise BuildFailure("Failed. Too many pylint violations. "
                           "The limit is {violations_limit}.".format(
                               violations_limit=violations_limit))
Example #4
0
def verify_files_exist(files):
    """
    Verify that each named file was created under CACHE_FOLDER.
    This will us help notice/prevent breakages due to
    changes to the bash script file.
    """
    for expected in files:
        candidate = os.path.join(CACHE_FOLDER, expected)
        if os.path.isfile(candidate):
            continue
        raise BuildFailure(u"Did not find expected file: {}".format(candidate))
Example #5
0
def _get_last_report_line(filename):
    """
    Returns the last line of a given file. Used for getting output from quality output files.
    """
    file_not_found_message = "The following log file could not be found: {file}".format(file=filename)
    if os.path.isfile(filename):
        with open(filename, 'r') as report_file:
            lines = report_file.readlines()
            return lines[len(lines) - 1]
    else:
        # Raise a build error if the file is not found
        raise BuildFailure(file_not_found_message)
def fail_on_eslint(*args, **kwargs):
    """
    Test stub: make the diff-quality run over eslint reports fail, since
    that is what is going to fail when we pass in a percentage ("p")
    requirement. Non-eslint commands succeed, returning a random hex
    string when output capture is requested.
    """
    command = args[0]
    if "eslint" in command:
        raise BuildFailure('Subprocess return code: 1')
    if kwargs.get('capture', False):
        return uuid4().hex
    return
Example #7
0
File: docs.py Project: ponty/paved
def showpdf(options, info):
    """Display the generated pdf documentation."""
    pdfdir = pdfdir_path()
    if not pdfdir.exists():
        raise BuildFailure("Sphinx PDF build directory (%s) does not exist."
                           % pdfdir)

    pdf = find_pdf_file()
    if not pdf:
        raise BuildFailure("Sphinx PDF build directory (%s) has no pdf files."
                           % pdfdir)

    info('opening %s' % pdf)
    # Platform-specific "open this document" commands; unknown platforms
    # are a silent no-op, matching the original behavior.
    openers = {
        "win32": 'start "%s"',    # TODO: test
        "darwin": 'open "%s"',    # TODO: test
        "linux2": 'xdg-open "%s"',
    }
    opener = openers.get(sys.platform)
    if opener is not None:
        sh(opener % pdf)
Example #8
0
def run_pylint(options):
    """
    Run pylint on system code. When violations limit is passed in,
    fail the task if too many violations are found.

    :raises BuildFailure: when the violation count is below the lower
        limit (pylint likely mis-ran) or above the upper limit.
    """
    # _parse_pylint_options already extracts errors/systems from `options`;
    # the old code immediately re-derived both with getattr(), silently
    # discarding the parsed values. Trust the parser.
    lower_violations_limit, upper_violations_limit, errors, systems = _parse_pylint_options(options)

    num_violations, violations_list = _get_pylint_violations(systems, errors)

    # Print number of violations to log
    violations_count_str = "Number of pylint violations: " + str(num_violations)
    print(violations_count_str)

    # Also write the number of violations to a file
    with open(Env.METRICS_DIR / "pylint", "w") as f:
        f.write(violations_count_str)

    # Fail when number of violations is less than the lower limit,
    # which likely means that pylint did not run successfully.
    # If pylint *did* run successfully, then great! Modify the lower limit.
    if num_violations < lower_violations_limit > -1:
        raise BuildFailure(
            "FAILURE: Too few pylint violations. "
            "Expected to see at least {lower_limit} pylint violations. "
            "Either pylint is not running correctly -or- "
            "the limits should be lowered and/or the lower limit should be removed.".format(
                lower_limit=lower_violations_limit
            )
        )

    # Fail when number of violations is greater than the upper limit.
    if num_violations > upper_violations_limit > -1:
        raise BuildFailure(
            "FAILURE: Too many pylint violations. "
            "The limit is {upper_limit}.".format(upper_limit=upper_violations_limit)
        )
Example #9
0
def run_eslint(options):
    """
    Runs eslint on static asset directories.
    If limit option is passed, fails build if more violations than the limit are found.
    """
    limit = int(getattr(options, 'limit', -1))

    # Fresh report directory/file for this run.
    report_dir = (Env.REPORT_DIR / "eslint")
    report_path = report_dir / "eslint.report"
    _prepare_report_dir(report_dir)

    # eslint exits non-zero when violations exist, so ignore its exit code
    # here; the count is parsed from the teed report below.
    sh(
        "nodejs --max_old_space_size=4096 node_modules/.bin/eslint "
        "--ext .js --ext .jsx --format=compact . | tee {eslint_report}".format(
            eslint_report=report_path
        ),
        ignore_error=True
    )

    try:
        num_violations = int(_get_count_from_last_line(report_path, "eslint"))
    except TypeError:
        raise BuildFailure(
            "FAILURE: Number of eslint violations could not be found in {eslint_report}".format(
                eslint_report=report_path
            )
        )

    # Record the metric
    _write_metric(num_violations, (Env.METRICS_DIR / "eslint"))

    # Fail if number of violations is greater than the limit
    if num_violations > limit > -1:
        raise BuildFailure(
            "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format(
                count=num_violations, violations_limit=limit
            )
        )
Example #10
0
def run_jshint(options):
    """
    Runs jshint on static asset directories
    """
    limit = int(getattr(options, 'limit', -1))

    report_dir = (Env.REPORT_DIR / "jshint")
    report_path = report_dir / "jshint.report"
    _prepare_report_dir(report_dir)

    # jshint exits non-zero when violations exist; ignore that and parse
    # the appended report instead.
    sh(
        "jshint . --config .jshintrc >> {jshint_report}".format(
            jshint_report=report_path
        ),
        ignore_error=True
    )

    try:
        num_violations = int(_get_count_from_last_line(report_path, "jshint"))
    except TypeError:
        raise BuildFailure(
            "Error. Number of jshint violations could not be found in {jshint_report}".format(
                jshint_report=report_path
            )
        )

    # Record the metric
    _write_metric(num_violations, (Env.METRICS_DIR / "jshint"))

    # Fail if number of violations is greater than the limit
    if num_violations > limit > -1:
        raise BuildFailure(
            "JSHint Failed. Too many violations ({count}).\nThe limit is {violations_limit}.".format(
                count=num_violations, violations_limit=limit
            )
        )
Example #11
0
def run_quality(options):
    """
    Build the html diff quality reports, and print the reports to the console.
    :param: b, the branch to compare against, defaults to origin/master
    :param: p, diff-quality will fail if the quality percentage calculated is
        below this percentage. For example, if p is set to 80, and diff-quality finds
        quality of the branch vs the compare branch is less than 80%, then this task will fail.
        This threshold would be applied to both pep8 and pylint.
    """

    # Directory to put the diff reports in.
    # This makes the folder if it doesn't already exist.
    dquality_dir = (Env.REPORT_DIR / "diff_quality").makedirs_p()
    diff_quality_percentage_failure = False

    # Set the string, if needed, to be used for the diff-quality --compare-branch switch.
    compare_branch = getattr(options, 'compare_branch', None)
    compare_branch_string = ''
    if compare_branch:
        compare_branch_string = '--compare-branch={0}'.format(compare_branch)

    # Set the string, if needed, to be used for the diff-quality --fail-under switch.
    diff_threshold = int(getattr(options, 'percentage', -1))
    percentage_string = ''
    if diff_threshold > -1:
        percentage_string = '--fail-under={0}'.format(diff_threshold)

    # Generate diff-quality html report for pep8, and print to console
    # If pep8 reports exist, use those
    # Otherwise, `diff-quality` will call pep8 itself

    pep8_files = get_violations_reports("pep8")
    pep8_reports = u' '.join(pep8_files)

    try:
        sh(
            "diff-quality --violations=pep8 {pep8_reports} {percentage_string} "
            "{compare_branch_string} --html-report {dquality_dir}/diff_quality_pep8.html".format(
                pep8_reports=pep8_reports,
                percentage_string=percentage_string,
                compare_branch_string=compare_branch_string,
                dquality_dir=dquality_dir
            )
        )
    # py3-compatible `as` form (the py2-only comma syntax was used here,
    # inconsistent with the rest of the codebase).
    except BuildFailure as error_message:
        if is_percentage_failure(error_message):
            diff_quality_percentage_failure = True
        else:
            raise BuildFailure(error_message)
Example #12
0
def get_file_from_s3(bucket_name, zipfile_name, path):
    """
    Get the file from s3 and save it to disk.
    """
    print(u"Retrieving {} from bucket {}.".format(zipfile_name, bucket_name))

    # Anonymous connection: the bucket is expected to be publicly readable.
    conn = boto.connect_s3(anon=True)
    bucket = conn.get_bucket(bucket_name)
    key = boto.s3.key.Key(bucket=bucket, name=zipfile_name)
    if not key.exists():
        raise BuildFailure(
            u"Did not find expected file {} in the S3 bucket {}".format(
                zipfile_name, bucket_name)
        )

    key.get_contents_to_filename(os.path.join(path, zipfile_name))
Example #13
0
    def runpipe():
        # Run `command` through the shell, optionally capturing output.
        # Closes over command/cwd/capture/ignore_error from the caller.
        popen_kwargs = {'shell': True, 'cwd': cwd}
        if capture:
            popen_kwargs['stderr'] = subprocess.STDOUT
            popen_kwargs['stdout'] = subprocess.PIPE

        proc = subprocess.Popen(command, **popen_kwargs)
        output = proc.communicate()[0]
        if output is not None:
            output = output.decode(sys.getdefaultencoding())

        if proc.returncode and not ignore_error:
            # Surface the captured output before failing so the cause is visible.
            if capture and output is not None:
                error(output)
            raise BuildFailure("Subprocess return code: %d" % proc.returncode)

        if capture:
            return output
Example #14
0
def node_prereqs_installation():
    """
    Configures npm and installs Node prerequisites.

    Retries the install once when the known npm "cb() never called" race
    is detected; any other BuildFailure is re-raised.
    """
    cb_error_text = "Subprocess return code: 1"

    # Error handling around a race condition that produces "cb() never called" error. This
    # evinces itself as `cb_error_text` and it ought to disappear when we upgrade
    # npm to 3 or higher. TODO: clean this up when we do that.
    try:
        sh('npm install')
    except BuildFailure as error_text:  # py3-compatible `as` form
        if cb_error_text in error_text:
            print("npm install error detected. Retrying...")
            sh('npm install')
        else:
            raise BuildFailure(error_text)
Example #15
0
def run_quality(options):
    """
    Build the html diff quality reports, and print the reports to the console.
    :param: p, diff-quality will fail if the quality percentage calculated is
        below this percentage. For example, if p is set to 80, and diff-quality finds
        quality of the branch vs master is less than 80%, then this task will fail.
        This threshold would be applied to both pep8 and pylint.
    """

    # Directory to put the diff reports in.
    # This makes the folder if it doesn't already exist.
    dquality_dir = (Env.REPORT_DIR / "diff_quality").makedirs_p()
    diff_quality_percentage_failure = False

    # Set the string, if needed, to be used for the diff-quality --fail-under switch.
    diff_threshold = int(getattr(options, 'percentage', -1))
    percentage_string = ''
    if diff_threshold > -1:
        percentage_string = '--fail-under={0}'.format(diff_threshold)

    # Generate diff-quality html report for pep8, and print to console
    # If pep8 reports exist, use those
    # Otherwise, `diff-quality` will call pep8 itself

    pep8_files = []
    for subdir, _dirs, files in os.walk(os.path.join(Env.REPORT_DIR)):
        for f in files:
            if f == "pep8.report":
                pep8_files.append(os.path.join(subdir, f))

    pep8_reports = u' '.join(pep8_files)

    try:
        sh("diff-quality --violations=pep8 {pep8_reports} {percentage_string} "
           "--html-report {dquality_dir}/diff_quality_pep8.html".format(
               pep8_reports=pep8_reports,
               percentage_string=percentage_string,
               dquality_dir=dquality_dir))
    except BuildFailure, error_message:
        if is_percentage_failure(error_message):
            diff_quality_percentage_failure = True
        else:
            raise BuildFailure(error_message)
Example #16
0
def node_prereqs_installation():
    """
    Configures npm and installs Node prerequisites.

    The install runs under a 10-minute timeout and its stderr is logged
    to a per-shard file; the known npm "cb() never called" race triggers
    one retry.
    """
    @timeout(limit=600)
    def _run_npm_command(npm_command, npm_log_file):
        """
        helper function for running the npm installation with a timeout.
        The implementation of Paver's `sh` function returns before the forked
        actually returns. Using a Popen object so that we can ensure that
        the forked process has returned
        """
        proc = subprocess.Popen(npm_command, stderr=npm_log_file)
        proc.wait()

    # NPM installs hang sporadically. Log the installation process so that we
    # determine if any packages are chronic offenders.
    shard_str = os.getenv('SHARD', None)
    if shard_str:
        npm_log_file_path = '{}/npm-install.{}.log'.format(
            Env.GEN_LOG_DIR, shard_str)
    else:
        npm_log_file_path = '{}/npm-install.log'.format(Env.GEN_LOG_DIR)
    npm_command = 'npm install --verbose'.split()

    cb_error_text = "Subprocess return code: 1"

    # Error handling around a race condition that produces "cb() never called" error. This
    # evinces itself as `cb_error_text` and it ought to disappear when we upgrade
    # npm to 3 or higher. TODO: clean this up when we do that.
    # Context manager closes the log handle even on failure (the old code
    # leaked it).
    with io.open(npm_log_file_path, 'wb') as npm_log_file:
        try:
            _run_npm_command(npm_command, npm_log_file)
        except TimeoutException:
            print("NPM installation took too long. Exiting...")
            print("Check {} for more information".format(npm_log_file_path))
            sys.exit(1)
        except BuildFailure as error_text:
            if cb_error_text in error_text:
                print("npm install error detected. Retrying...")
                _run_npm_command(npm_command, npm_log_file)
            else:
                raise BuildFailure(error_text)
Example #17
0
def run_stylelint(options):
    """
    Runs stylelint on Sass files.
    If limit option is passed, fails build if more violations than the limit are found.
    """
    limit = int(getattr(options, 'limit', -1))
    count = _get_stylelint_violations()

    # Record the metric
    _write_metric(count, (Env.METRICS_DIR / "stylelint"))

    # Fail if number of violations is greater than the limit
    if count > limit > -1:
        failure_message = (
            "Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}."
            .format(count=count, violations_limit=limit)
        )
        raise BuildFailure(failure_message)
Example #18
0
def run_pep8(options):
    """
    Run pep8 on system code.
    Fail the task if the number of violations exceeds the limit.

    :param options: paver options; recognizes `limit` (max violations,
        -1 disables the check).
    :raises BuildFailure: when the violation count exceeds the limit.
    """
    violations_limit = int(getattr(options, 'limit', -1))

    sh('pep8 . | tee {report_dir}/pep8.report'.format(report_dir=REPORTS_DIR))

    num_violations, __ = _count_pep8_violations(
        '{report_dir}/pep8.report'.format(report_dir=REPORTS_DIR)
    )

    violations_message = violation_message('pep8', violations_limit, num_violations)
    print(violations_message)

    # Fail if number of violations is greater than the limit
    if num_violations > violations_limit > -1:
        raise BuildFailure(violations_message)
Example #19
0
def node_prereqs_installation():
    """
    Configures npm and installs Node prerequisites.

    Points npm at NPM_REGISTRY if needed, then installs; the known npm
    "cb() never called" race triggers one retry.
    """
    cb_error_text = "Subprocess return code: 1"
    # Only reset the registry when it differs from the expected one.
    sh("test `npm config get registry` = \"{reg}\" || "
       "(echo setting registry; npm config set registry"
       " {reg})".format(reg=NPM_REGISTRY))

    # Error handling around a race condition that produces "cb() never called" error. This
    # evinces itself as `cb_error_text` and it ought to disappear when we upgrade
    # npm to 3 or higher. TODO: clean this up when we do that.
    try:
        sh('npm install')
    except BuildFailure as error_text:  # py3-compatible `as` form
        if cb_error_text in error_text:
            print("npm install error detected. Retrying...")
            sh('npm install')
        else:
            raise BuildFailure(error_text)
Example #20
0
def wl_test_config():
    """
    Validate required environment variables and prepare directories
    (screenshots, logs, reports) for the test run.

    :raises BuildFailure: when a required environment variable is unset.
    """
    # Make sure environment variables are set.
    # A membership test replaces the old broad `except Exception`, which
    # could mask unrelated errors; a missing key is the only expected case.
    env_vars = ['GLOBAL_PASSWORD']
    for env_var in env_vars:
        if env_var not in os.environ:
            raise BuildFailure("Please set the environment variable :" +
                               env_var)

    # Set environment variables for screen shots.
    os.environ['NEEDLE_OUTPUT_DIR'] = SCREENSHOT_DIR
    os.environ['NEEDLE_BASELINE_DIR'] = BASELINE_DIR
    os.environ['UPLOAD_FILE_DIR'] = UPLOAD_FILE_DIR

    # Create log directory
    LOG_DIR.makedirs_p()

    # Create report directory
    REPORT_DIR.makedirs_p()
Example #21
0
def _get_stylelint_violations():
    """
    Return the number of Stylelint violations found in the Sass sources.

    Raises BuildFailure when the count cannot be parsed from the report.
    """
    report_dir = (Env.REPORT_DIR / "stylelint")
    report_path = report_dir / "stylelint.report"
    _prepare_report_dir(report_dir)
    formatter = 'node_modules/stylelint-formatter-pretty'

    # stylelint exits non-zero when violations exist; ignore that and
    # parse the count from the teed report instead.
    sh("stylelint **/*.scss --custom-formatter={formatter} | tee {stylelint_report}"
       .format(
           formatter=formatter,
           stylelint_report=report_path,
       ),
       ignore_error=True)

    try:
        return int(_get_count_from_last_line(report_path, "stylelint"))
    except TypeError:
        raise BuildFailure(
            "Error. Number of stylelint violations could not be found in {stylelint_report}"
            .format(stylelint_report=report_path))
Example #22
0
def run_xsscommitlint():
    """
    Runs xss-commit-linter.sh on the current branch.
    """
    script_name = "xss-commit-linter.sh"
    report_dir = (Env.REPORT_DIR / "xsscommitlint")
    report_path = report_dir / "xsscommitlint.report"
    _prepare_report_dir(report_dir)

    # Run the linter, teeing its output into the report file.
    sh(
        "{repo_root}/scripts/{xsscommitlint_script} | tee {xsscommitlint_report}".format(
            repo_root=Env.REPO_ROOT,
            xsscommitlint_script=script_name,
            xsscommitlint_report=report_path,
        ),
        ignore_error=True
    )

    raw_count = _get_xsscommitlint_count(report_path)

    try:
        num_violations = int(raw_count)
    except TypeError:
        raise BuildFailure(
            "FAILURE: Number of {xsscommitlint_script} violations could not be found in {xsscommitlint_report}".format(
                xsscommitlint_script=script_name, xsscommitlint_report=report_path
            )
        )

    # Print number of violations to log.
    violations_count_str = "Number of {xsscommitlint_script} violations: {num_violations}\n".format(
        xsscommitlint_script=script_name, num_violations=num_violations
    )

    # Record the metric
    metrics_report = (Env.METRICS_DIR / "xsscommitlint")
    _write_metric(violations_count_str, metrics_report)
    # Output report to console.
    sh("cat {metrics_report}".format(metrics_report=metrics_report), ignore_error=True)
Example #23
0
def run_pep8(options):  # pylint: disable=unused-argument
    """
    Run pep8 on system code.
    Fail the task if any violations are found.

    :raises BuildFailure: when one or more violations are found.
    """
    (count, violations_list) = _get_pep8_violations()
    violations_list = ''.join(violations_list)

    # Print number of violations to log
    violations_count_str = "Number of pep8 violations: {count}".format(count=count)
    print(violations_count_str)
    print(violations_list)

    # Also write the number of violations to a file
    with open(Env.METRICS_DIR / "pep8", "w") as f:
        f.write(violations_count_str + '\n\n')
        f.write(violations_list)

    # Fail if any violations are found
    if count:
        failure_string = "FAILURE: Too many pep8 violations. " + violations_count_str
        failure_string += "\n\nViolations:\n{violations_list}".format(violations_list=violations_list)
        raise BuildFailure(failure_string)
Example #24
0
def node_prereqs_installation():
    """
    Configures npm and installs Node prerequisites.

    stderr of the install is logged to a per-shard file; the known npm
    "cb() never called" race triggers one retry.
    """
    # NPM installs hang sporadically. Log the installation process so that we
    # determine if any packages are chronic offenders.
    shard_str = os.getenv('SHARD', None)
    if shard_str:
        npm_log_file_path = '{}/npm-install.{}.log'.format(
            Env.GEN_LOG_DIR, shard_str)
    else:
        npm_log_file_path = '{}/npm-install.log'.format(Env.GEN_LOG_DIR)
    npm_command = 'npm install --verbose'.split()

    cb_error_text = "Subprocess return code: 1"

    # Error handling around a race condition that produces "cb() never called" error. This
    # evinces itself as `cb_error_text` and it ought to disappear when we upgrade
    # npm to 3 or higher. TODO: clean this up when we do that.
    # Context manager closes the log handle even on failure (the old code
    # leaked it).
    with io.open(npm_log_file_path, 'wb') as npm_log_file:
        try:
            # The implementation of Paver's `sh` function returns before the forked
            # actually returns. Using a Popen object so that we can ensure that
            # the forked process has returned
            proc = subprocess.Popen(npm_command, stderr=npm_log_file)
            proc.wait()
        except BuildFailure as error_text:
            if cb_error_text in error_text:
                print("npm install error detected. Retrying...")
                proc = subprocess.Popen(npm_command, stderr=npm_log_file)
                proc.wait()
            else:
                raise BuildFailure(error_text)
    print(u"Successfully installed NPM packages. Log found at {}".format(
        npm_log_file_path))
Example #25
0
def _create_bootstrap(script_name,
                      packages_to_install,
                      paver_command_line,
                      install_paver=True,
                      more_text="",
                      dest_dir='.',
                      no_site_packages=None,
                      system_site_packages=None,
                      unzip_setuptools=False,
                      distribute=None,
                      index_url=None,
                      trusted_host=None,
                      no_index=False,
                      find_links=None,
                      prefer_easy_install=False):
    """
    Write a virtualenv bootstrap script to ``script_name``.

    The generated script creates a virtualenv in ``dest_dir``, installs
    ``packages_to_install`` (plus paver itself when ``install_paver`` is
    true), and optionally runs ``paver_command_line`` afterwards.
    ``more_text`` is appended verbatim to the injected script code.

    Raises BuildFailure when virtualenv is not importable.
    NOTE(review): ``no_site_packages`` is deprecated in favor of
    ``system_site_packages`` (see the compatibility shim below).
    """
    # configure package installation template
    install_cmd_options = []
    if index_url:
        install_cmd_options.extend(['--index-url', index_url])
    if trusted_host:
        install_cmd_options.extend(['--trusted-host', trusted_host])
    if no_index:
        install_cmd_options.extend(['--no-index'])
    if find_links:
        for link in find_links:
            install_cmd_options.extend(['--find-links', link])
    install_cmd_tmpl = (_easy_install_tmpl if prefer_easy_install else
                        _pip_then_easy_install_tmpl)
    confd_install_cmd_tmpl = (install_cmd_tmpl % {
        'bin_dir_var': 'bin_dir',
        'cmd_options': install_cmd_options
    })
    # make copy to local scope to add paver to packages to install
    packages_to_install = packages_to_install[:]
    if install_paver:
        packages_to_install.insert(0, 'paver==%s' % setup_meta['version'])
    install_cmd = confd_install_cmd_tmpl % {'packages': packages_to_install}

    # Build the `options.*` assignments injected into adjust_options().
    options = ""
    # if deprecated 'no_site_packages' was specified and 'system_site_packages'
    # wasn't, set it from that value
    if system_site_packages is None and no_site_packages is not None:
        system_site_packages = not no_site_packages
    if system_site_packages is not None:
        options += ("    options.system_site_packages = %s\n" %
                    bool(system_site_packages))
    if unzip_setuptools:
        options += "    options.unzip_setuptools = True\n"
    if distribute is not None:
        options += "    options.use_distribute = %s\n" % bool(distribute)
    options += "\n"

    # Source code injected into the virtualenv bootstrap script: pin the
    # target dir, apply the options above, and install packages after the
    # virtualenv is created.
    extra_text = """def adjust_options(options, args):
    args[:] = ['%s']
%s
def after_install(options, home_dir):
    if sys.platform == 'win32':
        bin_dir = join(home_dir, 'Scripts')
    else:
        bin_dir = join(home_dir, 'bin')
%s""" % (dest_dir, options, install_cmd)
    if paver_command_line:
        command_list = list(paver_command_line.split())
        # repr(list)[1:] drops the leading '[' so the remaining
        # "'arg1', 'arg2']" completes the subprocess.call([...] literal.
        extra_text += "    subprocess.call([join(bin_dir, 'paver'),%s)" % repr(
            command_list)[1:]

    extra_text += more_text
    if has_virtualenv:
        bootstrap_contents = venv.create_bootstrap_script(extra_text)
    else:
        raise BuildFailure(VIRTUALENV_MISSING_ERROR)
    fn = script_name

    debug("Bootstrap script extra text: " + extra_text)

    def write_script():
        # Simple one-shot write; wrapped in dry() so --dry-run skips it.
        open(fn, "w").write(bootstrap_contents)

    dry("Write bootstrap script %s" % fn, write_script)
Example #26
0
def run_quality(options):
    """
    Build the html diff quality reports, and print the reports to the console.
    :param: b, the branch to compare against, defaults to origin/master
    :param: p, diff-quality will fail if the quality percentage calculated is
        below this percentage. For example, if p is set to 80, and diff-quality finds
        quality of the branch vs the compare branch is less than 80%, then this task will fail.
        This threshold would be applied to both pep8 and pylint.
    :raises BuildFailure: if any of the pep8/pylint/eslint quality runs fail.
    """
    # Directory to put the diff reports in.
    # This makes the folder if it doesn't already exist.
    dquality_dir = (Env.REPORT_DIR / "diff_quality").makedirs_p()

    # Save the pass variable. It will be set to false later if failures are detected.
    diff_quality_percentage_pass = True

    def _pep8_output(count, violations_list, is_html=False):
        """
        Given a count & list of pep8 violations, pretty-print the pep8 output.
        If `is_html`, will print out with HTML markup.
        """
        if is_html:
            lines = ['<body>\n']
            sep = '-------------<br/>\n'
            title = "<h1>Quality Report: pep8</h1>\n"
            violations_bullets = ''.join([
                HTML('<li>{violation}</li><br/>\n').format(violation=violation)
                for violation in violations_list
            ])
            violations_str = HTML('<ul>\n{bullets}</ul>\n').format(
                bullets=HTML(violations_bullets))
            violations_count_str = "<b>Violations</b>: {count}<br/>\n"
            fail_line = "<b>FAILURE</b>: pep8 count should be 0<br/>\n"
        else:
            lines = []
            sep = '-------------\n'
            title = "Quality Report: pep8\n"
            violations_str = ''.join(violations_list)
            violations_count_str = "Violations: {count}\n"
            fail_line = "FAILURE: pep8 count should be 0\n"

        violations_count_str = violations_count_str.format(count=count)

        lines.extend(
            [sep, title, sep, violations_str, sep, violations_count_str])

        if count > 0:
            lines.append(fail_line)
        lines.append(sep + '\n')
        if is_html:
            lines.append('</body>')

        return ''.join(lines)

    # Run pep8 directly since we have 0 violations on master
    (count, violations_list) = _get_pep8_violations()

    # Print number of violations to log
    print(_pep8_output(count, violations_list))

    # Also write the number of violations to a file
    with open(dquality_dir / "diff_quality_pep8.html", "w") as f:
        f.write(_pep8_output(count, violations_list, is_html=True))

    if count > 0:
        diff_quality_percentage_pass = False

    # ----- Set up for diff-quality pylint call -----
    # Set the string, if needed, to be used for the diff-quality --compare-branch switch.
    compare_branch = getattr(options, 'compare_branch', None)
    compare_branch_string = u''
    if compare_branch:
        compare_branch_string = u'--compare-branch={0}'.format(compare_branch)

    # Set the string, if needed, to be used for the diff-quality --fail-under switch.
    diff_threshold = int(getattr(options, 'percentage', -1))
    percentage_string = u''
    if diff_threshold > -1:
        percentage_string = u'--fail-under={0}'.format(diff_threshold)

    # Generate diff-quality html report for pylint, and print to console
    # If pylint reports exist, use those
    # Otherwise, `diff-quality` will call pylint itself

    pylint_files = get_violations_reports("pylint")
    pylint_reports = u' '.join(pylint_files)

    eslint_files = get_violations_reports("eslint")
    eslint_reports = u' '.join(eslint_files)

    pythonpath_prefix = (
        "PYTHONPATH=$PYTHONPATH:lms:lms/djangoapps:cms:cms/djangoapps:"
        "common:common/djangoapps:common/lib")

    # run diff-quality for pylint.
    if not run_diff_quality(violations_type="pylint",
                            prefix=pythonpath_prefix,
                            reports=pylint_reports,
                            percentage_string=percentage_string,
                            branch_string=compare_branch_string,
                            dquality_dir=dquality_dir):
        diff_quality_percentage_pass = False

    # run diff-quality for eslint.
    if not run_diff_quality(violations_type="eslint",
                            prefix=pythonpath_prefix,
                            reports=eslint_reports,
                            percentage_string=percentage_string,
                            branch_string=compare_branch_string,
                            dquality_dir=dquality_dir):
        diff_quality_percentage_pass = False

    # If one of the quality runs fails, then paver exits with an error when it is finished
    if not diff_quality_percentage_pass:
        raise BuildFailure("Diff-quality failure(s).")
Example #27
0
def run_xsslint(options):
    """
    Runs xss_linter.py on the codebase, records the violation counts as
    metrics, and fails the build when any configured threshold is exceeded.

    Supported options:
        thresholds: JSON string of maximum allowed violation counts, e.g.
            '{"total": 100, "rules": {"javascript-escape": 0}}'.

    Raises:
        BuildFailure: if the thresholds option is malformed, the violation
            counts cannot be parsed from the report, or a threshold is
            exceeded.
    """

    thresholds_option = getattr(options, 'thresholds', '{}')
    try:
        violation_thresholds = json.loads(thresholds_option)
    except ValueError:
        # Malformed JSON: fall through to the format check below, which raises.
        violation_thresholds = None
    # Thresholds must be a dict whose only allowed keys are "total" and "rules".
    if isinstance(violation_thresholds, dict) is False or \
            any(key not in ("total", "rules") for key in violation_thresholds.keys()):

        raise BuildFailure(
            """Error. Thresholds option "{thresholds_option}" was not supplied using proper format.\n"""
            """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """
            """with property names in double-quotes.""".format(
                thresholds_option=thresholds_option))

    xsslint_script = "xss_linter.py"
    xsslint_report_dir = (Env.REPORT_DIR / "xsslint")
    xsslint_report = xsslint_report_dir / "xsslint.report"
    _prepare_report_dir(xsslint_report_dir)

    # Run the linter; ignore its exit code, since violations are expected and
    # are judged against the thresholds below instead.
    sh("{repo_root}/scripts/{xsslint_script} --rule-totals >> {xsslint_report}"
       .format(
           repo_root=Env.REPO_ROOT,
           xsslint_script=xsslint_script,
           xsslint_report=xsslint_report,
       ),
       ignore_error=True)

    xsslint_counts = _get_xsslint_counts(xsslint_report)

    try:
        metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format(
            xsslint_script=xsslint_script,
            num_violations=int(xsslint_counts['total']))
        if 'rules' in xsslint_counts and any(xsslint_counts['rules']):
            metrics_str += "\n"
            rule_keys = sorted(xsslint_counts['rules'].keys())
            for rule in rule_keys:
                metrics_str += "{rule} violations: {count}\n".format(
                    rule=rule, count=int(xsslint_counts['rules'][rule]))
    except TypeError:
        # int() raised because a count was missing or non-numeric in the report.
        raise BuildFailure(
            "Error. Number of {xsslint_script} violations could not be found in {xsslint_report}"
            .format(xsslint_script=xsslint_script,
                    xsslint_report=xsslint_report))

    metrics_report = (Env.METRICS_DIR / "xsslint")
    # Record the metric
    _write_metric(metrics_str, metrics_report)
    # Print number of violations to log.
    sh("cat {metrics_report}".format(metrics_report=metrics_report),
       ignore_error=True)

    error_message = ""

    # Test total violations against threshold.
    if 'total' in violation_thresholds.keys():
        if violation_thresholds['total'] < xsslint_counts['total']:
            error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format(
                count=xsslint_counts['total'],
                violations_limit=violation_thresholds['total'])

    # Test rule violations against thresholds.
    if 'rules' in violation_thresholds:
        threshold_keys = sorted(violation_thresholds['rules'].keys())
        for threshold_key in threshold_keys:
            if threshold_key not in xsslint_counts['rules']:
                error_message += (
                    "\nNumber of {xsslint_script} violations for {rule} could not be found in "
                    "{xsslint_report}.").format(xsslint_script=xsslint_script,
                                                rule=threshold_key,
                                                xsslint_report=xsslint_report)
            elif violation_thresholds['rules'][threshold_key] < xsslint_counts[
                    'rules'][threshold_key]:
                error_message += \
                    "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format(
                        rule=threshold_key, count=xsslint_counts['rules'][threshold_key],
                        violations_limit=violation_thresholds['rules'][threshold_key],
                    )

    # Fix: the original used `error_message is not ""`, an identity comparison
    # that only works by accident of CPython string interning (and is a
    # SyntaxWarning on modern Pythons). Use truthiness instead.
    if error_message:
        raise BuildFailure(
            "XSSLinter Failed.\n{error_message}\n"
            "See {xsslint_report} or run the following command to hone in on the problem:\n"
            "  ./scripts/xss-commit-linter.sh -h".format(
                error_message=error_message, xsslint_report=xsslint_report))
Example #28
0
        sh(
            "{pythonpath_prefix} diff-quality --violations=pylint "
            "{pylint_reports} {percentage_string} {compare_branch_string} "
            "--html-report {dquality_dir}/diff_quality_pylint.html ".format(
                pythonpath_prefix=pythonpath_prefix,
                pylint_reports=pylint_reports,
                percentage_string=percentage_string,
                compare_branch_string=compare_branch_string,
                dquality_dir=dquality_dir,
            )
        )
    except BuildFailure, error_message:
        if is_percentage_failure(error_message):
            diff_quality_percentage_failure = True
        else:
            raise BuildFailure(error_message)

    # If one of the diff-quality runs fails, then paver exits with an error when it is finished
    if diff_quality_percentage_failure:
        raise BuildFailure("Diff-quality failure(s).")


def is_percentage_failure(error_message):
    """
    When diff-quality is run with a threshold percentage, it ends with an exit code of 1. This bubbles up to
    paver with a subprocess return code error. If the subprocess exits with anything other than 1, raise
    a paver exception.

    Returns:
        bool: True when the message indicates a threshold failure
        (subprocess exit code 1), False otherwise.
    """
    # The original block was truncated mid-branch (the `else:` had no body);
    # the complete check is a simple substring membership test.
    return "Subprocess return code: 1" in error_message
Example #29
0
def run_pylint(options):
    """
    Run pylint across each configured system's code.

    Fails the task when the violation count falls outside the configured
    lower/upper limits (a limit of -1 disables that check).
    """
    lower_limit, upper_limit, errors_only, systems = _parse_pylint_options(
        options)

    # The metrics subdirectory must exist before we write the summary file.
    Env.METRICS_DIR.makedirs_p()

    total_violations = 0
    for system in systems:
        # Per-system report directory; created on demand.
        system_report_dir = (Env.REPORT_DIR / system).makedirs_p()

        pylint_flags = ["--errors-only"] if errors_only else []

        command = (
            "{pythonpath_prefix} pylint {flags} --msg-template={msg_template} {apps} | "
            "tee {report_dir}/pylint.report").format(
                pythonpath_prefix=(
                    "PYTHONPATH={system}/djangoapps:common/djangoapps:common/lib".
                    format(system=system)),
                flags=" ".join(pylint_flags),
                msg_template=
                '"{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"',
                apps=' '.join(top_python_dirs(system)),
                report_dir=system_report_dir)
        sh(command)

        report_path = "{report_dir}/pylint.report".format(
            report_dir=system_report_dir)
        total_violations += _count_pylint_violations(report_path)

    # Log the grand total...
    summary = "Number of pylint violations: " + str(total_violations)
    print(summary)

    # ...and persist it as a metric.
    with open(Env.METRICS_DIR / "pylint", "w") as metrics_file:
        metrics_file.write(summary)

    # Too few violations almost certainly means pylint did not actually run;
    # fail loudly so the limits get re-examined instead of silently passing.
    if lower_limit > -1 and total_violations < lower_limit:
        raise BuildFailure(
            "Failed. Too few pylint violations. "
            "Expected to see at least {lower_limit} pylint violations. "
            "Either pylint is not running correctly -or- "
            "the limits should be lowered and/or the lower limit should be removed."
            .format(lower_limit=lower_limit))

    # Too many violations: the code quality bar was not met.
    if upper_limit > -1 and total_violations > upper_limit:
        raise BuildFailure("Failed. Too many pylint violations. "
                           "The limit is {upper_limit}.".format(
                               upper_limit=upper_limit))
Example #30
0
           "--html-report {dquality_dir}/diff_quality_pylint.html ".format(
               pythonpath_prefix=pythonpath_prefix,
               pylint_reports=pylint_reports,
               percentage_string=percentage_string,
               compare_branch_string=compare_branch_string,
               dquality_dir=dquality_dir,
           ))
    except BuildFailure, error_message:
        if is_percentage_failure(error_message):
            diff_quality_percentage_failure = True
        else:
            raise BuildFailure(error_message)

    # If one of the diff-quality runs fails, then paver exits with an error when it is finished
    if diff_quality_percentage_failure:
        raise BuildFailure("Diff-quality failure(s).")


def is_percentage_failure(error_message):
    """
    When diff-quality is run with a threshold percentage, it ends with an exit code of 1. This bubbles up to
    paver with a subprocess return code error. If the subprocess exits with anything other than 1, raise
    a paver exception.

    Returns:
        bool: True when the message indicates a threshold failure
        (subprocess exit code 1), False otherwise.
    """
    # Idiom: a single membership test replaces the verbose
    # `if X not in s: return False else: return True` branching.
    return "Subprocess return code: 1" in error_message


def get_violations_reports(violations_type):