Example No. 1
    def _lint_py_files(self, config_pylint, config_pycodestyle):
        """Prints a list of lint errors in the given list of Python files.

        Args:
            config_pylint: str. Path to the .pylintrc file.
            config_pycodestyle: str. Path to the tox.ini file.

        Returns:
            summary_messages: list(str). Summary messages of lint check.
        """
        files_to_lint = self.all_filepaths
        start_time = time.time()
        are_there_errors = False
        summary_messages = []

        num_py_files = len(files_to_lint)

        python_utils.PRINT('Linting %s Python files' % num_py_files)

        _batch_size = 50
        current_batch_start_index = 0
        stdout = python_utils.string_io()

        while current_batch_start_index < len(files_to_lint):
            # Note that this index is an exclusive upper bound -- i.e.,
            # the current batch of files ranges from 'start_index' to
            # 'end_index - 1'.
            current_batch_end_index = min(
                current_batch_start_index + _batch_size, len(files_to_lint))
            current_files_to_lint = files_to_lint[
                current_batch_start_index:current_batch_end_index]
            if self.verbose_mode_enabled:
                python_utils.PRINT(
                    'Linting Python files %s to %s...' %
                    (current_batch_start_index + 1, current_batch_end_index))

            with linter_utils.redirect_stdout(stdout):
                # This line invokes Pylint and prints its output
                # to the target stdout.
                pylinter = lint.Run(current_files_to_lint + [config_pylint],
                                    exit=False).linter
                # These lines invoke Pycodestyle and print its output
                # to the target stdout.
                style_guide = pycodestyle.StyleGuide(
                    config_file=config_pycodestyle)
                pycodestyle_report = style_guide.check_files(
                    paths=current_files_to_lint)

            if pylinter.msg_status != 0 or pycodestyle_report.get_count() != 0:
                summary_message = stdout.getvalue()
                python_utils.PRINT(summary_message)
                summary_messages.append(summary_message)
                are_there_errors = True

            current_batch_start_index = current_batch_end_index

        if are_there_errors:
            summary_message = ('%s    Python linting failed' %
                               (_MESSAGE_TYPE_FAILED))
        else:
            summary_message = ('%s   %s Python files linted (%.1f secs)' %
                               (_MESSAGE_TYPE_SUCCESS, num_py_files,
                                time.time() - start_time))

        python_utils.PRINT(summary_message)
        summary_messages.append(summary_message)

        python_utils.PRINT('Python linting finished.')
        return summary_messages
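
The batching loop above is a general pattern for processing a long file list in fixed-size chunks. A minimal standalone sketch of the same slicing logic, with no Oppia helpers assumed:

def iter_batches(items, batch_size=50):
    """Yields successive slices of `items`, each at most `batch_size` long."""
    for start_index in range(0, len(items), batch_size):
        # As in the method above, the slice end is an exclusive upper bound.
        yield items[start_index:start_index + batch_size]

for batch in iter_batches(['a.py', 'b.py', 'c.py'], batch_size=2):
    print(batch)  # ['a.py', 'b.py'], then ['c.py']
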
Example No. 2
def managed_webdriver_server(chrome_version=None):
    """Returns context manager to start/stop the Webdriver server gracefully.

    This context manager updates Google Chrome before starting the server.

    Args:
        chrome_version: str|None. The version of Google Chrome to run the tests
            on. If None, then the currently-installed version of Google Chrome
            is used instead.

    Yields:
        psutil.Process. The Webdriver process.
    """
    if chrome_version is None:
        # Although there are spaces between Google and Chrome in the path, we
        # don't need to escape them for Popen (as opposed to on the terminal, in
        # which case we would need to escape them for the command to run).
        chrome_command = (
            '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
            if common.is_mac_os() else 'google-chrome')
        try:
            output = subprocess.check_output([chrome_command, '--version'])
        except OSError:
            # For the error message on macOS, we need to add the backslashes in.
            # This is because it is likely that a user will try to run the
            # command on their terminal and, as mentioned above, the macOS
            # chrome version command has spaces in the path which need to be
            # escaped for successful terminal use.
            raise Exception(
                'Failed to execute "%s --version" command. This is used to '
                'determine the chromedriver version to use. Please set the '
                'chromedriver version manually using --chrome_driver_version '
                'flag. To determine the chromedriver version to be used, '
                'please follow the instructions mentioned in the following '
                'URL:\n'
                'https://chromedriver.chromium.org/downloads/version-selection'
                % chrome_command.replace(' ', r'\ '))

        installed_version_parts = b''.join(re.findall(rb'[0-9.]', output))
        installed_version = '.'.join(
            installed_version_parts.decode('utf-8').split('.')[:-1])
        response = python_utils.url_open(
            'https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s' %
            (installed_version))
        chrome_version = response.read().decode('utf-8')

    python_utils.PRINT('\n\nCHROME VERSION: %s' % chrome_version)
    subprocess.check_call([
        common.NODE_BIN_PATH,
        common.WEBDRIVER_MANAGER_BIN_PATH,
        'update',
        '--versions.chrome',
        chrome_version,
    ])

    with python_utils.ExitStack() as exit_stack:
        if common.is_windows_os():
            # NOTE: webdriver-manager (version 13.0.0) uses `os.arch()` to
            # determine the architecture of the operating system, however, this
            # function can only be used to determine the architecture of the
            # machine that compiled `node`. In the case of Windows, we are using
            # the portable version, which was compiled on `ia32` machine so that
            # is the value returned by this `os.arch` function. Unfortunately,
            # webdriver-manager seems to assume that Windows wouldn't run on the
            # ia32 architecture, so the helper function it uses to determine
            # the download link returns null, which means that the application
            # has no idea where to download the correct version.
            #
            # https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/chromedriver.ts#L16
            # https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/geckodriver.ts#L21
            # https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/chromedriver.ts#L167
            # https://github.com/nodejs/node/issues/17036
            regex_pattern = re.escape('this.osArch = os.arch();')
            arch = 'x64' if common.is_x64_architecture() else 'x86'
            replacement_string = 'this.osArch = "%s";' % arch
            exit_stack.enter_context(
                common.inplace_replace_file_context(
                    common.CHROME_PROVIDER_FILE_PATH, regex_pattern,
                    replacement_string))
            exit_stack.enter_context(
                common.inplace_replace_file_context(
                    common.GECKO_PROVIDER_FILE_PATH, regex_pattern,
                    replacement_string))

        # OK to use shell=True here because we are passing string literals and
        # constants, so there is no risk of a shell-injection attack.
        proc = exit_stack.enter_context(
            managed_process([
                common.NODE_BIN_PATH,
                common.WEBDRIVER_MANAGER_BIN_PATH,
                'start',
                '--versions.chrome',
                chrome_version,
                '--quiet',
                '--standalone',
            ],
                            human_readable_name='Webdriver manager',
                            shell=True))

        common.wait_for_port_to_be_in_use(4444)

        yield proc
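
For reference, the start-and-stop-gracefully behavior that managed_process provides can be sketched with contextlib.contextmanager. This is a simplified stand-in that assumes the managed helper terminates the child on exit; it is not the actual managed_process implementation:

import contextlib
import subprocess

@contextlib.contextmanager
def managed_server(command):
    # Start the server process, hand it to the caller, and make sure it
    # is terminated even if the caller's block raises.
    proc = subprocess.Popen(command)
    try:
        yield proc
    finally:
        proc.terminate()
        proc.wait()

# Usage sketch (the command is illustrative only):
# with managed_server(['python', '-m', 'http.server', '8000']) as proc:
#     print('server pid: %d' % proc.pid)
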
Example No. 3
def main(personal_access_token):
    """Collects necessary info and dumps it to disk.

    Args:
        personal_access_token: str. The personal access token for the
            GitHub id of user.
    """
    if not common.is_current_branch_a_release_branch():
        raise Exception(
            'This script should only be run from the latest release branch.')
    g = github.Github(personal_access_token)
    repo = g.get_organization('oppia').get_repo('oppia')

    common.check_blocking_bug_issue_count(repo)
    common.check_prs_for_current_release_are_released(repo)

    current_release = get_current_version_tag(repo)
    current_release_tag = current_release.name
    base_commit = current_release.commit.sha
    new_commits = get_extra_commits_in_new_release(base_commit, repo)
    new_release_logs = gather_logs(base_commit)

    # Keep only the logs whose commits are part of the new release.
    # Rebuilding the list in one pass avoids deleting entries from a list
    # while enumerating it, which skips elements.
    new_release_logs = [
        log for log in new_release_logs
        if any(log.sha1 == commit.sha for commit in new_commits)]

    past_logs = gather_logs(FIRST_OPPIA_COMMIT, stop=base_commit)
    issue_links = extract_issues(new_release_logs)
    feconf_version_changes = check_versions(current_release_tag)
    setup_changes = check_setup_scripts(current_release_tag)
    storage_changes = check_storage_models(current_release_tag)

    pr_numbers = extract_pr_numbers(new_release_logs)
    prs = get_prs_from_pr_numbers(pr_numbers, repo)
    categorized_pr_titles = get_changelog_categories(prs)

    with python_utils.open_file(release_constants.RELEASE_SUMMARY_FILEPATH,
                                'w') as out:
        out.write('## Collected release information\n')

        if feconf_version_changes:
            out.write('\n### Feconf version changes:\nThis indicates that a '
                      'migration may be needed\n\n')
            for var in feconf_version_changes:
                out.write('* %s\n' % var)

        if setup_changes:
            out.write('\n### Changed setup scripts:\n')
            for var in setup_changes.keys():
                out.write('* %s\n' % var)

        if storage_changes:
            out.write('\n### Changed storage models:\n')
            for item in storage_changes:
                out.write('* %s\n' % item)

        past_authors = {log.email: log.author for log in past_logs}
        release_authors = {(log.author, log.email) for log in new_release_logs}

        new_authors = sorted(
            set([(name, email) for name, email in release_authors
                 if email not in past_authors]))
        existing_authors = sorted(
            set([(name, email) for name, email in release_authors
                 if email in past_authors]))
        new_author_names = [name for name, _ in new_authors]
        existing_author_names = [name for name, _ in existing_authors]

        # TODO(apb7): duplicate author handling due to email changes.
        out.write('\n### New Authors:\n')
        for name, email in new_authors:
            out.write('* %s <%s>\n' % (name, email))

        out.write('\n### Existing Authors:\n')
        for name, email in existing_authors:
            out.write('* %s <%s>\n' % (name, email))

        out.write('\n### New Contributors:\n')
        for name, email in new_authors:
            out.write('* %s <%s>\n' % (name, email))

        # Generate the author sections of the email.
        out.write('\n### Email C&P Blurbs about authors:\n')
        new_author_comma_list = (
            '%s, and %s' %
            (', '.join(new_author_names[:-1]), new_author_names[-1]))
        existing_author_comma_list = (
            '%s, and %s' %
            (', '.join(existing_author_names[:-1]), existing_author_names[-1]))
        out.write(
            '``Please welcome %s for whom this release marks their first '
            'contribution to Oppia!``\n\n' % new_author_comma_list)
        out.write(
            '``Thanks to %s, our returning contributors who made this release '
            'possible.``\n' % existing_author_comma_list)

        if personal_access_token:
            out.write('\n### Changelog:\n')
            for category in categorized_pr_titles:
                out.write('%s\n' % category)
                for pr_title in categorized_pr_titles[category]:
                    out.write('* %s\n' % pr_title)
                out.write('\n')

        out.write('\n### Commit History:\n')
        for name, title in [(log.author, log.message.split('\n\n')[0])
                            for log in new_release_logs]:
            out.write('* %s\n' % title)

        if issue_links:
            out.write('\n### Issues mentioned in commits:\n')
            for link in issue_links:
                out.write('* [%s](%s)\n' % (link, link))

    python_utils.PRINT('Done. Summary file generated in %s' %
                       (release_constants.RELEASE_SUMMARY_FILEPATH))
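
One caveat in the blurb generation above: when there is exactly one author, ', '.join(names[:-1]) is empty and the blurb reads ', and Alice'. A hedged helper that handles the short-list edge cases (hypothetical; not part of the script):

def to_comma_list(names):
    """Joins names as 'A', 'A and B', or 'A, B, and C'."""
    if not names:
        return ''
    if len(names) == 1:
        return names[0]
    if len(names) == 2:
        return '%s and %s' % (names[0], names[1])
    return '%s, and %s' % (', '.join(names[:-1]), names[-1])
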
Example No. 4
        THIRD_PARTY_PATH, skip_files_list)
    python_utils.PRINT('')
    python_utils.PRINT(
        '------------------------------------------------------')
    python_utils.PRINT('    Number of files in third-party folder: %d' %
                       (number_of_files_in_third_party))
    python_utils.PRINT('')
    if number_of_files_in_third_party > THIRD_PARTY_SIZE_LIMIT:
        python_utils.PRINT(
            '    ERROR: The third-party folder size exceeded the %d files'
            ' limit.' % THIRD_PARTY_SIZE_LIMIT)
        python_utils.PRINT(
            '------------------------------------------------------')
        python_utils.PRINT('')
        sys.exit(1)
    else:
        python_utils.PRINT(
            '    The size of third-party folder is within the limits.')
        python_utils.PRINT(
            '------------------------------------------------------')
        python_utils.PRINT('')
        python_utils.PRINT('Done!')
        python_utils.PRINT('')


if __name__ == '__main__':
    python_utils.PRINT('Running third-party size check')
    _check_third_party_size()
    python_utils.PRINT('Third-party folder size check passed.')
    python_utils.PRINT('')
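
The snippet begins mid-function, but the elided helper evidently counts the files under THIRD_PARTY_PATH while honoring a skip list. A minimal sketch of that idea, assuming the skip list holds bare file names (the real helper's signature is not shown above):

import os

def count_files(root_dir, skip_files_list):
    count = 0
    for _, _, filenames in os.walk(root_dir):
        # Count every file whose name is not in the skip list.
        count += sum(1 for name in filenames if name not in skip_files_list)
    return count
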
Example No. 5
def run_tests(args):
    """Run the scripts to start end-to-end tests."""
    oppia_instance_is_already_running = is_oppia_server_already_running()

    if oppia_instance_is_already_running:
        sys.exit(1)
    setup_and_install_dependencies(args.skip_install)

    common.start_redis_server()
    atexit.register(cleanup)

    dev_mode = not args.prod_env

    if args.skip_build:
        build.modify_constants(prod_env=args.prod_env)
    else:
        build_js_files(
            dev_mode, deparallelize_terser=args.deparallelize_terser,
            source_maps=args.source_maps)
    version = args.chrome_driver_version or get_chrome_driver_version()
    python_utils.PRINT('\n\nCHROMEDRIVER VERSION: %s\n\n' % version)
    start_webdriver_manager(version)

    # TODO(#11549): Move this to top of the file.
    import contextlib2
    managed_dev_appserver = common.managed_dev_appserver(
        'app.yaml' if args.prod_env else 'app_dev.yaml',
        port=GOOGLE_APP_ENGINE_PORT, log_level=args.server_log_level,
        clear_datastore=True, skip_sdk_update_check=True,
        env={'PORTSERVER_ADDRESS': PORTSERVER_SOCKET_FILEPATH})

    with contextlib2.ExitStack() as stack:
        stack.enter_context(common.managed_elasticsearch_dev_server())
        if constants.EMULATOR_MODE:
            stack.enter_context(common.managed_firebase_auth_emulator())
        stack.enter_context(managed_dev_appserver)

        python_utils.PRINT('Waiting for servers to come up...')

        # Wait for the servers to come up.
        common.wait_for_port_to_be_open(feconf.ES_LOCALHOST_PORT)
        common.wait_for_port_to_be_open(WEB_DRIVER_PORT)
        common.wait_for_port_to_be_open(GOOGLE_APP_ENGINE_PORT)
        python_utils.PRINT('Servers have come up.')
        python_utils.PRINT(
            'Note: If ADD_SCREENSHOT_REPORTER is set to true in '
            'core/tests/protractor.conf.js, you can view screenshots '
            'of the failed tests in ../protractor-screenshots/')

        commands = [common.NODE_BIN_PATH]
        if args.debug_mode:
            commands.append('--inspect-brk')
        # This flag ensures tests fail if waitFor calls time out.
        commands.append('--unhandled-rejections=strict')
        commands.append(PROTRACTOR_BIN_PATH)
        commands.extend(get_e2e_test_parameters(
            args.sharding_instances, args.suite, dev_mode))

        p = subprocess.Popen(commands, stdout=subprocess.PIPE)
        output_lines = []
        while True:
            nextline = p.stdout.readline()
            if len(nextline) == 0 and p.poll() is not None:
                break
            if isinstance(nextline, str):
                # This is a failsafe line in case we get non-unicode input,
                # but the tests provide all strings as unicode.
                nextline = nextline.decode('utf-8')  # pragma: nocover
            output_lines.append(nextline.rstrip())
            # Replaces non-ASCII characters with '?'.
            sys.stdout.write(nextline.encode('ascii', errors='replace'))

        return output_lines, p.returncode
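
The read loop above can also be written with the two-argument iter() sentinel form, which stops cleanly at EOF because readline() returns an empty bytes object only then:

import subprocess

p = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE)
output_lines = []
for raw_line in iter(p.stdout.readline, b''):
    output_lines.append(raw_line.rstrip().decode('utf-8', errors='replace'))
p.wait()
print(output_lines, p.returncode)
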
Example No. 6
def open_new_tab_in_browser_if_possible(url):
    """Opens the given URL in a new browser tab, if possible."""
    if USER_PREFERENCES['open_new_tab_in_browser'] is None:
        python_utils.PRINT(
            '\nDo you want the URL to be opened in the browser? '
            'Confirm by entering y/ye/yes.')
        USER_PREFERENCES['open_new_tab_in_browser'] = python_utils.INPUT()
    if USER_PREFERENCES['open_new_tab_in_browser'] not in ['y', 'ye', 'yes']:
        python_utils.PRINT('Please open the following link in browser: %s' %
                           url)
        return
    browser_cmds = ['chromium-browser', 'google-chrome', 'firefox']
    for cmd in browser_cmds:
        if subprocess.call(['which', cmd]) == 0:
            subprocess.check_call([cmd, url])
            return
    python_utils.PRINT(
        '******************************************************************')
    python_utils.PRINT(
        'WARNING: Unable to open browser. Please manually open the following')
    python_utils.PRINT('URL in a browser window, then press Enter to confirm.')
    python_utils.PRINT('')
    python_utils.PRINT('    %s' % url)
    python_utils.PRINT('')
    python_utils.PRINT(
        'NOTE: To get rid of this message, open scripts/common.py and fix')
    python_utils.PRINT(
        'the function open_new_tab_in_browser_if_possible() to work on your')
    python_utils.PRINT('system.')
    python_utils.INPUT()
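
The `which` probe above shells out once per candidate browser and is POSIX-only. On Python 3, shutil.which offers a portable equivalent; a sketch of the same fallback chain (not what the script itself uses):

import shutil
import subprocess

def open_url_in_browser(url):
    for cmd in ('chromium-browser', 'google-chrome', 'firefox'):
        # shutil.which returns the command's full path if it is on PATH.
        if shutil.which(cmd):
            subprocess.check_call([cmd, url])
            return
    print('Please open the following link in a browser: %s' % url)
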
Example No. 7
def main(args=None):
    """Main method for pre commit linter script that lints Python, JavaScript,
    HTML, and CSS files.
    """
    parsed_args = _PARSER.parse_args(args=args)
    # File extensions to be linted.
    file_extension_types = _get_file_extensions(
        parsed_args.only_check_file_extensions)
    # Verbose mode is disabled by default; passing the --verbose flag
    # enables it.
    verbose_mode_enabled = bool(parsed_args.verbose)
    all_filepaths = _get_all_filepaths(parsed_args.path, parsed_args.files)

    install_third_party_libs.main()

    python_utils.PRINT('Starting Linter...')

    if len(all_filepaths) == 0:
        python_utils.PRINT('---------------------------')
        python_utils.PRINT('No files to check.')
        python_utils.PRINT('---------------------------')
        return

    read_files(all_filepaths)
    categorize_files(all_filepaths)

    # Prepare custom tasks.
    custom_max_concurrent_runs = 25
    custom_concurrent_count = min(
        multiprocessing.cpu_count(), custom_max_concurrent_runs)
    custom_semaphore = threading.Semaphore(custom_concurrent_count)

    # Prepare third_party tasks.
    third_party_max_concurrent_runs = 2
    third_party_concurrent_count = min(
        multiprocessing.cpu_count(), third_party_max_concurrent_runs)
    third_party_semaphore = threading.Semaphore(third_party_concurrent_count)

    custom_linters = []
    third_party_linters = []
    for file_extension_type in file_extension_types:
        custom_linter, third_party_linter = _get_linters_for_file_extension(
            file_extension_type, verbose_mode_enabled=verbose_mode_enabled)
        custom_linters += custom_linter
        third_party_linters += third_party_linter

    # Create tasks.
    tasks_custom = []
    tasks_third_party = []

    for linter in custom_linters:
        task_custom = concurrent_task_utils.create_task(
            linter.perform_all_lint_checks, verbose_mode_enabled,
            custom_semaphore, name='custom')
        tasks_custom.append(task_custom)

    for linter in third_party_linters:
        task_third_party = concurrent_task_utils.create_task(
            linter.perform_all_lint_checks, verbose_mode_enabled,
            third_party_semaphore, name='third_party')
        tasks_third_party.append(task_third_party)

    # Execute tasks.
    # The concurrency limit for custom tasks is set to 25 because we want
    # them to parallelize across the full capacity of the CPU.
    # The concurrency limit for third-party tasks is set to 2 because these
    # third-party linters manage their own parallelism and already run at
    # their fastest (i.e. they might parallelize on their own).

    # Concurrency limit: 25.
    concurrent_task_utils.execute_tasks(tasks_custom, custom_semaphore)

    # Concurrency limit: 2.
    concurrent_task_utils.execute_tasks(
        tasks_third_party, third_party_semaphore)

    lint_messages = []

    # Prepare semaphore for locking mechanism.
    semaphore = threading.Semaphore(1)

    for task in tasks_custom:
        semaphore.acquire()
        _get_task_output(lint_messages, task, semaphore)

    for task in tasks_third_party:
        semaphore.acquire()
        _get_task_output(lint_messages, task, semaphore)

    lint_messages += codeowner_linter.check_codeowner_file(
        FILE_CACHE, verbose_mode_enabled)

    lint_messages += (
        third_party_typings_linter.check_third_party_libs_type_defs(
            verbose_mode_enabled))

    lint_messages += app_dev_linter.check_skip_files_in_app_dev_yaml(
        FILE_CACHE, verbose_mode_enabled)

    lint_messages += webpack_config_linter.check_webpack_config_file(
        FILE_CACHE, verbose_mode_enabled)

    errors_stacktrace = concurrent_task_utils.ALL_ERRORS
    if errors_stacktrace:
        _print_errors_stacktrace(errors_stacktrace)

    if any([
            message.startswith(linter_utils.FAILED_MESSAGE_PREFIX) for
            message in lint_messages]) or errors_stacktrace:
        _print_summary_of_error_messages(lint_messages)
        python_utils.PRINT('---------------------------')
        python_utils.PRINT('Checks Not Passed.')
        python_utils.PRINT('---------------------------')
        sys.exit(1)
    else:
        python_utils.PRINT('---------------------------')
        python_utils.PRINT('All Checks Passed.')
        python_utils.PRINT('---------------------------')
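
The semaphore throttling used for the lint tasks follows a standard pattern: each worker acquires a slot before running and releases it when done, so at most N tasks run concurrently. A minimal sketch with plain threading:

import threading

semaphore = threading.Semaphore(2)  # At most 2 workers run at once.

def worker(task_id):
    with semaphore:  # Blocks until one of the 2 slots frees up.
        print('running task %d' % task_id)

threads = [threading.Thread(target=worker, args=(i,)) for i in range(5)]
for thread in threads:
    thread.start()
for thread in threads:
    thread.join()
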
Example No. 8
def main(args=None):
    """Starts up a development server running Oppia."""
    parsed_args = _PARSER.parse_args(args=args)

    # Runs cleanup function on exit.
    atexit.register(cleanup)

    # Check that there isn't a server already running.
    if common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):
        common.print_each_string_after_two_new_lines([
            'WARNING',
            'Could not start new server. There is already an existing server',
            'running at port %s.' %
            python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)
        ])

    build_args = ['--prod_env'] if parsed_args.prod_env else []
    if parsed_args.maintenance_mode:
        build_args.append('--maintenance_mode')
    if parsed_args.source_maps:
        build_args.append('--source_maps')
    build.main(args=build_args)
    app_yaml_filepath = 'app.yaml' if parsed_args.prod_env else 'app_dev.yaml'

    # Set up a local dev instance.
    # TODO(sll): Do this in a new shell.
    # To turn emailing on, add the option '--enable_sendmail=yes' and change the
    # relevant settings in feconf.py. Be careful with this -- you do not want to
    # spam people accidentally.
    background_processes = []
    if not parsed_args.prod_env:
        # In prod mode webpack is launched through scripts/build.py
        python_utils.PRINT('Compiling webpack...')
        webpack_config_file = (build.WEBPACK_DEV_SOURCE_MAPS_CONFIG
                               if parsed_args.source_maps else
                               build.WEBPACK_DEV_CONFIG)
        background_processes.append(
            subprocess.Popen([
                common.NODE_BIN_PATH,
                os.path.join(common.NODE_MODULES_PATH, 'webpack', 'bin',
                             'webpack.js'), '--config', webpack_config_file,
                '--watch'
            ]))

        # Give webpack a few seconds to do the initial compilation.
        time.sleep(10)

    common.start_redis_server()

    # TODO(#11549): Move this to top of the file.
    import contextlib2
    managed_dev_appserver = common.managed_dev_appserver(
        app_yaml_filepath,
        clear_datastore=not parsed_args.save_datastore,
        enable_console=parsed_args.enable_console,
        enable_host_checking=not parsed_args.disable_host_checking,
        automatic_restart=not parsed_args.no_auto_restart,
        skip_sdk_update_check=True,
        port=PORT_NUMBER_FOR_GAE_SERVER)

    with contextlib2.ExitStack() as stack:
        python_utils.PRINT('Starting ElasticSearch development server.')
        stack.enter_context(common.managed_elasticsearch_dev_server())
        if constants.EMULATOR_MODE:
            python_utils.PRINT('Starting Firebase emulators')
            stack.enter_context(common.managed_firebase_auth_emulator())
        python_utils.PRINT('Starting GAE development server')
        stack.enter_context(managed_dev_appserver)

        # Wait for the servers to come up.
        common.wait_for_port_to_be_open(PORT_NUMBER_FOR_GAE_SERVER)
        common.wait_for_port_to_be_open(feconf.ES_LOCALHOST_PORT)

        # Launch a browser window.
        if common.is_linux_os() and not parsed_args.no_browser:
            detect_virtualbox_pattern = re.compile('.*VBOX.*')
            if list(
                    filter(detect_virtualbox_pattern.match,
                           os.listdir('/dev/disk/by-id/'))):
                common.print_each_string_after_two_new_lines([
                    'INFORMATION',
                    'Setting up a local development server. You can access '
                    'this server',
                    'by navigating to localhost:%s in a browser window.' %
                    python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)
                ])
            else:
                common.print_each_string_after_two_new_lines([
                    'INFORMATION',
                    'Setting up a local development server at localhost:%s. ' %
                    python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
                    'Opening a default browser window pointing to this server'
                ])
                time.sleep(5)
                background_processes.append(
                    subprocess.Popen([
                        'xdg-open',
                        'http://localhost:%s/' %
                        python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)
                    ]))
        elif common.is_mac_os() and not parsed_args.no_browser:
            common.print_each_string_after_two_new_lines([
                'INFORMATION',
                'Setting up a local development server at localhost:%s. ' %
                python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
                'Opening a default browser window pointing to this server.'
            ])
            time.sleep(5)
            background_processes.append(
                subprocess.Popen([
                    'open',
                    'http://localhost:%s/' %
                    python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)
                ]))
        else:
            common.print_each_string_after_two_new_lines([
                'INFORMATION',
                'Setting up a local development server. You can access this ',
                'server by navigating to localhost:%s in a browser window.' %
                python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)
            ])

        python_utils.PRINT('Done!')

        for process in background_processes:
            process.wait()
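
wait_for_port_to_be_open presumably polls until something is listening on the port. A hedged sketch of that idea with the standard socket module (the real helper's timeout and error handling may differ):

import socket
import time

def wait_for_port(port, host='localhost', timeout_secs=60):
    deadline = time.time() + timeout_secs
    while time.time() < deadline:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            # connect_ex returns 0 once a server accepts the connection.
            if sock.connect_ex((host, port)) == 0:
                return
        time.sleep(1)
    raise Exception('Port %s did not open within %s secs.' % (port, timeout_secs))
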
Example No. 9
def main():
    """Install third-party libraries for Oppia."""
    setup.main(args=[])
    setup_gae.main(args=[])
    # These system python libraries are REQUIRED to start the development server
    # and cannot be added to oppia_tools because the dev_appserver python script
    # looks for them in the default system paths when it is run. Therefore, we
    # must install these libraries to the developer's computer.
    system_pip_dependencies = [('enum34', common.ENUM_VERSION),
                               ('protobuf', common.PROTOBUF_VERSION)]
    local_pip_dependencies = [
        ('coverage', common.COVERAGE_VERSION, common.OPPIA_TOOLS_DIR),
        ('pylint', common.PYLINT_VERSION, common.OPPIA_TOOLS_DIR),
        ('Pillow', common.PILLOW_VERSION, common.OPPIA_TOOLS_DIR),
        ('pylint-quotes', common.PYLINT_QUOTES_VERSION,
         common.OPPIA_TOOLS_DIR),
        ('webtest', common.WEBTEST_VERSION, common.OPPIA_TOOLS_DIR),
        ('isort', common.ISORT_VERSION, common.OPPIA_TOOLS_DIR),
        ('pycodestyle', common.PYCODESTYLE_VERSION, common.OPPIA_TOOLS_DIR),
        ('esprima', common.ESPRIMA_VERSION, common.OPPIA_TOOLS_DIR),
        ('PyGithub', common.PYGITHUB_VERSION, common.OPPIA_TOOLS_DIR),
        ('protobuf', common.PROTOBUF_VERSION, common.OPPIA_TOOLS_DIR),
        ('psutil', common.PSUTIL_VERSION, common.OPPIA_TOOLS_DIR),
        ('pip-tools', common.PIP_TOOLS_VERSION, common.OPPIA_TOOLS_DIR),
        ('setuptools', common.SETUPTOOLS_VERSION, common.OPPIA_TOOLS_DIR),
    ]

    for package, version, path in local_pip_dependencies:
        ensure_pip_library_is_installed(package, version, path)

    for package, version in system_pip_dependencies:
        ensure_system_python_libraries_are_installed(package, version)
    # Do a little surgery on configparser in pylint-1.9.4 to remove dependency
    # on ConverterMapping, which is not implemented in some Python
    # distributions.
    pylint_newlines = []
    with python_utils.open_file(PYLINT_CONFIGPARSER_FILEPATH, 'r') as f:
        for line in f.readlines():
            if line.strip() == 'ConverterMapping,':
                continue
            if line.strip().endswith('"ConverterMapping",'):
                pylint_newlines.append(line[:line.find('"ConverterMapping"')] +
                                       '\n')
            else:
                pylint_newlines.append(line)
    with python_utils.open_file(PYLINT_CONFIGPARSER_FILEPATH, 'w+') as f:
        f.writelines(pylint_newlines)

    # Do similar surgery on configparser in pylint-quotes-0.1.8 to remove
    # dependency on ConverterMapping.
    pq_newlines = []
    with python_utils.open_file(PQ_CONFIGPARSER_FILEPATH, 'r') as f:
        for line in f.readlines():
            if line.strip() == 'ConverterMapping,':
                continue
            if line.strip() == '"ConverterMapping",':
                continue
            pq_newlines.append(line)
    with python_utils.open_file(PQ_CONFIGPARSER_FILEPATH, 'w+') as f:
        f.writelines(pq_newlines)

    # Download and install required JS and zip files.
    python_utils.PRINT('Installing third-party JS libraries and zip files.')
    install_third_party.main(args=[])

    # The following steps solve the problem of multiple google paths confusing
    # the python interpreter. Namely, there are two modules named google/, one
    # that is installed with google cloud libraries and another that comes with
    # the Google Cloud SDK. Python cannot import from both paths simultaneously
    # so we must combine the two modules into one. We solve this by copying the
    # Google Cloud SDK libraries that we need into the correct google
    # module directory in the 'third_party/python_libs' directory.
    python_utils.PRINT(
        'Copying Google Cloud SDK modules to third_party/python_libs...')
    correct_google_path = os.path.join(common.THIRD_PARTY_PYTHON_LIBS_DIR,
                                       'google')
    if not os.path.isdir(correct_google_path):
        os.mkdir(correct_google_path)

    if not os.path.isdir(os.path.join(correct_google_path, 'appengine')):
        shutil.copytree(
            os.path.join(common.GOOGLE_APP_ENGINE_SDK_HOME, 'google',
                         'appengine'),
            os.path.join(correct_google_path, 'appengine'))

    if not os.path.isdir(os.path.join(correct_google_path, 'net')):
        shutil.copytree(
            os.path.join(common.GOOGLE_APP_ENGINE_SDK_HOME, 'google', 'net'),
            os.path.join(correct_google_path, 'net'))

    if not os.path.isdir(os.path.join(correct_google_path, 'pyglib')):
        shutil.copytree(
            os.path.join(common.GOOGLE_APP_ENGINE_SDK_HOME,
                         'google', 'pyglib'),
            os.path.join(correct_google_path, 'pyglib'))

    # The following for loop populates all of the google modules with
    # the correct __init__.py files if they do not exist. This solves the bug
    # mentioned below where namespace packages sometimes install modules without
    # __init__.py files (python requires modules to have __init__.py files
    # in order to recognize them as modules and import them):
    # https://github.com/googleapis/python-ndb/issues/518
    python_utils.PRINT(
        'Checking that all google library modules contain __init__.py files...'
    )
    for path_list in os.walk(correct_google_path):
        root_path = path_list[0]
        if not root_path.endswith('__pycache__'):
            with python_utils.open_file(os.path.join(root_path, '__init__.py'),
                                        'a'):
                # If the file doesn't exist, it is created. If it does exist,
                # this open does nothing.
                pass

    # Compile protobuf files.
    python_utils.PRINT('Installing buf and protoc binary.')
    install_buf_and_protoc()
    python_utils.PRINT('Compiling protobuf files.')
    compile_protobuf_files(PROTO_FILES_PATHS)

    if common.is_windows_os():
        tweak_yarn_executable()

    # Install third-party node modules needed for the build process.
    subprocess.check_call([get_yarn_command(), 'install', '--pure-lockfile'])

    # Install pre-commit script.
    python_utils.PRINT('Installing pre-commit hook for git')
    pre_commit_hook.main(args=['--install'])

    # TODO(#8112): Once pre_commit_linter is working correctly, this
    # condition should be removed.
    if not common.is_windows_os():
        # Install pre-push script.
        python_utils.PRINT('Installing pre-push hook for git')
        pre_push_hook.main(args=['--install'])
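
The __init__.py loop above relies on a small idiom: opening a file in append mode creates it when it is absent and leaves existing contents untouched, so no explicit existence check or write is needed. In isolation:

import os

def ensure_init_py(package_dir):
    init_path = os.path.join(package_dir, '__init__.py')
    # Mode 'a' creates the file if missing and never truncates it.
    with open(init_path, 'a'):
        pass
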
Example No. 10
def execute_branch_cut(target_version, hotfix_number):
    """Creates & pushes the new release branch to Github.

    Args:
        target_version: str. The release version.
        hotfix_number: int. The number for the hotfix branch.

    Raises:
        Exception: If Travis tests are failing on the branch from which
            the new branch is cut.
    """

    # Construct the new branch name.
    if not hotfix_number:
        new_branch_type, new_branch_name = _get_release_branch_type_and_name(
            target_version)
    else:
        new_branch_type, new_branch_name = _get_hotfix_branch_type_and_name(
            target_version, hotfix_number)

    # Do prerequisite checks.
    common.require_cwd_to_be_oppia()
    common.verify_local_repo_is_clean()
    common.verify_current_branch_name('develop')

    # Update the local repo.
    remote_alias = common.get_remote_alias(release_constants.REMOTE_URL)
    subprocess.check_call(['git', 'pull', remote_alias, 'develop'])

    verify_target_branch_does_not_already_exist(remote_alias, new_branch_name)

    if not hotfix_number:
        branch_to_check = 'develop'
    elif hotfix_number == 1:
        branch_to_check = 'release-%s' % target_version
    else:
        branch_to_check = 'release-%s-hotfix-%s' % (target_version,
                                                    hotfix_number - 1)
    # The release coordinator should verify that tests are passing on
    # the parent branch before checking out the new branch.
    common.open_new_tab_in_browser_if_possible(
        'https://travis-ci.com/oppia/oppia/branches')
    python_utils.PRINT(
        'Please confirm: are Travis checks passing on %s? (y/n) ' %
        (branch_to_check))
    answer = python_utils.INPUT().lower()
    if answer not in release_constants.AFFIRMATIVE_CONFIRMATIONS:
        raise Exception('Tests should pass on %s before this script is run.' %
                        (branch_to_check))

    # Cut a new release or hotfix branch.
    if new_branch_type == release_constants.BRANCH_TYPE_HOTFIX:
        verify_hotfix_number_is_one_ahead_of_previous_hotfix_number(
            remote_alias, target_version, hotfix_number)
        if hotfix_number == 1:
            branch_to_cut_from = 'release-%s' % target_version
        else:
            branch_to_cut_from = 'release-%s-hotfix-%s' % (target_version,
                                                           hotfix_number - 1)
        python_utils.PRINT('Cutting a new hotfix branch: %s' % new_branch_name)
        subprocess.check_call(
            ['git', 'checkout', '-b', new_branch_name, branch_to_cut_from])
    else:
        verify_target_version_compatible_with_latest_release(target_version)
        python_utils.PRINT('Cutting a new release branch: %s' %
                           new_branch_name)
        subprocess.check_call(['git', 'checkout', '-b', new_branch_name])

    # Push the new release branch to GitHub.
    python_utils.PRINT('Pushing new %s branch to GitHub.' % new_branch_type)
    subprocess.check_call(['git', 'push', remote_alias, new_branch_name])

    python_utils.PRINT('')
    python_utils.PRINT(
        'New %s branch successfully cut. You are now on branch %s' %
        (new_branch_type, new_branch_name))
    python_utils.PRINT('Done!')

    common.ask_user_to_confirm(
        'Ask Sean (or Ben, if Sean isn\'t available) to create '
        'a new branch protection rule by:\n'
        '1. Going to this page: https://github.com/oppia/oppia/'
        'settings/branch_protection_rules/new.\n'
        '2. Typing in the full branch name %s.\n'
        '3. Checking the box: Restrict who can push to matching '
        'branches (then add the oppia/release-coordinators team)\n' %
        (new_branch_name))
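
The confirmation gate before cutting the branch boils down to a prompt checked against a list of affirmative answers. A minimal sketch, assuming AFFIRMATIVE_CONFIRMATIONS is a list like the one below:

AFFIRMATIVE_CONFIRMATIONS = ['y', 'ye', 'yes']

def require_confirmation(prompt):
    answer = input('%s (y/n) ' % prompt).strip().lower()
    if answer not in AFFIRMATIVE_CONFIRMATIONS:
        raise Exception('Aborted by user.')

# require_confirmation('Are Travis checks passing on develop?')
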
Example No. 11
def main(args=None):
    """Run the tests."""
    parsed_args = _PARSER.parse_args(args=args)

    # Make sure that third-party libraries are up-to-date before running tests,
    # otherwise import errors may result.
    install_third_party_libs.main()

    for directory in DIRS_TO_ADD_TO_SYS_PATH:
        if not os.path.exists(os.path.dirname(directory)):
            raise Exception('Directory %s does not exist.' % directory)

        # The directories should only be inserted starting at index 1. See
        # https://stackoverflow.com/a/10095099 and
        # https://stackoverflow.com/q/10095037 for more details.
        sys.path.insert(1, directory)

    import dev_appserver
    dev_appserver.fix_sys_path()

    if parsed_args.generate_coverage_report:
        python_utils.PRINT('Checking whether coverage is installed in %s' %
                           common.OPPIA_TOOLS_DIR)
        if not os.path.exists(
                os.path.join(common.OPPIA_TOOLS_DIR,
                             'coverage-%s' % common.COVERAGE_VERSION)):
            raise Exception(
                'Coverage is not installed, please run the start script.')

        pythonpath_components = [COVERAGE_DIR]
        if os.environ.get('PYTHONPATH'):
            pythonpath_components.append(os.environ.get('PYTHONPATH'))

        os.environ['PYTHONPATH'] = os.pathsep.join(pythonpath_components)

    if parsed_args.test_target and parsed_args.test_path:
        raise Exception(
            'At most one of test_path and test_target should be specified.')
    if parsed_args.test_path and '.' in parsed_args.test_path:
        raise Exception('The delimiter in test_path should be a slash (/)')
    if parsed_args.test_target and '/' in parsed_args.test_target:
        raise Exception('The delimiter in test_target should be a dot (.)')

    if parsed_args.test_target:
        if '_test' in parsed_args.test_target:
            all_test_targets = [parsed_args.test_target]
        else:
            python_utils.PRINT('')
            python_utils.PRINT(
                '---------------------------------------------------------')
            python_utils.PRINT(
                'WARNING : test_target flag should point to the test file.')
            python_utils.PRINT(
                '---------------------------------------------------------')
            python_utils.PRINT('')
            time.sleep(3)
            python_utils.PRINT('Redirecting to its corresponding test file...')
            all_test_targets = [parsed_args.test_target + '_test']
    else:
        include_load_tests = not parsed_args.exclude_load_tests
        all_test_targets = _get_all_test_targets(
            test_path=parsed_args.test_path,
            include_load_tests=include_load_tests)

    # Prepare tasks.
    max_concurrent_runs = 25
    concurrent_count = min(multiprocessing.cpu_count(), max_concurrent_runs)
    semaphore = threading.Semaphore(concurrent_count)

    task_to_taskspec = {}
    tasks = []
    for test_target in all_test_targets:
        test = TestingTaskSpec(test_target,
                               parsed_args.generate_coverage_report)
        task = concurrent_task_utils.create_task(test.run,
                                                 parsed_args.verbose,
                                                 semaphore,
                                                 name=test_target)
        task_to_taskspec[task] = test
        tasks.append(task)

    task_execution_failed = False
    try:
        concurrent_task_utils.execute_tasks(tasks, semaphore)
    except Exception:
        task_execution_failed = True

    for task in tasks:
        if task.exception:
            concurrent_task_utils.log(
                python_utils.convert_to_bytes(task.exception.args[0]))

    python_utils.PRINT('')
    python_utils.PRINT('+------------------+')
    python_utils.PRINT('| SUMMARY OF TESTS |')
    python_utils.PRINT('+------------------+')
    python_utils.PRINT('')

    # Check we ran all tests as expected.
    total_count = 0
    total_errors = 0
    total_failures = 0
    for task in tasks:
        spec = task_to_taskspec[task]

        if not task.finished:
            python_utils.PRINT('CANCELED  %s' % spec.test_target)
            test_count = 0
        elif (task.exception and 'No tests were run'
              in python_utils.convert_to_bytes(task.exception.args[0])):
            python_utils.PRINT('ERROR     %s: No tests found.' %
                               spec.test_target)
            test_count = 0
        elif task.exception:
            exc_str = python_utils.convert_to_bytes(task.exception.args[0])
            python_utils.PRINT(exc_str[exc_str.find('='):exc_str.rfind('-')])

            tests_failed_regex_match = re.search(
                r'Test suite failed: ([0-9]+) tests run, ([0-9]+) errors, '
                '([0-9]+) failures',
                python_utils.convert_to_bytes(task.exception.args[0]))

            try:
                test_count = int(tests_failed_regex_match.group(1))
                errors = int(tests_failed_regex_match.group(2))
                failures = int(tests_failed_regex_match.group(3))
                total_errors += errors
                total_failures += failures
                python_utils.PRINT('FAILED    %s: %s errors, %s failures' %
                                   (spec.test_target, errors, failures))
            except AttributeError:
                # There was an internal error, and the tests did not run (The
                # error message did not match `tests_failed_regex_match`).
                test_count = 0
                total_errors += 1
                python_utils.PRINT('')
                python_utils.PRINT(
                    '------------------------------------------------------')
                python_utils.PRINT('    WARNING: FAILED TO RUN %s' %
                                   spec.test_target)
                python_utils.PRINT('')
                python_utils.PRINT(
                    '    This is most likely due to an import error.')
                python_utils.PRINT(
                    '------------------------------------------------------')
        else:
            try:
                tests_run_regex_match = re.search(
                    r'Ran ([0-9]+) tests? in ([0-9\.]+)s', task.output)
                test_count = int(tests_run_regex_match.group(1))
                test_time = float(tests_run_regex_match.group(2))
                python_utils.PRINT('SUCCESS   %s: %d tests (%.1f secs)' %
                                   (spec.test_target, test_count, test_time))
            except Exception:
                python_utils.PRINT('An unexpected error occurred. '
                                   'Task output:\n%s' % task.output)

        total_count += test_count

    python_utils.PRINT('')
    if total_count == 0:
        raise Exception('WARNING: No tests were run.')

    python_utils.PRINT('Ran %s test%s in %s test class%s.' %
                       (total_count, '' if total_count == 1 else 's',
                        len(tasks), '' if len(tasks) == 1 else 'es'))

    if total_errors or total_failures:
        python_utils.PRINT('(%s ERRORS, %s FAILURES)' %
                           (total_errors, total_failures))
    else:
        python_utils.PRINT('All tests passed.')

    if task_execution_failed:
        raise Exception('Task execution failed.')
    elif total_errors or total_failures:
        raise Exception('%s errors, %s failures' %
                        (total_errors, total_failures))

    if parsed_args.generate_coverage_report:
        subprocess.check_call(
            [sys.executable, COVERAGE_MODULE_PATH, 'combine'])
        process = subprocess.Popen([
            sys.executable, COVERAGE_MODULE_PATH, 'report',
            '--omit="%s*","third_party/*","/usr/share/*"' %
            common.OPPIA_TOOLS_DIR, '--show-missing'
        ],
                                   stdout=subprocess.PIPE)

        report_stdout, _ = process.communicate()
        python_utils.PRINT(report_stdout)

        coverage_result = re.search(
            r'TOTAL\s+(\d+)\s+(\d+)\s+(?P<total>\d+)%\s+', report_stdout)
        if coverage_result.group('total') != '100':
            raise Exception('Backend test coverage is not 100%')

    python_utils.PRINT('')
    python_utils.PRINT('Done!')
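
The success path above depends on unittest's standard summary line ('Ran N tests in X.XXXs'). The same parsing in isolation:

import re

task_output = 'Ran 12 tests in 3.456s\n\nOK'
match = re.search(r'Ran ([0-9]+) tests? in ([0-9\.]+)s', task_output)
if match:
    test_count = int(match.group(1))
    test_time = float(match.group(2))
    print('%d tests (%.1f secs)' % (test_count, test_time))
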
Example No. 12
    def _check_bad_patterns(self):
        """This function is used for detecting bad patterns."""
        if self.verbose_mode_enabled:
            python_utils.PRINT('Starting Pattern Checks')
            python_utils.PRINT('----------------------------------------')
        total_files_checked = 0
        total_error_count = 0
        summary_messages = []
        all_filepaths = [
            filepath for filepath in self.all_filepaths
            if not (filepath.endswith('general_purpose_linter.py'))
        ]
        failed = False
        stdout = sys.stdout
        with linter_utils.redirect_stdout(stdout):
            for filepath in all_filepaths:
                file_content = FILE_CACHE.read(filepath)
                total_files_checked += 1
                for pattern in BAD_PATTERNS:
                    if (pattern in file_content and
                            not is_filepath_excluded_for_bad_patterns_check(
                                pattern, filepath)):
                        failed = True
                        python_utils.PRINT(
                            '%s --> %s' %
                            (filepath, BAD_PATTERNS[pattern]['message']))
                        python_utils.PRINT('')
                        total_error_count += 1

                for regexp in BAD_PATTERNS_REGEXP:
                    if check_bad_pattern_in_file(filepath, file_content,
                                                 regexp):
                        failed = True
                        total_error_count += 1

                temp_failed, temp_count = check_file_type_specific_bad_pattern(
                    filepath, file_content)
                failed = failed or temp_failed
                total_error_count += temp_count

                if filepath == 'constants.ts':
                    for pattern in REQUIRED_STRINGS_CONSTANTS:
                        if pattern not in file_content:
                            failed = True
                            python_utils.PRINT(
                                '%s --> %s' %
                                (filepath, REQUIRED_STRINGS_CONSTANTS[pattern]
                                 ['message']))
                            python_utils.PRINT('')
                            total_error_count += 1
            if failed:
                summary_message = ('%s Pattern check failed, see errors above '
                                   'for patterns that should be removed.' %
                                   (_MESSAGE_TYPE_FAILED))
                summary_messages.append(summary_message)
            else:
                summary_message = '%s Pattern checks passed' % (
                    _MESSAGE_TYPE_SUCCESS)
                summary_messages.append(summary_message)

            python_utils.PRINT('')
            if total_files_checked == 0:
                python_utils.PRINT('There are no files to be checked.')
            else:
                python_utils.PRINT('(%s files checked, %s errors found)' %
                                   (total_files_checked, total_error_count))
                python_utils.PRINT(summary_message)
        return summary_messages
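
Stripped of the reporting plumbing, the core of the check above is a dictionary of forbidden substrings scanned against each file's contents. A minimal sketch with an illustrative pattern (the real BAD_PATTERNS entries are defined elsewhere in the linter):

BAD_PATTERNS = {
    '\t': {'message': 'Please use spaces instead of tabs.'},
}

def find_bad_patterns(filepath, file_content):
    errors = []
    for pattern, details in BAD_PATTERNS.items():
        if pattern in file_content:
            errors.append('%s --> %s' % (filepath, details['message']))
    return errors
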
Example No. 13
def run_tests(args):
    """Run the scripts to start end-to-end tests."""
    if is_oppia_server_already_running():
        sys.exit(1)

    install_third_party_libraries(args.skip_install)

    with python_utils.ExitStack() as stack:
        dev_mode = not args.prod_env

        if args.skip_build:
            build.modify_constants(prod_env=args.prod_env)
        else:
            build_js_files(dev_mode,
                           deparallelize_terser=args.deparallelize_terser,
                           source_maps=args.source_maps)
        stack.callback(build.set_constants_to_default)

        stack.enter_context(servers.managed_redis_server())
        stack.enter_context(servers.managed_elasticsearch_dev_server())
        if constants.EMULATOR_MODE:
            stack.enter_context(servers.managed_firebase_auth_emulator())
            stack.enter_context(
                servers.managed_cloud_datastore_emulator(clear_datastore=True))

        app_yaml_path = 'app.yaml' if args.prod_env else 'app_dev.yaml'
        stack.enter_context(
            servers.managed_dev_appserver(
                app_yaml_path,
                port=GOOGLE_APP_ENGINE_PORT,
                log_level=args.server_log_level,
                # Automatic restart can be disabled since we don't expect code
                # changes to happen while the e2e tests are running.
                automatic_restart=False,
                skip_sdk_update_check=True,
                env={
                    **os.environ,
                    'PORTSERVER_ADDRESS':
                    common.PORTSERVER_SOCKET_FILEPATH,
                }))

        stack.enter_context(
            servers.managed_webdriver_server(
                chrome_version=args.chrome_driver_version))

        proc = stack.enter_context(
            servers.managed_protractor_server(
                suite_name=args.suite,
                dev_mode=dev_mode,
                debug_mode=args.debug_mode,
                sharding_instances=args.sharding_instances,
                stdout=subprocess.PIPE))

        python_utils.PRINT(
            'Servers have come up.\n'
            'Note: If ADD_SCREENSHOT_REPORTER is set to true in '
            'core/tests/protractor.conf.js, you can view screenshots of the '
            'failed tests in ../protractor-screenshots/')

        output_lines = []
        while True:
            # Keep reading lines until an empty string is returned. Empty
            # strings signal that the process has ended.
            for line in iter(proc.stdout.readline, b''):
                if isinstance(line, str):
                    # Although our unit tests always provide unicode strings,
                    # the actual server needs this failsafe since it can output
                    # non-unicode strings.
                    line = line.encode('utf-8')  # pragma: nocover
                output_lines.append(line.rstrip())
                # Replaces non-ASCII characters with '?'.
                common.write_stdout_safe(line.decode('ascii',
                                                     errors='replace'))
            # The poll() method returns None while the process is running,
            # otherwise it returns the return code of the process (an int).
            if proc.poll() is not None:
                break

        return output_lines, proc.returncode
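
The loop's exit condition rests on Popen.poll(): it returns None while the child is still running and the exit code once the child has terminated. In isolation:

import subprocess
import time

proc = subprocess.Popen(['sleep', '1'])
while proc.poll() is None:
    # The child is still alive; returncode is not yet available.
    time.sleep(0.1)
print('exit code: %d' % proc.returncode)
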
Example No. 14
    def _lint_py_files_for_python3_compatibility(self):
        """Prints a list of Python 3 compatibility errors in the given list of
        Python files.

        Returns:
            summary_messages: list(str). Summary of lint check.
        """
        files_to_lint = self.all_filepaths
        start_time = time.time()
        any_errors = False
        stdout = python_utils.string_io()
        summary_messages = []

        files_to_lint_for_python3_compatibility = [
            file_name for file_name in files_to_lint
            if not re.match(r'^.*python_utils.*\.py$', file_name)
        ]
        num_py_files = len(files_to_lint_for_python3_compatibility)
        if not files_to_lint_for_python3_compatibility:
            python_utils.PRINT('')
            python_utils.PRINT(
                'There are no Python files to lint for Python 3 compatibility.'
            )
            return []

        python_utils.PRINT(
            'Linting %s Python files for Python 3 compatibility.' %
            (num_py_files))

        _batch_size = 50
        current_batch_start_index = 0

        while current_batch_start_index < len(
                files_to_lint_for_python3_compatibility):
            # Note that this index is an exclusive upper bound -- i.e.,
            # the current batch of files ranges from 'start_index' to
            # 'end_index - 1'.
            current_batch_end_index = min(
                current_batch_start_index + _batch_size,
                len(files_to_lint_for_python3_compatibility))
            current_files_to_lint = files_to_lint_for_python3_compatibility[
                current_batch_start_index:current_batch_end_index]
            if self.verbose_mode_enabled:
                python_utils.PRINT(
                    'Linting Python files for Python 3 compatibility %s to %s...'
                    % (current_batch_start_index + 1, current_batch_end_index))

            with linter_utils.redirect_stdout(stdout):
                # This line invokes Pylint and prints its output
                # to the target stdout.
                python_utils.PRINT('Messages for Python 3 support:')
                pylinter_for_python3 = lint.Run(current_files_to_lint +
                                                ['--py3k'],
                                                exit=False).linter

            if pylinter_for_python3.msg_status != 0:
                summary_message = stdout.getvalue()
                python_utils.PRINT(summary_message)
                summary_messages.append(summary_message)
                any_errors = True

            current_batch_start_index = current_batch_end_index

        if any_errors:
            summary_message = (
                '%s    Python linting for Python 3 compatibility failed' %
                _MESSAGE_TYPE_FAILED)
        else:
            summary_message = (
                '%s   %s Python files linted for Python 3 compatibility '
                '(%.1f secs)' % (_MESSAGE_TYPE_SUCCESS, num_py_files,
                                 (time.time() - start_time)))

        python_utils.PRINT(summary_message)
        summary_messages.append(summary_message)

        python_utils.PRINT(
            'Python linting for Python 3 compatibility finished.')
        return summary_messages
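
The batching logic above (an exclusive end index clamped with min) generalizes to a small helper. This is an illustrative sketch, not part of the linter:

def batch(items, batch_size=50):
    """Yields consecutive slices of at most batch_size items."""
    start = 0
    while start < len(items):
        # The end index is an exclusive upper bound, clamped to len(items),
        # so the final batch may be smaller than batch_size.
        end = min(start + batch_size, len(items))
        yield items[start:end]
        start = end

# Example: for some_files in batch(all_files): lint(some_files)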
Example No. 15
    def _lint_html_files(self):
        """This function is used to check HTML files for linting errors."""
        node_path = os.path.join(common.NODE_PATH, 'bin', 'node')
        htmllint_path = os.path.join('node_modules', 'htmllint-cli', 'bin',
                                     'cli.js')

        error_summary = []
        total_error_count = 0
        summary_messages = []
        stdout = sys.stdout
        htmllint_cmd_args = [node_path, htmllint_path, '--rc=.htmllintrc']
        html_files_to_lint = self.html_filepaths
        if self.verbose_mode_enabled:
            python_utils.PRINT('Starting HTML linter...')
            python_utils.PRINT('----------------------------------------')
        python_utils.PRINT('')
        if not self.verbose_mode_enabled:
            python_utils.PRINT('Linting HTML files.')
        for filepath in html_files_to_lint:
            proc_args = htmllint_cmd_args + [filepath]
            if self.verbose_mode_enabled:
                python_utils.PRINT('Linting %s file' % filepath)
            with linter_utils.redirect_stdout(stdout):
                proc = subprocess.Popen(proc_args,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)

                encoded_linter_stdout, _ = proc.communicate()
                linter_stdout = encoded_linter_stdout.decode(encoding='utf-8')
                # Split the linter output into tokens and collect the integer
                # tokens; the second-to-last integer is the number of errors
                # in the file.
                error_count = ([
                    int(s) for s in linter_stdout.split() if s.isdigit()
                ][-2])
                if error_count:
                    error_summary.append(error_count)
                    python_utils.PRINT(linter_stdout)
                    summary_messages.append(
                        self._get_trimmed_error_output(linter_stdout))

        with linter_utils.redirect_stdout(stdout):
            if self.verbose_mode_enabled:
                python_utils.PRINT('----------------------------------------')
            for error_count in error_summary:
                total_error_count += error_count
            total_files_checked = len(html_files_to_lint)
            if total_error_count:
                python_utils.PRINT('(%s files checked, %s errors found)' %
                                   (total_files_checked, total_error_count))
                summary_message = (
                    '%s HTML linting failed, fix the HTML files listed above'
                    '.' % linter_utils.FAILED_MESSAGE_PREFIX)
                summary_messages.append(summary_message)
            else:
                summary_message = ('%s HTML linting passed' %
                                   (linter_utils.SUCCESS_MESSAGE_PREFIX))
                summary_messages.append(summary_message)

            python_utils.PRINT('')
            python_utils.PRINT(summary_message)
            python_utils.PRINT('HTML linting finished.')
            python_utils.PRINT('')

        return summary_messages
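
The integer-extraction trick above depends on the shape of the htmllint-cli report, so it is worth isolating and documenting. A hedged sketch (the exact trailer format is an assumption):

def extract_error_count(linter_stdout):
    """Returns the second-to-last integer token in the linter output.

    Assumes the htmllint-cli report ends with a summary containing the
    error count as its second-to-last number; an IndexError here would
    mean the report format has changed.
    """
    numbers = [int(s) for s in linter_stdout.split() if s.isdigit()]
    return numbers[-2]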
Example No. 16
def main():
    """Install third-party libraries for Oppia."""
    setup.main(args=[])
    setup_gae.main(args=[])
    pip_dependencies = [
        ('coverage', common.COVERAGE_VERSION, common.OPPIA_TOOLS_DIR),
        ('pylint', common.PYLINT_VERSION, common.OPPIA_TOOLS_DIR),
        ('Pillow', common.PILLOW_VERSION, common.OPPIA_TOOLS_DIR),
        ('pylint-quotes', common.PYLINT_QUOTES_VERSION, common.OPPIA_TOOLS_DIR),
        ('webtest', common.WEBTEST_VERSION, common.OPPIA_TOOLS_DIR),
        ('isort', common.ISORT_VERSION, common.OPPIA_TOOLS_DIR),
        ('pycodestyle', common.PYCODESTYLE_VERSION, common.OPPIA_TOOLS_DIR),
        ('esprima', common.ESPRIMA_VERSION, common.OPPIA_TOOLS_DIR),
        ('PyGithub', common.PYGITHUB_VERSION, common.OPPIA_TOOLS_DIR),
        ('psutil', common.PSUTIL_VERSION, common.OPPIA_TOOLS_DIR),
        ('pip-tools', common.PIP_TOOLS_VERSION, common.OPPIA_TOOLS_DIR)
    ]

    for package, version, path in pip_dependencies:
        ensure_pip_library_is_installed(package, version, path)

    # Do a little surgery on configparser in pylint-1.9.4 to remove dependency
    # on ConverterMapping, which is not implemented in some Python
    # distributions.
    pylint_newlines = []
    with python_utils.open_file(PYLINT_CONFIGPARSER_FILEPATH, 'r') as f:
        for line in f.readlines():
            if line.strip() == 'ConverterMapping,':
                continue
            if line.strip().endswith('"ConverterMapping",'):
                pylint_newlines.append(
                    line[:line.find('"ConverterMapping"')] + '\n')
            else:
                pylint_newlines.append(line)
    with python_utils.open_file(PYLINT_CONFIGPARSER_FILEPATH, 'w+') as f:
        f.writelines(pylint_newlines)

    # Do similar surgery on configparser in pylint-quotes-0.1.8 to remove
    # dependency on ConverterMapping.
    pq_newlines = []
    with python_utils.open_file(PQ_CONFIGPARSER_FILEPATH, 'r') as f:
        for line in f.readlines():
            if line.strip() == 'ConverterMapping,':
                continue
            if line.strip() == '"ConverterMapping",':
                continue
            pq_newlines.append(line)
    with python_utils.open_file(PQ_CONFIGPARSER_FILEPATH, 'w+') as f:
        f.writelines(pq_newlines)

    # Download and install required JS and zip files.
    python_utils.PRINT('Installing third-party JS libraries and zip files.')
    install_third_party.main(args=[])

    if common.is_windows_os():
        tweak_yarn_executable()

    # Install third-party node modules needed for the build process.
    subprocess.check_call([get_yarn_command(), 'install', '--pure-lockfile'])

    # Install pre-commit script.
    python_utils.PRINT('Installing pre-commit hook for git')
    pre_commit_hook.main(args=['--install'])

    # TODO(#8112): Once pre_commit_linter is working correctly, this
    # condition should be removed.
    if not common.is_windows_os():
        # Install pre-push script.
        python_utils.PRINT('Installing pre-push hook for git')
        pre_push_hook.main(args=['--install'])
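
Both configparser "surgery" passes above follow the same read-filter-rewrite shape. A simplified standalone sketch (unlike the pylint pass, it drops whole matching lines rather than trimming them):

def strip_marker_lines(filepath, marker='ConverterMapping'):
    """Rewrites filepath in place, dropping import lines for marker."""
    with open(filepath, 'r') as f:
        lines = f.readlines()
    kept_lines = []
    for line in lines:
        stripped = line.strip()
        # Drop both the bare and the quoted forms of the import entry.
        if stripped in ('%s,' % marker, '"%s",' % marker):
            continue
        kept_lines.append(line)
    with open(filepath, 'w') as f:
        f.writelines(kept_lines)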
Example No. 17
    def handle_starttag(self, tag, attrs):
        """Handles the start tag of an HTML line.

        Args:
            tag: str. Start tag of an HTML line.
            attrs: list(str). List of attributes in the start tag.
        """
        line_number, column_number = self.getpos()
        # Check the indentation of the tag.
        expected_indentation = self.indentation_level * self.indentation_width
        tag_line = self.file_lines[line_number - 1].lstrip()
        opening_tag = '<' + tag

        # Check the indentation for content of style tag.
        if tag_line.startswith(opening_tag) and tag == 'style':
            # Get the line immediately following the style tag.
            next_line = self.file_lines[line_number]
            next_line_expected_indentation = (self.indentation_level +
                                              1) * self.indentation_width
            next_line_column_number = len(next_line) - len(next_line.lstrip())

            if next_line_column_number != next_line_expected_indentation:
                summary_message = (
                    '%s --> Expected indentation '
                    'of %s, found indentation of %s '
                    'for content of %s tag on line %s ' %
                    (self.filepath, next_line_expected_indentation,
                     next_line_column_number, tag, line_number + 1))
                self.summary_messages.append(summary_message)
                python_utils.PRINT(summary_message)
                python_utils.PRINT('')
                self.failed = True

        if tag_line.startswith(opening_tag) and (column_number !=
                                                 expected_indentation):
            summary_message = ('%s --> Expected indentation '
                               'of %s, found indentation of %s '
                               'for %s tag on line %s ' %
                               (self.filepath, expected_indentation,
                                column_number, tag, line_number))
            self.summary_messages.append(summary_message)
            python_utils.PRINT(summary_message)
            python_utils.PRINT('')
            self.failed = True

        if tag not in self.void_elements:
            self.tag_stack.append((tag, line_number, column_number))
            self.indentation_level += 1

        if self.debug:
            python_utils.PRINT('DEBUG MODE: Start tag_stack')
            python_utils.PRINT(self.tag_stack)

        # Check the indentation of the attributes of the tag.
        indentation_of_first_attribute = (column_number + len(tag) + 2)
        starttag_text = self.get_starttag_text()

        # Check whether the values of all attributes are placed
        # in double quotes.
        for attr, value in attrs:
            # Not all attributes will have a value.
            # Therefore the check should run only for those
            # attributes which have a value.
            if value:
                # &quot; is rendered as a double quote by the parser.
                if '&quot;' in starttag_text:
                    expected_value = value
                    rendered_text = starttag_text.replace('&quot;', '"')
                else:
                    expected_value = '"' + value + '"'
                    rendered_text = starttag_text

                if expected_value not in rendered_text:
                    self.failed = True
                    summary_message = (
                        '%s --> The value %s of attribute '
                        '%s for the tag %s on line %s should '
                        'be enclosed within double quotes.' %
                        (self.filepath, value, attr, tag, line_number))
                    self.summary_messages.append(summary_message)
                    python_utils.PRINT(summary_message)
                    python_utils.PRINT('')

        for line_num, line in enumerate(starttag_text.splitlines()):
            if line_num == 0:
                continue

            leading_spaces_count = len(line) - len(line.lstrip())
            list_of_attrs = []

            for attr, _ in attrs:
                list_of_attrs.append(attr)

            if not line.lstrip().startswith(tuple(list_of_attrs)):
                continue
            if indentation_of_first_attribute != leading_spaces_count:
                line_num_of_error = line_number + line_num
                summary_message = (
                    '%s --> Attribute for tag %s on line '
                    '%s should align with the leftmost '
                    'attribute on line %s ' %
                    (self.filepath, tag, line_num_of_error, line_number))
                self.summary_messages.append(summary_message)
                python_utils.PRINT(summary_message)
                python_utils.PRINT('')
                self.failed = True
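
A compact sketch of the underlying technique: subclass the standard-library HTMLParser and use getpos() to compare a tag's column against an expected indentation. The rule checked here is deliberately simplified:

from html.parser import HTMLParser

class IndentationChecker(HTMLParser):
    """Flags start tags whose column is not a multiple of the width."""

    def __init__(self, indentation_width=2):
        super().__init__()
        self.indentation_width = indentation_width
        self.errors = []

    def handle_starttag(self, tag, attrs):
        # getpos() returns a 1-indexed line number and a 0-indexed column.
        line_number, column_number = self.getpos()
        if column_number % self.indentation_width != 0:
            self.errors.append(
                'Line %d: <%s> indented by %d, expected a multiple of %d'
                % (line_number, tag, column_number, self.indentation_width))

# Example:
# checker = IndentationChecker()
# checker.feed('<div>\n   <p>hi</p>\n</div>')
# print(checker.errors)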
Example No. 18
def install_skulpt(parsed_args):
    """Downloads and installs Skulpt. Skulpt is built using a Python script
    included in the Skulpt repository (skulpt.py). That script normally
    requires GitPython, but the in-place patches applied below (via
    fileinput) remove that requirement. The script is used to avoid having
    to manually recreate the Skulpt dist build process in
    install_third_party.py. Note that skulpt.py will issue a warning saying
    that its dist command will not work properly without GitPython, but it
    does actually work because of the patches.
    """
    no_skulpt = parsed_args.nojsrepl or parsed_args.noskulpt

    python_utils.PRINT('Checking whether Skulpt is installed in third_party')
    if not os.path.exists(
            os.path.join(common.THIRD_PARTY_DIR,
                         'static/skulpt-0.10.0')) and not no_skulpt:
        if not os.path.exists(
                os.path.join(common.OPPIA_TOOLS_DIR, 'skulpt-0.10.0')):
            python_utils.PRINT('Downloading Skulpt')
            skulpt_filepath = os.path.join(common.OPPIA_TOOLS_DIR,
                                           'skulpt-0.10.0', 'skulpt',
                                           'skulpt.py')
            os.chdir(common.OPPIA_TOOLS_DIR)
            os.mkdir('skulpt-0.10.0')
            os.chdir('skulpt-0.10.0')
            subprocess.check_call(
                ['git', 'clone', 'https://github.com/skulpt/skulpt'])
            os.chdir('skulpt')

            # Use a specific Skulpt release.
            subprocess.check_call(['git', 'checkout', '0.10.0'])

            python_utils.PRINT('Compiling Skulpt')
            # The Skulpt setup function needs to be tweaked. It fails without
            # certain third party commands. These are only used for unit tests
            # and generating documentation and are not necessary when building
            # Skulpt.
            for line in fileinput.input(files=[skulpt_filepath], inplace=True):
                # Inside this loop the STDOUT will be redirected to the file,
                # skulpt.py. The end='' is needed to avoid double line breaks.
                python_utils.PRINT(line.replace('ret = test()', 'ret = 0'),
                                   end='')

            for line in fileinput.input(files=[skulpt_filepath], inplace=True):
                # Inside this loop the STDOUT will be redirected to the file,
                # skulpt.py. The end='' is needed to avoid double line breaks.
                python_utils.PRINT(line.replace('  doc()', '  pass#doc()'),
                                   end='')

            for line in fileinput.input(files=[skulpt_filepath], inplace=True):
                # This and the next command disable unit and compressed unit
                # tests for the compressed distribution of Skulpt. These
                # tests don't work on some Ubuntu environments and cause a
                # libreadline dependency issue.
                python_utils.PRINT(line.replace('ret = os.system(\'{0}',
                                                'ret = 0 #os.system(\'{0}'),
                                   end='')

            for line in fileinput.input(files=[skulpt_filepath], inplace=True):
                python_utils.PRINT(line.replace('ret = rununits(opt=True)',
                                                'ret = 0'),
                                   end='')

            # NB: subprocess.check_call cannot be used here because the
            # commands above make the git tree for skulpt dirty.
            subprocess.call([sys.executable, skulpt_filepath, 'dist'])

            # Return to the Oppia root folder.
            os.chdir(common.CURR_DIR)

        # Copy the built dist directory to the static resources folder.
        shutil.copytree(
            os.path.join(common.OPPIA_TOOLS_DIR, 'skulpt-0.10.0/skulpt/dist/'),
            os.path.join(common.THIRD_PARTY_DIR, 'static/skulpt-0.10.0'))
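
The repeated fileinput loops above all perform the same in-place search-and-replace; a generic sketch of that pattern:

import fileinput

def patch_file_in_place(filepath, old, new):
    """Replaces old with new on every line of filepath, in place."""
    for line in fileinput.input(files=[filepath], inplace=True):
        # With inplace=True, anything printed inside this loop is
        # redirected into the file; end='' avoids doubled newlines
        # because each line already ends with one.
        print(line.replace(old, new), end='')

# Example: patch_file_in_place('skulpt.py', 'ret = test()', 'ret = 0')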
Example No. 19
    def _check_bad_patterns(self):
        """Detects bad patterns in the files being linted."""
        if self.verbose_mode_enabled:
            python_utils.PRINT('Starting Pattern Checks')
            python_utils.PRINT('----------------------------------------')
        total_files_checked = 0
        total_error_count = 0
        summary_messages = []
        all_filepaths = [
            filepath for filepath in self.all_filepaths
            if not (filepath.endswith('general_purpose_linter.py') or
                    filepath.endswith('general_purpose_linter_test.py'))
        ]
        failed = False
        stdout = sys.stdout
        with linter_utils.redirect_stdout(stdout):
            for filepath in all_filepaths:
                file_content = self.file_cache.readlines(filepath)
                total_files_checked += 1
                for pattern in BAD_PATTERNS:
                    if is_filepath_excluded_for_bad_patterns_check(
                            pattern, filepath):
                        continue
                    for line_num, line in enumerate(file_content):
                        if pattern in line:
                            failed = True
                            summary_message = (
                                '%s --> Line %s: %s' %
                                (filepath, line_num + 1,
                                 BAD_PATTERNS[pattern]['message']))
                            summary_messages.append(summary_message)
                            python_utils.PRINT(summary_message)
                            python_utils.PRINT('')
                            total_error_count += 1

                for regexp in BAD_PATTERNS_REGEXP:
                    bad_pattern_check_failed, bad_pattern_summary_messages = (
                        check_bad_pattern_in_file(filepath, file_content,
                                                  regexp))
                    if bad_pattern_check_failed:
                        summary_messages.extend(bad_pattern_summary_messages)
                        total_error_count += 1

                (file_type_specific_bad_pattern_failed, temp_count,
                 bad_pattern_summary_messages) = (
                     check_file_type_specific_bad_pattern(
                         filepath, file_content))
                failed = (failed or file_type_specific_bad_pattern_failed
                          or bad_pattern_check_failed)
                total_error_count += temp_count
                summary_messages.extend(bad_pattern_summary_messages)

                if filepath == 'constants.ts':
                    for pattern in REQUIRED_STRINGS_CONSTANTS:
                        if pattern not in file_content:
                            failed = True
                            summary_message = (
                                '%s --> %s' %
                                (filepath, REQUIRED_STRINGS_CONSTANTS[pattern]
                                 ['message']))
                            python_utils.PRINT(summary_message)
                            summary_messages.append(summary_message)
                            python_utils.PRINT('')
                            total_error_count += 1
            if failed:
                summary_message = ('%s Pattern check failed, see errors above '
                                   'for patterns that should be removed.' %
                                   (linter_utils.FAILED_MESSAGE_PREFIX))
                summary_messages.append(summary_message)
            else:
                summary_message = '%s Pattern checks passed' % (
                    linter_utils.SUCCESS_MESSAGE_PREFIX)
                summary_messages.append(summary_message)

            python_utils.PRINT('')
            if total_files_checked == 0:
                python_utils.PRINT('There are no files to be checked.')
            else:
                python_utils.PRINT('(%s files checked, %s errors found)' %
                                   (total_files_checked, total_error_count))
                python_utils.PRINT(summary_message)
        return summary_messages
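
A distilled sketch of the plain-substring pattern check above; the pattern table here is illustrative, not Oppia's actual BAD_PATTERNS:

EXAMPLE_BAD_PATTERNS = {
    '\t': 'Please use spaces instead of tabs.',
    ' pdb.set_trace()': 'Please remove debugging statements.',
}

def find_bad_patterns(filepath, file_lines, patterns=None):
    """Returns '<file> --> Line <n>: <message>' entries for each match."""
    patterns = patterns if patterns is not None else EXAMPLE_BAD_PATTERNS
    messages = []
    for line_num, line in enumerate(file_lines):
        for pattern, message in patterns.items():
            if pattern in line:
                # Line numbers are reported 1-indexed, as in the linter.
                messages.append(
                    '%s --> Line %s: %s' % (filepath, line_num + 1, message))
    return messages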
Example No. 20
def main(args=None):
    """Install third-party libraries for Oppia."""
    parsed_args = _PARSER.parse_args(args=args)

    setup.main(args=[])
    setup_gae.main(args=[])
    pip_dependencies = [
        ('coverage', common.COVERAGE_VERSION, common.OPPIA_TOOLS_DIR),
        ('pylint', '1.9.4', common.OPPIA_TOOLS_DIR),
        ('Pillow', '6.0.0', common.OPPIA_TOOLS_DIR),
        ('pylint-quotes', '0.1.8', common.OPPIA_TOOLS_DIR),
        ('webtest', '2.0.33', common.OPPIA_TOOLS_DIR),
        ('isort', '4.3.20', common.OPPIA_TOOLS_DIR),
        ('pycodestyle', '2.5.0', common.OPPIA_TOOLS_DIR),
        ('esprima', '4.0.1', common.OPPIA_TOOLS_DIR),
        ('browsermob-proxy', '0.8.0', common.OPPIA_TOOLS_DIR),
        ('selenium', '3.13.0', common.OPPIA_TOOLS_DIR),
        ('PyGithub', '1.43.7', common.OPPIA_TOOLS_DIR),
        ('pygsheets', '2.0.2', common.OPPIA_TOOLS_DIR),
    ]

    for package, version, path in pip_dependencies:
        ensure_pip_library_is_installed(package, version, path)

    # Do a little surgery on configparser in pylint-1.9.4 to remove dependency
    # on ConverterMapping, which is not implemented in some Python
    # distributions.
    pylint_newlines = []
    with python_utils.open_file(PYLINT_CONFIGPARSER_FILEPATH, 'r') as f:
        for line in f.readlines():
            if line.strip() == 'ConverterMapping,':
                continue
            if line.strip().endswith('"ConverterMapping",'):
                pylint_newlines.append(line[:line.find('"ConverterMapping"')] +
                                       '\n')
            else:
                pylint_newlines.append(line)
    with python_utils.open_file(PYLINT_CONFIGPARSER_FILEPATH, 'w+') as f:
        f.writelines(pylint_newlines)

    # Do similar surgery on configparser in pylint-quotes-0.1.8 to remove
    # dependency on ConverterMapping.
    pq_newlines = []
    with python_utils.open_file(PQ_CONFIGPARSER_FILEPATH, 'r') as f:
        for line in f.readlines():
            if line.strip() == 'ConverterMapping,':
                continue
            if line.strip() == '"ConverterMapping",':
                continue
            pq_newlines.append(line)
    with python_utils.open_file(PQ_CONFIGPARSER_FILEPATH, 'w+') as f:
        f.writelines(pq_newlines)

    # Download and install required JS and zip files.
    python_utils.PRINT('Installing third-party JS libraries and zip files.')
    install_third_party.main(args=[])

    if common.is_windows_os():
        tweak_yarn_executable()

    # Install third-party node modules needed for the build process.
    subprocess.check_call([get_yarn_command()])

    install_skulpt(parsed_args)

    # Install pre-commit script.
    python_utils.PRINT('Installing pre-commit hook for git')
    pre_commit_hook.main(args=['--install'])

    # TODO(#8112): Once pre_commit_linter is working correctly, this
    # condition should be removed.
    if not common.is_windows_os():
        # Install pre-push script.
        python_utils.PRINT('Installing pre-push hook for git')
        pre_push_hook.main(args=['--install'])
Example No. 21
def main(args=None):
    """Starts up a development server running Oppia."""
    parsed_args = _PARSER.parse_args(args=args)

    # Runs cleanup function on exit.
    atexit.register(cleanup)

    # Check that there isn't a server already running.
    if common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):
        common.print_each_string_after_two_new_lines([
            'WARNING',
            'Could not start new server. There is already an existing server',
            'running at port %s.'
            % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])

    clear_datastore_arg = (
        '' if parsed_args.save_datastore else '--clear_datastore=true')
    enable_console_arg = (
        '--enable_console=true' if parsed_args.enable_console else '')
    no_auto_restart = (
        '--automatic_restart=no' if parsed_args.no_auto_restart else '')

    if parsed_args.prod_env:
        constants_env_variable = '"DEV_MODE": false'
        for line in fileinput.input(
                files=[os.path.join('assets', 'constants.ts')], inplace=True):
            # Inside this loop the STDOUT will be redirected to the file,
            # constants.ts. The end='' is needed to avoid double line breaks.
            python_utils.PRINT(
                re.sub(
                    r'"DEV_MODE": .*', constants_env_variable, line), end='')
        build.main(args=['--prod_env', '--enable_watcher'])
        app_yaml_filepath = 'app.yaml'
    else:
        constants_env_variable = '"DEV_MODE": true'
        for line in fileinput.input(
                files=[os.path.join('assets', 'constants.ts')], inplace=True):
            # Inside this loop the STDOUT will be redirected to the file,
            # constants.ts. The end='' is needed to avoid double line breaks.
            python_utils.PRINT(
                re.sub(
                    r'"DEV_MODE": .*', constants_env_variable, line), end='')
        build.main(args=['--enable_watcher'])
        app_yaml_filepath = 'app_dev.yaml'

    # Set up a local dev instance.
    # TODO(sll): do this in a new shell.
    # To turn emailing on, add the option '--enable_sendmail=yes' and change the
    # relevant settings in feconf.py. Be careful with this -- you do not want to
    # spam people accidentally.
    background_processes = []
    if not parsed_args.prod_env:
        if common.OS_NAME == 'Windows':
            node_bin_path = [common.NODE_PATH, 'node']
        else:
            node_bin_path = [common.NODE_PATH, 'bin', 'node']
        background_processes.append(subprocess.Popen([
            os.path.join(*node_bin_path),
            os.path.join(common.NODE_MODULES_PATH, 'gulp', 'bin', 'gulp.js'),
            'watch']))

        # In prod mode, webpack is launched through scripts/build.py.
        python_utils.PRINT('Compiling webpack...')
        background_processes.append(subprocess.Popen([
            'node',
            os.path.join(
                common.NODE_MODULES_PATH, 'webpack', 'bin', 'webpack.js'),
            '--config', 'webpack.dev.config.ts', '--watch']))
        # Give webpack a few seconds to do the initial compilation.
        time.sleep(10)

    python_utils.PRINT('Starting GAE development server')
    background_processes.append(subprocess.Popen(
        'python %s/dev_appserver.py %s %s %s --admin_host 0.0.0.0 --admin_port '
        '8000 --host 0.0.0.0 --port %s --skip_sdk_update_check true %s' % (
            common.GOOGLE_APP_ENGINE_HOME, clear_datastore_arg,
            enable_console_arg, no_auto_restart,
            python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
            app_yaml_filepath), shell=True))

    # Wait for the servers to come up.
    while not common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):
        time.sleep(1)

    # Launch a browser window.
    if common.OS_NAME == 'Linux' and not parsed_args.no_browser:
        detect_virtualbox_pattern = re.compile('.*VBOX.*')
        if list(filter(
                detect_virtualbox_pattern.match,
                os.listdir('/dev/disk/by-id/'))):
            common.print_each_string_after_two_new_lines([
                'INFORMATION',
                'Setting up a local development server. You can access this '
                'server',
                'by navigating to localhost:%s in a browser window.'
                % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])
        else:
            common.print_each_string_after_two_new_lines([
                'INFORMATION',
                'Setting up a local development server at localhost:%s. '
                % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
                'Opening a default browser window pointing to this server'])
            time.sleep(5)
            background_processes.append(
                subprocess.Popen([
                    'xdg-open', 'http://localhost:%s/'
                    % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)]))
    elif common.OS_NAME == 'Darwin' and not parsed_args.no_browser:
        common.print_each_string_after_two_new_lines([
            'INFORMATION',
            'Setting up a local development server at localhost:%s. '
            % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
            'Opening a default browser window pointing to this server.'])
        time.sleep(5)
        background_processes.append(
            subprocess.Popen([
                'open', 'http://localhost:%s/'
                % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)]))
    else:
        common.print_each_string_after_two_new_lines([
            'INFORMATION',
            'Setting up a local development server. You can access this server',
            'by navigating to localhost:%s in a browser window.'
            % python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])

    python_utils.PRINT('Done!')

    for process in background_processes:
        process.wait()
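
common.is_port_open is an Oppia-specific helper; an equivalent polling loop can be written with the standard library alone. A sketch, assuming a TCP server on localhost:

import socket
import time

def wait_for_port(port, hostname='localhost', interval_secs=1):
    """Blocks until a server starts accepting connections on the port."""
    while True:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            # connect_ex() returns 0 once something is listening.
            if sock.connect_ex((hostname, port)) == 0:
                return
        time.sleep(interval_secs)

# Example: wait_for_port(8181)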
Example No. 22
def check_coverage_changes():
    """Checks if the denylist for not fully covered files needs to be changed
    by:
    - File renaming
    - File deletion

    Raises:
        Exception. LCOV_FILE_PATH doesn't exist.
    """
    if not os.path.exists(LCOV_FILE_PATH):
        raise Exception('Expected lcov file to be available at {}, but the'
                        ' file does not exist.'.format(LCOV_FILE_PATH))

    stanzas = get_stanzas_from_lcov_file()
    remaining_denylisted_files = list(NOT_FULLY_COVERED_FILENAMES)
    errors = ''

    for stanza in stanzas:
        file_name = stanza.file_name
        total_lines = stanza.total_lines
        covered_lines = stanza.covered_lines
        if any(
                fnmatch.fnmatch(stanza.file_path, pattern)
                for pattern in EXCLUDED_DIRECTORIES):
            continue
        if file_name not in remaining_denylisted_files:
            if total_lines != covered_lines:
                errors += (
                    '\033[1m{}\033[0m seems to be not completely tested.'
                    ' Make sure it\'s fully covered.\n'.format(file_name))
        else:
            if total_lines == covered_lines:
                errors += ('\033[1m{}\033[0m seems to be fully covered!'
                           ' Before removing it manually from the denylist'
                           ' in the file'
                           ' scripts/check_frontend_test_coverage.py, please'
                           ' make sure you\'ve followed the unit tests rules'
                           ' correctly on:'
                           ' https://github.com/oppia/oppia/wiki/Frontend'
                           '-unit-tests-guide#rules\n'.format(file_name))

            remaining_denylisted_files.remove(file_name)

    if remaining_denylisted_files:
        for test_name in remaining_denylisted_files:
            errors += ('\033[1m{}\033[0m is in the frontend test coverage'
                       ' denylist but it doesn\'t exist anymore. If you have'
                       ' renamed it, please make sure to remove the old file'
                       ' name and add the new file name in the denylist in'
                       ' the file scripts/check_frontend_test_coverage.py.\n'.
                       format(test_name))

    if errors:
        python_utils.PRINT('------------------------------------')
        python_utils.PRINT('Frontend Coverage Checks Not Passed.')
        python_utils.PRINT('------------------------------------')
        sys.exit(errors)
    else:
        python_utils.PRINT('------------------------------------')
        python_utils.PRINT('All Frontend Coverage Checks Passed.')
        python_utils.PRINT('------------------------------------')

    check_not_fully_covered_filenames_list_is_sorted()
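
get_stanzas_from_lcov_file is defined elsewhere; a sketch of what such a parser might look like, assuming the common lcov record keys (SF for the source file, LF for lines found, LH for lines hit):

import collections
import os

Stanza = collections.namedtuple(
    'Stanza', ['file_name', 'file_path', 'total_lines', 'covered_lines'])

def parse_lcov(lcov_text):
    """Parses SF/LF/LH records from an lcov trace into Stanza tuples."""
    stanzas = []
    path, total, covered = None, 0, 0
    for line in lcov_text.splitlines():
        if line.startswith('SF:'):
            path = line[len('SF:'):]
        elif line.startswith('LF:'):
            total = int(line[len('LF:'):])
        elif line.startswith('LH:'):
            covered = int(line[len('LH:'):])
        elif line == 'end_of_record' and path is not None:
            stanzas.append(
                Stanza(os.path.basename(path), path, total, covered))
            path, total, covered = None, 0, 0
    return stanzas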
Example No. 23
def _check_third_party_size():
    """Checks if the third-party size limit has been exceeded."""
    skip_files_list = _get_skip_files_list()
    number_of_files_in_third_party = _check_size_in_dir(
        THIRD_PARTY_PATH, skip_files_list)
    python_utils.PRINT('')
    python_utils.PRINT(
        '------------------------------------------------------')
    python_utils.PRINT('    Number of files in third-party folder: %d' %
                       (number_of_files_in_third_party))
    python_utils.PRINT('')
    if number_of_files_in_third_party > THIRD_PARTY_SIZE_LIMIT:
        python_utils.PRINT(
            '    ERROR: The third-party folder size exceeded the %d files'
            ' limit.' % THIRD_PARTY_SIZE_LIMIT)
        python_utils.PRINT(
            '------------------------------------------------------')
        python_utils.PRINT('')
        sys.exit(1)
    else:
        python_utils.PRINT(
            '    The size of third-party folder is within the limits.')
        python_utils.PRINT(
            '------------------------------------------------------')
        python_utils.PRINT('')
        python_utils.PRINT('Done!')
        python_utils.PRINT('')
Example No. 24
def check_travis_and_circleci_tests(current_branch_name):
    """Checks if all travis and circleci tests are passing on release/test
    branch.

    Args:
        current_branch_name: str. The name of current branch.

    Raises:
        Exception. The latest commit on release/test branch locally does not
            match the latest commit on local fork or upstream.
        Exception. The travis or circleci tests are failing on release/test
            branch.
    """
    local_sha = subprocess.check_output(
        ['git', 'rev-parse', current_branch_name])
    origin_sha = subprocess.check_output(
        ['git', 'rev-parse',
         'origin/%s' % current_branch_name])
    upstream_sha = subprocess.check_output([
        'git', 'rev-parse',
        '%s/%s' % (common.get_remote_alias(
            release_constants.REMOTE_URL), current_branch_name)
    ])
    if local_sha != origin_sha:
        raise Exception('The latest commit on release branch locally does '
                        'not match the latest commit on your local fork.')
    if local_sha != upstream_sha:
        raise Exception('The latest commit on release branch locally does '
                        'not match the latest commit on Oppia repo.')

    python_utils.PRINT('\nEnter your GitHub username.\n')
    github_username = python_utils.INPUT().lower().strip()

    travis_url = 'https://travis-ci.org/%s/oppia/branches' % github_username
    circleci_url = 'https://circleci.com/gh/%s/workflows/oppia' % (
        github_username)

    try:
        python_utils.url_open(travis_url)
    except Exception:
        travis_url = 'https://travis-ci.com/oppia/oppia/branches'

    try:
        python_utils.url_open(circleci_url)
    except Exception:
        circleci_url = 'https://circleci.com/gh/oppia/workflows/oppia'

    common.open_new_tab_in_browser_if_possible(travis_url)
    python_utils.PRINT('Are all travis tests passing on branch %s?\n' %
                       current_branch_name)
    travis_tests_passing = python_utils.INPUT().lower()
    if travis_tests_passing not in release_constants.AFFIRMATIVE_CONFIRMATIONS:
        raise Exception('Please fix the travis tests before deploying.')

    common.open_new_tab_in_browser_if_possible(circleci_url)
    python_utils.PRINT('Are all circleci tests passing on branch %s?\n' %
                       current_branch_name)
    circleci_tests_passing = python_utils.INPUT().lower()
    if circleci_tests_passing not in (
            release_constants.AFFIRMATIVE_CONFIRMATIONS):
        raise Exception('Please fix the circleci tests before deploying.')
Example No. 25
def managed_process(command_args,
                    human_readable_name='Process',
                    shell=False,
                    timeout_secs=60,
                    **popen_kwargs):
    """Context manager for starting and stopping a process gracefully.

    Args:
        command_args: list(int|str). A sequence of program arguments, where the
            program to execute is the first item. Ints are allowed in order to
            accommodate e.g. port numbers.
        human_readable_name: str. The human-readable name of the process. Used
            by the function's logging logic to improve readability.
        shell: bool. Whether the command should be run inside of its own shell.
            WARNING: Executing shell commands that incorporate unsanitized input
            from an untrusted source makes a program vulnerable to
            [shell injection](https://w.wiki/_Ac2), a serious security flaw
            which can result in arbitrary command execution. For this reason,
            the use of `shell=True` is **strongly discouraged** in cases where
            the command string is constructed from external input.
        timeout_secs: int. The time allotted for the managed process and its
            descendants to terminate themselves. After the timeout, any
            remaining processes will be killed abruptly.
        **popen_kwargs: dict(str: *). Same kwargs as `subprocess.Popen`.

    Yields:
        psutil.Process. The process managed by the context manager.
    """
    # TODO(#11549): Move this to top of the file.
    if common.PSUTIL_DIR not in sys.path:
        sys.path.insert(1, common.PSUTIL_DIR)
    import psutil

    get_proc_info = lambda p: ('%s(name="%s", pid=%d)' %
                               (human_readable_name, p.name(), p.pid)
                               if p.is_running() else '%s(pid=%d)' %
                               (human_readable_name, p.pid))

    stripped_args = (('%s' % arg).strip() for arg in command_args)
    non_empty_args = (s for s in stripped_args if s)

    command = ' '.join(non_empty_args) if shell else list(non_empty_args)
    human_readable_command = command if shell else ' '.join(command)
    msg = 'Starting new %s: %s' % (human_readable_name, human_readable_command)
    python_utils.PRINT(msg)
    popen_proc = psutil.Popen(command, shell=shell, **popen_kwargs)

    try:
        yield popen_proc
    finally:
        python_utils.PRINT('Stopping %s...' % get_proc_info(popen_proc))
        procs_still_alive = [popen_proc]
        try:
            if popen_proc.is_running():
                # Children must be terminated before the parent, otherwise they
                # may become zombie processes.
                procs_still_alive = (popen_proc.children(recursive=True) +
                                     [popen_proc])

            procs_to_kill = []
            for proc in procs_still_alive:
                if proc.is_running():
                    logging.info('Terminating %s...' % get_proc_info(proc))
                    proc.terminate()
                    procs_to_kill.append(proc)
                else:
                    logging.info('%s has already ended.' % get_proc_info(proc))

            procs_gone, procs_still_alive = (psutil.wait_procs(
                procs_to_kill, timeout=timeout_secs))
            for proc in procs_still_alive:
                logging.warning('Forced to kill %s!' % get_proc_info(proc))
                proc.kill()
            for proc in procs_gone:
                logging.info('%s has already ended.' % get_proc_info(proc))
        except Exception:
            # NOTE: Raising an exception while exiting a context manager is bad
            # practice, so we log and suppress exceptions instead.
            logging.exception('Failed to stop %s gracefully!' %
                              get_proc_info(popen_proc))
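
Typical usage of the context manager defined above, assuming the @contextlib.contextmanager decorator that its docstring implies; the command shown is illustrative:

# The process (and any children it spawns) is terminated on exit, then
# killed after timeout_secs if it has not shut down.
with managed_process(
        ['python', '-m', 'http.server', 8080],
        human_readable_name='HTTP Server',
        timeout_secs=30) as server_proc:
    python_utils.PRINT('Server running with pid %d' % server_proc.pid)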
Example No. 26
def execute_deployment():
    """Executes the deployment process after doing the prerequisite checks.

    Raises:
        Exception. App name is invalid.
        Exception. Custom version is used with production app.
        Exception. App name is not specified.
        Exception. The deployment script is not run from a release or test
            branch.
        Exception. The deployment script is run for prod server from a test
            branch.
        Exception. Current release version has '.' character.
        Exception. Last commit message is invalid.
        Exception. The mailgun API key is not added before deployment.
        Exception. Could not find third party directory.
        Exception. Invalid directory accessed during deployment.
    """
    parsed_args = _PARSER.parse_args()
    custom_version = None
    if parsed_args.app_name:
        app_name = parsed_args.app_name
        if app_name not in [APP_NAME_OPPIASERVER, APP_NAME_OPPIATESTSERVER
                            ] and ('migration' not in app_name):
            raise Exception('Invalid app name: %s' % app_name)
        if parsed_args.version and app_name == APP_NAME_OPPIASERVER:
            raise Exception('Cannot use custom version with production app.')
        # Note that custom_version may be None.
        custom_version = parsed_args.version
    else:
        raise Exception('No app name specified.')

    current_branch_name = common.get_current_branch_name()

    release_dir_name = 'deploy-%s-%s-%s' % (
        '-'.join('-'.join(app_name.split('.')).split(':')),
        current_branch_name, CURRENT_DATETIME.strftime('%Y%m%d-%H%M%S'))
    release_dir_path = os.path.join(os.getcwd(), '..', release_dir_name)

    deploy_data_path = os.path.join(os.getcwd(), os.pardir, 'release-scripts',
                                    'deploy_data', app_name)

    install_third_party_libs.main()

    if not (common.is_current_branch_a_release_branch() or
            common.is_current_branch_a_test_branch()):
        raise Exception(
            'The deployment script must be run from a release or test branch.')
    if common.is_current_branch_a_test_branch() and (app_name in [
            APP_NAME_OPPIASERVER, APP_NAME_OPPIATESTSERVER
    ]):
        raise Exception('Test branch can only be deployed to backup server.')
    if custom_version is not None:
        current_release_version = custom_version.replace(DOT_CHAR, HYPHEN_CHAR)
    else:
        current_release_version = current_branch_name[
            len(common.RELEASE_BRANCH_NAME_PREFIX):].replace(
                DOT_CHAR, HYPHEN_CHAR)

    # This is required to compose the release_version_library_url
    # (defined in switch_version function) correctly.
    if '.' in current_release_version:
        raise Exception('Current release version has \'.\' character.')

    assert len(current_release_version) <= 25, (
        'The length of the "version" arg should be less than or '
        'equal to 25 characters.')

    # Do prerequisite checks.
    common.require_cwd_to_be_oppia()
    common.ensure_release_scripts_folder_exists_and_is_up_to_date()
    gcloud_adapter.require_gcloud_to_be_available()
    try:
        if app_name == APP_NAME_OPPIASERVER:
            check_release_doc()
            release_version_number = common.get_current_release_version_number(
                current_branch_name)
            last_commit_message = subprocess.check_output(
                'git log -1 --pretty=%B'.split())
            personal_access_token = common.get_personal_access_token()
            if not common.is_current_branch_a_hotfix_branch():
                if not last_commit_message.startswith(
                        'Update authors and changelog for v%s' %
                    (release_version_number)):
                    raise Exception('Invalid last commit message: %s.' %
                                    (last_commit_message))
                g = github.Github(personal_access_token)
                repo = g.get_organization('oppia').get_repo('oppia')
                common.check_blocking_bug_issue_count(repo)
                common.check_prs_for_current_release_are_released(repo)

            check_travis_and_circleci_tests(current_branch_name)
            update_configs.main(personal_access_token)
            with python_utils.open_file(common.FECONF_PATH, 'r') as f:
                feconf_contents = f.read()
                if ('MAILGUN_API_KEY' not in feconf_contents
                        or 'MAILGUN_API_KEY = None' in feconf_contents):
                    raise Exception(
                        'The mailgun API key must be added before deployment.')

        if not os.path.exists(THIRD_PARTY_DIR):
            raise Exception(
                'Could not find third_party directory at %s. Please run '
                'install_third_party_libs.py prior to running this script.' %
                THIRD_PARTY_DIR)

        current_git_revision = subprocess.check_output(
            ['git', 'rev-parse', 'HEAD']).strip()

        # Create a folder in which to save the release candidate.
        python_utils.PRINT('Ensuring that the release directory parent exists')
        common.ensure_directory_exists(os.path.dirname(release_dir_path))

        # Copy files to the release directory. Omits the .git subfolder.
        python_utils.PRINT('Copying files to the release directory')
        shutil.copytree(os.getcwd(),
                        release_dir_path,
                        ignore=shutil.ignore_patterns('.git'))

        # Change the current directory to the release candidate folder.
        with common.CD(release_dir_path):
            if not os.getcwd().endswith(release_dir_name):
                raise Exception(
                    'Invalid directory accessed during deployment: %s' %
                    os.getcwd())

            python_utils.PRINT('Changing directory to %s' % os.getcwd())

            python_utils.PRINT('Preprocessing release...')
            preprocess_release(app_name, deploy_data_path)

            update_and_check_indexes(app_name)
            build_scripts(parsed_args.maintenance_mode)
            deploy_application_and_write_log_entry(app_name,
                                                   current_release_version,
                                                   current_git_revision)

            python_utils.PRINT('Returning to oppia/ root directory.')

        switch_version(app_name, current_release_version)
        flush_memcache(app_name)
        check_breakage(app_name, current_release_version)

        python_utils.PRINT('Done!')
    finally:
        common.run_cmd([
            'git', 'checkout', '--', update_configs.LOCAL_FECONF_PATH,
            update_configs.LOCAL_CONSTANTS_PATH, APP_DEV_YAML_PATH
        ])
Example No. 27
def main(args=None):
    """Run the tests."""
    parsed_args = _PARSER.parse_args(args=args)

    setup.main(args=[])
    setup_gae.main(args=[])

    for directory in DIRS_TO_ADD_TO_SYS_PATH:
        if not os.path.exists(os.path.dirname(directory)):
            raise Exception('Directory %s does not exist.' % directory)
        sys.path.insert(0, directory)

    import dev_appserver
    dev_appserver.fix_sys_path()

    if parsed_args.generate_coverage_report:
        python_utils.PRINT('Checking whether coverage is installed in %s' %
                           common.OPPIA_TOOLS_DIR)
        if not os.path.exists(
                os.path.join(common.OPPIA_TOOLS_DIR, 'coverage-4.5.4')):
            python_utils.PRINT('Installing coverage')
            install_third_party_libs.pip_install(
                'coverage', '4.5.4',
                os.path.join(common.OPPIA_TOOLS_DIR, 'coverage-4.5.4'))

    build.main(args=[])

    python_utils.PRINT('Compiling webpack...')
    subprocess.call([
        os.path.join(common.NODE_MODULES_PATH, 'webpack', 'bin', 'webpack.js'),
        '--config', 'webpack.dev.config.ts'
    ])

    if parsed_args.test_target and parsed_args.test_path:
        raise Exception('At most one of test_path and test_target '
                        'should be specified.')
    if parsed_args.test_path and '.' in parsed_args.test_path:
        raise Exception('The delimiter in test_path should be a slash (/)')
    if parsed_args.test_target and '/' in parsed_args.test_target:
        raise Exception('The delimiter in test_target should be a dot (.)')

    if parsed_args.test_target:
        if '_test' in parsed_args.test_target:
            all_test_targets = [parsed_args.test_target]
        else:
            python_utils.PRINT('')
            python_utils.PRINT(
                '---------------------------------------------------------')
            python_utils.PRINT(
                'WARNING: The test_target flag should point to the test file.')
            python_utils.PRINT(
                '---------------------------------------------------------')
            python_utils.PRINT('')
            time.sleep(3)
            python_utils.PRINT('Redirecting to its corresponding test file...')
            all_test_targets = [parsed_args.test_target + '_test']
    else:
        include_load_tests = not parsed_args.exclude_load_tests
        all_test_targets = _get_all_test_targets(
            test_path=parsed_args.test_path,
            include_load_tests=include_load_tests)

    # Prepare tasks.
    task_to_taskspec = {}
    tasks = []
    for test_target in all_test_targets:
        test = TestingTaskSpec(test_target,
                               parsed_args.generate_coverage_report)
        task = TaskThread(test.run, parsed_args.verbose, name=test_target)
        task_to_taskspec[task] = test
        tasks.append(task)

    task_execution_failed = False
    try:
        _execute_tasks(tasks)
    except Exception:
        task_execution_failed = True

    for task in tasks:
        if task.exception:
            log(python_utils.convert_to_bytes(task.exception))

    python_utils.PRINT('')
    python_utils.PRINT('+------------------+')
    python_utils.PRINT('| SUMMARY OF TESTS |')
    python_utils.PRINT('+------------------+')
    python_utils.PRINT('')

    # Check we ran all tests as expected.
    total_count = 0
    total_errors = 0
    total_failures = 0
    for task in tasks:
        spec = task_to_taskspec[task]

        if not task.finished:
            python_utils.PRINT('CANCELED  %s' % spec.test_target)
            test_count = 0
        elif 'No tests were run' in python_utils.convert_to_bytes(
                task.exception):
            python_utils.PRINT('ERROR     %s: No tests found.' %
                               spec.test_target)
            test_count = 0
        elif task.exception:
            exc_str = python_utils.convert_to_bytes(task.exception)
            python_utils.PRINT(exc_str[exc_str.find('='):exc_str.rfind('-')])

            tests_failed_regex_match = re.search(
                r'Test suite failed: ([0-9]+) tests run, ([0-9]+) errors, '
                '([0-9]+) failures',
                python_utils.convert_to_bytes(task.exception))

            try:
                test_count = int(tests_failed_regex_match.group(1))
                errors = int(tests_failed_regex_match.group(2))
                failures = int(tests_failed_regex_match.group(3))
                total_errors += errors
                total_failures += failures
                python_utils.PRINT('FAILED    %s: %s errors, %s failures' %
                                   (spec.test_target, errors, failures))
            except AttributeError:
                # There was an internal error, and the tests did not run (The
                # error message did not match `tests_failed_regex_match`).
                test_count = 0
                total_errors += 1
                python_utils.PRINT('')
                python_utils.PRINT(
                    '------------------------------------------------------')
                python_utils.PRINT('    WARNING: FAILED TO RUN %s' %
                                   spec.test_target)
                python_utils.PRINT('')
                python_utils.PRINT(
                    '    This is most likely due to an import error.')
                python_utils.PRINT(
                    '------------------------------------------------------')
        else:
            try:
                tests_run_regex_match = re.search(
                    r'Ran ([0-9]+) tests? in ([0-9\.]+)s', task.output)
                test_count = int(tests_run_regex_match.group(1))
                test_time = float(tests_run_regex_match.group(2))
                python_utils.PRINT('SUCCESS   %s: %d tests (%.1f secs)' %
                                   (spec.test_target, test_count, test_time))
            except Exception:
                python_utils.PRINT('An unexpected error occurred. '
                                   'Task output:\n%s' % task.output)

        total_count += test_count

    python_utils.PRINT('')
    if total_count == 0:
        raise Exception('WARNING: No tests were run.')
    else:
        python_utils.PRINT('Ran %s test%s in %s test class%s.' %
                           (total_count, '' if total_count == 1 else 's',
                            len(tasks), '' if len(tasks) == 1 else 'es'))

        if total_errors or total_failures:
            python_utils.PRINT('(%s ERRORS, %s FAILURES)' %
                               (total_errors, total_failures))
        else:
            python_utils.PRINT('All tests passed.')

    if task_execution_failed:
        raise Exception('Task execution failed.')
    elif total_errors or total_failures:
        raise Exception('%s errors, %s failures' %
                        (total_errors, total_failures))

    if parsed_args.generate_coverage_report:
        subprocess.call(['python', COVERAGE_PATH, 'combine'])
        subprocess.call([
            'python', COVERAGE_PATH, 'report',
            '--omit="%s*","third_party/*","/usr/share/*"' %
            common.OPPIA_TOOLS_DIR, '--show-missing'
        ])

        python_utils.PRINT('Generating xml coverage report...')
        subprocess.call(['python', COVERAGE_PATH, 'xml'])

    python_utils.PRINT('')
    python_utils.PRINT('Done!')
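
The summary parsing above leans on two regexes; the success-path one can be factored into a small helper. A sketch against unittest-style trailers:

import re

def parse_test_summary(output):
    """Extracts (test_count, seconds) from a unittest-style trailer.

    Matches lines such as 'Ran 42 tests in 1.3s'; returns None when the
    output does not contain one.
    """
    match = re.search(r'Ran ([0-9]+) tests? in ([0-9\.]+)s', output)
    if match is None:
        return None
    return int(match.group(1)), float(match.group(2))

# Example: parse_test_summary('Ran 3 tests in 0.2s\n\nOK') == (3, 0.2)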
Example No. 28
def main(args=None):
    """Runs the script to setup Oppia."""
    unused_parsed_args = _PARSER.parse_args(args=args)
    test_python_version()

    # The second option allows this script to also be run from deployment
    # folders.
    if not os.getcwd().endswith('oppia') and not os.getcwd().endswith(
            'deploy-'):
        python_utils.PRINT('')
        python_utils.PRINT(
            'WARNING   This script should be run from the oppia/ root folder.')
        python_utils.PRINT('')
        raise Exception

    # Set COMMON_DIR to the absolute path of the directory above OPPIA_DIR.
    # This is necessary because COMMON_DIR (or subsequent variables which
    # refer to it) may be used in a situation where relative paths won't work
    # as expected (such as $PYTHONPATH).
    create_directory(common.OPPIA_TOOLS_DIR)
    create_directory(common.THIRD_PARTY_DIR)
    create_directory(common.NODE_MODULES_PATH)

    os_info = os.uname()
    if os_info[0] != 'Darwin' and os_info[0] != 'Linux':
        # Node is a requirement for all installation scripts. Here, we check if
        # the OS supports node.js installation; if not, we exit with an error.
        common.print_each_string_after_two_new_lines([
            'WARNING: Unsupported OS for installation of node.js.',
            'If you are running this script on Windows, see the instructions',
            'here regarding installation of node.js:',
            'https://github.com/oppia/oppia/wiki/Installing-Oppia-%28Windows'
            '%29',
            'STATUS: Installation completed except for node.js. Exiting.'
        ])
        raise Exception

    # Download and install node.js.
    python_utils.PRINT('Checking if node.js is installed in %s' %
                       common.OPPIA_TOOLS_DIR)
    if not os.path.exists(common.NODE_PATH):
        python_utils.PRINT('Installing Node.js')
        if os_info[0] == 'Darwin':
            if os_info[4] == 'x86_64':
                node_file_name = 'node-v10.15.3-darwin-x64'
            else:
                node_file_name = 'node-v10.15.3-darwin-x86'
        elif os_info[0] == 'Linux':
            if os_info[4] == 'x86_64':
                node_file_name = 'node-v10.15.3-linux-x64'
            else:
                node_file_name = 'node-v10.15.3-linux-x86'

        python_utils.url_retrieve(
            'https://nodejs.org/dist/v10.15.3/%s.tar.gz' % node_file_name,
            filename='node-download.tgz')
        tar = tarfile.open(name='node-download.tgz')
        tar.extractall(path=common.OPPIA_TOOLS_DIR)
        tar.close()
        os.remove('node-download.tgz')
        os.rename(os.path.join(common.OPPIA_TOOLS_DIR, node_file_name),
                  common.NODE_PATH)

    # Change ownership of node_modules.
    # Note: on some machines, these commands seem to take quite a long time.
    common.recursive_chown(common.NODE_MODULES_PATH, os.getuid(), -1)
    common.recursive_chmod(common.NODE_MODULES_PATH, 0o744)

    # Download and install yarn.
    python_utils.PRINT('Checking if yarn is installed in %s' %
                       common.OPPIA_TOOLS_DIR)
    if not os.path.exists(common.YARN_PATH):
        python_utils.PRINT('Removing package-lock.json')
        clean.delete_file('package-lock.json')
        common.print_each_string_after_two_new_lines([
            'Installing yarn',
            'WARNING: Please note that Oppia uses Yarn to manage node packages',
            'do *NOT* use npm. For more information on how to use yarn,',
            'visit https://yarnpkg.com/en/docs/usage.'
        ])

        # NB: Update .yarnrc if the yarn version below is changed.
        yarn_version = 'v1.17.3'
        yarn_file_name = 'yarn-%s.tar.gz' % yarn_version
        python_utils.url_retrieve(
            'https://github.com/yarnpkg/yarn/releases/download/%s/%s' %
            (yarn_version, yarn_file_name),
            filename=yarn_file_name)
        tar = tarfile.open(name=yarn_file_name)
        tar.extractall(path=common.OPPIA_TOOLS_DIR)
        tar.close()
        os.remove(yarn_file_name)

    # Point CHROME_BIN at the default Chrome location for Unix, Windows or
    # Mac OS.
    if os.environ.get('TRAVIS'):
        chrome_bin = '/usr/bin/chromium-browser'
    elif os.path.isfile('/usr/bin/google-chrome'):
        # Unix.
        chrome_bin = '/usr/bin/google-chrome'
    elif os.path.isfile('/usr/bin/chromium-browser'):
        # Unix.
        chrome_bin = '/usr/bin/chromium-browser'
    elif os.path.isfile(
            '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):
        # Windows.
        chrome_bin = (
            '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')
    elif os.path.isfile(
            '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):
        # WSL.
        chrome_bin = (
            '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')
    elif os.path.isfile(
            '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'):
        # Mac OS.
        chrome_bin = (
            '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')
    else:
        python_utils.PRINT('Chrome was not found; stopping...')
        raise Exception('Chrome binary not found.')

    os.environ['CHROME_BIN'] = chrome_bin
    python_utils.PRINT('Environment setup completed.')
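
The node.js and yarn installations above share a download-extract-cleanup pattern. Below is a minimal sketch of that pattern as a standalone helper, assuming Python 3's urllib in place of python_utils.url_retrieve; the name download_and_extract_tarball is illustrative and not part of the script above.

import os
import tarfile
import urllib.request

def download_and_extract_tarball(url, extract_path):
    """Downloads a .tar.gz archive, unpacks it and removes the archive."""
    local_name = 'archive-download.tgz'
    # urllib.request.urlretrieve plays the role of python_utils.url_retrieve.
    urllib.request.urlretrieve(url, filename=local_name)
    with tarfile.open(name=local_name) as tar:
        tar.extractall(path=extract_path)
    os.remove(local_name)

download_and_extract_tarball(
    'https://nodejs.org/dist/v10.15.3/node-v10.15.3-linux-x64.tar.gz',
    '/tmp/oppia_tools')
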
Example no. 29
0
def test_manifest_syntax(dependency_type, dependency_dict):
    """This checks syntax of the manifest.json dependencies.
    Display warning message when there is an error and terminate the program.

    Args:
        dependency_type: str. Dependency download format.
        dependency_dict: dict. A manifest.json dependency dict.
    """
    keys = list(dependency_dict.keys())
    mandatory_keys = DOWNLOAD_FORMATS_TO_MANIFEST_KEYS[dependency_type][
        'mandatory_keys']
    # Each optional key pair requires exactly one member of the pair
    # to be present as a key in the dependency_dict.
    optional_key_pairs = DOWNLOAD_FORMATS_TO_MANIFEST_KEYS[dependency_type][
        'optional_key_pairs']
    for key in mandatory_keys:
        if key not in keys:
            python_utils.PRINT('------------------------------------------')
            python_utils.PRINT('There is a syntax error in this dependency:')
            python_utils.PRINT(dependency_dict)
            python_utils.PRINT('This key is missing or misspelled: "%s".' %
                               key)
            python_utils.PRINT('Exiting')
            sys.exit(1)
    if optional_key_pairs:
        for optional_keys in optional_key_pairs:
            optional_keys_in_dict = [
                key for key in optional_keys if key in keys
            ]
            if len(optional_keys_in_dict) != 1:
                python_utils.PRINT(
                    '------------------------------------------')
                python_utils.PRINT(
                    'There is a syntax error in this dependency:')
                python_utils.PRINT(dependency_dict)
                python_utils.PRINT(
                    'Exactly one of these keys must be used: "%s".' %
                    ', '.join(optional_keys))
                python_utils.PRINT('Exiting')
                sys.exit(1)

    # Check that the URL extension matches the declared file format.
    dependency_url = dependency_dict['url']
    if '#' in dependency_url:
        dependency_url = dependency_url.rpartition('#')[0]
    is_zip_file_format = dependency_type == _DOWNLOAD_FORMAT_ZIP
    is_tar_file_format = dependency_type == _DOWNLOAD_FORMAT_TAR
    if (dependency_url.endswith('.zip') and not is_zip_file_format
            or is_zip_file_format and not dependency_url.endswith('.zip')
            or dependency_url.endswith('.tar.gz') and not is_tar_file_format
            or is_tar_file_format and not dependency_url.endswith('.tar.gz')):
        python_utils.PRINT('------------------------------------------')
        python_utils.PRINT('There is a syntax error in this dependency:')
        python_utils.PRINT(dependency_dict)
        python_utils.PRINT('The url %s is invalid for the %s file format.' %
                           (dependency_url, dependency_type))
        python_utils.PRINT('Exiting.')
        sys.exit(1)
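
For reference, a hedged sketch of the inputs test_manifest_syntax expects. The shape of DOWNLOAD_FORMATS_TO_MANIFEST_KEYS is inferred from how the function indexes it, and the sample dependency values are invented for illustration; the real manifest.json and key table may differ.

# Assumed constant value, mirroring how the function compares formats.
_DOWNLOAD_FORMAT_ZIP = 'zip'

# Assumed shape: each download format maps to its required keys and to
# pairs of keys of which exactly one must be present.
DOWNLOAD_FORMATS_TO_MANIFEST_KEYS = {
    'zip': {
        'mandatory_keys': ['version', 'url', 'downloadFormat'],
        'optional_key_pairs': [['rootDir', 'rootDirPrefix']],
    },
}

# An invented zip dependency that satisfies the rules above.
sample_dependency = {
    'version': '1.2.3',
    'url': 'https://example.com/lib-1.2.3.zip',
    'downloadFormat': 'zip',
    'rootDir': 'lib-1.2.3',
}

test_manifest_syntax('zip', sample_dependency)
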
Example no. 30
0
    def _check_that_all_jobs_are_listed_in_the_job_registry_file(self):
        """This function is used to check that all the one-off and audit jobs
        are registered in jobs_registry.py file.
        """
        def _get_jobs_class_names_in_filepath(filepath, base_class_name):
            """Returns a list of job class names in the given filepath which has
            the given base class.

            Args:
                filepath: str. The filepath of the jobs.
                base_class_name: str. The name of the base class.

            Returns:
                list(str). A list of subclasses of the given base class which
                exist in the given file.
            """
            class_names = []
            filepath_without_extension = filepath[:-len('.py')]
            module_path = filepath_without_extension.replace('/', '.')
            python_module = importlib.import_module(module_path)
            for name, clazz in inspect.getmembers(python_module,
                                                  predicate=inspect.isclass):
                all_base_classes = [
                    base_class.__name__
                    for base_class in (inspect.getmro(clazz))
                ]
                # Check that the class is a subclass of the given base class.
                if base_class_name in all_base_classes:
                    class_names.append(name)
            return class_names

        if self.verbose_mode_enabled:
            python_utils.PRINT('Starting job registry checks')
            python_utils.PRINT('----------------------------------------')
        summary_messages = []
        failed = False
        jobs_in_cron = [
            'DashboardStatsOneOffJob', 'UserDeletionOneOffJob',
            'UserQueryOneOffJob', 'VerifyUserDeletionOneOffJob'
        ]

        jobs_registry = importlib.import_module('core.jobs_registry')
        expected_one_off_jobs_set = set(
            [jobs.__name__ for jobs in jobs_registry.ONE_OFF_JOB_MANAGERS])
        expected_validation_jobs_set = set(
            [jobs.__name__ for jobs in jobs_registry.AUDIT_JOB_MANAGERS])

        one_off_jobs_list = []
        validation_jobs_list = []
        for filepath in self.all_filepaths:
            if filepath.endswith('prod_validation_jobs_one_off.py'):
                validation_jobs_list.extend(
                    _get_jobs_class_names_in_filepath(
                        filepath, 'ProdValidationAuditOneOffJob'))
            elif filepath.endswith('_jobs_one_off.py'):
                one_off_jobs_list.extend(
                    _get_jobs_class_names_in_filepath(
                        filepath, 'BaseMapReduceOneOffJobManager'))

        # Remove jobs that are used in cron.
        one_off_jobs_list = [
            job for job in one_off_jobs_list if job not in jobs_in_cron
        ]
        one_off_jobs_set = set(one_off_jobs_list)
        if len(one_off_jobs_list) != len(one_off_jobs_set):
            failed = True
            duplicate_one_off_job_names = (
                linter_utils.get_duplicates_from_list_of_strings(
                    one_off_jobs_list))
            summary_message = 'Found one-off jobs with duplicate names: %s' % (
                ', '.join(duplicate_one_off_job_names))
            python_utils.PRINT(summary_message)
            summary_messages.append(summary_message)

        if validation_jobs_list:
            # Remove the base validation job class from the list.
            validation_jobs_list.remove('ProdValidationAuditOneOffJob')
        validation_jobs_set = set(validation_jobs_list)
        if len(validation_jobs_list) != len(validation_jobs_set):
            failed = True
            duplicate_validation_job_names = (
                linter_utils.get_duplicates_from_list_of_strings(
                    validation_jobs_list))
            summary_message = (
                'Found validation jobs with duplicate names: %s' %
                (', '.join(duplicate_validation_job_names)))
            python_utils.PRINT(summary_message)
            summary_messages.append(summary_message)

        non_registered_one_off_jobs = (one_off_jobs_set -
                                       expected_one_off_jobs_set)
        if non_registered_one_off_jobs:
            failed = True
            summary_message = (
                'Found one-off jobs not listed in jobs_registry file: %s' %
                (',\n'.join(sorted(non_registered_one_off_jobs))))
            python_utils.PRINT(summary_message)
            summary_messages.append(summary_message)

        non_registered_validation_jobs = (validation_jobs_set -
                                          expected_validation_jobs_set)
        if non_registered_validation_jobs:
            failed = True
            summary_message = (
                'Found validation jobs not listed in jobs_registry file: %s' %
                (',\n'.join(sorted(non_registered_validation_jobs))))
            python_utils.PRINT(summary_message)
            summary_messages.append(summary_message)

        summary_message = ('%s   Job registry check %s' %
                           ((_MESSAGE_TYPE_FAILED, 'failed') if failed else
                            (_MESSAGE_TYPE_SUCCESS, 'passed')))
        python_utils.PRINT(summary_message)
        summary_messages.append(summary_message)
        return summary_messages
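
The nested helper above relies on inspect.getmro to walk a class's ancestry. Below is a self-contained sketch of that discovery technique; the toy classes and the name get_subclass_names are invented for illustration.

import inspect
import sys

class BaseJobManager:
    pass

class ExplorationMigrationJob(BaseJobManager):
    pass

class UnrelatedClass:
    pass

def get_subclass_names(module, base_class_name):
    """Returns names of classes in the module inheriting base_class_name."""
    names = []
    for name, clazz in inspect.getmembers(module, predicate=inspect.isclass):
        # getmro lists the class itself plus every ancestor, so this check
        # mirrors the one in the linter above.
        ancestors = [base.__name__ for base in inspect.getmro(clazz)]
        if base_class_name in ancestors:
            names.append(name)
    return names

print(get_subclass_names(sys.modules[__name__], 'BaseJobManager'))
# Prints ['BaseJobManager', 'ExplorationMigrationJob'] -- the base class
# matches itself, which is why the check above removes it from the list.
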