예제 #1
0
    def swap_managed_cloud_datastore_emulator_io_operations(
            self, data_dir_exists):
        """Safely swaps IO operations used by managed_cloud_datastore_emulator.

        Args:
            data_dir_exists: bool. Return value of os.path.exists(DATA_DIR).

        Yields:
            tuple(CallCounter, CallCounter). CallCounter instances for rmtree
            and makedirs.
        """
        real_exists = os.path.exists
        real_rmtree = shutil.rmtree
        real_makedirs = os.makedirs

        def targets_data_dir(path):
            """Returns whether path is the emulator's data directory."""
            return path == common.CLOUD_DATASTORE_EMULATOR_DATA_DIR

        def fake_exists(path):
            """Reports data_dir_exists for the data dir, else delegates."""
            if targets_data_dir(path):
                return data_dir_exists
            return real_exists(path)

        def fake_rmtree(path, **kwargs):
            """No-ops on the data dir, else delegates to shutil.rmtree."""
            return None if targets_data_dir(path) else real_rmtree(
                path, **kwargs)

        def fake_makedirs(path, **kwargs):
            """No-ops on the data dir, else delegates to os.makedirs."""
            return None if targets_data_dir(path) else real_makedirs(
                path, **kwargs)

        counted_rmtree = test_utils.CallCounter(fake_rmtree)
        counted_makedirs = test_utils.CallCounter(fake_makedirs)

        with python_utils.ExitStack() as exit_stack:
            exit_stack.enter_context(
                self.swap(os.path, 'exists', fake_exists))
            exit_stack.enter_context(
                self.swap(shutil, 'rmtree', counted_rmtree))
            exit_stack.enter_context(
                self.swap(os, 'makedirs', counted_makedirs))
            yield counted_rmtree, counted_makedirs
예제 #2
0
    def test_failing_jobs(self):
        # Force GoogleCloudStorageInputReader() to raise (division by zero),
        # which makes the map/reduce job fail.
        def _failing_input_reader(unused_arg, unused_kwarg):
            """Raises ZeroDivisionError to simulate a broken input reader."""
            return python_utils.divide(1, 0)

        job_id = MockJobManagerOne.create_new()
        store_map_reduce_results = jobs.StoreMapReduceResults()

        with python_utils.ExitStack() as stack:
            captured_logs = stack.enter_context(
                self.capture_logging(min_level=logging.ERROR))
            stack.enter_context(self.swap(
                input_readers, 'GoogleCloudStorageInputReader',
                _failing_input_reader))
            stack.enter_context(
                self.assertRaisesRegexp(
                    Exception, r'Invalid status code change for job '
                    r'MockJobManagerOne-\w+-\w+: from new to failed'))

            store_map_reduce_results.run(
                job_id, 'core.jobs_test.MockJobManagerOne', 'output')

        # The first log message is ignored as it is the traceback.
        self.assertEqual(len(captured_logs), 1)
        self.assertTrue(
            captured_logs[0].startswith('Job %s failed at' % job_id))
예제 #3
0
    def setUp(self):
        super(SyncFirebaseAccountsOneOffJobTests, self).setUp()
        # Stub out the Firebase Admin SDK so tests never talk to a real
        # Firebase backend.
        self.firebase_sdk_stub = (
            firebase_auth_services_test.FirebaseAdminSdkStub())
        self.exit_stack = python_utils.ExitStack()

        self.firebase_sdk_stub.install(self)
        # Register the uninstall so it runs when the exit stack is closed.
        self.exit_stack.callback(self.firebase_sdk_stub.uninstall)
예제 #4
0
def managed_dev_appserver(app_yaml_path,
                          env=None,
                          log_level='info',
                          host='0.0.0.0',
                          port=8080,
                          admin_host='0.0.0.0',
                          admin_port=8000,
                          enable_host_checking=True,
                          automatic_restart=False,
                          skip_sdk_update_check=False,
                          clear_datastore=False):
    """Returns a context manager to start up and shut down a GAE dev appserver.

    Args:
        app_yaml_path: str. Path to the app.yaml file which defines the
            structure of the server.
        env: dict(str: str) or None. Defines the environment variables for the
            new process.
        log_level: str. The lowest log level generated by the application code
            and the development server. Expected values are: debug, info,
            warning, error, critical.
        host: str. The host name to which the app server should bind.
        port: int. The lowest port to which application modules should bind.
        admin_host: str. The host name to which the admin server should bind.
        admin_port: int. The port to which the admin server should bind.
        enable_host_checking: bool. Whether to enforce HTTP Host checking for
            application modules, API server, and admin server. Host checking
            protects against DNS rebinding attacks, so only disable after
            understanding the security implications.
        automatic_restart: bool. Whether to restart instances automatically when
            files relevant to their module are changed.
        skip_sdk_update_check: bool. Whether to skip checking for SDK updates.
            If false, uses .appcfg_nag to decide.
        clear_datastore: bool. Whether to clear the local datastore before
            starting the server. Defaults to False so existing callers keep
            their data. NOTE: added because callers (e.g. the lighthouse test
            runner) pass clear_datastore=True, which the old signature
            rejected.

    Yields:
        psutil.Process. The dev_appserver process.
    """
    dev_appserver_args = [
        common.CURRENT_PYTHON_BIN, common.DEV_APPSERVER_PATH, '--host', host,
        '--port', port, '--admin_host', admin_host, '--admin_port', admin_port,
        '--enable_host_checking', 'true' if enable_host_checking else 'false',
        '--automatic_restart', 'true' if automatic_restart else 'false',
        '--clear_datastore', 'true' if clear_datastore else 'false',
        '--skip_sdk_update_check',
        'true' if skip_sdk_update_check else 'false', '--log_level', log_level,
        '--dev_appserver_log_level', log_level, app_yaml_path
    ]
    # OK to use shell=True here because we are not passing anything that came
    # from an untrusted user, only other callers of the script, so there's no
    # risk of shell-injection attacks.
    with python_utils.ExitStack() as stack:
        proc = stack.enter_context(
            managed_process(dev_appserver_args,
                            human_readable_name='GAE Development Server',
                            shell=True,
                            env=env))
        # Only yield control once the server is actually reachable.
        common.wait_for_port_to_be_in_use(port)
        yield proc
예제 #5
0
def managed_cloud_datastore_emulator(clear_datastore=False):
    """Returns a context manager for the Cloud Datastore emulator.

    Args:
        clear_datastore: bool. Whether to delete the datastore's config and data
            before starting the emulator.

    Yields:
        psutil.Process. The emulator process.
    """
    emulator_hostport = '%s:%d' % (
        feconf.CLOUD_DATASTORE_EMULATOR_HOST,
        feconf.CLOUD_DATASTORE_EMULATOR_PORT)
    emulator_args = [
        common.GCLOUD_PATH, 'beta', 'emulators', 'datastore', 'start',
        '--project', feconf.OPPIA_PROJECT_ID,
        '--data-dir', common.CLOUD_DATASTORE_EMULATOR_DATA_DIR,
        '--host-port', emulator_hostport,
        '--no-store-on-disk', '--consistency=1.0', '--quiet',
    ]

    with python_utils.ExitStack() as stack:
        data_dir = common.CLOUD_DATASTORE_EMULATOR_DATA_DIR
        if os.path.exists(data_dir):
            if clear_datastore:
                # Replace the existing data directory with an empty one.
                shutil.rmtree(data_dir)
                os.makedirs(data_dir)
        else:
            os.makedirs(data_dir)

        # OK to use shell=True here because we are passing string literals and
        # constants, so there is no risk of a shell-injection attack.
        proc = stack.enter_context(managed_process(
            emulator_args, human_readable_name='Cloud Datastore Emulator',
            shell=True))

        common.wait_for_port_to_be_in_use(feconf.CLOUD_DATASTORE_EMULATOR_PORT)

        # Environment variables required to communicate with the emulator.
        emulator_env = (
            ('DATASTORE_DATASET', feconf.OPPIA_PROJECT_ID),
            ('DATASTORE_EMULATOR_HOST', emulator_hostport),
            ('DATASTORE_EMULATOR_HOST_PATH',
             '%s/datastore' % emulator_hostport),
            ('DATASTORE_HOST', 'http://%s' % emulator_hostport),
            ('DATASTORE_PROJECT_ID', feconf.OPPIA_PROJECT_ID),
            ('DATASTORE_USE_PROJECT_ID_AS_APP_ID', 'true'),
        )
        for variable_name, value in emulator_env:
            stack.enter_context(common.swap_env(variable_name, value))

        yield proc
예제 #6
0
def main(args=None):
    """Runs lighthouse checks and deletes reports.

    Args:
        args: list(str)|None. Command-line arguments to parse, or None to use
            sys.argv.

    Raises:
        Exception. The mode argument is neither 'accessibility' nor
            'performance'.
    """
    parsed_args = _PARSER.parse_args(args=args)

    if parsed_args.mode == LIGHTHOUSE_MODE_ACCESSIBILITY:
        lighthouse_mode = LIGHTHOUSE_MODE_ACCESSIBILITY
        server_mode = SERVER_MODE_DEV
    elif parsed_args.mode == LIGHTHOUSE_MODE_PERFORMANCE:
        lighthouse_mode = LIGHTHOUSE_MODE_PERFORMANCE
        server_mode = SERVER_MODE_PROD
    else:
        # Fixed: the message previously read 'choosefrom' because the
        # implicit string concatenation was missing a space.
        raise Exception(
            'Invalid parameter passed in: \'%s\', please choose '
            'from \'accessibility\' or \'performance\'' % parsed_args.mode)

    if lighthouse_mode == LIGHTHOUSE_MODE_PERFORMANCE:
        python_utils.PRINT('Building files in production mode.')
        # We are using --source_maps here, so that we have at least one CI check
        # that builds using source maps in prod env. This is to ensure that
        # there are no issues while deploying oppia.
        build.main(args=['--prod_env', '--source_maps'])
    elif lighthouse_mode == LIGHTHOUSE_MODE_ACCESSIBILITY:
        build.main(args=[])
        run_webpack_compilation()

    with python_utils.ExitStack() as stack:
        # Force-enable account deletion for the duration of the checks; the
        # constants file is restored when the stack unwinds.
        stack.enter_context(common.inplace_replace_file_context(
            common.CONSTANTS_FILE_PATH,
            '"ENABLE_ACCOUNT_DELETION": .*',
            '"ENABLE_ACCOUNT_DELETION": true,'))

        stack.enter_context(servers.managed_redis_server())
        stack.enter_context(servers.managed_elasticsearch_dev_server())

        if constants.EMULATOR_MODE:
            stack.enter_context(servers.managed_firebase_auth_emulator())

        stack.enter_context(servers.managed_dev_appserver(
            APP_YAML_FILENAMES[server_mode],
            port=GOOGLE_APP_ENGINE_PORT,
            clear_datastore=True,
            log_level='critical',
            skip_sdk_update_check=True))

        run_lighthouse_puppeteer_script()
        run_lighthouse_checks(lighthouse_mode)
예제 #7
0
def main(args=None):
    """Runs the backend test suite and prints a summary of the results.

    Args:
        args: list(str)|None. Command-line arguments to parse, or None to use
            sys.argv.

    Raises:
        Exception. A required directory is missing, the test-selection flags
            are invalid, no tests were run, task execution failed, tests
            reported errors/failures, or backend coverage is below 100% when
            a coverage report was requested.
    """
    parsed_args = _PARSER.parse_args(args=args)

    for directory in common.DIRS_TO_ADD_TO_SYS_PATH:
        if not os.path.exists(os.path.dirname(directory)):
            raise Exception('Directory %s does not exist.' % directory)

        # The directories should only be inserted starting at index 1. See
        # https://stackoverflow.com/a/10095099 and
        # https://stackoverflow.com/q/10095037 for more details.
        sys.path.insert(1, directory)

    common.fix_third_party_imports()

    if parsed_args.generate_coverage_report:
        python_utils.PRINT('Checking whether coverage is installed in %s' %
                           common.OPPIA_TOOLS_DIR)
        if not os.path.exists(
                os.path.join(common.OPPIA_TOOLS_DIR,
                             'coverage-%s' % common.COVERAGE_VERSION)):
            raise Exception(
                'Coverage is not installed, please run the start script.')

        pythonpath_components = [COVERAGE_DIR]
        if os.environ.get('PYTHONPATH'):
            pythonpath_components.append(os.environ.get('PYTHONPATH'))
        os.environ['PYTHONPATH'] = os.pathsep.join(pythonpath_components)

    # test_target, test_path and test_shard are mutually exclusive ways of
    # selecting which tests run.
    test_specs_provided = sum([
        1 if argument else 0
        for argument in (parsed_args.test_target, parsed_args.test_path,
                         parsed_args.test_shard)
    ])

    if test_specs_provided > 1:
        raise Exception(
            'At most one of test_path, test_target and test_shard may '
            'be specified.')
    if parsed_args.test_path and '.' in parsed_args.test_path:
        raise Exception('The delimiter in test_path should be a slash (/)')
    if parsed_args.test_target and '/' in parsed_args.test_target:
        raise Exception('The delimiter in test_target should be a dot (.)')

    with python_utils.ExitStack() as stack:
        stack.enter_context(servers.managed_cloud_datastore_emulator())
        stack.enter_context(servers.managed_redis_server())
        if parsed_args.test_target:
            if '_test' in parsed_args.test_target:
                all_test_targets = [parsed_args.test_target]
            else:
                python_utils.PRINT('')
                python_utils.PRINT(
                    '---------------------------------------------------------'
                )
                python_utils.PRINT(
                    'WARNING : test_target flag should point to the test file.'
                )
                python_utils.PRINT(
                    '---------------------------------------------------------'
                )
                python_utils.PRINT('')
                time.sleep(3)
                python_utils.PRINT(
                    'Redirecting to its corresponding test file...')
                all_test_targets = [parsed_args.test_target + '_test']
        elif parsed_args.test_shard:
            validation_error = _check_shards_match_tests(
                include_load_tests=True)
            if validation_error:
                raise Exception(validation_error)
            all_test_targets = _get_all_test_targets_from_shard(
                parsed_args.test_shard)
        else:
            include_load_tests = not parsed_args.exclude_load_tests
            all_test_targets = _get_all_test_targets_from_path(
                test_path=parsed_args.test_path,
                include_load_tests=include_load_tests)

        # Prepare tasks, limiting concurrency to the CPU count (capped at 25).
        max_concurrent_runs = 25
        concurrent_count = min(multiprocessing.cpu_count(),
                               max_concurrent_runs)
        semaphore = threading.Semaphore(concurrent_count)

        task_to_taskspec = {}
        tasks = []
        for test_target in all_test_targets:
            test = TestingTaskSpec(test_target,
                                   parsed_args.generate_coverage_report)
            task = concurrent_task_utils.create_task(test.run,
                                                     parsed_args.verbose,
                                                     semaphore,
                                                     name=test_target,
                                                     report_enabled=False)
            task_to_taskspec[task] = test
            tasks.append(task)

        task_execution_failed = False
        try:
            concurrent_task_utils.execute_tasks(tasks, semaphore)
        except Exception:
            # Remember the failure so it can be reported after the summary;
            # per-task exceptions are surfaced below.
            task_execution_failed = True

        for task in tasks:
            if task.exception:
                concurrent_task_utils.log(
                    python_utils.convert_to_bytes(task.exception.args[0]))

    python_utils.PRINT('')
    python_utils.PRINT('+------------------+')
    python_utils.PRINT('| SUMMARY OF TESTS |')
    python_utils.PRINT('+------------------+')
    python_utils.PRINT('')

    # Check we ran all tests as expected.
    total_count = 0
    total_errors = 0
    total_failures = 0
    for task in tasks:
        spec = task_to_taskspec[task]

        if not task.finished:
            python_utils.PRINT('CANCELED  %s' % spec.test_target)
            test_count = 0
        elif task.exception and 'No tests were run' in task.exception.args[0]:
            python_utils.PRINT('ERROR     %s: No tests found.' %
                               spec.test_target)
            test_count = 0
        elif task.exception:
            exc_str = task.exception.args[0]
            python_utils.PRINT(exc_str[exc_str.find('='):exc_str.rfind('-')])

            tests_failed_regex_match = re.search(
                r'Test suite failed: ([0-9]+) tests run, ([0-9]+) errors, '
                '([0-9]+) failures', task.exception.args[0])

            try:
                test_count = int(tests_failed_regex_match.group(1))
                errors = int(tests_failed_regex_match.group(2))
                failures = int(tests_failed_regex_match.group(3))
                total_errors += errors
                total_failures += failures
                python_utils.PRINT('FAILED    %s: %s errors, %s failures' %
                                   (spec.test_target, errors, failures))
            except AttributeError:
                # There was an internal error, and the tests did not run (The
                # error message did not match `tests_failed_regex_match`).
                test_count = 0
                total_errors += 1
                python_utils.PRINT('')
                python_utils.PRINT(
                    '------------------------------------------------------')
                python_utils.PRINT('    WARNING: FAILED TO RUN %s' %
                                   spec.test_target)
                python_utils.PRINT('')
                python_utils.PRINT(
                    '    This is most likely due to an import error.')
                python_utils.PRINT(
                    '------------------------------------------------------')
        else:
            try:
                tests_run_regex_match = re.search(
                    r'Ran ([0-9]+) tests? in ([0-9\.]+)s',
                    task.task_results[0].get_report()[0])
                test_count = int(tests_run_regex_match.group(1))
                test_time = float(tests_run_regex_match.group(2))
                python_utils.PRINT('SUCCESS   %s: %d tests (%.1f secs)' %
                                   (spec.test_target, test_count, test_time))
            except Exception:
                python_utils.PRINT('An unexpected error occurred. '
                                   'Task output:\n%s' %
                                   task.task_results[0].get_report()[0])

        total_count += test_count

    python_utils.PRINT('')
    if total_count == 0:
        raise Exception('WARNING: No tests were run.')

    python_utils.PRINT('Ran %s test%s in %s test class%s.' %
                       (total_count, '' if total_count == 1 else 's',
                        len(tasks), '' if len(tasks) == 1 else 'es'))

    if total_errors or total_failures:
        python_utils.PRINT('(%s ERRORS, %s FAILURES)' %
                           (total_errors, total_failures))
    else:
        python_utils.PRINT('All tests passed.')

    if task_execution_failed:
        raise Exception('Task execution failed.')
    elif total_errors or total_failures:
        raise Exception('%s errors, %s failures' %
                        (total_errors, total_failures))

    if parsed_args.generate_coverage_report:
        subprocess.check_call(
            [sys.executable, COVERAGE_MODULE_PATH, 'combine'])
        process = subprocess.Popen([
            sys.executable, COVERAGE_MODULE_PATH, 'report',
            '--omit="%s*","third_party/*","/usr/share/*"' %
            common.OPPIA_TOOLS_DIR, '--show-missing'
        ],
                                   stdout=subprocess.PIPE)

        report_stdout, _ = process.communicate()
        python_utils.PRINT(report_stdout)

        coverage_result = re.search(
            rb'TOTAL\s+(\d+)\s+(\d+)\s+(?P<total>\d+)%\s+', report_stdout)
        # BUG FIX: report_stdout is bytes, so the captured 'total' group is
        # bytes too. The old comparison against the unicode string '100' was
        # always unequal in Python 3, making this check raise even at 100%
        # coverage. Decode before comparing, and treat a missing match as a
        # coverage failure instead of crashing with AttributeError.
        total_coverage_percent = (
            coverage_result.group('total').decode('utf-8')
            if coverage_result else None)
        if (total_coverage_percent != '100'
                and not parsed_args.ignore_coverage):
            raise Exception('Backend test coverage is not 100%')

    python_utils.PRINT('')
    python_utils.PRINT('Done!')
예제 #8
0
def run_tests(args):
    """Run the scripts to start end-to-end tests.

    Args:
        args: argparse.Namespace. Parsed command-line arguments. The fields
            read here are: skip_install, prod_env, skip_build,
            deparallelize_terser, source_maps, server_log_level,
            chrome_driver_version, suite, debug_mode and sharding_instances.

    Returns:
        tuple(list(bytes), int). The lines emitted by the Protractor process
        (with trailing whitespace stripped) and the process's return code.
    """
    # Refuse to start a second server instance on the same machine.
    if is_oppia_server_already_running():
        sys.exit(1)

    install_third_party_libraries(args.skip_install)

    with python_utils.ExitStack() as stack:
        dev_mode = not args.prod_env

        if args.skip_build:
            build.modify_constants(prod_env=args.prod_env)
        else:
            build_js_files(dev_mode,
                           deparallelize_terser=args.deparallelize_terser,
                           source_maps=args.source_maps)
        # Restore the constants file no matter how the tests end.
        stack.callback(build.set_constants_to_default)

        stack.enter_context(servers.managed_redis_server())
        stack.enter_context(servers.managed_elasticsearch_dev_server())
        if constants.EMULATOR_MODE:
            stack.enter_context(servers.managed_firebase_auth_emulator())
            stack.enter_context(
                servers.managed_cloud_datastore_emulator(clear_datastore=True))

        app_yaml_path = 'app.yaml' if args.prod_env else 'app_dev.yaml'
        stack.enter_context(
            servers.managed_dev_appserver(
                app_yaml_path,
                port=GOOGLE_APP_ENGINE_PORT,
                log_level=args.server_log_level,
                skip_sdk_update_check=True,
                env={
                    **os.environ,
                    # NOTE(review): presumably a portserver is started
                    # elsewhere and subprocesses lease ports through this
                    # socket — confirm against the caller.
                    'PORTSERVER_ADDRESS':
                    common.PORTSERVER_SOCKET_FILEPATH,
                }))

        stack.enter_context(
            servers.managed_webdriver_server(
                chrome_version=args.chrome_driver_version))

        # Protractor's stdout is piped so its output can be mirrored and
        # collected below.
        proc = stack.enter_context(
            servers.managed_protractor_server(
                suite_name=args.suite,
                dev_mode=dev_mode,
                debug_mode=args.debug_mode,
                sharding_instances=args.sharding_instances,
                stdout=subprocess.PIPE))

        python_utils.PRINT(
            'Servers have come up.\n'
            'Note: If ADD_SCREENSHOT_REPORTER is set to true in '
            'core/tests/protractor.conf.js, you can view screenshots of the '
            'failed tests in ../protractor-screenshots/')

        output_lines = []
        while True:
            # Keep reading lines until an empty string is returned. Empty
            # strings signal that the process has ended.
            for line in iter(proc.stdout.readline, b''):
                if isinstance(line, str):
                    # Although our unit tests always provide unicode strings,
                    # the actual server needs this failsafe since it can output
                    # non-unicode strings.
                    line = line.encode('utf-8')  # pragma: nocover
                output_lines.append(line.rstrip())
                # Replaces non-ASCII characters with '?'.
                common.write_stdout_safe(line.decode('ascii',
                                                     errors='replace'))
            # The poll() method returns None while the process is running,
            # otherwise it returns the return code of the process (an int).
            if proc.poll() is not None:
                break

        return output_lines, proc.returncode
예제 #9
0
def main(args=None):
    """Starts up a development server running Oppia."""
    parsed_args = _PARSER.parse_args(args=args)

    if common.is_port_in_use(PORT_NUMBER_FOR_GAE_SERVER):
        common.print_each_string_after_two_new_lines([
            'WARNING',
            'Could not start new server. There is already an existing server '
            'running at port %s.' % PORT_NUMBER_FOR_GAE_SERVER,
        ])

    # Context managers exit in reverse order, so alert_on_exit() is entered
    # last to ensure the alert prints _before_ the ExitStack unwinds.
    with python_utils.ExitStack() as stack, alert_on_exit():
        # Registered first so it runs as the very last cleanup action.
        stack.callback(notify_about_successful_shutdown)

        build_args = [
            flag for flag, enabled in (
                ('--prod_env', parsed_args.prod_env),
                ('--maintenance_mode', parsed_args.maintenance_mode),
                ('--source_maps', parsed_args.source_maps),
            ) if enabled
        ]
        build.main(args=build_args)
        stack.callback(build.set_constants_to_default)

        stack.enter_context(servers.managed_redis_server())
        stack.enter_context(servers.managed_elasticsearch_dev_server())

        if constants.EMULATOR_MODE:
            stack.enter_context(
                servers.managed_firebase_auth_emulator(
                    recover_users=parsed_args.save_datastore))
            stack.enter_context(
                servers.managed_cloud_datastore_emulator(
                    clear_datastore=not parsed_args.save_datastore))

        # When prod_env is set, build.main() already ran the Webpack compiler.
        if not parsed_args.prod_env:
            stack.enter_context(
                servers.managed_webpack_compiler(
                    use_prod_env=False,
                    use_source_maps=parsed_args.source_maps,
                    watch_mode=True))

        app_yaml_path = 'app.yaml' if parsed_args.prod_env else 'app_dev.yaml'
        dev_appserver = stack.enter_context(
            servers.managed_dev_appserver(
                app_yaml_path,
                enable_host_checking=not parsed_args.disable_host_checking,
                automatic_restart=not parsed_args.no_auto_restart,
                skip_sdk_update_check=True,
                port=PORT_NUMBER_FOR_GAE_SERVER))

        managed_web_browser = (
            None if parsed_args.no_browser else
            servers.create_managed_web_browser(PORT_NUMBER_FOR_GAE_SERVER))

        if managed_web_browser is None:
            ready_message = (
                'Local development server is ready! You can access it by '
                'navigating to http://localhost:%s/ in a web '
                'browser.' % PORT_NUMBER_FOR_GAE_SERVER)
        else:
            ready_message = (
                'Local development server is ready! Opening a default web '
                'browser window pointing to it: '
                'http://localhost:%s/' % PORT_NUMBER_FOR_GAE_SERVER)
        common.print_each_string_after_two_new_lines(
            ['INFORMATION', ready_message])

        if managed_web_browser is not None:
            stack.enter_context(managed_web_browser)

        # Block until the dev server terminates.
        dev_appserver.wait()
예제 #10
0
 def setUp(self):
     super(PipelinedTestBase, self).setUp()
     with python_utils.ExitStack() as stack:
         stack.enter_context(decorate_beam_errors())
         stack.enter_context(self.pipeline)
         # pop_all() transfers ownership of the entered contexts to a new
         # stack, so they remain active after this with-block exits.
         self._pipeline_context_stack = stack.pop_all()
예제 #11
0
 def setUp(self):
     super(RunE2ETestsTests, self).setUp()
     # Fresh ExitStack for each test so cleanups can be registered on it.
     # NOTE(review): presumably tearDown (not visible here) closes this
     # stack — confirm before relying on cleanup ordering.
     self.exit_stack = python_utils.ExitStack()
예제 #12
0
def managed_webdriver_server(chrome_version=None):
    """Returns context manager to start/stop the Webdriver server gracefully.

    This context manager updates Google Chrome before starting the server.

    Args:
        chrome_version: str|None. The version of Google Chrome to run the tests
            on. If None, then the currently-installed version of Google Chrome
            is used instead.

    Yields:
        psutil.Process. The Webdriver process.

    Raises:
        Exception. The installed Chrome version could not be determined by
            running "<chrome> --version".
    """
    if chrome_version is None:
        # Although there are spaces between Google and Chrome in the path, we
        # don't need to escape them for Popen (as opposed to on the terminal, in
        # which case we would need to escape them for the command to run).
        chrome_command = (
            '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
            if common.is_mac_os() else 'google-chrome')
        try:
            output = subprocess.check_output([chrome_command, '--version'])
        except OSError:
            # For the error message on macOS, we need to add the backslashes in.
            # This is because it is likely that a user will try to run the
            # command on their terminal and, as mentioned above, the macOS
            # chrome version command has spaces in the path which need to be
            # escaped for successful terminal use.
            raise Exception(
                'Failed to execute "%s --version" command. This is used to '
                'determine the chromedriver version to use. Please set the '
                'chromedriver version manually using --chrome_driver_version '
                'flag. To determine the chromedriver version to be used, '
                'please follow the instructions mentioned in the following '
                'URL:\n'
                'https://chromedriver.chromium.org/downloads/version-selection'
                % chrome_command.replace(' ', r'\ '))

        # Keep only the digits and dots from the version output, then drop
        # the last version component, because the LATEST_RELEASE_<version>
        # endpoint below expects the version without the final (build)
        # component, per the chromedriver version-selection docs.
        installed_version_parts = b''.join(re.findall(rb'[0-9.]', output))
        installed_version = '.'.join(
            installed_version_parts.decode('utf-8').split('.')[:-1])
        # Ask the chromedriver release service which driver version matches
        # the installed Chrome version.
        response = python_utils.url_open(
            'https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s' %
            (installed_version))
        chrome_version = response.read().decode('utf-8')

    python_utils.PRINT('\n\nCHROME VERSION: %s' % chrome_version)
    # Download the matching chromedriver binary before starting the server.
    subprocess.check_call([
        common.NODE_BIN_PATH,
        common.WEBDRIVER_MANAGER_BIN_PATH,
        'update',
        '--versions.chrome',
        chrome_version,
    ])

    with python_utils.ExitStack() as exit_stack:
        if common.is_windows_os():
            # NOTE: webdriver-manager (version 13.0.0) uses `os.arch()` to
            # determine the architecture of the operating system, however, this
            # function can only be used to determine the architecture of the
            # machine that compiled `node`. In the case of Windows, we are using
            # the portable version, which was compiled on `ia32` machine so that
            # is the value returned by this `os.arch` function. Unfortunately,
            # webdriver-manager seems to assume that Windows wouldn't run on the
            # ia32 architecture, so its help function used to determine download
            # link returns null for this, which means that the application has
            # no idea about where to download the correct version.
            #
            # https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/chromedriver.ts#L16
            # https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/geckodriver.ts#L21
            # https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/chromedriver.ts#L167
            # https://github.com/nodejs/node/issues/17036
            regex_pattern = re.escape('this.osArch = os.arch();')
            arch = 'x64' if common.is_x64_architecture() else 'x86'
            replacement_string = 'this.osArch = "%s";' % arch
            # The patches are applied through context managers so the original
            # provider files are restored when the stack unwinds.
            exit_stack.enter_context(
                common.inplace_replace_file_context(
                    common.CHROME_PROVIDER_FILE_PATH, regex_pattern,
                    replacement_string))
            exit_stack.enter_context(
                common.inplace_replace_file_context(
                    common.GECKO_PROVIDER_FILE_PATH, regex_pattern,
                    replacement_string))

        # OK to use shell=True here because we are passing string literals and
        # constants, so there is no risk of a shell-injection attack.
        proc = exit_stack.enter_context(
            managed_process([
                common.NODE_BIN_PATH,
                common.WEBDRIVER_MANAGER_BIN_PATH,
                'start',
                '--versions.chrome',
                chrome_version,
                '--quiet',
                '--standalone',
            ],
                            human_readable_name='Webdriver manager',
                            shell=True))

        # Only yield once the server is reachable on port 4444.
        common.wait_for_port_to_be_in_use(4444)

        yield proc
예제 #13
0
def managed_webpack_compiler(config_path=None,
                             use_prod_env=False,
                             use_source_maps=False,
                             watch_mode=False,
                             max_old_space_size=None):
    """Returns context manager to start/stop the webpack compiler gracefully.

    Args:
        config_path: str|None. Path to an explicit webpack config, or None to
            derive one from use_prod_env and use_source_maps.
        use_prod_env: bool. Whether to compile for use in production. Ignored
            when config_path is given.
        use_source_maps: bool. Whether to compile with source maps. Ignored
            when config_path is given.
        watch_mode: bool. Whether to run the compiler in watch mode, which
            rebuilds on file change.
        max_old_space_size: int|None. Sets the max memory size of the
            compiler's "old memory" section; as consumption approaches the
            limit, the compiler spends more time on garbage collection in an
            effort to free unused memory. None keeps the default.

    Yields:
        psutil.Process. The Webpack compiler process.
    """
    if config_path is None:
        if use_prod_env:
            config_path = (
                common.WEBPACK_PROD_SOURCE_MAPS_CONFIG if use_source_maps
                else common.WEBPACK_PROD_CONFIG)
        else:
            config_path = (
                common.WEBPACK_DEV_SOURCE_MAPS_CONFIG if use_source_maps
                else common.WEBPACK_DEV_CONFIG)

    compiler_args = [
        common.NODE_BIN_PATH, common.WEBPACK_BIN_PATH, '--config', config_path]
    if max_old_space_size:
        # --max-old-space-size is a Node.js flag rather than a Webpack one,
        # so it must come immediately after the node binary.
        compiler_args.insert(1, '--max-old-space-size=%d' % max_old_space_size)
    if watch_mode:
        compiler_args.extend(['--color', '--watch', '--progress'])

    with python_utils.ExitStack() as stack:
        # OK to use shell=True here because we are passing string literals and
        # constants, so there is no risk of a shell-injection attack.
        proc = stack.enter_context(managed_process(
            compiler_args,
            human_readable_name='Webpack Compiler',
            shell=True,
            # Capture the compiler's output to detect when builds complete.
            stdout=subprocess.PIPE))

        if watch_mode:
            first_build_finished = False
            for line in iter(lambda: proc.stdout.readline() or None, None):
                common.write_stdout_safe(line)
                # 'Built at: ' is printed when a compilation succeeds; stop
                # waiting after the first one so the site is ready to visit.
                if b'Built at: ' in line:
                    first_build_finished = True
                    break
            if not first_build_finished:
                # Output ended without ever reporting a successful build.
                raise IOError('First build never completed')

        def drain_remaining_output():
            """Prints the proc's output until it is exhausted."""
            for line in iter(lambda: proc.stdout.readline() or None, None):
                common.write_stdout_safe(line)

        # Forward the rest of the compiler's output to stdout from a
        # background thread, and join it while the stack unwinds.
        output_thread = threading.Thread(target=drain_remaining_output)
        output_thread.start()
        stack.callback(output_thread.join)

        yield proc
예제 #14
0
 def setUp(self):
     super(PutResultsTests, self).setUp()
     self.datastoreio_stub = stub_io.DatastoreioStub()
     with python_utils.ExitStack() as stack:
         stack.enter_context(self.datastoreio_stub.context())
         # pop_all() hands the entered context off to a fresh stack so it
         # stays active after this with-block exits.
         self.exit_stack = stack.pop_all()
예제 #15
0
 def setUp(self):
     super(ManagedProcessTests, self).setUp()
     # Fresh ExitStack per test for registering cleanups of managed
     # processes. NOTE(review): presumably closed in tearDown (not visible
     # here) — confirm before relying on it.
     self.exit_stack = python_utils.ExitStack()