def test_matching_digests_should_return_true(self):
    """Validating a digest computed from the identical message and secret must succeed."""
    token = 'secrettoken'
    payload = 'message blah blah horse battery staple'
    Secret.set(token)
    incoming_digest = Secret._get_hex_digest(payload, token)
    self.assertTrue(Secret.digest_is_valid(incoming_digest, payload))
def test_unspecified_digest_should_return_false(self):
    """A missing (None) digest must never validate."""
    token = 'secrettoken'
    payload = 'message blah blah horse battery staple'
    Secret.set(token)
    missing_digest = None
    self.assertFalse(Secret.digest_is_valid(missing_digest, payload))
def test_non_matching_digests_should_return_false(self):
    """A digest computed over a different message must not validate."""
    token = 'secrettoken'
    payload = 'message blah blah horse battery staple'
    Secret.set(token)
    wrong_digest = Secret._get_hex_digest('not the original message', token)
    self.assertFalse(Secret.digest_is_valid(wrong_digest, payload))
def setUp(self):
    """Shared fixture: fixed secret and a fresh functional-test cluster per test."""
    # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
    log.configure_logging('DEBUG')
    Secret.set('testsecret')
    self.cluster = FunctionalTestCluster(verbose=self._get_test_verbosity())
def _set_secret(config_filename):
    """
    Load the shared cluster secret from Configuration, generating and persisting a new
    one in the conf file if none is set, then install it via Secret.set().

    :param config_filename: Path of the conf file to persist a newly generated secret to
    :type config_filename: str
    """
    if 'secret' in Configuration and Configuration['secret'] is not None:
        secret = Configuration['secret']
    else:
        # No secret found, generate one and persist it.
        # BUG FIX: the previous code used hashlib.sha512().hexdigest() -- the SHA-512 of
        # empty input -- which is a single well-known constant, so every install shared
        # the same "secret". Generate 128 hex chars from a CSPRNG instead (same length
        # and alphabet as before, so the value stays backward-compatible).
        import secrets as stdlib_secrets  # local import: stdlib only
        secret = stdlib_secrets.token_hex(64)  # 64 random bytes -> 128 hex characters
        conf_file = ConfigFile(config_filename)
        conf_file.write_value('secret', secret, BASE_CONFIG_FILE_SECTION)
    Secret.set(secret)
def setUp(self):
    """Shared fixture: reset global state, then build a fresh cluster and network client."""
    # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
    log.configure_logging('DEBUG')
    # Reset config and the slave-registry singleton so earlier tests can't leak state in.
    self._reset_config()
    Secret.set('testsecret')
    SlaveRegistry.reset_singleton()
    self.cluster = FunctionalTestCluster(verbose=self._get_test_verbosity())
    self._network = Network()
def _set_secret(config_filename):
    """
    Load the shared cluster secret from Configuration; if absent, generate a random
    128-character secret, persist it to the conf file, and install it via Secret.set().

    :param config_filename: Path of the conf file to persist a newly generated secret to
    :type config_filename: str
    """
    if 'secret' in Configuration and Configuration['secret'] is not None:
        secret = Configuration['secret']
    else:
        # No secret found, generate one and persist it
        alphabet = string.ascii_lowercase + string.digits
        rng = random.SystemRandom()  # OS-backed CSPRNG
        secret = ''.join(rng.choice(alphabet) for _ in range(128))
        conf_file = ConfigFile(config_filename)
        conf_file.write_value('secret', secret, BASE_CONFIG_FILE_SECTION)
    Secret.set(secret)
def kill(self):
    """Instructs the slave process to kill itself."""
    url = self._slave_api.url('kill')
    self._network.post_with_digest(url, {}, Secret.get())
    self.mark_dead()
def setup(self, build: Build, executor_start_index: int) -> bool:
    """
    Execute a setup command on the slave for the specified build. The setup process executes
    asynchronously on the slave and the slave will alert the master when setup is complete
    and it is ready to start working on subjobs.

    :param build: The build to set up this slave to work on
    :param executor_start_index: The index the slave should number its executors from for this build
    :return: Whether or not the call to start setup on the slave was successful
    """
    project_params = build.build_request.build_parameters().copy()
    project_params.update(build.project_type.slave_param_overrides())
    post_data = {
        'project_type_params': project_params,
        'build_executor_start_index': executor_start_index,
    }
    setup_url = self._slave_api.url('build', build.build_id(), 'setup')
    self.current_build_id = build.build_id()
    try:
        self._network.post_with_digest(setup_url, post_data, Secret.get())
    except (requests.ConnectionError, requests.Timeout) as ex:
        # A slave we can't reach is treated as dead; the caller sees the failed setup.
        self._logger.warning('Setup call to {} failed with {}: {}.', self, ex.__class__.__name__, str(ex))
        self.mark_dead()
        return False
    return True
def _create_test_config_file(self, base_dir_sys_path):
    """
    Create a temporary conf file just for this test.

    :param base_dir_sys_path: Sys path of the base app dir
    :type base_dir_sys_path: unicode
    :return: The path to the conf file
    :rtype: str
    """
    # Copy default conf file to tmp location
    self._conf_template_path = join(self._clusterrunner_repo_dir, 'conf', 'default_clusterrunner.conf')
    # Creating the conf inside the base dir lets teardown clean everything up by removing that dir.
    test_conf_path = tempfile.NamedTemporaryFile(dir=base_dir_sys_path).name
    shutil.copy(self._conf_template_path, test_conf_path)
    os.chmod(test_conf_path, ConfigFile.CONFIG_FILE_MODE)
    conf_file = ConfigFile(test_conf_path)

    # Set custom conf file values for this test
    overrides = {
        'secret': Secret.get(),
        'base_directory': base_dir_sys_path,
        'max_log_file_size': 1024 * 5,
    }
    for key, value in overrides.items():
        conf_file.write_value(key, value, BASE_CONFIG_FILE_SECTION)
    return test_conf_path
def test_git_project_params_are_modified_for_slave(self):
    """Slave setup must receive the master-rewritten git URL plus any extra overrides."""
    slave = self._create_slave()
    slave._network.post_with_digest = Mock()
    build_request = BuildRequest({
        'type': 'git',
        'url': 'http://original-user-specified-url',
    })
    mock_git = Mock(slave_param_overrides=Mock(return_value={
        'url': 'ssh://new-url-for-clusterrunner-master',
        'extra': 'something_extra',
    }))
    mock_build = MagicMock(spec=Build, build_request=build_request,
                           build_id=Mock(return_value=888), project_type=mock_git)

    slave.setup(mock_build, executor_start_index=777)

    expected_url = 'http://{}/v1/build/888/setup'.format(self._FAKE_SLAVE_URL)
    expected_body = {
        'build_executor_start_index': 777,
        'project_type_params': {
            'type': 'git',
            'url': 'ssh://new-url-for-clusterrunner-master',
            'extra': 'something_extra',
        },
    }
    slave._network.post_with_digest.assert_called_with(expected_url, expected_body, Secret.get())
def start_subjob(self, subjob: Subjob):
    """
    Send a subjob of a build to this slave. The slave must have already run setup for the
    corresponding build.

    :param subjob: The subjob to send to this slave
    """
    # Refuse to dispatch work to a slave that can no longer accept it.
    if not self.is_alive():
        raise DeadSlaveError('Tried to start a subjob on a dead slave.')
    if self._is_in_shutdown_mode:
        raise SlaveMarkedForShutdownError('Tried to start a subjob on a slave in shutdown mode.')

    url = self._slave_api.url('build', subjob.build_id(), 'subjob', subjob.subjob_id())
    body = {'atomic_commands': subjob.atomic_commands()}
    try:
        response = self._network.post_with_digest(url, body, Secret.get(), error_on_failure=True)
    except (requests.ConnectionError, requests.Timeout, RequestFailedError) as ex:
        raise SlaveCommunicationError('Call to slave service failed: {}.'.format(repr(ex))) from ex

    analytics.record_event(analytics.MASTER_TRIGGERED_SUBJOB,
                           executor_id=response.json().get('executor_id'),
                           build_id=subjob.build_id(),
                           subjob_id=subjob.subjob_id(),
                           slave_id=self.id)
def setUp(self):
    """Shared fixture: temp base dir, per-test conf file, and a cluster using both."""
    # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
    log.configure_logging('DEBUG')
    Secret.set('testsecret')
    # TemporaryDirectory cleans itself up when garbage-collected / explicitly closed.
    self.test_app_base_dir = tempfile.TemporaryDirectory()
    self.test_conf_file_path = self._create_test_config_file({
        'secret': Secret.get(),
        'base_directory': self.test_app_base_dir.name,
    })
    self.cluster = FunctionalTestCluster(
        conf_file_path=self.test_conf_file_path,
        verbose=self._get_test_verbosity(),
    )
def _create_test_config_file(self, base_dir_sys_path: str, **extra_conf_vals) -> str:
    """
    Create a temporary conf file just for this test.

    :param base_dir_sys_path: Sys path of the base app dir
    :param extra_conf_vals: Optional; additional values to set in the conf file
    :return: The path to the conf file
    """
    # Copy default conf file to tmp location
    self._conf_template_path = join(self._clusterrunner_repo_dir, 'conf', 'default_clusterrunner.conf')
    # Placing the conf inside the base dir means removing that dir cleans up the whole test.
    conf_path = tempfile.NamedTemporaryFile(dir=base_dir_sys_path).name
    shutil.copy(self._conf_template_path, conf_path)
    os.chmod(conf_path, ConfigFile.CONFIG_FILE_MODE)
    conf_file = ConfigFile(conf_path)

    # Set custom conf file values for this test; explicit extras override nothing below.
    values = {
        'secret': Secret.get(),
        'base_directory': base_dir_sys_path,
        'max_log_file_size': 1024 * 5,
        'database_name': TEST_DB_NAME,
        'database_url': TEST_DB_URL,
    }
    values.update(extra_conf_vals)
    for key, value in values.items():
        conf_file.write_value(key, value, BASE_CONFIG_FILE_SECTION)
    return conf_path
def setup(self, build: Build, executor_start_index: int) -> bool:
    """
    Execute a setup command on the slave for the specified build. Setup runs asynchronously
    on the slave, and the slave alerts the master once it is ready to start working on subjobs.

    :param build: The build to set up this slave to work on
    :param executor_start_index: The index the slave should number its executors from for this build
    :return: Whether or not the call to start setup on the slave was successful
    """
    params = build.build_request.build_parameters().copy()
    params.update(build.project_type.slave_param_overrides())
    setup_url = self._slave_api.url('build', build.build_id(), 'setup')
    self.current_build_id = build.build_id()
    try:
        self._network.post_with_digest(setup_url, {
            'project_type_params': params,
            'build_executor_start_index': executor_start_index,
        }, Secret.get())
    except (requests.ConnectionError, requests.Timeout) as ex:
        # Unreachable slave: log, mark dead, and report the failed setup to the caller.
        self._logger.warning('Setup call to {} failed with {}: {}.', self, ex.__class__.__name__, str(ex))
        self.mark_dead()
        return False
    return True
def _send_heartbeat_to_master(self):
    """POST a heartbeat for this slave to the master and return the response."""
    url = self._master_api.url('slave', self._slave_id, 'heartbeat')
    body = {'slave': {'heartbeat': True}}
    return self._network.post_with_digest(url, request_params=body, secret=Secret.get())
def setUp(self):
    """Shared fixture: temp base dir plus a conf tuned to force log rollover."""
    # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
    log.configure_logging('DEBUG')
    Secret.set('testsecret')
    self.test_app_base_dir = tempfile.TemporaryDirectory()
    self.test_conf_file_path = self._create_test_config_file({
        'secret': Secret.get(),
        'base_directory': self.test_app_base_dir.name,
        # Set the max log file size to a low value so that we cause at least one rollover during the test.
        'max_log_file_size': 1024 * 5,
    })
    self.cluster = FunctionalTestCluster(
        conf_file_path=self.test_conf_file_path,
        verbose=self._get_test_verbosity(),
    )
def function_with_auth(self, *args, **kwargs):
    """Run the wrapped handler only if the request's digest header authenticates its body."""
    digest = self.request.headers.get(Secret.DIGEST_HEADER_KEY)
    if not Secret.digest_is_valid(digest, self.encoded_body.decode('utf-8')):
        raise AuthenticationError('Message digest does not match header, message not authenticated.')
    return function(self, *args, **kwargs)
def _notify_master_of_state_change(self, new_state):
    """
    Send a state notification to the master. This is used to notify the master of events
    occurring on the slave related to build execution progress.

    :type new_state: SlaveState
    """
    url = self._master_api.url('slave', self._slave_id)
    body = {'slave': {'state': new_state}}
    self._network.put_with_digest(url, request_params=body, secret=Secret.get(), error_on_failure=True)
def _async_start_subjob(self, subjob):
    """
    Post a subjob's atomic commands to the slave and record the triggering event.

    :type subjob: Subjob
    """
    url = self._slave_api.url('build', subjob.build_id(), 'subjob', subjob.subjob_id())
    response = self._network.post_with_digest(
        url, {'atomic_commands': subjob.atomic_commands()}, Secret.get(), error_on_failure=True)
    analytics.record_event(analytics.MASTER_TRIGGERED_SUBJOB,
                           executor_id=response.json().get('executor_id'),
                           build_id=subjob.build_id(),
                           subjob_id=subjob.subjob_id(),
                           slave_id=self.id)
def kill(self):
    """Instruct the slave process to kill itself."""
    self._logger.notice('Killing {}', self)
    url = self._slave_api.url('kill')
    # Best effort: a slave that is already gone will fail the request, which is fine --
    # we mark it dead either way.
    try:
        self._network.post_with_digest(url, {}, Secret.get())
    except (requests.ConnectionError, requests.Timeout):
        pass
    self.mark_dead()
def put_with_digest(self, url, request_params, secret, error_on_failure=False):
    """
    Put to a url with the Message Authentication Digest

    :type url: str
    :type request_params: dict [str, any]
    :param secret: the secret used to produce the message auth digest
    :rtype: requests.Response
    """
    body = self.encode_body(request_params)
    auth_headers = Secret.header(body, secret)
    return self.put(url, body, headers=auth_headers, error_on_failure=error_on_failure)
def test_run_instantiates_buildrunner_with_correct_constructor_args_for_git_project_type(self):
    """BuildRunner must be constructed with the configured master address and git params."""
    Configuration['hostname'] = 'localhost'
    Configuration['port'] = 43000

    BuildSubcommand().run(None, None, type='git')

    # assert on constructor params
    self.mock_BuildRunner.assert_called_once_with(
        'localhost:43000', request_params={'type': 'git'}, secret=Secret.get())
def _graceful_shutdown_slaves(self, body):
    """
    Ask the master to gracefully shut down the slaves described in the request body.

    :type body: dict
    :rtype: requests.Response
    """
    url = self._api.url('slave', 'shutdown')
    return self._network.post_with_digest(url, body, Secret.get(), error_on_failure=True)
def test_run_instantiates_buildrunner_with_correct_constructor_args_for_git_project_type(self):
    """The git build subcommand should hand the configured master address to BuildRunner."""
    Configuration['hostname'] = 'localhost'
    Configuration['port'] = 43000
    subcommand = BuildSubcommand()

    subcommand.run(None, None, type='git')

    expected_params = {'type': 'git'}
    self.mock_BuildRunner.assert_called_once_with(
        'localhost:43000', request_params=expected_params, secret=Secret.get())
def run(self, log_level, master_url, remote_file=None, build_type=None, **request_params):
    """
    Execute a build and wait for it to complete.
    :param log_level: the log level at which to do application logging (or None for default log level)
    :type log_level: str | None
    :param master_url: the url (specified by the user) of the master to which we should send the build
    :type master_url: str | None
    :param remote_file: a list of remote files where each element contains the output file name and
        the resource URL
    :type remote_file: list[list[str]] | None
    :param build_type: the build type of the request to be sent (e.g., "git", "directory"). If not
        specified will default to the "directory" project type.
    :type build_type: str | None
    :param request_params: key-value pairs to be provided as build parameters in the build request
    :type request_params: dict
    """
    log_level = log_level or Configuration['log_level']
    log.configure_logging(log_level=log_level, simplified_console_logs=True)
    # Explicit build_type wins over a 'type' request param; fall back to 'directory'.
    request_params['type'] = build_type or request_params.get('type') or 'directory'
    if remote_file:
        request_params['remote_files'] = {name: url for name, url in remote_file}
    operational_master_url = master_url or '{}:{}'.format(Configuration['hostname'], Configuration['port'])

    # If running a single master, single slave--both on localhost--we need to launch services locally.
    if master_url is None and Network.are_hosts_same(Configuration['master_hostname'], 'localhost') \
            and len(Configuration['slaves']) == 1 \
            and Network.are_hosts_same(Configuration['slaves'][0], 'localhost'):
        self._start_local_services_if_needed(operational_master_url)

    if request_params['type'] == 'directory':
        # Directory builds default to wherever the command was invoked from.
        request_params['project_directory'] = request_params.get('project_directory') or os.getcwd()

    runner = BuildRunner(master_url=operational_master_url, request_params=request_params, secret=Secret.get())
    if not runner.run():
        # Non-zero exit signals build failure to the calling shell/CI.
        sys.exit(1)
def _graceful_shutdown_slaves(self, body):
    """
    Request a graceful shutdown of the slaves described in the request body.

    :type body: dict
    :rtype: requests.Response
    """
    shutdown_url = self._api.url('slave', 'shutdown')
    result = self._network.post_with_digest(shutdown_url, body, Secret.get(), error_on_failure=True)
    return result
def test_run_instantiates_buildrunner_with_correct_constructor_args_for_directory_project_type(self):
    """With no type given, BuildRunner should get a directory-type request for the cwd."""
    Configuration['hostname'] = 'localhost'
    Configuration['port'] = 43000
    self.patch('os.getcwd').return_value = '/current/directory'

    BuildSubcommand().run(None, None)

    # assert on constructor params
    self.mock_BuildRunner.assert_called_once_with(
        'localhost:43000',
        request_params={'type': 'directory', 'project_directory': '/current/directory'},
        secret=Secret.get())
def cancel_build(self, build_id):
    """
    PUT a request to the master to cancel a build.

    :param build_id: The id of the build we want to cancel
    :type build_id: int
    :return: The API response
    :rtype: dict
    """
    url = self._api.url('build', build_id)
    response = self._network.put_with_digest(url, {'status': 'canceled'}, Secret.get(), error_on_failure=True)
    return response.json()
def post_new_build(self, request_params):
    """
    Send a post request to the master to start a new build with the specified parameters.

    :param request_params: The build parameters to send in the post body
    :type request_params: dict
    :return: The API response data
    :rtype: dict
    """
    url = self._api.url('build')
    response = self._network.post_with_digest(url, request_params, Secret.get(), error_on_failure=True)
    return response.json()
def test_git_project_params_are_modified_for_slave(self):
    """The master should rewrite the git URL so the slave fetches from the master's repo mirror."""
    remote_path = 'central.sourcecode.example.com/company/project'
    base_directory = '/home/cr_user/.clusterrunner'
    Configuration['repo_directory'] = '{}/repos/master'.format(base_directory)
    slave = self._create_slave()
    slave._network.post_with_digest = Mock()

    slave.setup(1, {'type': 'git', 'url': 'http://{}'.format(remote_path)})

    expected_setup_url = 'http://{}/v1/build/1/setup'.format(self._FAKE_SLAVE_URL)
    expected_body = {'project_type_params': {
        'url': 'ssh://{}{}/repos/master/{}'.format(self._fake_hostname, base_directory, remote_path),
        'type': 'git'}}
    slave._network.post_with_digest.assert_called_with(expected_setup_url, expected_body, Secret.get())
def cancel_build(self, build_id):
    """
    PUT a request to the master to cancel a build.

    :param build_id: The id of the build we want to cancel
    :type build_id: int
    :return: The API response
    :rtype: dict
    """
    cancel_url = self._api.url('build', build_id)
    result = self._network.put_with_digest(
        cancel_url, {'status': 'canceled'}, Secret.get(), error_on_failure=True)
    return result.json()
def setup(self, build_id, project_type_params):
    """
    Execute a setup command on the slave for the specified build. The command is executed
    asynchronously from the perspective of this method, but any subjobs will block until
    the slave finishes executing the setup command.

    :param build_id: The build id that this setup command is for.
    :type build_id: int
    :param project_type_params: The parameters that define the project type this build will execute in
    :type project_type_params: dict
    """
    # (Docstring fix: the type field was garbled as ":typeproject_type_paramss:".)
    setup_url = self._slave_api.url('build', build_id, 'setup')
    post_data = {
        'project_type_params': project_type_params,
    }
    self._network.post_with_digest(setup_url, post_data, Secret.get())
def post_new_build(self, request_params):
    """
    Send a post request to the master to start a new build with the specified parameters.

    :param request_params: The build parameters to send in the post body
    :type request_params: dict
    :return: The API response data
    :rtype: dict
    """
    new_build_url = self._api.url('build')
    result = self._network.post_with_digest(
        new_build_url, request_params, Secret.get(), error_on_failure=True)
    return result.json()
def test_run_instantiates_buildrunner_with_correct_constructor_args_for_directory_project_type(self):
    """Omitting a build type should produce a directory-type request rooted at the cwd."""
    Configuration['hostname'] = 'localhost'
    Configuration['port'] = 43000
    getcwd_patch = self.patch('os.getcwd')
    getcwd_patch.return_value = '/current/directory'
    subcommand = BuildSubcommand()

    subcommand.run(None, None)

    self.mock_BuildRunner.assert_called_once_with(
        'localhost:43000',
        request_params={'type': 'directory', 'project_directory': '/current/directory'},
        secret=Secret.get())
def setup(self, build):
    """
    Execute a setup command on the slave for the specified build. The setup process executes
    asynchronously on the slave and the slave will alert the master when setup is complete
    and it is ready to start working on subjobs.

    :param build: The build to set up this slave to work on
    :type build: Build
    """
    params = build.build_request.build_parameters().copy()
    params.update(build.project_type.slave_param_overrides())
    setup_url = self._slave_api.url('build', build.build_id(), 'setup')
    self._network.post_with_digest(setup_url, {
        'project_type_params': params,
        'build_executor_start_index': build.num_executors_allocated,
    }, Secret.get())
    self.current_build_id = build.build_id()
def setup(self, build_id, project_type_params):
    """
    Execute a setup command on the slave for the specified build. The command is executed
    asynchronously from the perspective of this method, but any subjobs will block until
    the slave finishes executing the setup command.

    :param build_id: The build id that this setup command is for.
    :type build_id: int
    :param project_type_params: The parameters that define the project type this build will execute in
    :type project_type_params: dict
    """
    setup_url = self._slave_api.url('build', build_id, 'setup')
    slave_params = util.project_type_params_for_slave(project_type_params)
    self._network.post_with_digest(setup_url, {'project_type_params': slave_params}, Secret.get())
    self.current_build_id = build_id
def test_git_project_params_are_modified_for_slave(self):
    """Setup must post the git URL rewritten to point at the master's repo mirror."""
    remote_path = 'central.sourcecode.example.com/company/project'
    base_directory = '/home/cr_user/.clusterrunner'
    Configuration['repo_directory'] = '{}/repos/master'.format(base_directory)
    slave = self._create_slave()
    slave._network.post_with_digest = Mock()

    slave.setup(1, {'type': 'git', 'url': 'http://{}'.format(remote_path)})

    rewritten_url = 'ssh://{}{}/repos/master/{}'.format(self._fake_hostname, base_directory, remote_path)
    slave._network.post_with_digest.assert_called_with(
        'http://{}/v1/build/1/setup'.format(self._FAKE_SLAVE_URL),
        {'project_type_params': {'url': rewritten_url, 'type': 'git'}},
        Secret.get())
def start_subjob(self, subjob: Subjob):
    """
    Send a subjob of a build to this slave. The slave must have already run setup for the
    corresponding build.

    :param subjob: The subjob to send to this slave
    """
    # Guard clauses: dead or shutting-down slaves must not receive new work.
    if not self.is_alive():
        raise DeadSlaveError('Tried to start a subjob on a dead slave.')
    if self._is_in_shutdown_mode:
        raise SlaveMarkedForShutdownError('Tried to start a subjob on a slave in shutdown mode.')

    execution_url = self._slave_api.url('build', subjob.build_id(), 'subjob', subjob.subjob_id())
    try:
        response = self._network.post_with_digest(
            execution_url, {'atomic_commands': subjob.atomic_commands()}, Secret.get(),
            error_on_failure=True)
    except (requests.ConnectionError, requests.Timeout, RequestFailedError) as ex:
        raise SlaveCommunicationError('Call to slave service failed: {}.'.format(repr(ex))) from ex

    subjob_executor_id = response.json().get('executor_id')
    analytics.record_event(analytics.MASTER_TRIGGERED_SUBJOB,
                           executor_id=subjob_executor_id,
                           build_id=subjob.build_id(),
                           subjob_id=subjob.subjob_id(),
                           slave_id=self.id)
def function_with_auth(self, *args, **kwargs):
    """Reject the request unless the digest header authenticates the request body."""
    received_digest = self.request.headers.get(Secret.DIGEST_HEADER_KEY)
    body_text = self.encoded_body.decode('utf-8')
    if not Secret.digest_is_valid(received_digest, body_text):
        raise AuthenticationError('Message digest does not match header, message not authenticated.')
    return function(self, *args, **kwargs)
def test_get_secret_should_return_set_secret(self):
    """Secret.get must round-trip whatever Secret.set stored."""
    expected_secret = 'secret1234'
    Secret.set(expected_secret)
    self.assertEqual(expected_secret, Secret.get())
def secret_setter(*args):
    """Install a fixed test secret, ignoring any positional arguments."""
    Secret.set('mellon1234')
def run(self, log_level, master_url, remote_file=None, build_type=None, **request_params):
    """
    Execute a build and wait for it to complete.
    :param log_level: the log level at which to do application logging (or None for default log level)
    :type log_level: str | None
    :param master_url: the url (specified by the user) of the master to which we should send the build
    :type master_url: str | None
    :param remote_file: a list of remote files where each element contains the output file name and
        the resource URL
    :type remote_file: list[list[str]] | None
    :param build_type: the build type of the request to be sent (e.g., "git", "directory"). If not
        specified will default to the "directory" project type.
    :type build_type: str | None
    :param request_params: key-value pairs to be provided as build parameters in the build request
    :type request_params: dict
    """
    log_level = log_level or Configuration['log_level']
    log.configure_logging(log_level=log_level, simplified_console_logs=True)
    # Explicit build_type wins over a 'type' request param; fall back to 'directory'.
    request_params['type'] = build_type or request_params.get('type') or 'directory'
    if remote_file:
        request_params['remote_files'] = {name: url for name, url in remote_file}
    operational_master_url = master_url or '{}:{}'.format(Configuration['hostname'], Configuration['port'])

    # If running a single master, single slave--both on localhost--we need to launch services locally.
    # NOTE(review): plain string comparison with 'localhost' misses equivalent aliases such as
    # 127.0.0.1 -- confirm whether a host-resolution comparison is needed here.
    if master_url is None and Configuration['master_hostname'] == 'localhost'\
            and len(Configuration['slaves']) == 1 and Configuration['slaves'][0] == 'localhost':
        self._start_local_services(operational_master_url)

    if request_params['type'] == 'directory':
        # Directory builds default to wherever the command was invoked from.
        request_params['project_directory'] = request_params.get('project_directory') or os.getcwd()

    runner = BuildRunner(master_url=operational_master_url, request_params=request_params, secret=Secret.get())
    if not runner.run():
        # Non-zero exit signals build failure to the calling shell/CI.
        sys.exit(1)
def test_header_generates_128_character_digest(self):
    """The digest header value should always be a 128-character (SHA-512 hex) string."""
    token = hashlib.sha512().hexdigest()
    auth_header = Secret.header('message', token)
    self.assertEqual(len(auth_header[Secret.DIGEST_HEADER_KEY]), 128)