Example #1
    def async_run(self, port, log_level, eventlog_file):
        """
        Run a ClusterRunner master service.

        :param port: the port on which to run the master service
        :type port: int | None
        :param log_level: the log level at which to do application logging (or None for default log level)
        :type log_level: str | None
        :param eventlog_file: an optional alternate file in which to write event logs
        :type eventlog_file: str | None
        """
        port = port or Configuration['port']
        log_level = log_level or Configuration['log_level']
        eventlog_file = eventlog_file or Configuration['eventlog_file']

        log.configure_logging(log_level=log_level, log_file=Configuration['log_file'])
        analytics.initialize(eventlog_file)
        analytics.record_event(analytics.SERVICE_STARTED, service='master')

        cluster_master = ClusterMaster()
        application = ClusterMasterApplication(cluster_master)

        ioloop = self._start_application(application, port)

        self._write_pid_file(Configuration['master_pid_file'])

        # log startup message once ioloop is running
        hostname = Configuration['hostname']
        log_startup = functools.partial(self._logger.info, 'Master service is running on {}:{}.'.format(hostname, port))
        ioloop.add_callback(log_startup)

        ioloop.start()  # this call blocks until the server is stopped
        ioloop.close(all_fds=True)  # all_fds=True is necessary here to make sure connections don't hang
        self._logger.notice('Master server was stopped.')
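The startup-logging step above uses a common Tornado idiom: bind the message arguments now with functools.partial, then defer the call with ioloop.add_callback so it only fires once the loop is actually running. A minimal standalone sketch of that idiom, assuming tornado is installed; the hostname, port, and announce() function are illustrative, not ClusterRunner code:

import functools

from tornado.ioloop import IOLoop


def announce(host, port):
    # Runs only after the loop has started, so the message reflects a live server.
    print('Service is running on {}:{}.'.format(host, port))


ioloop = IOLoop.current()
ioloop.add_callback(functools.partial(announce, 'localhost', 43000))  # bind args now, run later
ioloop.add_callback(ioloop.stop)  # stop right afterwards so this sketch exits
ioloop.start()  # blocks until stop() is called
ioloop.close(all_fds=True)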
Example #2
    def setUp(self):
        super().setUp()
        self._set_up_safe_guards()

        # Reset singletons so that they get recreated for every test that uses them.
        Configuration.reset_singleton()
        UnhandledExceptionHandler.reset_singleton()

        # Explicitly initialize UnhandledExceptionHandler singleton here (on the main thread) since it sets up signal
        # handlers that must execute on the main thread.
        UnhandledExceptionHandler.singleton()

        MasterConfigLoader().configure_defaults(Configuration.singleton())
        MasterConfigLoader().configure_postload(Configuration.singleton())
        self.patch('app.util.conf.master_config_loader.MasterConfigLoader.load_from_config_file')

        # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
        log.configure_logging('DEBUG')
        # Then stub out configure_logging so we don't end up logging to real files during testing.
        self.patch('app.util.log.configure_logging')

        # Set up TestHandler. This allows asserting on log messages in tests.
        self.log_handler = logbook.TestHandler(bubble=True)
        self.log_handler.push_application()

        self._base_setup_called = True
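The logbook.TestHandler pushed at the end of this setUp is what lets tests assert on log output. A small sketch of that mechanism on its own, assuming logbook is installed; the logger name and message are made up for illustration:

import logbook

handler = logbook.TestHandler(bubble=True)
handler.push_application()  # capture records emitted anywhere in the process

logbook.Logger('example').info('Master service is running on localhost:43000.')

# The handler keeps every captured record, so tests can assert on messages and levels.
assert handler.has_info('Master service is running on localhost:43000.')
assert any('running' in record.message for record in handler.records)

handler.pop_application()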
Example #3
    def setUp(self):
        # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
        log.configure_logging('DEBUG')

        Secret.set('testsecret')

        self.cluster = FunctionalTestCluster(verbose=self._get_test_verbosity())
Example #4
    def run(self, log_level, master, master_port, slaves, slave_port, num_executors):
        """
        'Deploy' can be a vague word, so we should be specific about what this command accomplishes.

        This command will:
        - Replace the existing binary files on the slave and master hosts with the binaries currently running this
          command. If there is nothing to replace, this command simply places the binary files there.
        - Stop all clusterrunner services running on all slaves and the master.
        - Start the master and slave services on the master and slave hosts.
        - Poll until timeout to validate that the master service has started, and that the slaves have successfully
          connected with the master.

        :param log_level: the log level at which to do application logging (or None for default log level)
        :type log_level: str | None
        :param master: the master hostname (no port) to deploy the master service to
        :type master: str | None
        :param master_port: the port number the master service will listen on
        :type master_port: int | None
        :param slaves: list of slave hostnames (no ports) to deploy the slave service to
        :type slaves: [str] | None
        :param slave_port: the port number the slave services will listen on
        :type slave_port: int | None
        :param num_executors: the number of executors that will be run per slave
        :type num_executors: int | None
        """
        log.configure_logging(
            log_level=log_level or Configuration['log_level'],
            log_file=Configuration['log_file'],
            simplified_console_logs=True,
        )
        conf_path = Configuration['config_file']
        current_executable = sys.executable
        username = getpass.getuser()
        slave_config = self._get_loaded_config(conf_path, SlaveConfigLoader())
        master_config = self._get_loaded_config(conf_path, MasterConfigLoader())
        master = master or slave_config.get('master_hostname')
        master_port = master_port or master_config.get('port')
        slaves = slaves or master_config.get('slaves')
        slave_port = slave_port or slave_config.get('port')
        num_executors = num_executors or slave_config.get('num_executors')
        clusterrunner_executable_dir = join(os.path.expanduser('~'), '.clusterrunner', 'dist')
        clusterrunner_executable = join(clusterrunner_executable_dir, 'clusterrunner')

        self._logger.info('Compressing binaries...')
        binaries_tar_path = self._binaries_tar(current_executable, Configuration['root_directory'])

        self._logger.info('Deploying binaries and confs on master and slaves...')
        arguments = [[host, username, current_executable, binaries_tar_path, conf_path] for host in slaves + [master]]
        Pool().starmap(self._deploy_binaries_and_conf, arguments)

        self._logger.info('Stopping and starting all clusterrunner services...')
        self._start_services(master, master_port, slaves, slave_port, num_executors, username, clusterrunner_executable)

        self._logger.info('Validating successful deployment...')
        master_service_url = '{}:{}'.format(master, master_port)
        self._validate_successful_deployment(master_service_url, slaves)

        self._logger.info('Deploy SUCCESS to slaves: {}'.format(','.join(slaves)))
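The deploy step fans the per-host work out in parallel with multiprocessing.Pool.starmap, passing one argument list per host. A stripped-down sketch of that fan-out pattern; deploy_to_host() and the host names are stand-ins, not the project's _deploy_binaries_and_conf:

from multiprocessing import Pool


def deploy_to_host(host, username, tar_path):
    # Stand-in for the real per-host work (copy the tarball, extract it, install confs).
    print('Deploying {} to {}@{}'.format(tar_path, username, host))


if __name__ == '__main__':
    hosts = ['slave1.example.com', 'slave2.example.com', 'master.example.com']
    arguments = [[host, 'deployer', '/tmp/clusterrunner.tgz'] for host in hosts]
    # starmap unpacks each inner list into positional arguments and blocks until
    # every call has returned, mirroring the Pool().starmap(...) call above.
    with Pool() as pool:
        pool.starmap(deploy_to_host, arguments)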
Example #5
    def setUp(self):
        # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
        log.configure_logging('DEBUG')

        self._reset_config()
        Secret.set('testsecret')
        SlaveRegistry.reset_singleton()

        self.cluster = FunctionalTestCluster(verbose=self._get_test_verbosity())
        self._network = Network()
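The setUp methods throughout these examples reset singletons (SlaveRegistry, Configuration, UnhandledExceptionHandler) so every test gets fresh instances. The sketch below is a hypothetical minimal version of that reset_singleton()/singleton() pattern, not ClusterRunner's actual base class; ExampleRegistry is invented for illustration:

class Singleton:
    _instance = None

    @classmethod
    def singleton(cls):
        # Create the instance lazily on first access and cache it on the class.
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    @classmethod
    def reset_singleton(cls):
        # Drop the cached instance so the next caller (e.g. the next test) gets a fresh one.
        cls._instance = None


class ExampleRegistry(Singleton):
    def __init__(self):
        self.entries = {}


ExampleRegistry.reset_singleton()
registry = ExampleRegistry.singleton()
assert registry is ExampleRegistry.singleton()  # same instance until the next reset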
Example #6
    def async_run(self, port, master_url, num_executors, log_level,
                  eventlog_file):
        """
        Run a ClusterRunner slave service.

        :param port: the port on which to run the slave service
        :type port: int | None
        :param master_url: the url of the master to which this slave should attach
        :type master_url: str | None
        :param num_executors: the number of executors the slave service should use
        :type num_executors: int | None
        :param log_level: the log level at which to do application logging (or None for default log level)
        :type log_level: str | None
        :param eventlog_file: an optional alternate file in which to write event logs
        :type eventlog_file: str | None
        """
        num_executors = num_executors or Configuration['num_executors']
        master_url = master_url or '{}:{}'.format(
            Configuration['master_hostname'], Configuration['master_port'])
        port = port or Configuration['port']
        log_level = log_level or Configuration['log_level']
        eventlog_file = eventlog_file or Configuration['eventlog_file']

        log.configure_logging(log_level=log_level,
                              log_file=Configuration['log_file'].format(port))
        analytics.initialize(eventlog_file)
        analytics.record_event(analytics.SERVICE_STARTED, service='slave')

        cluster_slave = ClusterSlave(
            port=port,
            num_executors=num_executors,
            host=Configuration['hostname'],
        )

        application = ClusterSlaveApplication(cluster_slave)

        ioloop = self._start_application(application, port)

        self._write_pid_file(Configuration['slave_pid_file'])

        # connect to master once tornado ioloop is running
        connect_slave_to_master = functools.partial(
            cluster_slave.connect_to_master, master_url=master_url)
        ioloop.add_callback(connect_slave_to_master)

        # start sending heartbeat after connecting to master
        start_slave_heartbeat = functools.partial(
            cluster_slave.start_heartbeat_thread)
        ioloop.add_callback(start_slave_heartbeat)

        ioloop.start()  # this call blocks until the server is stopped
        ioloop.close(all_fds=True)  # all_fds=True is necessary here to make sure connections don't hang
        self._logger.notice('Slave server was stopped.')
Example #7
    def run(self,
            log_level,
            master_url,
            remote_file=None,
            build_type=None,
            **request_params):
        """
        Execute a build and wait for it to complete.

        :param log_level: the log level at which to do application logging (or None for default log level)
        :type log_level: str | None
        :param master_url: the url (specified by the user) of the master to which we should send the build
        :type master_url: str | None
        :param remote_file: a list of remote files where each element contains the output file name and the resource URL
        :type remote_file: list[list[str]] | None
        :param build_type: the build type of the request to be sent (e.g., "git", "directory"). If not specified
            will default to the "directory" project type.
        :type build_type: str | None
        :param request_params: key-value pairs to be provided as build parameters in the build request
        :type request_params: dict
        """
        log_level = log_level or Configuration['log_level']
        log.configure_logging(log_level=log_level,
                              simplified_console_logs=True)
        request_params['type'] = build_type or request_params.get(
            'type') or 'directory'

        if remote_file:
            request_params['remote_files'] = {
                name: url
                for name, url in remote_file
            }

        operational_master_url = master_url or '{}:{}'.format(
            Configuration['hostname'], Configuration['port'])

        # If running a single master, single slave--both on localhost--we need to launch services locally.
        if master_url is None and Network.are_hosts_same(Configuration['master_hostname'], 'localhost') \
                and len(Configuration['slaves']) == 1 \
                and Network.are_hosts_same(Configuration['slaves'][0], 'localhost'):
            self._start_local_services_if_needed(operational_master_url)

        if request_params['type'] == 'directory':
            request_params['project_directory'] = request_params.get(
                'project_directory') or os.getcwd()

        runner = BuildRunner(master_url=operational_master_url,
                             request_params=request_params,
                             secret=Secret.get())

        if not runner.run():
            sys.exit(1)
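Before sending the build request, the command above massages its inputs: the build type falls back through `build_type or request_params.get('type') or 'directory'`, and the repeated --remote-file NAME URL pairs become a name-to-URL dict. A small standalone sketch of that massaging, with illustrative values:

# Illustrative inputs, shaped like the CLI would provide them.
remote_file = [['tests.tar.gz', 'https://example.com/tests.tar.gz'],
               ['data.bin', 'https://example.com/data.bin']]
build_type = None
request_params = {'job_name': 'Unit'}

# Fall back from the explicit flag, to any 'type' already present, to 'directory'.
request_params['type'] = build_type or request_params.get('type') or 'directory'

if remote_file:
    # Each element is an [output_name, url] pair; the master expects a mapping.
    request_params['remote_files'] = {name: url for name, url in remote_file}

assert request_params['type'] == 'directory'
assert request_params['remote_files']['data.bin'] == 'https://example.com/data.bin'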
Example #8
    def run(self, log_level, master_url, slave_ids=None, all_slaves=False, **request_params):
        log_level = log_level or Configuration['log_level']
        log.configure_logging(log_level=log_level, simplified_console_logs=True)

        master_url = master_url or '{}:{}'.format(Configuration['hostname'], Configuration['port'])
        client = ClusterMasterAPIClient(master_url)
        if all_slaves:
            client.graceful_shutdown_all_slaves()
        elif slave_ids and len(slave_ids) > 0:
            client.graceful_shutdown_slaves_by_id(slave_ids)
        else:
            self._logger.error('No slaves specified to shut down.')
            exit(1)
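One possible way the slave_ids/all_slaves arguments above could be collected from the command line; this argparse wiring is an assumption made for illustration, not ClusterRunner's actual CLI definition:

import argparse

parser = argparse.ArgumentParser(description='Gracefully shut down slaves.')
parser.add_argument('slave_ids', nargs='*', type=int, help='ids of specific slaves to shut down')
parser.add_argument('--all-slaves', action='store_true', help='shut down every slave attached to the master')
parser.add_argument('--master-url', default=None)

args = parser.parse_args(['--all-slaves'])
# These values would then be handed to run(), e.g.:
#   run(log_level=None, master_url=args.master_url,
#       slave_ids=args.slave_ids, all_slaves=args.all_slaves)
assert args.all_slaves and not args.slave_ids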
Example #9
    def setUp(self):
        super().setUp()
        self.addCleanup(patch.stopall)

        self._patched_items = {}
        self._blacklist_methods_not_allowed_in_unit_tests()

        # Stub out a few library dependencies that launch subprocesses.
        self.patch(
            'app.util.autoversioning.get_version').return_value = '0.0.0'
        self.patch('app.util.conf.base_config_loader.platform.node'
                   ).return_value = self._fake_hostname

        if self._do_network_mocks:
            # requests.Session() also makes some subprocess calls on instantiation.
            self.patch('app.util.network.requests.Session')
            # Stub out Network.are_hosts_same() call with a simple string comparison.
            self.patch('app.util.network.Network.are_hosts_same',
                       new=lambda host_a, host_b: host_a == host_b)

        # Reset singletons so that they get recreated for every test that uses them.
        Configuration.reset_singleton()
        UnhandledExceptionHandler.reset_singleton()
        SlaveRegistry.reset_singleton()

        # Explicitly initialize UnhandledExceptionHandler singleton here (on the main thread) since it sets up signal
        # handlers that must execute on the main thread.
        UnhandledExceptionHandler.singleton()

        MasterConfigLoader().configure_defaults(Configuration.singleton())
        MasterConfigLoader().configure_postload(Configuration.singleton())
        self.patch(
            'app.util.conf.master_config_loader.MasterConfigLoader.load_from_config_file'
        )

        # Reset counters
        Slave._slave_id_counter = Counter()
        Build._build_id_counter = Counter()
        analytics._event_id_generator = Counter()

        # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
        log.configure_logging('DEBUG')
        # Then stub out configure_logging so we don't end up logging to real files during testing.
        self.patch('app.util.log.configure_logging')

        # Set up TestHandler. This allows asserting on log messages in tests.
        self.log_handler = logbook.TestHandler(bubble=True)
        self.log_handler.push_application()

        self._base_setup_called = True
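These unit-test setUps lean on a self.patch() helper from the shared base test class. The sketch below is a hypothetical minimal equivalent built on unittest.mock, inferred from the addCleanup(patch.stopall) call above; the real helper may do more (for instance, record patches in self._patched_items):

import unittest
from unittest.mock import patch


class BaseUnitTestCase(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.addCleanup(patch.stopall)  # undo every started patch after each test

    def patch(self, target, **kwargs):
        # Start the patch right away and return the mock so callers can set
        # return_value or assert on calls, as the setUps above do.
        return patch(target, **kwargs).start()


class ExampleTest(BaseUnitTestCase):
    def test_hostname_is_stubbed(self):
        self.patch('platform.node').return_value = 'fake-host'
        import platform
        self.assertEqual(platform.node(), 'fake-host')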
Example #10
    def run(self, log_level):
        """
        Stop/kill all ClusterRunner processes that are running on this host (both master and slave services).
        This is implemented via the pid file that gets written to upon service startup.

        :param log_level: the log level at which to do application logging (or None for default log level)
        :type log_level: str | None
        """
        log.configure_logging(
            log_level=log_level or Configuration['log_level'],
            log_file=Configuration['log_file'],
            simplified_console_logs=True,
        )
        self._kill_pid_in_file_if_exists(Configuration['slave_pid_file'])
        self._kill_pid_in_file_if_exists(Configuration['master_pid_file'])
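A hypothetical sketch of what a pid-file based kill helper such as _kill_pid_in_file_if_exists could look like; this only illustrates the mechanism described in the docstring ("implemented via the pid file that gets written to upon service startup") and is not the project's actual implementation:

import os
import signal


def kill_pid_in_file_if_exists(pid_file_path):
    # Nothing to do if the service never wrote a pid file.
    if not os.path.isfile(pid_file_path):
        return

    with open(pid_file_path) as pid_file:
        try:
            pid = int(pid_file.read().strip())
        except ValueError:
            return  # corrupt pid file; leave it for manual cleanup

    try:
        os.kill(pid, signal.SIGTERM)  # ask the service to shut down
    except ProcessLookupError:
        pass  # the process is already gone

    os.remove(pid_file_path)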
Example #11
    def run(self, log_level):
        """
        Stop/kill all ClusterRunner processes that are running on this host (both master and slave services).
        This is implemented via the pid file that gets written to upon service startup.

        :param log_level: the log level at which to do application logging (or None for default log level)
        :type log_level: str | None
        """
        log.configure_logging(
            log_level=log_level or Configuration['log_level'],
            log_file=Configuration['log_file'],
            simplified_console_logs=True,
        )
        self._kill_pid_in_file_if_exists(Configuration['slave_pid_file'])
        self._kill_pid_in_file_if_exists(Configuration['master_pid_file'])
Example #12
    def async_run(self, port, master_url, num_executors, log_level, eventlog_file):
        """
        Run a ClusterRunner slave service.

        :param port: the port on which to run the slave service
        :type port: int | None
        :param master_url: the url of the master to which this slave should attach
        :type master_url: str | None
        :param num_executors: the number of executors the slave service should use
        :type num_executors: int | None
        :param log_level: the log level at which to do application logging (or None for default log level)
        :type log_level: str | None
        :param eventlog_file: an optional alternate file in which to write event logs
        :type eventlog_file: str | None
        """
        num_executors = num_executors or Configuration['num_executors']
        master_url = master_url or '{}:{}'.format(Configuration['master_hostname'], Configuration['master_port'])
        port = port or Configuration['port']
        log_level = log_level or Configuration['log_level']
        eventlog_file = eventlog_file or Configuration['eventlog_file']

        log.configure_logging(log_level=log_level, log_file=Configuration['log_file'].format(port))
        analytics.initialize(eventlog_file)
        analytics.record_event(analytics.SERVICE_STARTED, service='slave')

        cluster_slave = ClusterSlave(
            port=port,
            num_executors=num_executors,
            host=Configuration['hostname'],
        )

        application = ClusterSlaveApplication(cluster_slave)

        ioloop = self._start_application(application, port)

        self._write_pid_file(Configuration['slave_pid_file'])

        # connect to master once tornado ioloop is running
        connect_slave_to_master = functools.partial(cluster_slave.connect_to_master, master_url=master_url)
        ioloop.add_callback(connect_slave_to_master)

        # start sending heartbeat after connecting to master
        start_slave_heartbeat = functools.partial(cluster_slave.start_heartbeat_thread)
        ioloop.add_callback(start_slave_heartbeat)

        ioloop.start()  # this call blocks until the server is stopped
        ioloop.close(all_fds=True)  # all_fds=True is necessary here to make sure connections don't hang
        self._logger.notice('Slave server was stopped.')
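The _write_pid_file() call in these async_run methods is what the stop command's pid-file lookup depends on. A hypothetical minimal version of such a helper, for illustration only (the path and exact behavior are assumptions):

import os


def write_pid_file(pid_file_path):
    # Record this process's pid so a later stop command can find and signal it.
    os.makedirs(os.path.dirname(pid_file_path), exist_ok=True)
    with open(pid_file_path, 'w') as pid_file:
        pid_file.write(str(os.getpid()))


write_pid_file('/tmp/clusterrunner_example/slave.pid')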
Example #13
    def setUp(self):
        # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
        log.configure_logging('DEBUG')

        Secret.set('testsecret')
        self.test_app_base_dir = tempfile.TemporaryDirectory()

        self.test_conf_file_path = self._create_test_config_file({
            'secret': Secret.get(),
            'base_directory': self.test_app_base_dir.name,
        })

        self.cluster = FunctionalTestCluster(
            conf_file_path=self.test_conf_file_path,
            verbose=self._get_test_verbosity(),
        )
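The _create_test_config_file() helper used by these functional-test setUps presumably serializes the given overrides into a conf file inside the temporary base directory. A hypothetical sketch of that idea; the INI-style format and file name are assumptions, not the project's actual conf layout:

import os
import tempfile


def create_test_config_file(conf_values, base_dir):
    # Write the overrides under a single section; the real helper may use a
    # different format, section name, or location.
    conf_path = os.path.join(base_dir, 'clusterrunner.conf')
    with open(conf_path, 'w') as conf_file:
        conf_file.write('[general]\n')
        for key, value in conf_values.items():
            conf_file.write('{} = {}\n'.format(key, value))
    return conf_path


with tempfile.TemporaryDirectory() as base_dir:
    conf_path = create_test_config_file({'secret': 'testsecret', 'base_directory': base_dir}, base_dir)
    assert os.path.isfile(conf_path)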
Example #14
    def setUp(self):
        super().setUp()
        self.addCleanup(patch.stopall)

        self._patched_items = {}
        self._blacklist_methods_not_allowed_in_unit_tests()

        # Stub out a few library dependencies that launch subprocesses.
        self.patch('app.util.autoversioning.get_version').return_value = '0.0.0'
        self.patch('app.util.conf.base_config_loader.platform.node').return_value = self._fake_hostname

        if self._do_network_mocks:
            # requests.Session() also makes some subprocess calls on instantiation.
            self.patch('app.util.network.requests.Session')
            # Stub out Network.are_hosts_same() call with a simple string comparison.
            self.patch('app.util.network.Network.are_hosts_same', new=lambda host_a, host_b: host_a == host_b)

        # Reset singletons so that they get recreated for every test that uses them.
        Configuration.reset_singleton()
        UnhandledExceptionHandler.reset_singleton()
        SlaveRegistry.reset_singleton()

        # Explicitly initialize UnhandledExceptionHandler singleton here (on the main thread) since it sets up signal
        # handlers that must execute on the main thread.
        UnhandledExceptionHandler.singleton()

        MasterConfigLoader().configure_defaults(Configuration.singleton())
        MasterConfigLoader().configure_postload(Configuration.singleton())
        self.patch('app.util.conf.master_config_loader.MasterConfigLoader.load_from_config_file')

        # Reset counters
        Slave._slave_id_counter = Counter()
        Build._build_id_counter = Counter()
        analytics._event_id_generator = Counter()

        # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
        log.configure_logging('DEBUG')
        # Then stub out configure_logging so we don't end up logging to real files during testing.
        self.patch('app.util.log.configure_logging')

        # Set up TestHandler. This allows asserting on log messages in tests.
        self.log_handler = logbook.TestHandler(bubble=True)
        self.log_handler.push_application()

        self._base_setup_called = True
Example #15
    def setUp(self):
        # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
        log.configure_logging('DEBUG')

        Secret.set('testsecret')
        self.test_app_base_dir = tempfile.TemporaryDirectory()

        self.test_conf_file_path = self._create_test_config_file({
            'secret': Secret.get(),
            'base_directory': self.test_app_base_dir.name,
            # Set the max log file size to a low value so that we cause at least one rollover during the test.
            'max_log_file_size': 1024 * 5,
        })

        self.cluster = FunctionalTestCluster(
            conf_file_path=self.test_conf_file_path,
            verbose=self._get_test_verbosity(),
        )
Example #16
    def setUp(self):
        # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
        log.configure_logging('DEBUG')

        Secret.set('testsecret')
        self.test_app_base_dir = tempfile.TemporaryDirectory()

        self.test_conf_file_path = self._create_test_config_file({
            'secret': Secret.get(),
            'base_directory': self.test_app_base_dir.name,
            # Set the max log file size to a low value so that we cause at least one rollover during the test.
            'max_log_file_size': 1024 * 5,
        })

        self.cluster = FunctionalTestCluster(
            conf_file_path=self.test_conf_file_path,
            verbose=self._get_test_verbosity(),
        )
Example #17
    def async_run(self, port, log_level, eventlog_file):
        """
        Run a ClusterRunner master service.

        :param port: the port on which to run the master service
        :type port: int | None
        :param log_level: the log level at which to do application logging (or None for default log level)
        :type log_level: str | None
        :param eventlog_file: an optional alternate file in which to write event logs
        :type eventlog_file: str | None
        """
        port = port or Configuration['port']
        log_level = log_level or Configuration['log_level']
        eventlog_file = eventlog_file or Configuration['eventlog_file']

        log.configure_logging(log_level=log_level,
                              log_file=Configuration['log_file'])
        analytics.initialize(eventlog_file)
        analytics.record_event(analytics.SERVICE_STARTED, service='master')

        cluster_master = ClusterMaster()
        application = ClusterMasterApplication(cluster_master)

        ioloop = self._start_application(application, port)

        self._write_pid_file(Configuration['master_pid_file'])

        # log startup message once ioloop is running
        hostname = Configuration['hostname']
        log_startup = functools.partial(
            self._logger.info,
            'Master service is running on {}:{}.'.format(hostname, port))
        ioloop.add_callback(log_startup)

        ioloop.start()  # this call blocks until the server is stopped
        ioloop.close(all_fds=True)  # all_fds=True is necessary here to make sure connections don't hang
        self._logger.notice('Master server was stopped.')
Example #18
    def run(self, log_level, master_url, remote_file=None, build_type=None, **request_params):
        """
        Execute a build and wait for it to complete.

        :param log_level: the log level at which to do application logging (or None for default log level)
        :type log_level: str | None
        :param master_url: the url (specified by the user) of the master to which we should send the build
        :type master_url: str | None
        :param remote_file: a list of remote files where each element contains the output file name and the resource URL
        :type remote_file: list[list[str]] | None
        :param build_type: the build type of the request to be sent (e.g., "git", "directory"). If not specified
            will default to the "directory" project type.
        :type build_type: str | None
        :param request_params: key-value pairs to be provided as build parameters in the build request
        :type request_params: dict
        """
        log_level = log_level or Configuration['log_level']
        log.configure_logging(log_level=log_level, simplified_console_logs=True)
        request_params['type'] = build_type or request_params.get('type') or 'directory'

        if remote_file:
            request_params['remote_files'] = {name: url for name, url in remote_file}

        operational_master_url = master_url or '{}:{}'.format(Configuration['hostname'], Configuration['port'])

        # If running a single master, single slave--both on localhost--we need to launch services locally.
        if master_url is None and Network.are_hosts_same(Configuration['master_hostname'], 'localhost') \
                and len(Configuration['slaves']) == 1 \
                and Network.are_hosts_same(Configuration['slaves'][0], 'localhost'):
            self._start_local_services_if_needed(operational_master_url)

        if request_params['type'] == 'directory':
            request_params['project_directory'] = request_params.get('project_directory') or os.getcwd()

        runner = BuildRunner(master_url=operational_master_url, request_params=request_params, secret=Secret.get())

        if not runner.run():
            sys.exit(1)