Code example #1
    def __init__(self):
        self._logger = get_logger(__name__)

        self._all_slaves_by_url = {}
        self._all_builds_by_id = OrderedDict()  # This is an OrderedDict so we can more easily implement get_queue()
        self._builds_waiting_for_slaves = Queue()

        self._request_queue = Queue()
        self._request_handler = SerialRequestHandler()

        self._request_queue_worker_thread = SafeThread(
            target=self._build_preparation_loop, name='RequestHandlerLoop', daemon=True)
        self._request_queue_worker_thread.start()

        self._slave_allocation_worker_thread = SafeThread(
            target=self._slave_allocation_loop, name='SlaveAllocationLoop', daemon=True)
        self._slave_allocation_worker_thread.start()

        self._master_results_path = Configuration['results_directory']

        # It's important that idle slaves are only in the queue once so we use OrderedSet
        self._idle_slaves = OrderedSetQueue()

        # Delete all old builds when master starts.  Remove this if/when build numbers are unique across master
        # starts/stops
        if os.path.exists(self._master_results_path):
            shutil.rmtree(self._master_results_path)

        fs.create_dir(self._master_results_path)
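
Every example on this page ultimately calls the project's fs.create_dir helper (the same utility appears as fs_util in some of the other examples). Its implementation is not shown on this page; as a rough sketch, assuming it simply wraps os.makedirs so that an already-existing directory is not an error and an optional permission mode can be applied, it might look like the following (illustrative only, not the project's actual code):

import os


def create_dir(dir_path, mode=None):
    """Create dir_path (including any missing parents) if it does not already exist.

    Hypothetical sketch of the helper used throughout these examples.
    :param dir_path: the directory to create
    :param mode: optional permission bits to apply, e.g. 0o700 as in the git examples below
    """
    os.makedirs(dir_path, exist_ok=True)  # no error if the directory already exists
    if mode is not None:
        os.chmod(dir_path, mode)  # explicitly set permissions when a mode is given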
Code example #2
    def _get_event_handler(self):
        """
        Retrieves the correct event handler. Returns a StreamHandler
        if the event log should write to STDOUT; otherwise it returns
        a ready RotatingFileHandler.

        Both subclasses inherit from the StreamHandler base class.

        :return: Event handler
        :rtype: StreamHandler
        """
        if self.filename.upper() == 'STDOUT':
            return StreamHandler(sys.stdout)
        else:
            fs.create_dir(os.path.dirname(self.filename))
            previous_log_file_exists = os.path.exists(self.filename)

            event_handler = RotatingFileHandler(
                filename=self.filename,
                max_size=Configuration['max_eventlog_file_size'],
                backup_count=Configuration['max_eventlog_file_backups'])
            if previous_log_file_exists:
                event_handler.perform_rollover()  # force starting a new eventlog file on application startup

            return event_handler
Code example #3
def configure_logging(log_level=None,
                      log_file=None,
                      simplified_console_logs=False):
    """
    This should be called once as early as possible in app startup to configure logging handlers and formatting.

    :param log_level: The level at which to record log messages (DEBUG|INFO|NOTICE|WARNING|ERROR|CRITICAL)
    :type log_level: str
    :param log_file: The file to write logs to, or None to disable logging to a file
    :type log_file: str | None
    :param simplified_console_logs: Whether or not to use the simplified logging format and coloring
    :type simplified_console_logs: bool
    """
    # Set datetimes in log messages to be local timezone instead of UTC
    logbook.set_datetime_format('local')

    # Redirect standard lib logging to capture third-party logs in our log files (e.g., tornado, requests)
    logging.root.setLevel(logging.WARNING)  # don't include DEBUG/INFO/NOTICE-level logs from third parties
    logbook.compat.redirect_logging(set_root_logger_level=False)

    # Add a NullHandler to suppress all log messages lower than our desired log_level. (Otherwise they go to stderr.)
    NullHandler().push_application()

    log_level = log_level or Configuration['log_level']
    format_string, log_colors = _LOG_FORMAT_STRING, _LOG_COLORS
    if simplified_console_logs:
        format_string, log_colors = _SIMPLIFIED_LOG_FORMAT_STRING, _SIMPLIFIED_LOG_COLORS

    # handler for stdout
    log_handler = _ColorizingStreamHandler(
        stream=sys.stdout,
        level=log_level,
        format_string=format_string,
        log_colors=log_colors,
        bubble=True,
    )
    log_handler.push_application()

    # handler for log file
    if log_file:
        fs.create_dir(os.path.dirname(log_file))
        previous_log_file_exists = os.path.exists(log_file)

        event_handler = _ColorizingRotatingFileHandler(
            filename=log_file,
            level=log_level,
            format_string=_LOG_FORMAT_STRING,
            log_colors=_LOG_COLORS,
            bubble=True,
            max_size=Configuration['max_log_file_size'],
            backup_count=Configuration['max_log_file_backups'],
        )
        event_handler.push_application()
        if previous_log_file_exists:
            # Force application to create a new log file on startup.
            event_handler.perform_rollover(increment_logfile_counter=False)
        else:
            event_handler.log_application_summary()
Code example #4
    def _load_section_from_config_file(self, config, config_filename, section):
        """
        Load a config file and copy all the values in a particular section to the Configuration singleton
        :type config: Configuration
        :type config_filename: str
        :type section: str
        """
        try:
            config_parsed = ConfigFile(config_filename).read_config_from_disk()
        except FileNotFoundError:
            sample_filename = join(config.get('root_directory'), 'conf', 'default_clusterrunner.conf')
            fs.create_dir(config.get('base_directory'))
            shutil.copy(sample_filename, config_filename)
            chmod(config_filename, ConfigFile.CONFIG_FILE_MODE)
            config_parsed = ConfigFile(config_filename).read_config_from_disk()

        if section not in config_parsed:
            raise InvalidConfigError('The config file {} does not contain a [{}] section'
                                     .format(config_filename, section))

        clusterrunner_config = config_parsed[section]
        whitelisted_file_keys = self._get_config_file_whitelisted_keys()
        for key in clusterrunner_config:
            if key not in whitelisted_file_keys:
                raise InvalidConfigError('The config file contains an invalid key: {}'.format(key))
            value = clusterrunner_config[key]

            self._cast_and_set(key, value, config)
Code example #5
    def _execute_atom_command(self, atomic_command, atom_environment_vars, atom_artifact_dir):
        """
        Run the main command for this atom. Output the command, console output and exit code to
        files in the atom artifact directory.

        :type atomic_command: str
        :type atom_environment_vars: dict[str, str]
        :type atom_artifact_dir: str
        """
        fs_util.create_dir(atom_artifact_dir)

        start_time = time.time()
        output, exit_code = self._project_type.execute_command_in_project(atomic_command, atom_environment_vars)
        elapsed_time = time.time() - start_time

        console_output_path = os.path.join(atom_artifact_dir, Subjob.OUTPUT_FILE)
        fs_util.write_file(output, console_output_path)

        exit_code_output_path = os.path.join(atom_artifact_dir, Subjob.EXIT_CODE_FILE)
        fs_util.write_file(str(exit_code) + '\n', exit_code_output_path)

        command_output_path = os.path.join(atom_artifact_dir, Subjob.COMMAND_FILE)
        fs_util.write_file(str(atomic_command) + '\n', command_output_path)

        time_output_path = os.path.join(atom_artifact_dir, Subjob.TIMING_FILE)
        fs_util.write_file('{:.2f}\n'.format(elapsed_time), time_output_path)
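
Note that fs_util.write_file above is called with the file contents first and the destination path second. A minimal stand-in with that argument order (assumed from the call sites, not taken from the project) could be:

import os


def write_file(file_contents, file_path):
    """Write the string file_contents to file_path, creating parent directories as needed.

    Hypothetical helper matching the (contents, path) call order used above.
    """
    parent_dir = os.path.dirname(file_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    with open(file_path, 'w') as f:
        f.write(file_contents)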
Code example #6
File: git.py Project: mdengler/ClusterRunner
    def _setup_build(self):
        """
        Clones the project if necessary, fetches from the remote repo and resets to the requested commit
        """
        # For backward compatibility: If a shallow repo exists, delete it.  Shallow cloning is no longer supported;
        # it causes failures when fetching refs that depend on commits which are excluded from the shallow clone.
        existing_repo_is_shallow = os.path.isfile(os.path.join(self._repo_directory, '.git', 'shallow'))
        if existing_repo_is_shallow:
            if os.path.exists(self._repo_directory):
                shutil.rmtree(self._repo_directory)
                fs.create_dir(self._repo_directory, self.DIRECTORY_PERMISSIONS)

        # Clone the repo if it doesn't exist
        _, git_exit_code = self.execute_command_in_project('git rev-parse', cwd=self._repo_directory)
        repo_exists = git_exit_code == 0
        if not repo_exists:  # This is not a git repo yet, we have to clone the project.
            clone_command = 'git clone {} {}'.format(self._url, self._repo_directory)
            self._execute_git_remote_command(clone_command)

        # Must add the --update-head-ok in the scenario that the current branch of the working directory
        # is equal to self._branch; otherwise the git fetch will exit with a non-zero exit code.
        # Must specify the colon in 'branch:branch' so that the branch will be created locally. This is
        # important because it allows the slave hosts to do a git fetch from the master for this branch.
        fetch_command = 'git fetch --update-head-ok {0} {1}:{1}'.format(self._remote, self._branch)
        self._execute_git_remote_command(fetch_command, self._repo_directory)

        commit_hash = self._hash or 'FETCH_HEAD'
        reset_command = 'git reset --hard {}'.format(commit_hash)
        self._execute_in_repo_and_raise_on_failure(reset_command, 'Could not reset Git repo.')

        self._execute_in_repo_and_raise_on_failure('git clean -dfx', 'Could not clean Git repo.')
Code example #7
    def _execute_atom_command(self, atomic_command, atom_environment_vars,
                              atom_artifact_dir):
        """
        Run the main command for this atom. Output the command, console output and exit code to
        files in the atom artifact directory. Return the exit code.

        :type atomic_command: str
        :type atom_environment_vars: dict[str, str]
        :type atom_artifact_dir: str
        :rtype: int
        """
        fs_util.create_dir(atom_artifact_dir)

        start_time = time.time()
        output, exit_code = self._project_type.execute_command_in_project(
            atomic_command, atom_environment_vars)
        elapsed_time = time.time() - start_time

        console_output_path = os.path.join(atom_artifact_dir,
                                           Subjob.OUTPUT_FILE)
        fs_util.write_file(output, console_output_path)

        exit_code_output_path = os.path.join(atom_artifact_dir,
                                             Subjob.EXIT_CODE_FILE)
        fs_util.write_file(str(exit_code) + '\n', exit_code_output_path)

        command_output_path = os.path.join(atom_artifact_dir,
                                           Subjob.COMMAND_FILE)
        fs_util.write_file(str(atomic_command) + '\n', command_output_path)

        time_output_path = os.path.join(atom_artifact_dir, Subjob.TIMING_FILE)
        fs_util.write_file('{:.2f}\n'.format(elapsed_time), time_output_path)

        return exit_code
Code example #8
File: event_log.py Project: dncarley/ClusterRunner
    def _get_event_handler(self):
        """
        Retrieves the correct event handler. Returns a StreamHandler
        if the event log should write to STDOUT; otherwise it returns
        a ready RotatingFileHandler.

        Both subclasses inherit from the StreamHandler base class.

        :return: Event handler
        :rtype: StreamHandler
        """
        if self.filename.upper() == "STDOUT":
            return StreamHandler(sys.stdout)
        else:
            fs.create_dir(os.path.dirname(self.filename))
            previous_log_file_exists = os.path.exists(self.filename)

            event_handler = RotatingFileHandler(
                filename=self.filename,
                max_size=Configuration["max_eventlog_file_size"],
                backup_count=Configuration["max_eventlog_file_backups"],
            )
            if previous_log_file_exists:
                event_handler.perform_rollover()  # force starting a new eventlog file on application startup

            return event_handler
Code example #9
    def __init__(self):
        self._logger = get_logger(__name__)

        self._all_slaves_by_url = {}
        self._all_builds_by_id = OrderedDict()  # This is an OrderedDict so we can more easily implement get_queue()
        self._builds_waiting_for_slaves = Queue()

        self._request_queue = Queue()
        self._request_handler = SerialRequestHandler()

        self._request_queue_worker_thread = SafeThread(
            target=self._build_preparation_loop, name='RequestHandlerLoop', daemon=True)
        self._request_queue_worker_thread.start()

        self._slave_allocation_worker_thread = SafeThread(
            target=self._slave_allocation_loop, name='SlaveAllocationLoop', daemon=True)
        self._slave_allocation_worker_thread.start()

        self._master_results_path = Configuration['results_directory']

        # It's important that idle slaves are only in the queue once so we use OrderedSet
        self._idle_slaves = OrderedSetQueue()

        # Asynchronously delete (but immediately rename) all old builds when master starts.
        # Remove this if/when build numbers are unique across master starts/stops
        if os.path.exists(self._master_results_path):
            fs.async_delete(self._master_results_path)

        fs.create_dir(self._master_results_path)
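
The comment above describes fs.async_delete as renaming the target immediately and then deleting it in the background, which lets the results directory be recreated right away. A hedged sketch of that pattern (the real helper's details are not shown on this page) might be:

import os
import shutil
from threading import Thread


def async_delete(path_to_delete):
    """Rename path_to_delete out of the way immediately, then remove it on a background thread.

    Illustrative only; the actual fs.async_delete implementation may differ.
    """
    # Renaming is cheap and frees the original path for immediate reuse.
    renamed_path = '{}.deleting.{}'.format(path_to_delete, os.getpid())
    os.rename(path_to_delete, renamed_path)

    # The slow recursive delete happens in the background so startup is not blocked.
    Thread(target=shutil.rmtree, args=(renamed_path,), kwargs={'ignore_errors': True}, daemon=True).start()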
Code example #10
    def _execute_atom_command(self, atomic_command, atom_environment_vars, atom_artifact_dir):
        """
        Run the main command for this atom. Output the command, console output and exit code to
        files in the atom artifact directory. Return the exit code.

        :type atomic_command: str
        :type atom_environment_vars: dict[str, str]
        :type atom_artifact_dir: str
        :rtype: int
        """
        fs_util.create_dir(atom_artifact_dir)
        # This console_output_file must be opened in 'w+b' mode in order to be interchangeable with the
        # TemporaryFile instance that gets instantiated in self._project_type.execute_command_in_project.
        with open(os.path.join(atom_artifact_dir, BuildArtifact.OUTPUT_FILE), mode='w+b') as console_output_file:
            start_time = time.time()
            _, exit_code = self._project_type.execute_command_in_project(atomic_command, atom_environment_vars,
                                                                         output_file=console_output_file)
            elapsed_time = time.time() - start_time

        exit_code_output_path = os.path.join(atom_artifact_dir, BuildArtifact.EXIT_CODE_FILE)
        fs_util.write_file(str(exit_code) + '\n', exit_code_output_path)

        command_output_path = os.path.join(atom_artifact_dir, BuildArtifact.COMMAND_FILE)
        fs_util.write_file(str(atomic_command) + '\n', command_output_path)

        time_output_path = os.path.join(atom_artifact_dir, BuildArtifact.TIMING_FILE)
        fs_util.write_file('{:.2f}\n'.format(elapsed_time), time_output_path)

        return exit_code
Code example #11
    def __init__(self):
        self._logger = get_logger(__name__)
        self._master_results_path = Configuration['results_directory']
        self._slave_registry = SlaveRegistry.singleton()
        self._scheduler_pool = BuildSchedulerPool()
        self._build_request_handler = BuildRequestHandler(self._scheduler_pool)
        self._build_request_handler.start()
        self._slave_allocator = SlaveAllocator(self._scheduler_pool)
        self._slave_allocator.start()

        # The best practice for determining the number of threads to use is
        # the number of threads per core multiplied by the number of physical
        # cores. So for example, with 10 cores per socket, 2 sockets and 2
        # threads per core, the max would be 40.
        #
        # Currently we use threads for incrementing/decrementing slave executor
        # counts (lock acquisition) and tearing down the slave (network IO). 32 threads should be
        # plenty for these tasks. In the case of heavy load, the bottleneck will be the number
        # of executors, not the time it takes to lock/unlock the executor counts or the number of
        # teardown requests. Tweak the number to find the sweet spot if you feel this is the case.
        self._thread_pool_executor = ThreadPoolExecutor(max_workers=32)

        # Asynchronously delete (but immediately rename) all old builds when master starts.
        # Remove this if/when build numbers are unique across master starts/stops
        if os.path.exists(self._master_results_path):
            fs.async_delete(self._master_results_path)
        fs.create_dir(self._master_results_path)

        # Configure heartbeat tracking
        self._unresponsive_slaves_cleanup_interval = Configuration[
            'unresponsive_slaves_cleanup_interval']
        self._hb_scheduler = sched.scheduler()

        SlavesCollector.register_slaves_metrics_collector(
            lambda: self._slave_registry.get_all_slaves_by_id().values())
Code example #12
    def execute_subjob(self, build_id, subjob_id, subjob_artifact_dir,
                       atomic_commands):
        """
        This is the method for executing a subjob. It executes the specified atomic commands, then
        archives the results into a single file and returns the filename.

        :type build_id: int
        :type subjob_id: int
        :type subjob_artifact_dir: str
        :type atomic_commands: list[str]
        :rtype: str
        """
        self._logger.info('Executing subjob (Build {}, Subjob {})...',
                          build_id, subjob_id)

        # Set the current task
        self._current_build_id = build_id
        self._current_subjob_id = subjob_id

        # Maintain a list of atom artifact directories for compression and sending back to master
        atom_artifact_dirs = []

        # execute every atom and keep track of time elapsed for each
        for atom_id, atomic_command in enumerate(atomic_commands):

            atom_artifact_dir = os.path.join(
                subjob_artifact_dir,
                Subjob.ATOM_DIR_FORMAT.format(subjob_id, atom_id))

            # remove and recreate the atom artifact dir
            shutil.rmtree(atom_artifact_dir, ignore_errors=True)
            fs_util.create_dir(atom_artifact_dir)

            atom_environment_vars = {
                'ARTIFACT_DIR': atom_artifact_dir,
                'ATOM_ID': atom_id,
                'EXECUTOR_INDEX': self.id,
            }

            atom_artifact_dirs.append(atom_artifact_dir)

            self._execute_atom_command(atomic_command, atom_environment_vars,
                                       atom_artifact_dir)

        # Generate mapping of atom directories (for archiving) to paths in the archive file
        targets_to_archive_paths = {
            atom_dir: os.path.basename(os.path.normpath(atom_dir))
            for atom_dir in atom_artifact_dirs
        }

        # archive file names must be unique for a build, so we append the subjob_id to the compressed file's name
        tarfile_path = os.path.join(subjob_artifact_dir,
                                    'results_{}.tar.gz'.format(subjob_id))
        fs_util.compress_directories(targets_to_archive_paths, tarfile_path)

        # Reset the current task
        self._current_build_id = None
        self._current_subjob_id = None

        return tarfile_path
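
The fs_util.compress_directories call above receives a mapping of directories to the paths they should occupy inside the archive, plus the output tarball path. A possible stand-in built on the standard tarfile module (argument shape assumed from the call site, not the project's actual code) is:

import tarfile


def compress_directories(target_dirs_to_archive_paths, tarfile_path):
    """Write a gzipped tar to tarfile_path containing each directory under its mapped archive path.

    Hypothetical sketch matching the way execute_subjob calls it above.
    """
    with tarfile.open(tarfile_path, 'w:gz') as archive:
        for target_dir, archive_path in target_dirs_to_archive_paths.items():
            archive.add(target_dir, arcname=archive_path)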
Code example #13
    def _load_section_from_config_file(self, config, config_filename, section):
        """
        Load a config file and copy all the values in a particular section to the Configuration singleton
        :type config: Configuration
        :type config_filename: str
        :type section: str
        """
        try:
            config_parsed = ConfigFile(config_filename).read_config_from_disk()
        except FileNotFoundError:
            sample_filename = join(config.get('root_directory'), 'conf',
                                   'default_clusterrunner.conf')
            fs.create_dir(config.get('base_directory'))
            shutil.copy(sample_filename, config_filename)
            chmod(config_filename, ConfigFile.CONFIG_FILE_MODE)
            config_parsed = ConfigFile(config_filename).read_config_from_disk()

        if section not in config_parsed:
            raise InvalidConfigError(
                'The config file {} does not contain a [{}] section'.format(
                    config_filename, section))

        clusterrunner_config = config_parsed[section]
        whitelisted_file_keys = self._get_config_file_whitelisted_keys()
        for key in clusterrunner_config:
            if key not in whitelisted_file_keys:
                raise InvalidConfigError(
                    'The config file contains an invalid key: {}'.format(key))
            value = clusterrunner_config[key]

            self._cast_and_set(key, value, config)
Code example #14
    def _write_config_to_disk(self, config_parsed):
        """
        Write a data structure of parsed config values to disk in an INI-style format.
        :type config_parsed: ConfigObj
        """
        fs.create_dir(os.path.dirname(self._filename))
        config_parsed.write()
        os.chmod(self._filename, self.CONFIG_FILE_MODE)
Code example #15
File: config_file.py Project: Medium/ClusterRunner
    def _write_config_to_disk(self, config_parsed):
        """
        Write a data structure of parsed config values to disk in an INI-style format.
        :type config_parsed: ConfigObj
        """
        fs.create_dir(os.path.dirname(self._filename))
        config_parsed.write()
        os.chmod(self._filename, self.CONFIG_FILE_MODE)
Code example #16
File: git.py Project: Medium/ClusterRunner
    def _fetch_project(self):
        """
        Clones the project if necessary, fetches from the remote repo and resets to the requested commit
        """
        # If shallow_clones is set to True, then we need to specify the --depth=1 argument to all git fetch
        # and clone invocations.
        git_clone_fetch_depth_arg = ''
        if Configuration['shallow_clones']:
            git_clone_fetch_depth_arg = '--depth=1'

        existing_repo_is_shallow = os.path.isfile(os.path.join(self._repo_directory, '.git', 'shallow'))

        # If we disable shallow clones, but the existing repo is shallow, we must re-clone non-shallowly.
        if not Configuration['shallow_clones'] and existing_repo_is_shallow and os.path.exists(self._repo_directory):
            shutil.rmtree(self._repo_directory)
            fs.create_dir(self._repo_directory, self.DIRECTORY_PERMISSIONS)

        # Clone the repo if it doesn't exist
        try:
            self._execute_git_command_in_repo_and_raise_on_failure('rev-parse')  # rev-parse succeeds if repo exists
        except RuntimeError:
            self._logger.notice('No valid repo in "{}". Cloning fresh from "{}".', self._repo_directory, self._url)
            self._execute_git_command_in_repo_and_raise_on_failure(
                git_command='clone {} {} {}'.format(git_clone_fetch_depth_arg, self._url, self._repo_directory),
                error_msg='Could not clone repo.'
            )

        # Must add the --update-head-ok in the scenario that the current branch of the working directory
        # is equal to self._branch; otherwise the git fetch will exit with a non-zero exit code.
        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='fetch {} --update-head-ok {} {}'.format(git_clone_fetch_depth_arg, self._remote, self._branch),
            error_msg='Could not fetch specified branch "{}" from remote "{}".'.format(self._branch, self._remote)
        )

        # Validate and convert the user-specified hash/refspec to a full git hash
        fetch_head_hash = self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='rev-parse FETCH_HEAD',
            error_msg='Could not rev-parse FETCH_HEAD of {} to a commit hash.'.format(self._branch)
        ).strip()

        # Save this hash as a local ref. Named local refs are necessary for slaves to fetch correctly from the master.
        # The local ref will be passed on to slaves instead of the user-specified branch.
        self._local_ref = 'refs/clusterrunner/{}'.format(fetch_head_hash)
        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='update-ref {} {}'.format(self._local_ref, fetch_head_hash),
            error_msg='Could not update local ref.'
        )

        # The '--' argument acts as a delimiter to differentiate values that can be "tree-ish" or a "path"
        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='reset --hard {} --'.format(fetch_head_hash),
            error_msg='Could not reset Git repo.'
        )

        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='clean -dfx',
            error_msg='Could not clean Git repo.'
        )
Code example #17
File: log.py Project: fengshao0907/ClusterRunner
def configure_logging(log_level=None, log_file=None, simplified_console_logs=False):
    """
    This should be called once as early as possible in app startup to configure logging handlers and formatting.

    :param log_level: The level at which to record log messages (DEBUG|INFO|NOTICE|WARNING|ERROR|CRITICAL)
    :type log_level: str
    :param log_file: The file to write logs to, or None to disable logging to a file
    :type log_file: str | None
    :param simplified_console_logs: Whether or not to use the simplified logging format and coloring
    :type simplified_console_logs: bool
    """
    # Set datetimes in log messages to be local timezone instead of UTC
    logbook.set_datetime_format('local')

    # Redirect standard lib logging to capture third-party logs in our log files (e.g., tornado, requests)
    logging.root.setLevel(logging.WARNING)  # don't include DEBUG/INFO/NOTICE-level logs from third parties
    logbook.compat.redirect_logging(set_root_logger_level=False)

    # Add a NullHandler to suppress all log messages lower than our desired log_level. (Otherwise they go to stderr.)
    NullHandler().push_application()

    log_level = log_level or Configuration['log_level']
    format_string, log_colors = _LOG_FORMAT_STRING, _LOG_COLORS
    if simplified_console_logs:
        format_string, log_colors = _SIMPLIFIED_LOG_FORMAT_STRING, _SIMPLIFIED_LOG_COLORS

    # handler for stdout
    log_handler = _ColorizingStreamHandler(
        stream=sys.stdout,
        level=log_level,
        format_string=format_string,
        log_colors=log_colors,
        bubble=True,
    )
    log_handler.push_application()

    # handler for log file
    if log_file:
        fs.create_dir(os.path.dirname(log_file))
        previous_log_file_exists = os.path.exists(log_file)

        event_handler = _ColorizingRotatingFileHandler(
            filename=log_file,
            level=log_level,
            format_string=_LOG_FORMAT_STRING,
            log_colors=_LOG_COLORS,
            bubble=True,
            max_size=Configuration['max_log_file_size'],
            backup_count=Configuration['max_log_file_backups'],
        )
        event_handler.push_application()
        if previous_log_file_exists:
            # Force application to create a new log file on startup.
            event_handler.perform_rollover(increment_logfile_counter=False)
        else:
            event_handler.log_application_summary()
Code example #18
File: git.py Project: linearregression/ClusterRunner
    def _fetch_project(self):
        """
        Clones the project if necessary, fetches from the remote repo and resets to the requested commit
        """
        # For backward compatibility: If a shallow repo exists, delete it.  Shallow cloning is no longer supported;
        # it causes failures when fetching refs that depend on commits which are excluded from the shallow clone.
        existing_repo_is_shallow = os.path.isfile(os.path.join(self._repo_directory, '.git', 'shallow'))
        if existing_repo_is_shallow:
            if os.path.exists(self._repo_directory):
                shutil.rmtree(self._repo_directory)
                fs.create_dir(self._repo_directory, self.DIRECTORY_PERMISSIONS)

        # Clone the repo if it doesn't exist
        try:
            self._execute_git_command_in_repo_and_raise_on_failure('rev-parse')  # rev-parse succeeds if repo exists
        except RuntimeError:
            self._logger.notice('No valid repo in "{}". Cloning fresh from "{}".', self._repo_directory, self._url)
            self._execute_git_command_in_repo_and_raise_on_failure(
                git_command='clone {} {}'.format(self._url, self._repo_directory),
                error_msg='Could not clone repo.'
            )

        # Must add the --update-head-ok in the scenario that the current branch of the working directory
        # is equal to self._branch; otherwise the git fetch will exit with a non-zero exit code.
        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='fetch --update-head-ok {} {}'.format(self._remote, self._branch),
            error_msg='Could not fetch specified branch "{}" from remote "{}".'.format(self._branch, self._remote)
        )

        # Validate and convert the user-specified hash/refspec to a full git hash
        self._hash = self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='rev-parse {}'.format(self._hash),
            error_msg='Could not rev-parse "{}" to a commit hash.'.format(self._hash)
        ).strip()

        # Save this hash as a local ref. Named local refs are necessary for slaves to fetch correctly from the master.
        # The local ref will be passed on to slaves instead of the user-specified branch.
        self._local_ref = 'refs/clusterrunner/' + self._hash
        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='update-ref {} {}'.format(self._local_ref, self._hash),
            error_msg='Could not update local ref.'
        )

        # The '--' argument acts as a delimiter to differentiate values that can be "tree-ish" or a "path"
        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='reset --hard {} --'.format(self._hash),
            error_msg='Could not reset Git repo.'
        )

        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='clean -dfx',
            error_msg='Could not clean Git repo.'
        )
Code example #19
File: git.py Project: dncarley/ClusterRunner
    def _fetch_project(self):
        """
        Clones the project if necessary, fetches from the remote repo and resets to the requested commit
        """
        # For backward compatibility: If a shallow repo exists, delete it.  Shallow cloning is no longer supported;
        # it causes failures when fetching refs that depend on commits which are excluded from the shallow clone.
        existing_repo_is_shallow = os.path.isfile(os.path.join(self._repo_directory, '.git', 'shallow'))
        if existing_repo_is_shallow:
            if os.path.exists(self._repo_directory):
                shutil.rmtree(self._repo_directory)
                fs.create_dir(self._repo_directory, self.DIRECTORY_PERMISSIONS)

        # Clone the repo if it doesn't exist
        try:
            self._execute_git_command_in_repo_and_raise_on_failure('rev-parse')  # rev-parse succeeds if repo exists
        except RuntimeError:
            self._logger.notice('No valid repo in "{}". Cloning fresh from "{}".', self._repo_directory, self._url)
            self._execute_git_command_in_repo_and_raise_on_failure(
                git_command='clone {} {}'.format(self._url, self._repo_directory),
                error_msg='Could not clone repo.'
            )

        # Must add the --update-head-ok in the scenario that the current branch of the working directory
        # is equal to self._branch; otherwise the git fetch will exit with a non-zero exit code.
        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='fetch --update-head-ok {} {}'.format(self._remote, self._branch),
            error_msg='Could not fetch specified branch "{}" from remote "{}".'.format(self._branch, self._remote)
        )

        # Validate and convert the user-specified hash/refspec to a full git hash
        self._hash = self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='rev-parse {}'.format(self._hash),
            error_msg='Could not rev-parse "{}" to a commit hash.'.format(self._hash)
        ).strip()

        # Save this hash as a local ref. Named local refs are necessary for slaves to fetch correctly from the master.
        # The local ref will be passed on to slaves instead of the user-specified branch.
        self._local_ref = 'refs/clusterrunner/' + self._hash
        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='update-ref {} {}'.format(self._local_ref, self._hash),
            error_msg='Could not update local ref.'
        )

        # The '--' argument acts as a delimiter to differentiate values that can be "tree-ish" or a "path"
        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='reset --hard {} --'.format(self._hash),
            error_msg='Could not reset Git repo.'
        )

        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='clean -dfx',
            error_msg='Could not clean Git repo.'
        )
Code example #20
    def execute_subjob(self, build_id, subjob_id, subjob_artifact_dir, atomic_commands):
        """
        This is the method for executing a subjob. It executes the specified atomic commands, then
        archives the results into a single file and returns the filename.

        :type build_id: int
        :type subjob_id: int
        :type subjob_artifact_dir: str
        :type atomic_commands: list[str]
        :rtype: str
        """
        self._logger.info('Executing subjob (Build {}, Subjob {})...', build_id, subjob_id)

        # Set the current task
        self._current_build_id = build_id
        self._current_subjob_id = subjob_id

        # Maintain a list of atom artifact directories for compression and sending back to master
        atom_artifact_dirs = []

        # execute every atom and keep track of time elapsed for each
        for atom_id, atomic_command in enumerate(atomic_commands):

            atom_artifact_dir = os.path.join(subjob_artifact_dir,
                                             Subjob.ATOM_DIR_FORMAT.format(subjob_id, atom_id))

            # remove and recreate the atom artifact dir
            shutil.rmtree(atom_artifact_dir, ignore_errors=True)
            fs_util.create_dir(atom_artifact_dir)

            atom_environment_vars = {
                'ARTIFACT_DIR': atom_artifact_dir,
                'ATOM_ID': atom_id,
                'EXECUTOR_INDEX': self.id,
            }

            atom_artifact_dirs.append(atom_artifact_dir)

            self._execute_atom_command(atomic_command, atom_environment_vars, atom_artifact_dir)

        # Generate mapping of atom directories (for archiving) to paths in the archive file
        targets_to_archive_paths = {atom_dir: os.path.basename(os.path.normpath(atom_dir))
                                    for atom_dir in atom_artifact_dirs}

        # archive file names must be unique for a build, so we append the subjob_id to the compressed file's name
        tarfile_path = os.path.join(subjob_artifact_dir, 'results_{}.tar.gz'.format(subjob_id))
        fs_util.compress_directories(targets_to_archive_paths, tarfile_path)

        # Reset the current task
        self._current_build_id = None
        self._current_subjob_id = None

        return tarfile_path
Code example #21
    def _load_section_from_config_file(self, config, config_filename, section):
        """
        Load a config file and copy all the values in a particular section to the Configuration singleton
        :type config: Configuration
        :type config_filename: str
        :type section: str
        """
        # Only keys from this list will be loaded from a conf file.  If the conf file contains other keys we will
        # error to alert the user.
        config_key_validation = [
            'secret',
            'base_directory',
            'log_level',
            'build_symlink_directory',
            'hostname',
            'slaves',
            'port',
            'num_executors',
            'master_hostname',
            'master_port',
            'log_filename',
            'max_log_file_size',
            'eventlog_filename',
            'git_strict_host_key_checking',
            'cors_allowed_origins_regex',
        ]
        try:
            config_parsed = ConfigFile(config_filename).read_config_from_disk()
        except FileNotFoundError:
            sample_filename = join(config.get('root_directory'), 'conf',
                                   'default_clusterrunner.conf')
            fs.create_dir(config.get('base_directory'))
            shutil.copy(sample_filename, config_filename)
            chmod(config_filename, ConfigFile.CONFIG_FILE_MODE)
            config_parsed = ConfigFile(config_filename).read_config_from_disk()

        if section not in config_parsed:
            raise _InvalidConfigError(
                'The config file {} does not contain a [{}] section'.format(
                    config_filename, section))
        clusterrunner_config = config_parsed[section]
        for key in clusterrunner_config:
            if key not in config_key_validation:
                raise _InvalidConfigError(
                    'The config file contains an invalid key: {}'.format(key))
            value = clusterrunner_config[key]

            self._cast_and_set(key, value, config)
Code example #22
    def __init__(self):
        self._logger = get_logger(__name__)
        self._master_results_path = Configuration['results_directory']
        self._all_slaves_by_url = {}
        self._all_builds_by_id = OrderedDict()
        self._build_request_handler = BuildRequestHandler()
        self._build_request_handler.start()
        self._slave_allocator = SlaveAllocator(self._build_request_handler)
        self._slave_allocator.start()

        # Asynchronously delete (but immediately rename) all old builds when master starts.
        # Remove this if/when build numbers are unique across master starts/stops
        if os.path.exists(self._master_results_path):
            fs.async_delete(self._master_results_path)

        fs.create_dir(self._master_results_path)
Code example #23
    def __init__(self):
        self._logger = get_logger(__name__)
        self._master_results_path = Configuration["results_directory"]
        self._all_slaves_by_url = {}
        self._all_builds_by_id = OrderedDict()
        self._build_request_handler = BuildRequestHandler()
        self._build_request_handler.start()
        self._slave_allocator = SlaveAllocator(self._build_request_handler)
        self._slave_allocator.start()

        # Asynchronously delete (but immediately rename) all old builds when master starts.
        # Remove this if/when build numbers are unique across master starts/stops
        if os.path.exists(self._master_results_path):
            fs.async_delete(self._master_results_path)

        fs.create_dir(self._master_results_path)
Code example #24
    def _load_section_from_config_file(self, config, config_filename, section):
        """
        Load a config file and copy all the values in a particular section to the Configuration singleton
        :type config: Configuration
        :type config_filename: str
        :type section: str
        """
        # Only keys from this list will be loaded from a conf file.  If the conf file contains other keys we will
        # error to alert the user.
        config_key_validation = [
            'secret',
            'base_directory',
            'log_level',
            'build_symlink_directory',
            'hostname',
            'slaves',
            'port',
            'num_executors',
            'master_hostname',
            'master_port',
            'log_filename',
            'max_log_file_size',
            'eventlog_filename',
            'git_strict_host_key_checking',
            'cors_allowed_origins_regex',
        ]
        try:
            config_parsed = ConfigFile(config_filename).read_config_from_disk()
        except FileNotFoundError:
            sample_filename = join(config.get('root_directory'), 'conf', 'default_clusterrunner.conf')
            fs.create_dir(config.get('base_directory'))
            shutil.copy(sample_filename, config_filename)
            chmod(config_filename, ConfigFile.CONFIG_FILE_MODE)
            config_parsed = ConfigFile(config_filename).read_config_from_disk()

        if section not in config_parsed:
            raise _InvalidConfigError('The config file {} does not contain a [{}] section'
                                      .format(config_filename, section))
        clusterrunner_config = config_parsed[section]
        for key in clusterrunner_config:
            if key not in config_key_validation:
                raise _InvalidConfigError('The config file contains an invalid key: {}'.format(key))
            value = clusterrunner_config[key]

            self._cast_and_set(key, value, config)
Code example #25
File: analytics.py Project: ojammeh/ClusterRunner
def initialize(eventlog_file=None):
    """
    Initialize the analytics output. This will cause analytics events to be output to either a file or stdout.

    If this function is not called, analytics events will not be output. If it is called with a filename, the events
    will be output to that file. If it is called with 'STDOUT' or None, the events will be output to stdout.

    :param eventlog_file: The filename to output events to, 'STDOUT' to output to stdout, None to disable event logging
    :type eventlog_file: str | None
    """
    global _analytics_logger, _eventlog_file

    _eventlog_file = eventlog_file
    if not eventlog_file:
        _analytics_logger = None
        return

    if eventlog_file.upper() == 'STDOUT':
        event_handler = StreamHandler(sys.stdout)
    else:
        fs.create_dir(os.path.dirname(eventlog_file))
        previous_log_file_exists = os.path.exists(eventlog_file)

        event_handler = RotatingFileHandler(
            filename=eventlog_file,
            max_size=Configuration['max_eventlog_file_size'],
            backup_count=Configuration['max_eventlog_file_backups'],
        )
        if previous_log_file_exists:
            event_handler.perform_rollover()  # force starting a new eventlog file on application startup

    event_handler.format_string = '{record.message}'  # only output raw log message -- no timestamp or log level
    handler = TaggingHandler(
        {'event': event_handler},  # enable logging to the event_handler with the event() method
        bubble=True,
    )
    handler.push_application()

    _analytics_logger = TaggingLogger('analytics', ['event'])
Code example #26
    def _fetch_project(self):
        """
        Clones the project if necessary, fetches from the remote repo and resets to the requested commit
        """
        # For backward compatibility: If a shallow repo exists, delete it.  Shallow cloning is no longer supported;
        # it causes failures when fetching refs that depend on commits which are excluded from the shallow clone.
        existing_repo_is_shallow = os.path.isfile(os.path.join(self._repo_directory, '.git', 'shallow'))
        if existing_repo_is_shallow:
            if os.path.exists(self._repo_directory):
                shutil.rmtree(self._repo_directory)
                fs.create_dir(self._repo_directory, self.DIRECTORY_PERMISSIONS)

        # Clone the repo if it doesn't exist
        _, git_exit_code = self.execute_command_in_project('git rev-parse', cwd=self._repo_directory)
        repo_exists = git_exit_code == 0
        if not repo_exists:  # This is not a git repo yet, we have to clone the project.
            clone_command = 'git clone {} {}'.format(self._url, self._repo_directory)
            self._git_remote_command_executor.execute(clone_command)

        # Must add the --update-head-ok in the scenario that the current branch of the working directory
        # is equal to self._branch; otherwise the git fetch will exit with a non-zero exit code.
        fetch_command = 'git fetch --update-head-ok {} {}'.format(self._remote, self._branch)
        self._git_remote_command_executor.execute(fetch_command, cwd=self._repo_directory)

        # Validate and convert the user-specified hash/refspec to a full git hash
        self._hash = self._execute_in_repo_and_raise_on_failure(
            'git rev-parse {}'.format(self._hash),
            'Could not rev-parse "{}" to a commit hash.'.format(self._hash)
        ).strip()

        # Save this hash as a local ref. Named local refs are necessary for slaves to fetch correctly from the master.
        # The local ref will be passed on to slaves instead of the user-specified branch.
        self._local_ref = 'refs/clusterrunner/' + self._hash
        update_ref_command = 'git update-ref {} {}'.format(self._local_ref, self._hash)
        self._execute_in_repo_and_raise_on_failure(update_ref_command, 'Could not update local ref.')

        # The '--' option acts as a delimiter to differentiate values that can be "tree-ish" or a "path"
        reset_command = 'git reset --hard {} --'.format(self._hash)
        self._execute_in_repo_and_raise_on_failure(reset_command, 'Could not reset Git repo.')

        self._execute_in_repo_and_raise_on_failure('git clean -dfx', 'Could not clean Git repo.')
Code example #27
File: git.py Project: drobertduke/ClusterRunner
    def __init__(self, url, build_project_directory='', project_directory='', remote='origin', branch='master',
                 hash=None, config=None, job_name=None, remote_files=None):
        """
        Note: the first line of each parameter docstring will be exposed as command line argument documentation for the
        clusterrunner build client.

        :param url: url to the git repo (ie: https, ssh)
        :type url: str
        :param build_project_directory: the symlinked directory that PROJECT_DIR should end up being set to
        :type build_project_directory: str
        :param project_directory: path within the repo that contains cluster_runner.yaml
        :type project_directory: str
        :param remote: The git remote name to fetch from
        :type remote: str
        :param branch: The git branch name on the remote to fetch
        :type branch: str
        :param hash: The hash to reset hard on. If both hash and branch are set, we only use the hash.
        :type hash: str
        :param config: a yaml string representing the project_type's config
        :type config: str|None
        :param job_name: a list of job names we intend to run
        :type job_name: list [str] | None
        :param remote_files: dictionary mapping of output file to URL
        :type remote_files: dict[str, str] | None
        """
        super().__init__(config, job_name, remote_files)
        self._url = url
        self._remote = remote
        self._branch = branch
        self._hash = hash

        url_components = urlparse(url)
        url_full_path_parts = url_components.path.split('/')
        repo_name = url_full_path_parts[-1].split('.')[0]
        url_folder_path_parts = url_full_path_parts[:-1]
        repo_directory = os.path.join(Configuration['repo_directory'], url_components.netloc, *url_folder_path_parts)
        self._repo_directory = os.path.join(repo_directory, repo_name)
        self._timing_file_directory = os.path.join(
            Configuration['timings_directory'],
            url_components.netloc,
            url_components.path.strip('/')
        )

        # We explicitly set the repo directory to 700 so we don't inadvertently expose the repo to access by other users
        fs.create_dir(self._repo_directory, 0o700)
        fs.create_dir(self._timing_file_directory, 0o700)
        fs.create_dir(os.path.dirname(build_project_directory))

        # Create a symlink from the generated build project directory to the actual project directory.
        # This is done in order to switch between the master's and the slave's copies of the repo while not
        # having to do something hacky in order to use the master's generated atoms on the slaves.
        actual_project_directory = os.path.join(self._repo_directory, project_directory)

        try:
            os.unlink(build_project_directory)
        except FileNotFoundError:
            pass

        os.symlink(actual_project_directory, build_project_directory)
        self.project_directory = build_project_directory
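
For illustration, here is how the path computation in this constructor plays out for one possible input; the URL and repository root below are made-up example values, not values taken from the project:

import os
from urllib.parse import urlparse

url = 'ssh://git@example.com/team/project.git'   # hypothetical repo URL
repo_root = '/var/clusterrunner/repos'           # hypothetical Configuration['repo_directory'] value

url_components = urlparse(url)
url_full_path_parts = url_components.path.split('/')   # ['', 'team', 'project.git']
repo_name = url_full_path_parts[-1].split('.')[0]       # 'project'
url_folder_path_parts = url_full_path_parts[:-1]        # ['', 'team']
repo_directory = os.path.join(repo_root, url_components.netloc, *url_folder_path_parts)
print(os.path.join(repo_directory, repo_name))          # /var/clusterrunner/repos/git@example.com/team/project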
Code example #28
File: git.py Project: ojammeh/ClusterRunner
    def _fetch_project(self):
        """
        Clones the project if necessary, fetches from the remote repo and resets to the requested commit
        """
        # For backward compatibility: If a shallow repo exists, delete it.  Shallow cloning is no longer supported,
        # it causes failures when fetching refs that depend on commits which are excluded from the shallow clone.
        # it causes failures when fetching refs that depend on commits which are excluded from the shallow clone.
        existing_repo_is_shallow = os.path.isfile(
            os.path.join(self._repo_directory, '.git', 'shallow'))
        if existing_repo_is_shallow:
            if os.path.exists(self._repo_directory):
                shutil.rmtree(self._repo_directory)
                fs.create_dir(self._repo_directory, self.DIRECTORY_PERMISSIONS)

        # Clone the repo if it doesn't exist
        _, git_exit_code = self.execute_command_in_project(
            'git rev-parse', cwd=self._repo_directory)
        repo_exists = git_exit_code == 0
        if not repo_exists:  # This is not a git repo yet, we have to clone the project.
            clone_command = 'git clone {} {}'.format(self._url,
                                                     self._repo_directory)
            self._git_remote_command_executor.execute(clone_command)

        # Must add the --update-head-ok in the scenario that the current branch of the working directory
        # is equal to self._branch; otherwise the git fetch will exit with a non-zero exit code.
        # Must specify the colon in 'branch:branch' so that the branch will be created locally. This is
        # important because it allows the slave hosts to do a git fetch from the master for this branch.
        fetch_command = 'git fetch --update-head-ok {0} {1}:{1}'.format(
            self._remote, self._branch)
        self._git_remote_command_executor.execute(fetch_command,
                                                  cwd=self._repo_directory)

        commit_hash = self._hash or 'FETCH_HEAD'

        # The '--' option acts as a delimiter to differentiate values that can be "tree-ish" or a "path"
        reset_command = 'git reset --hard {} --'.format(commit_hash)
        self._execute_in_repo_and_raise_on_failure(
            reset_command, 'Could not reset Git repo.')

        self._execute_in_repo_and_raise_on_failure(
            'git clean -dfx', 'Could not clean Git repo.')
Code example #29
File: git.py Project: ojammeh/ClusterRunner
    def __init__(self,
                 url,
                 build_project_directory='',
                 project_directory='',
                 remote='origin',
                 branch='master',
                 hash=None,
                 config=None,
                 job_name=None,
                 remote_files=None):
        """
        Note: the first line of each parameter docstring will be exposed as command line argument documentation for the
        clusterrunner build client.

        :param url: url to the git repo (ie: https, ssh)
        :type url: str
        :param build_project_directory: the symlinked directory that PROJECT_DIR should end up being set to
        :type build_project_directory: str
        :param project_directory: path within the repo that contains cluster_runner.yaml
        :type project_directory: str
        :param remote: The git remote name to fetch from
        :type remote: str
        :param branch: The git branch name on the remote to fetch
        :type branch: str
        :param hash: The hash to reset hard on. If both hash and branch are set, we only use the hash.
        :type hash: str
        :param config: a yaml string representing the project_type's config
        :type config: str|None
        :param job_name: a list of job names we intend to run
        :type job_name: list [str] | None
        :param remote_files: dictionary mapping of output file to URL
        :type remote_files: dict[str, str] | None
        """
        super().__init__(config, job_name, remote_files)
        self._url = url
        self._remote = remote
        self._branch = branch
        self._hash = hash
        self._repo_directory = self.get_full_repo_directory(self._url)
        self._timing_file_directory = self.get_timing_file_directory(self._url)
        self._git_remote_command_executor = _GitRemoteCommandExecutor()

        # We explicitly set the repo directory to 700 so we don't inadvertently expose the repo to access by other users
        fs.create_dir(self._repo_directory, self.DIRECTORY_PERMISSIONS)
        fs.create_dir(self._timing_file_directory, self.DIRECTORY_PERMISSIONS)
        fs.create_dir(os.path.dirname(build_project_directory))

        # Create a symlink from the generated build project directory to the actual project directory.
        # This is done in order to switch between the master's and the slave's copies of the repo while not
        # having to do something hacky in order to use the master's generated atoms on the slaves.
        actual_project_directory = os.path.join(self._repo_directory,
                                                project_directory)

        try:
            os.unlink(build_project_directory)
        except FileNotFoundError:
            pass

        os.symlink(actual_project_directory, build_project_directory)
        self.project_directory = build_project_directory
Code example #30
def initialize(eventlog_file=None):
    """
    Initialize the analytics output. This will cause analytics events to be output to either a file or stdout.

    If this function is not called, analytics events will not be output. If it is called with a filename, the events
    will be output to that file. If it is called with 'STDOUT' or None, the events will be output to stdout.

    :param eventlog_file: The filename to output events to, 'STDOUT' to output to stdout, None to disable event logging
    :type eventlog_file: str | None
    """
    global _analytics_logger, _eventlog_file

    _eventlog_file = eventlog_file
    if not eventlog_file:
        _analytics_logger = None
        return

    if eventlog_file.upper() == 'STDOUT':
        event_handler = StreamHandler(sys.stdout)
    else:
        fs.create_dir(os.path.dirname(eventlog_file))
        previous_log_file_exists = os.path.exists(eventlog_file)

        event_handler = RotatingFileHandler(
            filename=eventlog_file,
            max_size=Configuration['max_eventlog_file_size'],
            backup_count=Configuration['max_eventlog_file_backups'],
        )
        if previous_log_file_exists:
            event_handler.perform_rollover()  # force starting a new eventlog file on application startup

    event_handler.format_string = '{record.message}'  # only output raw log message -- no timestamp or log level
    handler = TaggingHandler(
        {'event': event_handler},  # enable logging to the event_handler with the event() method
        bubble=True,
    )
    handler.push_application()

    _analytics_logger = TaggingLogger('analytics', ['event'])
Code example #31
    def _execute_atom_command(self, atomic_command, atom_environment_vars,
                              atom_artifact_dir):
        """
        Run the main command for this atom. Output the command, console output and exit code to
        files in the atom artifact directory. Return the exit code.

        :type atomic_command: str
        :type atom_environment_vars: dict[str, str]
        :type atom_artifact_dir: str
        :rtype: int
        """
        fs_util.create_dir(atom_artifact_dir)
        # This console_output_file must be opened in 'w+b' mode in order to be interchangeable with the
        # TemporaryFile instance that gets instantiated in self._project_type.execute_command_in_project.
        with open(os.path.join(atom_artifact_dir, BuildArtifact.OUTPUT_FILE),
                  mode='w+b') as console_output_file:
            start_time = time.time()
            _, exit_code = self._project_type.execute_command_in_project(
                atomic_command,
                atom_environment_vars,
                output_file=console_output_file)
            elapsed_time = time.time() - start_time

        exit_code_output_path = os.path.join(atom_artifact_dir,
                                             BuildArtifact.EXIT_CODE_FILE)
        fs_util.write_file(str(exit_code) + '\n', exit_code_output_path)

        command_output_path = os.path.join(atom_artifact_dir,
                                           BuildArtifact.COMMAND_FILE)
        fs_util.write_file(str(atomic_command) + '\n', command_output_path)

        time_output_path = os.path.join(atom_artifact_dir,
                                        BuildArtifact.TIMING_FILE)
        fs_util.write_file('{:.2f}\n'.format(elapsed_time), time_output_path)

        return exit_code
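The method above leaves the command, console output, exit code, and timing as separate files in the atom artifact directory, named by the BuildArtifact constants it references. A hedged post-processing sketch that reads two of them back (read_atom_result is a hypothetical helper, and the BuildArtifact import path is an assumption):

import os

# BuildArtifact is the same class used in the method above; this import path is an assumption.
from app.master.build_artifact import BuildArtifact

def read_atom_result(atom_artifact_dir):
    """Read back the exit code and elapsed time that _execute_atom_command wrote for one atom."""
    with open(os.path.join(atom_artifact_dir, BuildArtifact.EXIT_CODE_FILE)) as exit_code_file:
        exit_code = int(exit_code_file.read().strip())
    with open(os.path.join(atom_artifact_dir, BuildArtifact.TIMING_FILE)) as timing_file:
        elapsed_time = float(timing_file.read().strip())
    return exit_code, elapsed_time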
Code example #32
File: git.py  Project: fengshao0907/ClusterRunner
    def __init__(self, url, build_project_directory='', project_directory='', remote='origin', branch='master',
                 hash='FETCH_HEAD', config=None, job_name=None, remote_files=None, atoms_override=None):
        """
        Note: the first line of each parameter docstring will be exposed as command line argument documentation for the
        clusterrunner build client.

        :param url: URL of the git repo (e.g. an https or ssh URL)
        :type url: str
        :param build_project_directory: the symlink path that PROJECT_DIR will be set to (created by this constructor)
        :type build_project_directory: str
        :param project_directory: path within the repo that contains clusterrunner.yaml
        :type project_directory: str
        :param remote: The git remote name to fetch from
        :type remote: str
        :param branch: The git branch name on the remote to fetch
        :type branch: str
        :param hash: The hash to reset hard on. If hash is not set, we use the FETCH_HEAD of <branch>.
        :type hash: str
        :param config: a yaml string representing the project_type's config
        :type config: str|None
        :param job_name: a list of job names we intend to run
        :type job_name: list[str] | None
        :param remote_files: dictionary mapping of output file to URL
        :type remote_files: dict[str, str] | None
        :param atoms_override: The list of overridden atoms (if specified, will not run atomizer).
        :type atoms_override: list[str] | None
        """
        super().__init__(config, job_name, remote_files, atoms_override)
        self._url = url
        self._remote = remote
        self._branch = branch
        self._hash = hash
        self._repo_directory = self.get_full_repo_directory(self._url)
        self._timing_file_directory = self.get_timing_file_directory(self._url)
        self._local_ref = None
        self._logger = log.get_logger(__name__)

        # We explicitly set the repo directory to 700 so we don't inadvertently expose the repo to access by other users
        fs.create_dir(self._repo_directory, self.DIRECTORY_PERMISSIONS)
        fs.create_dir(self._timing_file_directory, self.DIRECTORY_PERMISSIONS)
        fs.create_dir(os.path.dirname(build_project_directory))

        # Create a symlink from the generated build project directory to the actual project directory.
        # This lets us switch between the master's and the slave's copies of the repo without doing
        # anything hacky in order to use the master's generated atoms on the slaves.
        actual_project_directory = os.path.join(self._repo_directory, project_directory)
        try:
            os.unlink(build_project_directory)
        except FileNotFoundError:
            pass

        os.symlink(actual_project_directory, build_project_directory)
        self.project_directory = build_project_directory
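A hypothetical instantiation sketch for the constructor above, assuming the enclosing class in git.py is named Git; the URL and directories are placeholders, and in ClusterRunner these arguments normally come from the build request rather than being hard-coded:

project_type = Git(
    url='https://github.com/fengshao0907/ClusterRunner.git',       # placeholder repo URL
    build_project_directory='/tmp/clusterrunner_build/project',    # __init__ symlinks this to the repo checkout
    project_directory='',                                          # clusterrunner.yaml lives at the repo root
    remote='origin',
    branch='master',
)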
Code example #33
    def execute_subjob(self, build_id, subjob_id, atomic_commands,
                       base_executor_index):
        """
        This is the method for executing a subjob. It runs each of the specified atomic commands, then archives the
        results into a single file and returns the filename.

        :type build_id: int
        :type subjob_id: int
        :type atomic_commands: list[str]
        :type base_executor_index: int
        :rtype: str
        """
        self._logger.info('Executing subjob (Build {}, Subjob {})...',
                          build_id, subjob_id)

        # Set the current task
        self._current_build_id = build_id
        self._current_subjob_id = subjob_id

        # Maintain a list of atom artifact directories for compression and sending back to master
        atom_artifact_dirs = []

        # execute every atom and keep track of time elapsed for each
        for atom_id, atomic_command in enumerate(atomic_commands):
            atom_artifact_dir = BuildArtifact.atom_artifact_directory(
                build_id,
                subjob_id,
                atom_id,
                result_root=Configuration['artifact_directory'])

            # remove and recreate the atom artifact dir
            shutil.rmtree(atom_artifact_dir, ignore_errors=True)
            fs_util.create_dir(atom_artifact_dir)

            atom_environment_vars = {
                'ARTIFACT_DIR': atom_artifact_dir,
                'ATOM_ID': atom_id,
                'EXECUTOR_INDEX': self.id,  # Deprecated, use MACHINE_EXECUTOR_INDEX
                'MACHINE_EXECUTOR_INDEX': self.id,
                'BUILD_EXECUTOR_INDEX': base_executor_index + self.id,
            }

            atom_artifact_dirs.append(atom_artifact_dir)

            job_name = self._project_type.job_name
            atom_event_data = {
                'build_id': build_id,
                'atom_id': atom_id,
                'job_name': job_name,
                'subjob_id': subjob_id
            }
            analytics.record_event(analytics.ATOM_START, **atom_event_data)

            exit_code = self._execute_atom_command(atomic_command,
                                                   atom_environment_vars,
                                                   atom_artifact_dir)

            atom_event_data['exit_code'] = exit_code
            analytics.record_event(analytics.ATOM_FINISH, **atom_event_data)

        # Generate mapping of atom directories (for archiving) to paths in the archive file
        targets_to_archive_paths = {
            atom_dir: os.path.basename(os.path.normpath(atom_dir))
            for atom_dir in atom_artifact_dirs
        }

        # Archive file names must be unique within a build, so we append the subjob_id to the compressed file name
        subjob_artifact_dir = BuildArtifact.build_artifact_directory(
            build_id, result_root=Configuration['artifact_directory'])
        tarfile_path = os.path.join(subjob_artifact_dir,
                                    'results_{}.tar.gz'.format(subjob_id))
        fs_util.tar_directories(targets_to_archive_paths, tarfile_path)

        # Reset the current task
        self._current_build_id = None
        self._current_subjob_id = None

        return tarfile_path
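A hedged calling sketch for execute_subjob(); the executor instance and the nosetests commands are illustrative, and in ClusterRunner this call is normally driven by the slave service when the master assigns it a subjob:

# Illustrative only: 'executor' is an already-constructed subjob executor instance.
atomic_commands = [
    'nosetests tests/unit/module_a',
    'nosetests tests/unit/module_b',
]
tarfile_path = executor.execute_subjob(
    build_id=1,
    subjob_id=0,
    atomic_commands=atomic_commands,
    base_executor_index=0,
)
# tarfile_path points at results_0.tar.gz under the build's artifact directory,
# ready to be sent back to the master.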
Code example #34
    def _fetch_project(self):
        """
        Clones the project if necessary, fetches from the remote repo and resets to the requested commit
        """
        # If shallow_clones is set to True, then we need to specify the --depth=1 argument to all git fetch
        # and clone invocations.
        git_clone_fetch_depth_arg = ''
        if Configuration['shallow_clones']:
            git_clone_fetch_depth_arg = '--depth=1'

        existing_repo_is_shallow = os.path.isfile(
            os.path.join(self._repo_directory, '.git', 'shallow'))

        # If we disable shallow clones, but the existing repo is shallow, we must re-clone non-shallowly.
        if (not Configuration['shallow_clones'] and existing_repo_is_shallow
                and os.path.exists(self._repo_directory)):
            shutil.rmtree(self._repo_directory)
            fs.create_dir(self._repo_directory, self.DIRECTORY_PERMISSIONS)

        # Clone the repo if it doesn't exist
        try:
            self._execute_git_command_in_repo_and_raise_on_failure(
                'rev-parse')  # rev-parse succeeds if repo exists
        except RuntimeError:
            self._logger.notice(
                'No valid repo in "{}". Cloning fresh from "{}".',
                self._repo_directory, self._url)
            self._execute_git_command_in_repo_and_raise_on_failure(
                git_command='clone {} {} {}'.format(git_clone_fetch_depth_arg,
                                                    self._url,
                                                    self._repo_directory),
                error_msg='Could not clone repo.')

        # Must add the --update-head-ok in the scenario that the current branch of the working directory
        # is equal to self._branch, otherwise the git fetch will exit with a non-zero exit code.
        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='fetch {} --update-head-ok {} {}'.format(
                git_clone_fetch_depth_arg, self._remote, self._branch),
            error_msg='Could not fetch specified branch "{}" from remote "{}".'
            .format(self._branch, self._remote))

        # Validate and convert the user-specified hash/refspec to a full git hash
        fetch_head_hash = self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='rev-parse FETCH_HEAD',
            error_msg='Could not rev-parse FETCH_HEAD of {} to a commit hash.'.format(self._branch)).strip()

        # Save this hash as a local ref. Named local refs are necessary for slaves to fetch correctly from the master.
        # The local ref will be passed on to slaves instead of the user-specified branch.
        self._local_ref = 'refs/clusterrunner/{}'.format(fetch_head_hash)
        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='update-ref {} {}'.format(self._local_ref,
                                                  fetch_head_hash),
            error_msg='Could not update local ref.')

        # The '--' argument acts as a delimiter to differentiate values that can be "tree-ish" or a "path"
        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='reset --hard {} --'.format(fetch_head_hash),
            error_msg='Could not reset Git repo.')

        self._execute_git_command_in_repo_and_raise_on_failure(
            git_command='clean -dfx', error_msg='Could not clean Git repo.')
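Strung together, the method above amounts to a fixed sequence of git invocations. A rough sketch of that sequence for a shallow build; the angle-bracket values are placeholders, the initial bare rev-parse existence check is omitted, and the refs/clusterrunner/<hash> ref name comes straight from the code above:

# Approximate git commands issued by _fetch_project for a shallow build, in order.
git_commands = [
    'clone --depth=1 <url> <repo_directory>',              # only when no valid repo exists yet
    'fetch --depth=1 --update-head-ok <remote> <branch>',
    'rev-parse FETCH_HEAD',                                 # resolve the fetched ref to a full commit hash
    'update-ref refs/clusterrunner/<hash> <hash>',          # named local ref that slaves fetch from the master
    'reset --hard <hash> --',                               # '--' separates the tree-ish from any paths
    'clean -dfx',
]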
Code example #35
    def execute_subjob(self, build_id, subjob_id, atomic_commands, base_executor_index):
        """
        This is the method for executing a subjob. It runs each of the specified atomic commands, then archives the
        results into a single file and returns the filename.

        :type build_id: int
        :type subjob_id: int
        :type atomic_commands: list[str]
        :type base_executor_index: int
        :rtype: str
        """
        self._logger.info('Executing subjob (Build {}, Subjob {})...', build_id, subjob_id)

        # Set the current task
        self._current_build_id = build_id
        self._current_subjob_id = subjob_id

        # Maintain a list of atom artifact directories for compression and sending back to master
        atom_artifact_dirs = []

        # execute every atom and keep track of time elapsed for each
        for atom_id, atomic_command in enumerate(atomic_commands):
            atom_artifact_dir = BuildArtifact.atom_artifact_directory(
                build_id,
                subjob_id,
                atom_id,
                result_root=Configuration['artifact_directory']
            )

            # remove and recreate the atom artifact dir
            shutil.rmtree(atom_artifact_dir, ignore_errors=True)
            fs_util.create_dir(atom_artifact_dir)

            atom_environment_vars = {
                'ARTIFACT_DIR': atom_artifact_dir,
                'ATOM_ID': atom_id,
                'EXECUTOR_INDEX': self.id,  # Deprecated, use MACHINE_EXECUTOR_INDEX
                'MACHINE_EXECUTOR_INDEX': self.id,
                'BUILD_EXECUTOR_INDEX': base_executor_index + self.id,
            }

            atom_artifact_dirs.append(atom_artifact_dir)

            job_name = self._project_type.job_name
            atom_event_data = {'build_id': build_id, 'atom_id': atom_id, 'job_name': job_name, 'subjob_id': subjob_id}
            analytics.record_event(analytics.ATOM_START, **atom_event_data)

            exit_code = self._execute_atom_command(atomic_command, atom_environment_vars, atom_artifact_dir)

            atom_event_data['exit_code'] = exit_code
            analytics.record_event(analytics.ATOM_FINISH, **atom_event_data)

        # Generate mapping of atom directories (for archiving) to paths in the archive file
        targets_to_archive_paths = {atom_dir: os.path.basename(os.path.normpath(atom_dir))
                                    for atom_dir in atom_artifact_dirs}

        # Archive file names must be unique within a build, so we append the subjob_id to the compressed file name
        subjob_artifact_dir = BuildArtifact.build_artifact_directory(build_id,
                                                                     result_root=Configuration['artifact_directory'])
        tarfile_path = os.path.join(subjob_artifact_dir, 'results_{}.tar.gz'.format(subjob_id))
        fs_util.compress_directories(targets_to_archive_paths, tarfile_path)

        # Reset the current task
        self._current_build_id = None
        self._current_subjob_id = None

        return tarfile_path