def __init__(self, config_file, action, stage=None):
        super(ProjectHandler, self).__init__()
        self.config_file = config_file
        self.stage = stage
        self.action = action

        self.stage_actions = [
            'submit', 'clean', 'status', 'check', 'statistics'
        ]
        self.project_actions = ['check', 'clean']

        if stage is None and self.action not in self.project_actions:
            raise Exception("Action {} not available".format(self.action))
        elif stage is not None and self.action not in self.stage_actions:
            raise Exception("Action {} not available".format(self.action))

        # Build the configuration class:
        self.config = ProjectConfig(config_file)

        # Make sure the stage requested is in the file:
        if stage is not None and stage not in self.config.stages:
            raise Exception(
                'Stage {0} not in configuration file.'.format(stage))

        # Create the work directory:
        self.project_work_dir = self.config['top_dir'] + '/work/'

        if stage is not None:
            self.stage_work_dir = self.project_work_dir + stage + '/'
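The constructor rejects any action that is not valid for the given scope: with no stage, only the project-wide actions are allowed; with a stage, only the stage actions. A minimal sketch of a command-line entry point that could drive it (the argparse wiring here is an assumption, not part of the original project):

import argparse

def cli():
    # Hypothetical CLI wrapper around ProjectHandler; the flag names are
    # illustrative and not taken from the original project.
    parser = argparse.ArgumentParser(description='Drive a production project')
    parser.add_argument('config_file', help='Path to the project yml file')
    parser.add_argument('action',
                        choices=['submit', 'clean', 'status', 'check', 'statistics'])
    parser.add_argument('--stage', default=None,
                        help='Stage name; omit for project-wide actions')
    args = parser.parse_args()

    handler = ProjectHandler(args.config_file, args.action, stage=args.stage)
    handler.act()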
Example #2
    def __init__(self, config: ProjectConfig):
        self.ip_address = config.get('IP_ADDRESS')
        self.port = int(config.get('IP_PORT'))
        self._connected = False

        self.conn = None
        self.client = None
        self.addr = None
Example #3
def main():
    project = ProjectConfig('example_project.yml')
    project.larsoft().setup_larsoft()
    db_file = project['top_dir'] + '/work/' + project['name'] + '.db'
    a = JobRunner(project=project, stage=project.stage('generation'))
    a.prepare_job()
    a.run_job(db_util=DBUtil(db_file))
    return
Example #4
    def __init__(self, config: ProjectConfig):
        # initialize the camera and stream
        self.camera = PiCamera()
        self.camera.resolution = (int(config.get('RES_WIDTH')),
                                  int(config.get('RES_HEIGHT')))
        self.camera.framerate = int(config.get('FRAMERATE'))

        self.server = None
        self.address = (config.get('IP_ADDRESS'),
                        int(config.get('PICAM_PORT')))

        time.sleep(2.0)

        self.camera.start_recording(output, format='mjpeg')
        self._connected = False
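The call to start_recording writes to an `output` object defined outside this excerpt. In the standard picamera MJPEG streaming recipe, that object buffers one JPEG frame at a time; a sketch along those lines (assumed, since the original definition is not shown):

import io
from threading import Condition

class StreamingOutput(object):
    # Buffers MJPEG frames; each frame starts with the JPEG magic bytes.
    def __init__(self):
        self.frame = None
        self.buffer = io.BytesIO()
        self.condition = Condition()

    def write(self, buf):
        if buf.startswith(b'\xff\xd8'):
            # New frame: publish the previous buffer contents and reset.
            self.buffer.truncate()
            with self.condition:
                self.frame = self.buffer.getvalue()
                self.condition.notify_all()
            self.buffer.seek(0)
        return self.buffer.write(buf)

output = StreamingOutput()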
Example #5
def build_project_if_first_sync(config: ProjectConfig):
    if config.UE4EnginePath == '':
        # No engine, definitely build project
        print_action('No engine found, running full build...')
        if not do_project_build(['--error_pause_only']):
            sys.exit(1)
        else:
            config.setup_engine_paths()
            build_checker = ProjectBuildCheck(config)
            build_checker.update_repo_rev_cache()
            build_checker.save_cache()
    else:
        print_action('Checking First Sync Status...')
        build_checker = ProjectBuildCheck(config)
        if not build_checker.was_loaded():
            # First sync, so do a build
            if not do_project_build(['--error_pause_only']):
                sys.exit(1)
            else:
                build_checker.update_repo_rev_cache()
                build_checker.save_cache()
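Both branches above end in the same build-then-cache sequence. A possible refactor, assuming do_project_build and ProjectBuildCheck behave as used here, hoists that tail into one helper:

def build_and_cache(config: ProjectConfig, setup_paths=False):
    # Shared tail of both branches: build, then refresh the revision cache.
    # setup_paths is only needed when no engine was found beforehand.
    if not do_project_build(['--error_pause_only']):
        sys.exit(1)
    if setup_paths:
        config.setup_engine_paths()
    build_checker = ProjectBuildCheck(config)
    build_checker.update_repo_rev_cache()
    build_checker.save_cache()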
Example #6
def tools(config: ProjectConfig, script):
    if not os.path.isfile(script):
        error_exit('No build script defined! Use the -s arg')

    with open(script, 'r') as fp:
        try:
            script_json = json.load(fp)
        except Exception as jsonError:
            error_exit('Build Script Syntax Error:\n{}'.format(jsonError))
            return
        if not config.load_configuration(script_json, ensure_engine=True):
            error_exit('Invalid Script file!')
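error_exit is used throughout these examples but never shown; the defensive `return` after it suggests it terminates the process. A plausible sketch, assuming it simply reports and exits (the optional pause mirrors the later examples that pass a second argument):

def error_exit(msg, pause=True):
    # Hypothetical implementation: report the error, optionally wait for a
    # keypress, then exit with a failure status.
    print_error(msg)
    if pause:
        click.pause()
    sys.exit(1)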
Example #7
def part1():
    # parse the configuration file:
    config_file = '/home/cadams/Harvard-Production/yml-configs/bnb_plus_cosmics-multistage-test.yml'
    config = ProjectConfig(config_file)

    # Print out the stages to run over:
    for name, stage in config.stages.iteritems():
        # Create a handler to check files and such
        handler = ProjectHandler(config_file, action='status', stage=name)
        handler.build_directory()
        handler.first_stage_check()
        db = handler.project_db

        print('Faking stage {0}'.format(stage.name))
        print('  Input specified? {0}'.format(stage.has_input()))

        #Make sure output directory exists:
        out_dir = stage.output_directory()
        handler.make_directory(out_dir)

        # Generate fake output for this stage:
        for i in range(stage.n_jobs()):
            for fcl in stage.fcl():
                fcl = os.path.basename(fcl)
                _f = '/{0}_fake_{1}.root'.format(fcl, i)

            # Input
            if stage.has_input():
                input_files, locations = stage.get_next_files(1, db)

                print('Input files are: \n  {0}'.format(input_files))

            # Processing
            # Nothing actually happens
            # Output

            with open(out_dir + _f, 'w') as file:
                pass

            # Declare the file to the database:
            db.declare_file(filename=_f,
                            dataset=stage.output_dataset(),
                            location=out_dir,
                            stage=name,
                            status=0,
                            nevents=10,
                            ftype=0)

            # Mark the input files as consumed:
            if stage.has_input():
                stage.finalize(input_files, db)
Example #8
def load(window):
    """Intelligently guess the appropriate .ensime file location for the
    given window. Load the .ensime and parse as s-expression.
    Return: (inferred project root directory, config sexp)
    """
    for f in _locations(window):
        try:
            conf = ProjectConfig(f)
            return conf
        except Exception:
            exc_type, exc_val, _ = os.sys.exc_info()
            raise BadEnsimeConfig(
                """Ensime has failed to parse the .ensime configuration file at
{loc} because ofthe following error:
{typ} : {val}""".format(loc=str(f), typ=str(exc_type), val=str(exc_val)))
    raise DotEnsimeNotFound(
        errno.ENOENT,
        """Ensime has failed to find a .ensime file within this project.
Create a .ensime file by running 'sbt ensimeConfig' or equivalent for your build tool.
We looked at """, window.folders())
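`_locations(window)` is not shown in this example. A plausible sketch, assuming it simply yields candidate .ensime paths for each folder open in the window:

import os

def _locations(window):
    # Hypothetical helper: yield the .ensime file path that would sit at the
    # root of each folder currently open in the window.
    for folder in window.folders():
        candidate = os.path.join(folder, '.ensime')
        if os.path.isfile(candidate):
            yield candidate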
Example #9
def genproj_func(config: ProjectConfig, run_it):
    """ Generate project file """
    print_action('Generating Project Files')

    cmd_args = [
        '-ProjectFiles', '-project={}'.format(config.uproject_file_path),
        '-game', '-engine'
    ]
    if config.engine_minor_version <= 25:
        cmd_args.append('-VS{}'.format(
            get_visual_studio_version(config.get_suitable_vs_versions())))

    if launch(config.UE4UBTPath, cmd_args) != 0:
        error_exit('Failed to generate project files, see errors...',
                   not config.automated)

    if run_it:
        launch(os.path.join(config.uproject_dir_path,
                            config.uproject_name + '.sln'),
               separate_terminal=True,
               should_wait=False)
Example #10
def main(config_file, stage):
    print("Creating Project Config Object")
    project = ProjectConfig(config_file)
    print("Config created, setup software ...")
    project.software().setup()

    runner_class = RunnerTypes()[project.software()['type']]
    runner = runner_class(project = project, stage=project.stage(stage))
    print("Preparing job ...")
    runner.prepare_job()

    job_id = "{0}_{1}".format(os.environ['SLURM_ARRAY_JOB_ID'], os.environ['SLURM_ARRAY_TASK_ID'])
    print("Job ID is {0}".format(job_id))
    print("Running job ...")
    runner.run_job(job_id)
    return
Example #11
def test_fails_when_given_invalid_config():
    badconf = path.local(__file__).dirpath() / 'resources' / 'broken.conf'
    with raises(sexpdata.ExpectClosingBracket):
        ProjectConfig(badconf.strpath)
Example #12
# coding: utf-8

from py import path
from pytest import raises
import sexpdata

from config import ProjectConfig

confpath = path.local(__file__).dirpath() / 'resources' / 'test.conf'
config = ProjectConfig(confpath.strpath)


def test_parses_dot_ensime():
    assert config.get('scala-version') == '2.11.8'
    assert config.get('list') == ["a", "b", "c", "d"]
    assert len(config['nest']) == 2
    assert config['nest'][0]['id'] == {'config': 'conf1', 'name': 'nested1'}
    assert config['nest'][0]['targets'] == ['abc', 'xyz']


def test_is_immutable():
    with raises(TypeError) as excinfo:
        config['scala-version'] = 'bogus'
    assert 'does not support item assignment' in str(excinfo.value)


def test_is_dict_like():
    assert set(config.keys()) == set(['name', 'scala-version', 'list', 'nest'])
    assert len(config) == 4

Example #13
class Project:
    def __init__(self, out):
        self.out = out
        self.config = ProjectConfig()
        self.config.load()

    def name(self):
        return self.config.name()

    def version(self):
        return self.config.version()

    def load_options(self, opt):
        opt.load('compiler_c')
        opt.load('vala')

    def load_configure(self, conf):
        conf.load('compiler_c')
        conf.load('vala')
        conf.find_program('g-ir-compiler', var='GIR_COMPILER')
        for lib in self.config.glib_libs():
            conf.check_cfg(
                package = lib,
                uselib_store = lib,
                atleast_version = self.config.min_glib_version(),
                args = '--cflags --libs'
            )

        for lib in self.config.libs():
            conf.check_cfg(
                package = lib.name_version,
                uselib_store = lib.name_version,
                atleast_version = lib.min_version,
                args = '--cflags --libs'
            )


    def load_build(self, bld):
        for lib in self.__builds_for('src/lib'):
            self.__load_lib_build_tasks(bld, lib)

        for bin in self.__builds_for('src/bin'):
            self.__load_bin_build_tasks(bld, bin)

        for test in self.__builds_for('tests/lib'):
            self.__load_test_build_tasks(bld, test)

    def load_test(self, ctx):
        env = { 'LD_LIBRARY_PATH': self.out }
        for test in _listdirs('tests/lib'):
            command = '%s/lib%s_TESTS' % (self.out, test)
            if ctx.exec_command(command, env=env) != 0:
                raise Errors.WafError('Tests failed')

    def __builds_for(self, base_dir):
        for name in _listdirs(base_dir):
            yield BuildConfig(base_dir, name, self.config)

    def __load_bin_build_tasks(self, bld, bin):
        bld(
            features     = 'c cprogram',
            source       = bld.path.ant_glob(bin.source_pattern()),
            target       = bin.name,
            vapi_dirs    = self.config.vapi_dirs(),
            uselib       = bin.external_packages(),
            packages     = bin.external_packages(),
            use          = bin.internal_packages(),
        )

    def __load_test_build_tasks(self, bld, test):
        bld(
            features     = 'c cprogram',
            source       = bld.path.ant_glob('tests/helpers/**/*.vala') +
                           bld.path.ant_glob(test.source_pattern()),
            target       = 'lib%s_TESTS' % test.name,
            install_path = False,
            vapi_dirs    = self.config.vapi_dirs(),
            uselib       = test.external_packages(),
            packages     = test.external_packages(),
            use          = test.internal_packages(),
        )

    def __load_lib_build_tasks(self, bld, lib):
        bld(
            features     = 'c cshlib',
            source       = bld.path.ant_glob(lib.source_pattern()),
            target       = lib.name_version(),
            pkg_name     = lib.name,
            gir          = lib.gir_name_version(),
            vapi_dirs    = self.config.vapi_dirs(),
            vnum         = self.config.so_version(),
            uselib       = lib.external_packages(),
            packages     = lib.external_packages(),
            use          = lib.internal_packages(),
        )

        bld(
            after        = lib.name_version(),
            source       = '%s.gir' % lib.gir_name_version(),
            target       = '%s.typelib' % lib.gir_name_version(),
            install_path = '${LIBDIR}/girepository-1.0',
            rule         = '${GIR_COMPILER} ${SRC} -o ${TGT}',
        )

        bld(
            features     = 'subst',
            source       = path.join(PKG_DIR, 'lib.pc.in'),
            target       = '%s.pc' % lib.name_version(),
            install_path = '${LIBDIR}/pkgconfig',
            VERSION      = self.config.version(),
            NAME_VERSION = lib.name_version(),
            GIR_NAME     = lib.gir_name(),
            DESCRIPTION  = lib.description(),
            PACKAGES     = lib.pkg_configs(),
        )
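`_listdirs` is referenced above but not defined in the excerpt; a plausible sketch, assuming it simply lists the immediate subdirectories of a base directory:

import os

def _listdirs(base_dir):
    # Hypothetical helper: names of immediate subdirectories of base_dir.
    return [name for name in os.listdir(base_dir)
            if os.path.isdir(os.path.join(base_dir, name))]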
Example #14
class ProjectHandler(object):
    '''
    This class takes the input from the command line, parses,
    and takes the action needed.
    '''
    def __init__(self, config_file, action, stage=None):
        super(ProjectHandler, self).__init__()
        self.config_file = config_file
        self.stage = stage
        self.action = action

        self.stage_actions = [
            'submit', 'clean', 'status', 'check', 'statistics'
        ]
        self.project_actions = ['check', 'clean']

        if stage is None and self.action not in self.project_actions:
            raise Exception("Action {} not available".format(self.action))
        elif stage is not None and self.action not in self.stage_actions:
            raise Exception("Action {} not available".format(self.action))

        # Build the configuration class:
        self.config = ProjectConfig(config_file)

        # Make sure the stage requested is in the file:
        if stage is not None and stage not in self.config.stages:
            raise Exception(
                'Stage {0} not in configuration file.'.format(stage))

        # Create the work directory:
        self.project_work_dir = self.config['top_dir'] + '/work/'

        if stage is not None:
            self.stage_work_dir = self.project_work_dir + stage + '/'

    def build_directory(self):

        self.make_directory(self.project_work_dir)

        self.make_directory(self.stage_work_dir)

    def act(self):
        if self.action == 'submit':
            self.submit()
        elif self.action == 'clean':
            self.clean()
        elif self.action == 'status':
            self.status()
        elif self.action == 'check':
            self.check()
        elif self.action == 'makeup':
            self.makeup()
        elif self.action == 'statistics':
            self.statistics()
        else:
            return

    def submit(self, makeup=False):
        '''
        Build a submission script, then call it to launch
        batch jobs.

        Slurm copies environment variables from the process that launches jobs,
        so we will make a child of the launching process in python and launch jobs
        with larsoft env variables set up.
        '''

        self.build_directory()

        # Get the active stage:
        stage = self.config.stage(self.stage)

        # First part of 'submit' is to make sure the input, work
        # and output directories exist
        print('Verifying output directory ..........')
        self.make_directory(stage.output_directory())
        print('Verifying project work directory ....')
        self.make_directory(self.project_work_dir)
        print('Verifying stage work directory ......')
        self.make_directory(self.stage_work_dir)

        if not makeup:
            print('Initializing database entries .......')
            # Make sure the datasets for this project are initialized:
            proj_util = ProjectUtils()

            proj_util.create_dataset(dataset=stage.output_dataset(),
                                     parents=stage.input_dataset())

        # If the stage work directory is not empty, force the user to clean it:
        if os.listdir(self.stage_work_dir) != [] and not makeup:
            print('Error: stage work directory is not empty.')
            raise Exception('Please clean the work directory and resubmit.')

        print('Building submission script ..........')
        # Next, build a submission script to actually submit the jobs
        job_name = self.config['name'] + '.' + stage.name
        script_name = self.stage_work_dir + '{0}_submission_script.slurm'.format(
            job_name)
        with open(script_name, 'w') as script:
            script.write('#!/bin/bash\n')
            script.write('#SBATCH --job-name={0}\n'.format(job_name))
            script.write('#SBATCH --ntasks=1\n')
            script.write('#SBATCH -p guenette\n')
            script.write('#SBATCH --mem={0}mb\n'.format(stage['memory']))
            script.write('#SBATCH --time={0}\n'.format(stage['time']))
            script.write('#SBATCH --output=array_%A-%a.log\n')
            script.write('\n')
            script.write('pwd; hostname; date;\n')
            script.write('whoami;\n')
            script.write('echo \"about to execute run_job.py.\";\n')
            script.write('unset module\n')
            script.write('unset helmod\n')
            script.write('\n')
            script.write(
                '#Below is the python script that runs on each node:\n')
            script.write('run_job.py {0} {1} \n'.format(
                os.environ['PWD'] + '/' + self.config_file, self.stage))
            script.write('date;\n')
            script.write('\n')

        # Maximum running jobs is not set by default, but can be specified:

        n_jobs = stage.n_jobs() - 1
        if makeup:
            with open(self.stage_work_dir + "makeup_jobs.txt", 'r') as _mj:
                n_jobs = int(_mj.readline()) - 1

        # Here is the command to actually submit jobs (the array range uses
        # n_jobs, so makeup submissions launch only the makeup count):
        command = [
            'sbatch', '-a', '0-{0}%{1}'.format(n_jobs,
                                               stage.concurrent_jobs()),
            script_name
        ]

        with open(self.stage_work_dir + '/slurm_submission_command.txt',
                  'w') as _com:
            _com.write(' '.join(command))

        print("Submitting jobs ...")
        # Run the command:
        proc = subprocess.Popen(command,
                                cwd=self.stage_work_dir,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=dict(os.environ))
        retval = proc.poll()
        # loop until the command finishes running
        stdout = ''
        stderr = ''
        while retval is None:
            time.sleep(1.0)
            # while waiting, drain stdout and stderr to avoid clogging the pipes
            for line in iter(proc.stdout.readline, b''):
                stdout += line
            for line in iter(proc.stderr.readline, b''):
                stderr += line
            # update the return value
            retval = proc.poll()

        with open(self.stage_work_dir + '/submission_log.out', 'w') as _log:
            _log.write(stdout)
        with open(self.stage_work_dir + '/submission_log.err', 'w') as _log:
            _log.write(stderr)

        return_code = proc.returncode
        if return_code == 0:
            print("Submitted jobs successfully.")

            # Make sure to store the currently running jobID:
            jobid = int(stdout.split(' ')[-1])
            with open(self.stage_work_dir + 'current_running_jobid',
                      'w') as _log:
                _log.write(str(jobid))
        else:
            print(
                "sbatch exited with status {0}, check output logs in the work directory"
                .format(return_code))

    def make_directory(self, path):
        '''
        Make a directory safely
        '''
        try:
            os.makedirs(path)
        except OSError:
            if not os.path.isdir(path):
                raise

    def clean(self):
        '''
        Clean the project.  If stage is None, clean the whole project;
        otherwise, clean only that stage.  When cleaning everything, the
        database entries are dropped only once the files are deleted.
        '''

        proj_utils = ProjectUtils()
        dataset_reader = DatasetReader()

        if not self.get_clean_confirmation():
            return
        # If stage is set, clean that stage only:
        if self.stage is not None:
            stage = self.config.stages[self.stage]
            # Remove files from the database and purge them from disk:
            for f in dataset_reader.list_file_locations(
                    dataset=stage.output_dataset()):
                os.remove(f)
            # Clean the files from the database:
            proj_utils.drop_dataset(stage.output_dataset())

            shutil.rmtree(stage.output_directory())
            shutil.rmtree(self.stage_work_dir)
        else:
            # Clean ALL stages plus the work directory and the top level directory
            for name, stage in self.config.stages.iteritems():
                # Remove files from the database and purge them from disk:
                for f in dataset_reader.list_file_locations(
                        dataset=stage.output_dataset()):
                    os.remove(f)
                proj_utils.drop_dataset(stage.output_dataset())
                if os.path.isdir(stage.output_directory()):
                    shutil.rmtree(stage.output_directory())
            if os.path.isdir(self.project_work_dir):
                shutil.rmtree(self.project_work_dir)
            if os.path.isdir(self.config['top_dir']):
                shutil.rmtree(self.config['top_dir'])

    def get_clean_confirmation(self):
        '''
        Force the user to confirm he/she wants to clean things up
        '''
        print 'You are requesting to clean the following stages:'
        if self.stage is not None:
            print '  {0}'.format(self.stage)
        else:
            for name, stage in self.config.stages.iteritems():
                print '  {0}'.format(stage.name)
            print('Additionally, this will delete:')
            print('  {0}'.format(self.project_work_dir))
            print('  {0}'.format(self.config['top_dir']))
        confirmation = raw_input(
            'Please confirm this is the intended action (type \"y\"): ')
        if confirmation.lower() in ['y', 'yes']:
            return True
        return False

    def squeue_parse(self, jobid):

        # Going to use squeue for this command and parse the output

        command = [
            '/usr/bin/squeue',
            '--format=%.25i %.9P %.8j %.8u %.8T %.10M %.9l %.6D %R', '-j',
            str(jobid)
        ]

        proc = subprocess.Popen(command,
                                cwd=self.stage_work_dir,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=dict(os.environ))
        retval = proc.poll()
        # loop until the command finishes running
        stdout = ''
        stderr = ''
        while retval is None:
            time.sleep(1.0)
            # while waiting, drain stdout and stderr to avoid clogging the pipes
            for line in iter(proc.stdout.readline, b''):
                stdout += line
            for line in iter(proc.stderr.readline, b''):
                stderr += line
            # update the return value
            retval = proc.poll()

        if retval != 0:

            raise Exception('Error when querying the job status.')

        # Now, start digging through the output
        lines = stdout.split('\n')
        if len(lines) <= 1:
            # No jobs running
            return None

        # Else, sort the jobs.
        job_status_counts = dict()
        keys = lines[0].split()
        state_index = -1
        jobid_index = -1
        for i, key in enumerate(keys):
            if key == 'STATE':
                state_index = i
            if key == 'JOBID':
                jobid_index = i

        for line in lines[1:]:
            line = line.split()
            if len(line) == 0:
                continue
            state = line[state_index]
            jobid = line[jobid_index]
            if state == 'PENDING':
                # have to do something special to count the number of pending jobs
                pnd_split = jobid.split('_')[-1]
                pnd_split = pnd_split.replace('[', '').replace(']', '')
                pnd_split = pnd_split.split('%')[0]
                n_jobs = int(pnd_split.split('-')[-1]) - int(
                    pnd_split.split('-')[0]) + 1
                job_status_counts[state] = n_jobs
            else:
                if state not in job_status_counts.keys():
                    job_status_counts[state] = 1
                else:
                    job_status_counts[state] += 1

        return job_status_counts

    def status(self):
        '''
        The status function reads in the job id number from the work directory
        and queries the scheduler to get job status.
        '''
        # The job submission output is stored in the work directory.

        # Get the job ID from the submission script:

        if self.stage is None:
            raise Exception('Please specify a stage.')

        # Get the jobid, first:
        jobid = self.job_id()

        job_status_counts = self.squeue_parse(jobid)

        print('Condensed information for jobid {0}:'.format(jobid))
        for state, count in job_status_counts.iteritems():
            print('  {0} jobs in state {1}'.format(count, state))

    def job_id(self):
        '''Look up the job id

        '''
        # Get the job ID from the submission script:
        submission_log = self.stage_work_dir + '/current_running_jobid'
        with open(submission_log, 'r') as sl:
            line = sl.readline()
            job_id = int(line.split(' ')[-1])

        return job_id

    def is_running_jobs(self):
        '''Find out how many jobs are running or queued

        '''

        # Get the jobid, first:
        jobid = self.job_id()

        if self.squeue_parse(jobid) is None:
            return False
        else:
            return True

    def check(self):
        '''
        The check function parses the data base and prints out information
        about number of completed files and number of events processed
        '''

        if self.stage is not None:
            stage = self.config.stage(self.stage)
            self.check_stage(stage)
        else:
            for stage_name, stage in self.config.stages.iteritems():
                self.check_stage(stage)

    def print_check_information(self):
        pass

    def check_stage(self, stage):
        '''Check only a single stage

        Figure out what the goals of this stage were, and what the results were.

        Arguments:
            stage {StageConfig} -- stage identifier
        '''

        # First figure out what are the goals of this stage
        total_out_events = stage.total_output_events()
        total_ana_events = stage.total_output_events()
        if stage['output']['anaonly']:
            total_out_events = 0

        dataset_reader = DatasetReader()
        project_reader = ProjectReader()

        # Next, count the events declared to the database for this stage:
        n_ana_events = dataset_reader.sum(dataset=stage.output_dataset(),
                                          target='nevents',
                                          type=1)
        n_out_events = dataset_reader.sum(dataset=stage.output_dataset(),
                                          target='nevents',
                                          type=0)

        n_ana_files = dataset_reader.count_files(
            dataset=stage.output_dataset(), type=1)
        n_out_files = dataset_reader.count_files(
            dataset=stage.output_dataset(), type=0)

        print('Report for stage {0}: '.format(stage.name))
        print(
            '  Completed {n_ana} events of {target} specified, across {n_ana_files} ana files.'
            .format(n_ana=n_ana_events,
                    target=total_ana_events,
                    n_ana_files=n_ana_files))
        print(
            '  Completed {n_out} events of {target} specified, across {n_out_files} output files.'
            .format(n_out=n_out_events,
                    target=total_out_events,
                    n_out_files=n_out_files))

        # If this stage has an input, and therefore a consumption table,
        # Find out how many files are remaining to be processed and
        # How many are yielded but not consumed.

        if project_reader.has_parents(stage.output_dataset()):
            n_consumed = dataset_reader.count_consumption_files(
                dataset=stage.output_dataset(), state='consumed')
            n_unyielded = dataset_reader.count_consumption_files(
                dataset=stage.output_dataset(), state='unyielded')
            n_yielded = dataset_reader.count_consumption_files(
                dataset=stage.output_dataset(), state='yielded')
            print('  {0} files have been consumed from the input'.format(
                n_consumed))
            print(
                '  {0} files have been yielded from the input without finishing'
                .format(n_yielded))
            print('  {0} files are unprocessed from the input'.format(
                n_unyielded))

        #Calculate how many makeup jobs to run
        # Look at:
        # how many events are supposed to be there
        # how many events were produced, over how many files
        # how many files per job are consumed (if using an input)

        # Since we don't always know how many events are in each job,
        # compare the number of produced events to the number of produced files:
        n_missing_events = 0
        out_events_per_file = 0
        if stage['output']['anaonly']:
            if n_ana_events is None or n_ana_events == 0:
                n_makeup_jobs = stage.n_jobs()
            else:
                n_missing_events = total_ana_events - n_ana_events
                out_events_per_file = n_ana_events / n_ana_files
                n_makeup_jobs = int(n_missing_events / out_events_per_file + 1)

        else:
            if n_out_events is None or n_out_events == 0:
                n_makeup_jobs = stage.n_jobs()
            else:
                n_missing_events = total_out_events - n_out_events
                out_events_per_file = n_out_events / n_out_files
                n_makeup_jobs = int(n_missing_events / out_events_per_file + 1)

        # How many events were produced over how many files?
        print('  Need to run {0} makeup jobs, makeup is not implemented yet.'.
              format(n_makeup_jobs))

        # Write the number of required makeup jobs to the work directory:
        makeup_log = self.stage_work_dir + "makeup_jobs.txt"
        with open(makeup_log, 'w') as _ml:
            _ml.write(str(n_makeup_jobs))

    def makeup(self):
        '''Run makeup jobs

        Search the list of completed jobs, and query how many jobs are not running

        If no jobs are running, submit jobs to complete the previous stage of running.
        '''

        # Makeup behavior is different for jobs with input than without.
        # For jobs without input, we look at the target number of events,
        # compare with the produced number of events/per file, calculate
        # the approximate number of needed jobs to meet the target, and submit that

        # For jobs with input, we reset the consumption status of failed jobs,
        # then compare the number of files per job in the yml to the number of unprocessed
        # files.  We submit the number of needed jobs to process remaining files.

        # First, make sure there are no jobs running for the current submission
        # of this project

        # Now, move the file containing the job id to a list of old job ids, and
        # clean the old file to make room for the new one.

        # First, the makeup job count written by check() to makeup_jobs.txt
        # would be read back here (left unimplemented; the original bare
        # n_makeup_jobs expression would raise a NameError).

        # Makeup command requires a check stage command first
        print('Submission of makeup jobs is not implemented yet.')

    def statistics(self):
        ''' Call sacct to get the statistics for this job in long form.

        Saves to a file in the work area for this job.
        '''
        command = ['sacct']

        format_list = [
            'jobid%20',
            'jobname%50',
            'partition%30',
            'account%20',
            'maxvmsize',
            'avevmsize',
            'maxrss',
            'reqmem',
            'averss',
            'avecpu',
            'avecpufreq',
            'elapsed',
            'state',
            'exitcode',
        ]

        command.append('--format=' + ','.join(format_list))
        # command.append('--long')

        command.append('-j')
        command.append(str(self.job_id()))

        proc = subprocess.Popen(command,
                                cwd=self.stage_work_dir,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=dict(os.environ))
        retval = proc.poll()
        # loop until the command finishes running
        stdout = ''
        stderr = ''
        while retval is None:
            time.sleep(1.0)
            # while waiting, drain stdout and stderr to avoid clogging the pipes
            for line in iter(proc.stdout.readline, b''):
                stdout += line
            for line in iter(proc.stderr.readline, b''):
                stderr += line
            # update the return value
            retval = proc.poll()

        if retval != 0:

            raise Exception('Error when querying the sacct database.')

        # Finished querying, write the output to a log file.
        file_name = "/sacct_long_job_{0}.out".format(self.job_id())
        with open(self.stage_work_dir + file_name, 'w') as _job_sacct_log:
            _job_sacct_log.write(stdout)

        print('sacct files for job_id {job_id} have been written to {path}'.
              format(job_id=self.job_id(),
                     path=self.stage_work_dir + file_name))
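The same poll-and-drain loop around subprocess.Popen appears three times in this class (submit, squeue_parse, and statistics). Since none of the call sites stream output incrementally, a helper built on communicate(), which already waits for the process and drains both pipes, would be equivalent and shorter; a sketch, assuming the same string pipes as the original:

def run_and_capture(command, cwd=None):
    # Run a command, wait for it, and return (returncode, stdout, stderr).
    # communicate() drains both pipes, so the output cannot clog them.
    proc = subprocess.Popen(command,
                            cwd=cwd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env=dict(os.environ))
    stdout, stderr = proc.communicate()
    return proc.returncode, stdout, stderr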
Example #15
 def __init__(self, out):
     self.out = out
     self.config = ProjectConfig()
     self.config.load()
Example #16
def build_script(engine, script, configuration, buildtype, build, platform, clean):
    """
    The Main call for build script execution.
    :param engine: The desired engine path, absolute or relative.
    :param script: The Project Script which defines the projects paths, build steps, and extra information.
    :param configuration: Build configuration, e.g. Shipping
    :param buildtype: Which type of build are you trying to create? Editor OR Package?
    :param build: Which build steps to execute?
    :param platform: Which platform to build for?
    :param clean: Causes all actions to consider cleaning up their workspaces before executing their action.
    """
    # Fixup for old build type 'Game'.
    if buildtype == 'Game':
        buildtype = 'Editor'

    # Ensure Visual Studio is installed
    if get_visual_studio_version() not in [2015, 2017]:
        print_error('Cannot run build, valid visual studio install not found!')
        return False

    if not os.path.isfile(script):
        error_exit('No build script defined! Use the -s arg')

    with open(script, 'r') as fp:
        try:
            script_json = json.load(fp)
        except Exception as jsonError:
            error_exit('Build Script Syntax Error:\n{}'.format(jsonError))
            return

    config = ProjectConfig(configuration, platform, False, clean)
    if not config.load_configuration(script_json, engine, False):
        error_exit('Failed to load configuration. See errors above.')

    print_title('Unreal Project Builder')

    build_meta = BuildMeta('project_build_meta')
    if "meta" in config.script:
        build_meta.insert_meta(**config.script["meta"])

    # Ensure the engine exists and we can build
    ensure_engine(config, engine)
    click.secho('\nProject File Path: {}\nEngine Path: {}'.format(config.uproject_dir_path, config.UE4EnginePath))

    # Ensure the unreal header tool exists. It is important for all Unreal projects
    if not os.path.isfile(os.path.join(config.UE4EnginePath, 'Engine\\Binaries\\Win64\\UnrealHeaderTool.exe')):
        b = Build(config, build_name='UnrealHeaderTool')
        if not b.run():
            error_exit(b.error)

    # Build required engine tools
    clean_revert = config.clean
    if buildtype == "Package":
        config.clean = False  # Don't clean if packaging, waste of time
    for tool_name in config.build_engine_tools:
        b = Build(config, build_name=tool_name)
        if not b.run():
            error_exit(b.error)
    config.clean = clean_revert

    # If a specific set of steps is being requested, only build those
    if build != '':
        run_build_steps(config, build_meta, build, True)
    else:
        # Ensure engine is built
        if not config.editor_running:
            clean_revert = config.clean
            if buildtype == "Package":
                config.clean = False  # Don't clean if packaging, waste of time
            b = Build(config, build_name='UE4Editor')
            if not b.run():
                error_exit(b.error)
            config.clean = clean_revert
        else:
            print_warning('Skipping engine build because engine is running!')

        run_build_steps(config, build_meta, 'pre_build_steps')

        if buildtype == "Editor":
            if config.editor_running:
                print_warning('Cannot build the Editor while the editor is running!')
                click.pause()
                sys.exit(1)

            if 'game_editor_steps' in config.script:
                run_build_steps(config, build_meta, 'game_editor_steps')
            elif 'editor_steps' in config.script:
                run_build_steps(config, build_meta, 'editor_steps')
            else:
                b = Build(config, build_name='{}Editor'.format(config.uproject_name))
                if not b.run():
                    error_exit(b.error)

        elif buildtype == "Package":
            if 'package_steps' in config.script:
                run_build_steps(config, build_meta, 'package_steps')
            else:
                package = Package(config)
                if not package.run():
                    error_exit(package.error)

        run_build_steps(config, build_meta, 'post_build_steps')

    build_meta.save_meta()
    print_action('SUCCESS!')
    click.pause()
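The script file loaded above is plain JSON whose top-level keys the builder reads directly: meta, plus optional step lists such as pre_build_steps, game_editor_steps, editor_steps, package_steps, and post_build_steps. An illustrative minimal script (the key values here are invented for the example; real scripts also carry the project paths mentioned in the docstring):

{
    "meta": {"build_number": 1},
    "editor_steps": ["build_editor"],
    "package_steps": ["cook_and_package"]
}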
Example #17
    def __init__(self, config: ProjectConfig):
        self.port = config.get('USB_PORT')
        self.baud_rate = int(config.get('USB_BAUD_RATE'))

        self.conn = None
        self._connected = False
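Only the constructor is shown for this serial connection class. A connect method sketched with pySerial (an assumption based on the port and baud-rate fields; the library used by the original project is not shown):

import serial  # pySerial, assumed from the USB_PORT/USB_BAUD_RATE fields

def connect(self):
    # Would live on the class above: open the port and mark the connection.
    try:
        self.conn = serial.Serial(self.port, self.baud_rate, timeout=3)
        self._connected = True
        print(f'Connected to {self.port} at {self.baud_rate} baud')
    except serial.SerialException as e:
        print(f'Failed to open {self.port}: {e}')
        self._connected = False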
Example #18
                data = str(data.decode('utf-8')).strip()

                # wrap data in a JSON format
                if data[0].isnumeric():
                    data_dict = {'MDP15': 'SENSORS', 'SENSORS': data}
                elif data == 'MC':
                    data_dict = {'MDP15': 'MC'}
                else:
                    data_dict = {'MDP15': 'STATUS', 'STATUS': data}

                print(f'Received from Arduino: {data}')
                return self.format_data(data_dict)

        except Exception as e:
            print(f'Error with reading from {self.get_name()}: {e}')
            #print('Reconnecting...')
            #self.disconnect()
            raise ConnectionError

    def disconnect(self):
        if self.conn:
            self.conn.close()
            print('Terminating serial socket..')

        self._connected = False


if __name__ == '__main__':
    ar = ArduinoConn(ProjectConfig(USB_PORT='COM3'))
    ar.connect()
Example #19
# Put job list into queue
def create_jobs(servers):
    for s in servers:
        thread_queue.put(s)
    thread_queue.join()  # blocks until task_done is called


# Do next job that is in the queue
def work():
    s = thread_queue.get()
    s.start()


if __name__ == '__main__':
    server_list = []
    config = ProjectConfig(default=False)

    bt_server = ProducerConsumer(BluetoothConn(config))
    usb_server = ProducerConsumer(ArduinoConn(config))
    pc_server = ProducerConsumer(PcConn(config))
    cam_server = ProducerConsumer(PiHttpStream(config))

    pc_server.register([bt_server, usb_server])
    bt_server.register([pc_server, usb_server])
    usb_server.register([bt_server, pc_server])
    cam_server.register([bt_server, pc_server, usb_server])

    server_list.append(bt_server)
    server_list.append(usb_server)
    server_list.append(pc_server)
    server_list.append(cam_server)
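As written, nothing spawns threads to call work(), and work() never calls thread_queue.task_done(), so the join() inside create_jobs would block forever. A sketch of the missing wiring under those assumptions:

import threading

def worker_loop():
    # Hypothetical worker: take each server from the queue and start it.
    while True:
        s = thread_queue.get()
        s.start()
        thread_queue.task_done()  # lets create_jobs' join() return

# One daemon worker per server, then enqueue the jobs:
for _ in range(len(server_list)):
    t = threading.Thread(target=worker_loop, daemon=True)
    t.start()

create_jobs(server_list)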
Example #20
def main(info_only):

    # if action not in ['--submit', '--status', '--check']:
    #     raise Exception("action not supported.")


    pr = ProjectReader()
    dr = DatasetReader()

    # Get the list of datasets that are in the production database:
    datasets = pr.list_datasets()

    datasets = [ds for tupl in datasets for ds in tupl]

    for isotope in isotopes:
        element = isotope.split('-')[0]
        for region in regions:
            yml_name = '{element}/{region}/nexus_{element}_{region}.yml'.format(element=element, region=region)

            # Read in the yml file:
            pc = ProjectConfig(yml_name)
            stage = pc.stage(element)

            # print stage.output_dataset()

            # First, check if this project is in the database:
            if stage.output_dataset() in datasets:
                # Check the output of this dataset.

                # From the yml, get the number off jobs and the events per job:
                total_events_submitted = stage.total_output_events()
                total_events_produced  = dr.sum(
                    dataset=stage.output_dataset(),
                    target='nevents',
                    type=0)
                n_jobs = stage.n_jobs()

                # From the database figure out how many jobs succeeded,
                # and how many events were produced:
                n_jobs_succeeded = dr.get_n_successful_jobs(stage.output_dataset())

                # print "For dataset {}, {} of {} jobs completed".format(
                #     stage.output_dataset(),
                #     n_jobs_succeeded, n_jobs)
                # print "  {} of {} events passed the selection".format(
                #     total_events_produced,
                #     total_events_submitted)

                # If the number of jobs completed equals the number of jobs submitted,
                # it's done.

                if n_jobs_succeeded >= 0.95*n_jobs:
                    print bcolors.OKGREEN  + "{} - {} SUCCESS".format(element, region) + bcolors.ENDC
                    insertion_sql = '''
                        INSERT INTO next_new_bkg_summary(dataset, element, region, n_simulated, n_passed, events_per_job, n_jobs_submitted, n_jobs_succeeded)
                        VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                    '''
                    conn = connect()
                    curr = conn.cursor()
                    tupl = (stage.output_dataset(), element, region, int(total_events_submitted), int(total_events_produced), int(stage.events_per_job()), int(n_jobs), int(n_jobs_succeeded))
                    curr.execute(insertion_sql, tupl)
                    conn.commit()
                    conn.close()

                elif n_jobs_succeeded == 0:
                    print bcolors.WARNING  + "{} - {} RESUBMIT".format(element, region) + bcolors.ENDC
                    # clean and resubmit
                    if not info_only:
                        ph = ProjectHandler(yml_name, action='clean', stage=element)
                        ph.act()
                        ph = ProjectHandler(yml_name, action='submit', stage=element)
                        ph.act()
                else:
                    print bcolors.FAIL  + "{} - {} MAKEUP NEEDED".format(element, region) + bcolors.ENDC
                    # Doing makeup jobs, just report it:
            else:
                # Need to submit it for the first time.
                print bcolors.OKBLUE  + "{} - {} SUBMITTING".format(element, region) + bcolors.ENDC
                if not info_only:
                    ph = ProjectHandler(yml_name, action='submit', stage=element)
                    ph.act()
Example #21
def move_files_to_neutrino():

    pr = ProjectReader()
    dr = DatasetReader()

    remote_host = '*****@*****.**'
    local_top_directory  = '/n/holylfs02/LABS/guenette_lab/data/NEXT/NEXTNEW/MC/OtherForTransfer/'
    remote_top_directory = '/lustre/neu/data4/NEXT/NEXTNEW/MC/Other/NEXUS_NEXT_v1_03_01/'


    # We want to copy, for every project here (76 + Xenon, eventually)
    # The files, configurations, and logs.

    # The remote directory structure should be:
    # remote_top_directory/nexus/{element}/{region}/output
    # remote_top_directory/nexus/{element}/{region}/log
    # remote_top_directory/nexus/{element}/{region}/config

    # The config files, logs, and output all live in the same directory.  So, what this script does
    # is to generate a list of source/destination file pairs for transferring.  It creates symbolic
    # links to the local files in the right directory structure as needed on neutrino.

    # The way this is done is to generate a file that will be used for creating symlinks
    # A process is spawned to make the links
    # Finally, a job is submitted to do the rsync command.

    with open('transfer_protocol.txt', 'w') as _trnsf:

        for isotope in isotopes:
            element = isotope.split('-')[0]
            for region in regions:
                yml_name = '{element}/{region}/nexus_{element}_{region}.yml'.format(element=element, region=region)

                # Read in the yml file:
                pc = ProjectConfig(yml_name)
                stage = pc.stage(element)
                dataset = stage.output_dataset()
                output_dir = stage.output_directory()

                # Get the log files, config files, and output files
                config_match = '/*config.mac'
                init_match = '/*init.mac'

                log_match = '/*.log'

                output_file_list = dr.list_file_locations(dataset)
                for _file in output_file_list:
                    _file = _file[0]
                    base = os.path.basename(_file)
                    destination = "{top}/nexus/{element}/{region}/output/{base}".format(
                        top     = local_top_directory,
                        element = element,
                        region  = region,
                        base    = base
                    )
                    trnsf_str = "{}\t{}\n".format(_file, destination)
                    _trnsf.write(trnsf_str)

                    directory = os.path.dirname(_file)

                    # Get the config files:
                    init = glob.glob(directory + init_match)[0]
                    base = os.path.basename(init)
                    destination = "{top}/nexus/{element}/{region}/config/{base}".format(
                        top     = local_top_directory,
                        element = element,
                        region  = region,
                        base    = base
                    )
                    trnsf_str = "{}\t{}\n".format(init, destination)
                    _trnsf.write(trnsf_str)

                    cfg  = glob.glob(directory + config_match)[0]
                    base = os.path.basename(cfg)
                    destination = "{top}/nexus/{element}/{region}/config/{base}".format(
                        top     = local_top_directory,
                        element = element,
                        region  = region,
                        base    = base
                    )
                    trnsf_str = "{}\t{}\n".format(cfg, destination)
                    _trnsf.write(trnsf_str)

                    # Get the log files:
                    logs = glob.glob(directory + log_match)
                    for log in logs:
                        base = os.path.basename(log)
                        destination = "{top}/nexus/{element}/{region}/log/{base}".format(
                            top     = local_top_directory,
                            element = element,
                            region  = region,
                            base    = base
                        )
                        trnsf_str = "{}\t{}\n".format(log, destination)
                        _trnsf.write(trnsf_str)
                break

    print "Done making transfer list, creating symbolic links"

    with open('transfer_protocol.txt', 'r') as _trnsf:
        for line in _trnsf.readlines():
            original, destination = line.rstrip('\n').split('\t')

            destdir = os.path.dirname(destination)
            try:
                os.makedirs(destdir)
            except:
                pass
            try:
                os.symlink(original, destination)
            except:
                pass

    print "Beginning file transfer."

    with cd(local_top_directory):

        command = ['rsync', '-rvL', 'nexus', '[email protected]:/lustre/neu/data4/NEXT/NEXTNEW/MC/Other/NEXUS_NEXT_v1_03_01/']

        proc = subprocess.Popen(command,
                                stdout = subprocess.PIPE,
                                stderr = subprocess.PIPE,
                                env = dict(os.environ))

        retval = proc.poll()

        # loop until the command finishes running
        stdout = ''
        stderr = ''
        while retval is None:
            time.sleep(1.0)
            # while waiting, drain stdout and stderr to avoid clogging the pipes
            for line in iter(proc.stdout.readline, b''):
                stdout += line
            for line in iter(proc.stderr.readline, b''):
                stderr += line
            # update the return value
            retval = proc.poll()

        return_code = proc.returncode

        if return_code != 0:
            raise Exception("Failed")

        else:
            print stdout
Example #22
        except Exception as e:
            print(f'Error with reading from {self.get_name()}: {e}')
            #print('Reconnecting...')
            #self.disconnect()
            #raise ConnectionError

    def write(self, message):
        try:
            #message = str(message)
            #byte_msg: bytes = str.encode(message + '\n')
            json_str = json.dumps(message)
            byte_msg = bytes(json_str, encoding='utf-8')
            self.client.sendto(byte_msg, self.addr)
            print(f'Sent to PC: {message}')
        except Exception as e:
            print(f'Error with writing {message} to {self.get_name()}: {e}')
            #print('Reconnecting...')
            #self.disconnect()
            raise ConnectionError


if __name__ == '__main__':
    server = PcConn(ProjectConfig(default=False))
    server.connect()
    Robot_Position = '{"MDP15":"SENSORS","SENSORS":"0;0;0;0;0;0"}'  #to algo
    RP = json.loads(Robot_Position)
    while True:
        server.write(RP)
        server.read()
        time.sleep(2)
Example #23
def build_script(engine, script, configuration, buildtype, build, platform,
                 clean, automated, buildexplicit):
    """
    The Main call for build script execution.
    :param engine: The desired engine path, absolute or relative.
    :param script: The Project Script which defines the projects paths, build steps, and extra information.
    :param configuration: Build configuration, e.g. Shipping
    :param buildtype: Which type of build are you trying to create? Editor OR Package?
    :param build: Which build steps to execute?
    :param platform: Which platform to build for?
    :param clean: Causes all actions to consider cleaning up their workspaces before executing their action.
    :param automated: Configures the builder to recognize this build as being done by continuous integration and should
                      not manipulate the system environment.
    :param buildexplicit: Should the build system only build what is requested? This prevents convenience cases like
                          the package build building the editor before trying to package. By setting this to true, it is
                          expected that the user has set up the proper state before building.
    """
    # Fixup for old build type 'Game'.
    if buildtype == 'Game':
        buildtype = 'Editor'

    global is_automated
    if automated:
        is_automated = automated

    # Ensure Visual Studio is installed
    if get_visual_studio_version() not in [2015, 2017]:
        print_error('Cannot run build, valid visual studio install not found!')
        return False

    if not os.path.isfile(script):
        error_exit('No build script defined! Use the -s arg', not is_automated)

    with open(script, 'r') as fp:
        try:
            script_json = json.load(fp)
        except Exception as jsonError:
            error_exit('Build Script Syntax Error:\n{}'.format(jsonError),
                       not is_automated)
            return

    config = ProjectConfig(configuration, platform, False, clean, automated)
    if not config.load_configuration(script_json, engine, buildexplicit):
        error_exit('Failed to load configuration. See errors above.',
                   not config.automated)

    print_title('Unreal Project Builder')

    if config.automated:
        click.secho('\nAutomated flag set!')

    # Ensure the engine exists and we can build
    if not buildexplicit:
        ensure_engine(config, engine)
    click.secho('\nProject File Path: {}\nEngine Path: {}'.format(
        config.uproject_dir_path, config.UE4EnginePath))

    # Ensure the unreal header tool exists. It is important for all Unreal projects
    if not buildexplicit:
        if not os.path.isfile(
                os.path.join(config.UE4EnginePath,
                             'Engine\\Binaries\\Win64\\UnrealHeaderTool.exe')):
            b = Build(config, build_name='UnrealHeaderTool')
            if not b.run():
                error_exit(b.error, not config.automated)

    # Build required engine tools
    if config.should_build_engine_tools and not buildexplicit:
        clean_revert = config.clean
        if buildtype == "Package":
            config.clean = False  # Don't clean if packaging, waste of time

        b = Build(config, build_names=config.build_engine_tools)
        if not b.run():
            error_exit(b.error, not config.automated)

        config.clean = clean_revert

    # If a specific set of steps is being requested, only build those
    if build != '':
        steps = Buildsteps(config, steps_name=build)
        if not steps.run():
            error_exit(steps.error, not config.automated)
    else:
        if buildtype == "Editor":
            if config.editor_running:
                error_exit(
                    'Cannot build the Editor while the editor is running!',
                    not config.automated)

            if 'game_editor_steps' in config.script:
                steps = Buildsteps(config, steps_name='game_editor_steps')
                if not steps.run():
                    error_exit(steps.error, not config.automated)
            elif 'editor_steps' in config.script:
                steps = Buildsteps(config, steps_name='editor_steps')
                if not steps.run():
                    error_exit(steps.error, not config.automated)
            else:
                b = Build(config,
                          build_name='{}Editor'.format(config.uproject_name))
                if not b.run():
                    error_exit(b.error, not config.automated)

        elif buildtype == "Package":
            # We need to build the editor before we can run any cook commands. This seems important for blueprints
            # probably because it runs the engine and expects all of the native class RTTI to be up-to-date to be able
            # to compile the blueprints. Usually you would be starting a package build from the editor, so it makes
            # sense. Explicit builds ignore this however.
            if not buildexplicit:
                b = Build(config,
                          build_name='{}Editor'.format(config.uproject_name))
                if not b.run():
                    error_exit(b.error, not config.automated)

            if 'package_steps' in config.script:
                steps = Buildsteps(config, steps_name='package_steps')
                if not steps.run():
                    error_exit(steps.error, not config.automated)
            else:
                package = Package(config)
                if not package.run():
                    error_exit(package.error, not config.automated)

    print_action('SUCCESS!')
    if not config.automated:
        click.pause()
Example #24
def config(conffile):
    return ProjectConfig(CONFROOT.join(conffile).strpath)
Example #25
File: bt.py Project: MDP-15/Rasbpi
            #raise ConnectionError

    def write(self, message):
        try:
            json_str = json.dumps(message)
            byte_msg = bytes(json_str, encoding='utf-8')
            self.client.send(byte_msg)
            print(f'Sent to Android device: {byte_msg}')

        except Exception as e:
            print(f'Error with writing {message} to {self.get_name()}: {e}')
            #print('Reconnecting...')
            #self.disconnect()
            raise ConnectionError

    def disconnect(self):
        if self.conn:
            self.conn.close()
            print('Terminating server socket..')

        if self.client:
            self.client.close()
            print('Terminating client socket..')

        self._connected = False


if __name__ == '__main__':
    server = BluetoothConn(ProjectConfig())
    server.connect()