Example #1
def run_tests(parallel, api, args, coverage=True, mock_speedup=1):
    args = copy.copy(args)

    test_cfg.select_speedup(mock_speedup)
    if api is not None:
        api_name = api.upper()
        api_type = ApiType[api_name]
        args.extend(['-k', 'ApiType.' + api_name])
    else:
        # We run for all apis
        api_type = None

    if coverage:
        engine = tenjin.Engine()
        cov_rc_file_name = jp(here, '.coverage_rc_' + (api_type.name.lower() if api_type else 'all'))
        with open(cov_rc_file_name, 'w') as cov_rc_file:
            context = dict(api_type=api_type, top_dir=top_dir, major_version=major_version)
            cov_rc_file.write(engine.render(jp(here, "coverage_rc.tenjin"), context))
            args.extend(['--cov=' + top_dir, '--cov-report=term-missing', '--cov-config=' + cov_rc_file_name])

    try:
        if api_type != ApiType.MOCK:
            # Note: 'boxed' is required for the kill/abort_current test not to abort other tests
            args.append('--boxed')

        if parallel and api_type != ApiType.MOCK:
            args.extend(['-n', '16'])

        print('pytest.main', args)
        rc = pytest.main(args)
        if rc:
            raise Exception("pytest {args} failed with code {rc}".format(args=args, rc=rc))
    finally:
        if coverage:
            os.unlink(cov_rc_file_name)
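A minimal sketch of how this helper might be invoked; the API name, argument list and speedup factor below are illustrative assumptions, not taken from the project:

# Hypothetical call: run the mocked suite in parallel with coverage enabled.
run_tests(parallel=True, api='mock', args=['test/'], coverage=True, mock_speedup=100)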
Example #2
def main(args):
    print("Running tests, args:", args)
    if args and args != ['-v']:
        return pytest.main(['--capture=sys'] + args)

    engine = tenjin.Engine(cache=False)
    major_version = sys.version_info[0]
    minor_version = sys.version_info[1]
    # Note: This naming is duplicated in .travis.yml
    cov_rc_file_name = jp(_here, '.coverage_rc_' + str(os.environ.get('TRAVIS_PYTHON_VERSION', str(major_version) + '.' + str(minor_version))))
    with open(cov_rc_file_name, 'w') as cov_rc_file:
        cov_rc_file.write(engine.render(jp(_here, "coverage_rc.tenjin"), dict(
            major_version=major_version, minor_version=minor_version, type_check_supported=type_check.vcheck())))

    rc = pytest.main(['--capture=sys', '--cov=' + _here + '/..', '--cov-report=term-missing', '--cov-config=' + cov_rc_file_name] + (args if args == ['-v'] else []))

    print()
    try:
        del os.environ['PYTHONPATH']
    except KeyError:
        pass
    for env_name in 'prod', 'preprod', 'devlocal', 'devs', 'devi':
        demo_out = jp(_here, env_name + '.demo_out')
        print("Validating demo for env {env} - output in {out}".format(env=env_name, out=demo_out))
        osenv = {'PYTHONPATH': ':'.join(sys.path)}
        with open(demo_out, 'w') as outf:
            rc |= subprocess.check_call((sys.executable, _here + '/../demo/demo.py', '--env', env_name), env=osenv, stdout=outf)
    print()

    return rc
Example #3
def main(args):
    os.environ['MULTICONF_WARN_JSON_NESTING'] = 'true'
    
    print("Running tests", args)
    if args and args != ['-v']:
        return pytest.main(['--capture=sys'] + args)

    engine = tenjin.Engine()
    major_version = sys.version_info[0]
    cov_rc_file_name = jp(here, '.coverage_rc_' + str(major_version))
    with open(cov_rc_file_name, 'w') as cov_rc_file:
        cov_rc_file.write(engine.render(jp(here, "coverage_rc.tenjin"), dict(major_version=major_version)))

    rc = pytest.main(['--capture=sys', '--cov=' + here + '/..', '--cov-report=term-missing', '--cov-config=' + cov_rc_file_name] + (args if args == ['-v'] else []))

    print("Validating demo for all envs")
    try:
        del os.environ['PYTHONPATH']
    except KeyError:
        pass
    for env_name in 'prod', 'preprod', 'devlocal', 'devs', 'devi':
        print()
        osenv = {'PYTHONPATH': ':'.join(sys.path)}
        rc |= subprocess.call((sys.executable, here + '/../demo/demo.py', '--env', env_name), env=osenv)

    return rc
Example #4
def setup_links(projectPath, targetPath, sampleFn, subProjectIds) :
    # Write a file containing the samples being analyzed.
    sampleF = open(jp(targetPath, sampleFn), 'w')
    all_samples = {}
    for spDirName in subProjectIds :
        spPath = jp(projectPath, spDirName)
        print(spDirName)
        for sampleDirName in os.listdir(spPath) :
            print(sampleDirName)
            ## Assume that we always want the output from the latest run
            latestRun = sorted(os.listdir(jp(spPath, sampleDirName)))[-1]
            breakmerPath = jp(spPath, sampleDirName, latestRun, 'analysis/Breakmer_0.0.6')
            if os.path.exists(breakmerPath) :
                latestAnalysis = sorted(os.listdir(breakmerPath))[-1]
                breakmerPath = jp(breakmerPath, latestAnalysis, 'output')
                svsOutFs = glob(jp(breakmerPath, "*svs.out"))
                # Link the whole breakmer output directory into the target path
                cmd = 'ln -s ' + breakmerPath + ' ' + jp(targetPath, sampleDirName + '_breakmer_out')
                os.system(cmd)
                for svsF in svsOutFs :
                    cmd = 'ln -s ' + svsF + ' ' + targetPath
                    os.system(cmd)
            else :
                cmd = 'ln -s ' + jp(spPath, sampleDirName, latestRun, 'analysis') + ' ' + targetPath
                print(breakmerPath, 'does not exist')
            sampleF.write(spDirName + "\t" + sampleDirName + "\n")
    sampleF.close()
Example #5
def run_tests(parallel, api_type):
    start_msg("Using " + str(api_type))
    test_cfg.select_api(api_type)

    engine = tenjin.Engine()
    cov_rc_file_name = jp(here, '.coverage_rc_' + api_type.env_name().lower())
    with open(cov_rc_file_name, 'w') as cov_rc_file:
        cov_rc_file.write(engine.render(jp(here, "coverage_rc.tenjin"), dict(api_type=api_type)))
            
    cmd = ['py.test', '--capture=sys', '--cov=' + here + '/..', '--cov-report=term-missing', '--cov-config=' + cov_rc_file_name, '--instafail', '--ff']
    if not parallel:
        return subprocess.check_call(cmd)
    subprocess.check_call(cmd + ['-n', '16'])
    os.unlink(cov_rc_file_name)
Example #6
def test_abort(capsys):
    with api_select.api(__file__, login=True) as api:
        if api.api_type == ApiType.SCRIPT:
            return

        api.flow_job()
        api.job('quick', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
        api.job('wait10_abort', exec_time=10, max_fails=0, expect_invocations=1, expect_order=1, final_result='ABORTED')
        api.job('wait1_fail', exec_time=1, max_fails=1, expect_invocations=1, expect_order=1)
        
        if api.api_type != ApiType.MOCK:
            subprocess32.Popen([sys.executable, jp(here, "abort_job.py"), __file__, 'abort', 'wait10_abort'])
        
        with raises(FailedChildJobsException) as exinfo:
            with parallel(api, timeout=40, job_name_prefix=api.job_name_prefix, report_interval=3) as ctrl:
                ctrl.invoke('quick')
                ctrl.invoke('wait10_abort')
                ctrl.invoke('wait1_fail')

        assert "wait10_abort" in exinfo.value.message
        assert "wait1_fail" in exinfo.value.message

        sout, _ = capsys.readouterr()
        assert_lines_in(
            sout,
            re.compile("^ABORTED: 'jenkinsflow_test__abort__wait10_abort' - build: .*/jenkinsflow_test__abort__wait10_abort.* after:"),
        )
Example #7
def s3_signin(**auth):
    """
    Convenience function for validating keys and providing
    access to bucket shares.
    
    Returns S3Object
    """
    home, cfg  = os.getenv('HOME'), ConfigParser()
    cred, user = jp(home, '.aws', 'credentials'), os.getlogin()
    token_ids = 'aws_access_key_id', 'aws_secret_access_key'
    
    cfg.read(cred)

    valid_user = user in cfg.sections()
    account    = itertools.repeat(user if valid_user else cfg.sections()[0], 2)

    # Authentication provided as function input overrides config file behavior
    valid_auth = all(i in auth for i in token_ids)
    
    token      = zip(account, token_ids) if not valid_auth else [token_ids]
    store      = cfg if not auth else auth
    
    user_id    = dict(zip(token_ids, map(lambda t: store.get(*t), token)))
    
    if not all(user_id.values()):
        raise ValueError('No valid authorization found')

    return boto.connect_s3(**user_id)
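A hedged usage sketch, assuming a ~/.aws/credentials file with a section named after the current login user:

# Assumed usage: credentials are read from ~/.aws/credentials, no explicit keys passed.
conn = s3_signin()
print([bucket.name for bucket in conn.get_all_buckets()])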
Example #8
def flow_graph_dir(flow_name):
    """
    Put the generated graph in the workspace root if running from Jenkins.
    If running from the command line, put it under config.flow_graph_root_dir/flow_name.
    return: dir-name
    """
    return '.' if os.environ.get('JOB_NAME') else jp(flow_graph_root_dir, flow_name)
Example #9
    def wait_for_jobs(self):
        if not self.jobs:
            print("WARNING: Empty toplevel flow", self, "nothing to do.")
            return

        if self.json_dir:
            self.json(jp(self.json_dir, 'flow_graph.json'), self.json_indent)

        # Wait for jobs to finish
        print()
        print("--- Getting initial job status ---")
        self.api.poll()
        self._prepare_first()

        if self.json_file:
            self.json(self.json_file, self.json_indent)

        # pylint: disable=attribute-defined-outside-init
        self.start_time = hyperspeed_time()
        self.last_report_time = self.start_time

        print()
        print("--- Starting flow ---")
        sleep_time = min(self.poll_interval, self.report_interval) / _hyperspeed_speedup
        while self.checking_status == Checking.MUST_CHECK:
            self._check(None)
            time.sleep(sleep_time)

        if self.result == BuildResult.UNSTABLE:
            set_build_result(self.username, self.password, 'unstable', direct_url=self.top_flow.direct_url)
Example #10
def format_results(sampleListFn, sampleType, filterList):
    resD = {}
    for line in open(sampleListFn, 'rU'):
        line = line.strip()
        subProjId, sampleId = line.split("\t")
        outFs = glob(jp(resultsDir, sampleId + '*svs.out'))
        resD[sampleId] = {'recs': [], 'values': []}
        recIter = 1
        resD[sampleId]['values'] = [subProjId, sampleId]
        for outF in outFs:
            recIter = process_res(subProjId, sampleId, outF, resD[sampleId]['recs'], recIter)

    fbed = open('brkpts.%s.bed' % sampleType, 'w')
    for sid in resD:
        for rec in resD[sid]['recs']:
            rec.write_brkpt_bed(fbed)
    fbed.close()

    annoDictFs = run_bedtools('brkpts.%s.bed' % sampleType)
    annotate_recs(resD, annoDictFs)

    if sampleType == "normal":
        for sid in resD:
            for rec in resD[sid]['recs']:
                descriptions = rec.svDescription
                if rec.type.find("rearr") > -1:
                    descriptions = rec.format_trl_str()
                filterList.filters.append(filter(rec.targetName, rec.type, descriptions))
    return resD
Example #11
    def __init__(self, deck_file):
        with open('data/origins.json', 'r') as f:
            card_data = json.load(f)
        with open(jp('data', 'decks', deck_file + '.json'), 'r') as f:
            deck_data = json.load(f)

        cards = []

        for card_name, num in deck_data['cards'].items():
            for card in card_data['cards']:
                if card['name'] == card_name:
                    if 'Creature' in card['types']:
                        for i in range(num):
                            cards.append(
                                CreatureCard(
                                    card.get('name'), card.get('types'),
                                    card.get('colors'), card.get('manaCost'),
                                    card.get('power'), card.get('toughness'),
                                    card.get('text'), card.get('cmc')
                                )
                            )
                    else:
                        for i in range(num):
                            cards.append(
                                Card(
                                    card.get('name'), card.get('types'),
                                    card.get('colors'), card.get('manaCost'),
                                    card.get('text'), card.get('cmc')
                                )
                            )

        self.cards = cards
        self.size = len(cards)
Example #12
    def __init__(self, job, proc, build_number):
        self.job = job
        self.proc = proc
        self.build_number = build_number
        self.queued_why = None

        # Export some of the same variables that Jenkins does
        os.environ.update(dict(
            BUILD_NUMBER=repr(self.build_number),
            BUILD_ID=datetime.datetime.isoformat(datetime.datetime.utcnow()),
            BUILD_DISPLAY_NAME='#' + repr(self.build_number),
            JOB_NAME=self.job.name,
            BUILD_TAG='jenkinsflow-' + self.job.name + '-' + repr(self.build_number),
            EXECUTOR_NUMBER=repr(self.proc.pid),
            NODE_NAME='master',
            NODE_LABELS='',
            WORKSPACE=self.job.workspace,
            JENKINS_HOME=self.job.jenkins.public_uri,
            JENKINS_URL=self.job.jenkins.public_uri,
            HUDSON_URL=self.job.jenkins.public_uri,
            BUILD_URL=jp(self.job.public_uri, repr(self.build_number)),
            JOB_URL=self.job.public_uri,
        ))

        self.proc.start()
Example #13
def test_json_unchecked_only_in_flows():
    with mock_api.api(__file__) as api:
        flow_name = api.flow_job()
        api.job('j1_unchecked', exec_time=40, max_fails=0, expect_invocations=1, invocation_delay=0.0000000000001, expect_order=None, unknown_result=True)
        api.job('j2_unchecked', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=None)
        api.job('j3_unchecked', exec_time=40, max_fails=0, expect_invocations=1, invocation_delay=0.0000000000001, expect_order=None, unknown_result=True)
        api.job('j4_unchecked', exec_time=40, max_fails=0, expect_invocations=1, invocation_delay=0.0000000000001, expect_order=None, unknown_result=True)
        api.job('j5_unchecked', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=None)
        api.job('j6', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
        api.job('j7', exec_time=5, max_fails=0, expect_invocations=1, expect_order=2)

        json_dir = flow_graph_dir(flow_name)
        if not os.path.exists(json_dir):
            os.makedirs(json_dir)

        with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, report_interval=1, json_dir=json_dir) as ctrl1:
            ctrl1.invoke_unchecked('j1_unchecked')

            with ctrl1.parallel(timeout=40, report_interval=3) as ctrl2:
                with ctrl2.serial(timeout=40, report_interval=3) as ctrl3a:
                    ctrl3a.invoke_unchecked('j2_unchecked')
                    ctrl3a.invoke_unchecked('j3_unchecked')

                with ctrl2.parallel(timeout=40, report_interval=3) as ctrl3b:
                    ctrl3b.invoke_unchecked('j4_unchecked')
                    ctrl3b.invoke_unchecked('j5_unchecked')

            ctrl1.invoke('j6')
            ctrl1.invoke('j7')

        # Test default compact json
        with open(ctrl1.json_file) as got_jf, open(jp(here, "json_test_unchecked_compact.json")) as expected_jf:
            _assert_json(got_jf.read().strip(), expected_jf.read().strip())
Example #14
def test_json_strip_prefix():
    with mock_api.api(__file__) as api:
        flow_name = api.flow_job()
        api.job('j1', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
        api.job('j2', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=2)
        api.job('j3', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=3)
        api.job('j4', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=3)
        api.job('j5', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=3)
        api.job('j6', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=3)
        api.job('j7_unchecked', exec_time=40, max_fails=0, expect_invocations=1, invocation_delay=0.0000000000001, expect_order=None, unknown_result=True)
        api.job('j8_unchecked', exec_time=40, max_fails=0, expect_invocations=1, invocation_delay=0.0000000000001, expect_order=None, unknown_result=True)
        api.job('j9', exec_time=5, max_fails=0, expect_invocations=1, expect_order=4)

        ctrl1 = _flow(api, True, flow_graph_dir(flow_name))

        # Test pretty printing
        json_file = jp(flow_graph_dir(flow_name), "pretty.json")
        ctrl1.json(json_file, indent=4)
        with open(json_file) as jf:
            _assert_json(jf.read().strip(), _pretty_json)

        # Test default compact json
        with open(ctrl1.json_file) as jf:
            _assert_json(jf.read().strip(), _compact_json)

        # Test return json
        json = ctrl1.json(None)
        _assert_json(json, _compact_json)
Example #15
    def __enter__(self):
        # pylint: disable=attribute-defined-outside-init
        self._pre_work_dir = os.getcwd()
        self._work_dir = jp(test_tmp_dir, self.job_name_prefix)
        _mkdir(self._work_dir)
        os.chdir(self._work_dir)
        return self
Example #16
    def invoke(self, securitytoken, build_params, cause, description):
        _mkdir(self.jenkins.log_dir)
        _mkdir(self.workspace)
        build_number = (self.build_num or 0) + 1
        self.build_num = build_number

        fixed_args = [self.name, self.jenkins.job_prefix_filter, self.jenkins.username, self.jenkins.password, securitytoken, cause]
        fixed_args.append(build_params if build_params else {})

        # Export some of the same variables that Jenkins does
        extra_env = dict(
            BUILD_NUMBER=repr(build_number),
            BUILD_ID=datetime.datetime.isoformat(datetime.datetime.utcnow()),
            BUILD_DISPLAY_NAME='#' + repr(build_number),
            JOB_NAME=self.name,
            BUILD_TAG='jenkinsflow-' + self.name + '-' + repr(build_number),
            NODE_NAME='master',
            NODE_LABELS='',
            WORKSPACE=self.workspace,
            JENKINS_HOME=self.jenkins.public_uri,
            JENKINS_URL=self.jenkins.public_uri,
            HUDSON_URL=self.jenkins.public_uri,
            BUILD_URL=jp(self.public_uri, repr(build_number)),
            JOB_URL=self.public_uri,
        )

        proc = LoggingProcess(target=self.func, output_file_name=self.log_file, workspace=self.workspace, name=self.name, args=fixed_args, env=extra_env)
        self.build = self.jenkins.invocation_class(self, proc, build_number)
        if description:
            self.jenkins.set_build_description(self.name, build_number, description, replace=True, separator='')
        self._invocations.append(self.build)
        return self.build
Example #17
    def flow_job(self, name=None, params=None):
        """
        Creates a flow job
        For running demo/test flow script as jenkins job
        Requires jenkinsflow to be copied to 'pseudo_install_dir' and all jobs to be loaded beforehand (e.g. test.py has been run)
        Returns job name
        """
        name = '0flow_' + name if name else '0flow'
        job_name = (self.job_name_prefix or '') + name
        # TODO Handle script api
        if self.api_type == ApiType.SCRIPT:
            return job_name

        #  Note: Use -B to avoid permission problems with .pyc files created from commandline test
        if self.func_name:
            script = "export PYTHONPATH=" + test_tmp_dir + "\n"
            script += test_cfg.skip_job_load_sh_export_str() + "\n"
            # script += "export " + ApiType.JENKINS.env_name() + "=true\n"  # pylint: disable=no-member
            # Supply dummy args for the py.test fixtures
            dummy_args = ','.join(['0' for _ in range(self.func_num_params)])
            script += sys.executable + " -Bc "import sys; from jenkinsflow.test." + self.file_name.replace('.py', '') + " import *; sys.exit(test_" + self.func_name + "(" + dummy_args + "))""
        else:
            script = sys.executable + " -B " + jp(pseudo_install_dir, 'demo', self.file_name)
        self._jenkins_job(job_name, exec_time=0.5, params=params, script=script, print_env=False, create_job=None, always_load=False,
                          num_builds_to_keep=4, final_result_use_cli=False, set_build_descriptions=())
        return job_name
Example #18
def main():
    args = args_parser().parse_args()
    print("Creating temporary test installation in", repr(config.pseudo_install_dir), "to make files available to Jenkins.")
    install_script = jp(here, 'tmp_install.sh')
    rc = subprocess.call([install_script])
    if rc:
        print("Failed test installation to. Install script is:", repr(install_script), file=sys.stderr)
        print("Warning: Some tests will fail!", file=sys.stderr)

    print("\nRunning tests")
    try:
        if False or args.pytest_args or args.files:
            extra_args = args.pytest_args.split(' ') + args.files if args.pytest_args else args.files
            subprocess.check_call(['py.test', '--capture=sys', '--instafail'] + extra_args)
            test_cfg.unmock()
            sys.exit(subprocess.call(['py.test', '--capture=sys', '--instafail'] + extra_args))

        run_tests(False, here + '/.coverage_mocked_rc')
        validate_all_demos()

        test_cfg.unmock()
        parallel = test_cfg.skip_job_load() | test_cfg.skip_job_delete()
        if test_cfg.use_jenkinsapi():
            print("Using jenkinsapi_wrapper")
            run_tests(parallel, here + '/.coverage_jenkinsapi_rc')
        else:
            print("Using specialized_api")
            run_tests(parallel, here + '/.coverage_real_rc')
        validate_all_demos(execute_script=True)

        print("\nTesting setup.py")
        user = getpass.getuser()
        install_prefix = '/tmp/' + user
        tmp_packages_dir = install_prefix + '/lib/python2.7/site-packages'
        os.environ['PYTHONPATH'] = tmp_packages_dir
        if os.path.exists(tmp_packages_dir):
            shutil.rmtree(tmp_packages_dir)
        os.makedirs(tmp_packages_dir)
        subprocess.check_call([sys.executable, jp(here, '../setup.py'), 'install', '--prefix', install_prefix])
        shutil.rmtree(jp(here, '../build'))
    except Exception as ex:
        print('*** ERROR: There were errors! Check output! ***', repr(ex), file=sys.stderr)
        raise

    sys.exit(rc)
Example #19
def main():
    pytest_args, files = args_parser()

    print("Creating temporary test installation in", repr(config.pseudo_install_dir), "to make files available to Jenkins.")
    install_script = jp(here, 'tmp_install.sh')
    rc = subprocess.call([install_script])
    if rc:
        print("Failed test installation to. Install script is:", repr(install_script), file=sys.stderr)
        print("Warning: Some tests will fail!", file=sys.stderr)

    print("\nRunning tests")
    try:
        if False or pytest_args or files:
            extra_args = pytest_args.split(' ') + files if pytest_args else files
            subprocess.check_call(['py.test', '--capture=sys', '--instafail'] + extra_args)
            test_cfg.unmock()
            sys.exit(subprocess.call(['py.test', '--capture=sys', '--instafail'] + extra_args))

        run_tests(False, ApiType.MOCK)
        test_cfg.unmock()

        parallel = test_cfg.skip_job_load() | test_cfg.skip_job_delete()
        # TODO run both, use extra job prefix
        if not test_cfg.selected_api() == ApiType.JENKINSAPI:
            run_tests(parallel, ApiType.SPECIALIZED)
        else:
            run_tests(parallel, ApiType.JENKINSAPI)
        run_tests(parallel, ApiType.SCRIPT)

        start_msg("Testing setup.py")
        user = getpass.getuser()
        install_prefix = '/tmp/' + user
        tmp_packages_dir = install_prefix + '/lib/python2.7/site-packages'
        os.environ['PYTHONPATH'] = tmp_packages_dir
        if os.path.exists(tmp_packages_dir):
            shutil.rmtree(tmp_packages_dir)
        os.makedirs(tmp_packages_dir)
        subprocess.check_call([sys.executable, jp(here, '../setup.py'), 'install', '--prefix', install_prefix])
        shutil.rmtree(jp(here, '../build'))
    except Exception as ex:
        print('*** ERROR: There were errors! Check output! ***', repr(ex), file=sys.stderr)
        raise

    sys.exit(rc)
Example #20
    def include_directory(self, package_path, patterns_list, package_root=""):
        if not package_path or package_path.strip() == "":
            raise ValueError("Missing argument package_path.")

        if not patterns_list:
            raise ValueError("Missing argument patterns_list.")

        package_name = package_path.replace(PATH_SEPARATOR, '.')
        self._manifest_include_directory(package_path, patterns_list)

        package_full_path = jp(package_root, package_path)

        for root, dirnames, filenames in os.walk(package_full_path):
            filenames = list(fnmatch.filter(filenames, pattern) for pattern in patterns_list)

            for filename in itertools.chain.from_iterable(filenames):
                full_path = jp(root, filename)
                relative_path = full_path.replace(package_full_path, '', 1).lstrip(PATH_SEPARATOR)
                self._package_data.setdefault(package_name, []).append(relative_path)
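A short usage sketch, assuming `collector` is an instance of the class this method belongs to; the directory, patterns and package root are illustrative:

# Assumed usage: collect all JSON and text resources below the package directory.
collector.include_directory('spam/resources', ['*.json', '*.txt'], package_root='src/main/python')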
Example #21
def main():
    print("Creating temporary test installation in", repr(config.pseudo_install_dir), "to make files available to Jenkins.")
    install_script = jp(here, 'tmp_install.sh')
    rc = subprocess.call([install_script])
    if rc:
        print("Failed test installation to. Install script is:", repr(install_script), file=sys.stderr)
        print("Warning: Some tests will fail!", file=sys.stderr)

    print("\nRunning tests")
    if len(sys.argv) > 1:
        sys.exit(subprocess.call(['py.test', '--capture=sys', '--instafail'] + sys.argv[1:]))
    else:
        pre_delete_jobs = os.environ.get('JENKINSFLOW_SKIP_JOB_DELETE') != 'true'
        if is_mocked or pre_delete_jobs:
            rc = subprocess.call(('py.test', '--capture=sys', '--cov=' + here + '/..', '--cov-report=term-missing', '--instafail', '--ff'))
        else:
            rc = subprocess.call(('py.test', '--capture=sys', '--cov=' + here + '/..', '--cov-report=term-missing', '--instafail', '--ff', '-n', '8'))

    print("\nValidating demos")
    for demo in basic, hide_password, prefix:
        run_demo(demo)

    print("\nValidating demos with failing jobs")
    for demo in (errors,):
        try:
            run_demo(demo)
        except JobControlFailException as ex:
            print("Ok, got exception:", ex)
        else:
            raise Exception("Expected exception")

    print("\nTesting setup.py")
    user = getpass.getuser()
    install_prefix = '/tmp/' + user
    tmp_packages_dir = install_prefix + '/lib/python2.7/site-packages'
    os.environ['PYTHONPATH'] = tmp_packages_dir
    if not os.path.exists(tmp_packages_dir):
        os.makedirs(tmp_packages_dir)
    subprocess.check_call(['python', jp(here, '../setup.py'), 'install', '--prefix', install_prefix])

    if rc:
        print('*** ERROR: There were errors! Check output! ***', file=sys.stderr)
    sys.exit(rc)
Example #22
def run_demo(demo):
    print("\n\n")
    print("==== Demo:", demo.__name__, "====")
    job_load_module_name = demo.__name__ + '_jobs'
    job_load = imp.load_source(job_load_module_name, jp(here, '../demo/jobs', job_load_module_name + '.py'))
    print("-- loading jobs --")
    api = job_load.create_jobs()
    print()
    print("-- running jobs --")
    demo.main(api)
    api.test_results()
Example #23
    def toplevel_init(self, jenkins_api, securitytoken, username, password, top_level_job_name_prefix, poll_interval, direct_url, require_idle,
                      json_dir, json_indent, json_strip_top_level_prefix, params_display_order, just_dump, kill_all, description):
        self._start_msg()
        # pylint: disable=attribute-defined-outside-init
        # Note: Special handling in top level flow, these atributes will be modified in proper flow init
        self.parent_flow = self
        self.top_flow = self
        self.job_name_prefix = ''
        self.total_max_tries = 1
        self.nesting_level = -1
        self.current_nesting_level = -1
        self.report_interval = _default_report_interval
        self.secret_params_re = _default_secret_params_re
        self.allow_missing_jobs = None
        self.next_node_id = 0
        self.just_dump = just_dump

        self.kill = KillType.ALL if kill_all else KillType.NONE

        jenkins_job_name = os.environ.get('JOB_NAME')
        if jenkins_job_name:
            self.cause = "By flow job " + repr(jenkins_job_name) + ' #' +  os.environ.get('BUILD_NUMBER', ' None')
        else:
            import getpass
            user = getpass.getuser()
            self.cause = "By flow script, user: "******"\nGot SIGTERM: Killing all builds belonging to current flow")
            self.kill = KillType.CURRENT
            if self._can_raise_kill:
                raise Killed()
        signal.signal(signal.SIGTERM, set_kill)

        # Allow test framework to set securitytoken, so that we won't have to litter all the testcases with it
        return self.securitytoken or jenkins_api.securitytoken if hasattr(jenkins_api, 'securitytoken') else None
Example #24
def test_set_build_result_call_script_help(capfd):
    # Invoke this in a subprocess to ensure that calling the script works
    # This will not give coverage as it is not traced through the subprocess call
    rc = subprocess.call([sys.executable, jp(here, '../cli/cli.py'), 'set_build_result', '--help'])
    assert rc == 0

    sout, _ = capfd.readouterr()
    assert '--result' in sout
    assert '--direct-url' in sout
    assert '--username' in sout
    assert '--password' in sout
    assert '--java' in sout
Example #25
def use_bldsup(build_support_dir="bldsup"):
    """Specify a local build support directory for build specific extensions.

    use_plugin(name) and import will look for python modules in BUILD_SUPPORT_DIR.

    WARNING: The BUILD_SUPPORT_DIR must exist and must have an __init__.py file in it.
    """
    assert isdir(build_support_dir), "use_bldsup('{0}'): The {0} directory must exist!".format(
        build_support_dir)
    init_file = jp(build_support_dir, "__init__.py")
    assert isfile(init_file), "use_bldsup('{0}'): The {1} file must exist!".format(build_support_dir, init_file)
    sys.path.insert(0, build_support_dir)
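A brief usage sketch based on the docstring; the helper module imported from the build support directory is a hypothetical name:

use_bldsup()            # defaults to "bldsup", which must contain an __init__.py
import buildhelpers     # hypothetical module living in bldsup/buildhelpers.py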
Example #26
    def __init__(self, jenkins, name, script_file, workspace, func):
        self.jenkins = jenkins
        self.name = name

        self.build = None
        self.public_uri = self.baseurl = script_file
        self.workspace = workspace
        self.func = func
        self.log_file = jp(self.jenkins.log_dir, self.name + '.log')
        self.build_num = None
        self._invocations = []
        self.queued_why = None
        self.old_build_number = None
Example #27
    def set_build_description(self, job_name, build_number, description, replace=False, separator='\n'):
        """Utility to set/append build description. :py:obj:`description` will be written to a file in the workspace.
        Args
            job_name (str)
            build_number (int)
            description (str): The description to set on the build
            replace (bool):    If True, replace any existing description, otherwise append
            separator (str):   A separator to insert between any existing description and the new :py:obj:`description` when appending.
        """
        workspace = self._workspace(job_name)
        mode = 'w' if replace else 'a'
        with open(jp(workspace, 'description.txt'), mode) as ff:
            ff.write(description)
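A hedged usage sketch, assuming `jenkins` is an instance of the script-API class providing this method; the job name, build number and text are placeholders:

# Assumed usage: append a line to the description stored in the job workspace.
jenkins.set_build_description('my_job', 42, 'artifacts uploaded', replace=False, separator='\n')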
Example #28
    def include_file(self, package_name, filename):
        if not package_name or package_name.strip() == "":
            raise ValueError("Missing argument package name.")

        if not filename or filename.strip() == "":
            raise ValueError("Missing argument filename.")

        full_filename = jp(package_name, filename)
        self._manifest_include(full_filename)

        if package_name not in self._package_data:
            self._package_data[package_name] = [filename]
            return
        self._package_data[package_name].append(filename)
Example #29
    def include_file(self, package_name, filename):
        if not package_name or package_name.strip() == "":
            raise ValueError("Missing argument package name.")

        if not filename or filename.strip() == "":
            raise ValueError("Missing argument filename.")

        full_filename = jp(package_name, filename)
        self._manifest_include(full_filename)

        if package_name not in self._package_data:
            self._package_data[package_name] = [filename]
            return
        self._package_data[package_name].append(filename)
Example #30
def run_all(nodist=False, nostream=False):
    success = 0
    n = 0
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    for (dirpath, dirnames, filenames) in os.walk(exdir):
        for script in filenames:
            if any(script.endswith(x) for x in ['spmd.py', 'streaming.py', 'stream.py', 'batch.py']):
                n += 1
                logfn = jp(logdir, script.replace('.py', '.res'))
                with open(logfn, 'w') as logfile:
                    print('\n##### ' + jp(dirpath, script))
                    execute_string = get_exe_cmd(jp(dirpath, script), nodist, nostream)
                    if execute_string:
                        os.chdir(dirpath)
                        proc = subprocess.Popen(execute_string if IS_WIN else ['/bin/bash', '-c', execute_string],
                                                stdout=subprocess.PIPE,
                                                stderr=subprocess.STDOUT,
                                                shell=False)
                        out = proc.communicate()[0]
                        logfile.write(out.decode('ascii'))
                        if proc.returncode:
                            print(out)
                            print(strftime("%H:%M:%S", gmtime()) + '\tFAILED\t' + script + '\twith errno\t' + str(proc.returncode))
                        else:
                            success += 1
                            print(strftime("%H:%M:%S", gmtime()) + '\tPASSED\t' + script)
                    else:
                        success += 1
                        print(strftime("%H:%M:%S", gmtime()) + '\tSKIPPED\t' + script)

    if success != n:
        print('{}/{} examples passed/skipped, {} failed'.format(success, n, n - success))
        print('Error(s) occurred. Logs can be found in ' + logdir)
        return 4711
    print('{}/{} examples passed/skipped'.format(success, n))
    return 0
Example #31
def cli(time, profile, heap_check):
    global hp

    if time:
        def _test(name, ff, test, setup, repeat, number):
            times = sorted(timeit.repeat(test, setup=setup, repeat=repeat, number=number))
            times = ["{:.4f}".format(tt) for tt in times]
            print(name, times)
            print(name, times, file=ff)

        tfile = jp(here, "times.txt")
        if os.path.exists(tfile):
            with open(tfile) as ff:
                print('previous:')
                print(ff.read())
            print('new:')

        with open(tfile, 'w') as ff:
            _test("envs", ff, "envs_setup()", setup="from __main__ import envs_setup", repeat=10, number=20000)
            _test("load - lazy", ff, "config(validate_properties=False, lazy_load=True); from __main__ import conf; conf(prod)",
                  setup="from __main__ import config, conf, prod", repeat=10, number=10)
            _test("use_validate_lazy", ff, "use()", setup="from __main__ import use", repeat=1, number=1)
            _test("load - no @props", ff, "config(validate_properties=False)", setup="from __main__ import config", repeat=10, number=10)
            _test("load - @props", ff, "config(validate_properties=True)", setup="from __main__ import config", repeat=10, number=10)
            _test("use", ff, "use()", setup="from __main__ import use", repeat=10, number=300)

    if sys.version_info.major < 3 and heap_check:
        from guppy import hpy
        hp = hpy()
        envs_setup()
        config()
        use()

    if profile:
        cProfile.run("envs_setup()", jp(here, "envs_setup.profile"))
        cProfile.run("config()", jp(here, "config.profile"))
        cProfile.run("use()", jp(here, "use.profile"))
Example #32
def register_images(moving: np.ndarray,
                    fixed: np.ndarray,
                    transform_type: str = 'a',
                    n_threads: int = 1) -> np.ndarray:
    """
    Apply RegistrationSynQuick to the input images.

    Parameters
    ----------
    moving: np.ndarray
    fixed: np.ndarray
    transform_type: str, optional
         |  t:  translation
         |  r:  rigid
         |  a:  rigid + affine (default)
         |  s:  rigid + affine + deformable syn
         |  sr: rigid + deformable syn
         |  b:  rigid + affine + deformable b-spline syn
         |  br: rigid + deformable b-spline syn
    n_threads: int, optional
        the number of threads used to apply the registration
    """
    with tempfile.TemporaryDirectory() as tempdir:
        template_path = jp(tempdir, 'template.nii.gz')
        moving_path = jp(tempdir, 'moving.nii.gz')
        nib.save(nib.Nifti1Image(fixed, np.eye(4)), template_path)
        nib.save(nib.Nifti1Image(moving, np.eye(4)), moving_path)

        reg = RegistrationSynQuick()
        reg.inputs.fixed_image = template_path
        reg.inputs.moving_image = moving_path
        reg.inputs.num_threads = n_threads
        reg.inputs.transform_type = transform_type
        reg.inputs.output_prefix = jp(tempdir, 'transform')
        reg.run()

        return nib.load(jp(tempdir, 'transformWarped.nii.gz')).get_data()
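A minimal usage sketch; the synthetic volumes are placeholders, and running it requires nipype plus an ANTs installation:

import numpy as np

# Assumed usage: rigid + affine registration of two random volumes.
moving = np.random.rand(32, 32, 32).astype(np.float32)
fixed = np.random.rand(32, 32, 32).astype(np.float32)
warped = register_images(moving, fixed, transform_type='a', n_threads=4)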
Example #33
def run_tests(parallel, api_types, args, coverage=True, mock_speedup=1):
    args = copy.copy(args)

    test_cfg.select_speedup(mock_speedup)

    if coverage:
        engine = tenjin.Engine()

        if len(api_types) == 3:
            fail_under = 95
        elif ApiType.JENKINS in api_types:
            fail_under = 94
        elif ApiType.MOCK in api_types and ApiType.SCRIPT in api_types:
            fail_under = 90
        elif ApiType.MOCK in api_types:
            fail_under = 88
        else:
            fail_under = 86

        # Note: cov_rc_file_name hardcoded in .travis.yml
        cov_rc_file_name = jp(here, '.coverage_rc_' + '_'.join(api_type.name.lower() for api_type in api_types))
        with open(cov_rc_file_name, 'w') as cov_rc_file:
            context = dict(api_types=api_types, top_dir=top_dir, major_version=major_version, fail_under=fail_under)
            cov_rc_file.write(engine.render(jp(here, "coverage_rc.tenjin"), context))
            args.extend(['--cov=' + top_dir, '--cov-report=term-missing', '--cov-config=' + cov_rc_file_name])

    if api_types != [ApiType.MOCK]:
        # Note: 'boxed' is required for the kill/abort_current test not to abort other tests
        args.append('--boxed')

        if parallel:
            args.extend(['-n', '16'])

    print('pytest.main', args)
    rc = pytest.main(args)
    if rc:
        raise Exception("pytest {args} failed with code {rc}".format(args=args, rc=rc))
Example #34
def mtophase(ncycles=0,
             pulse_l=0,
             tmin=0,
             tmax=0):
    """
    Run mtophase.exe and return the conversion factor.
    :param ncycles: number of cycles of injected square wave (with 50% duty cycle)
    :param pulse_l: pulse length (in sec)
    :param tmin: t_min of chargeability time window (in sec)
    :param tmax: t_max of chargeability time window (in sec)
    :return: m2p factor
    """

    mpath = jp(os.path.dirname(os.path.abspath(__file__)), 'ip')

    if not os.path.exists(mpath):
        warnings.warn(mpath + ' folder not found')

    params = list(map(str, [ncycles, pulse_l, tmin, tmax]))  # Transforms the params input to string

    minf = jp(mpath, 'MtoPhase.cfg')  # Writing config file

    with open(minf, 'w') as ms:
        ms.write('\n'.join(params))
        ms.close()

    #  Running m2p exe file
    if not os.path.exists(jp(mpath, 'mtophase.exe')):
        warnings.warn('mtophase.exe not found')

    sp.call([jp(mpath, 'mtophase.exe')])  # Run

    mm = open('MtoPhase.dat', 'r').readlines()
    ms = mm[0].split()
    f = float(ms[0])

    return f
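A brief usage sketch based on the docstring; the parameter values are illustrative only:

# Assumed usage: 4 cycles of a 2 s pulse, chargeability window from 0.1 s to 1.0 s.
m2p = mtophase(ncycles=4, pulse_l=2, tmin=0.1, tmax=1.0)
print(m2p)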
Example #35
def post_examination(
    root: str, xlim: list = None, ylim: list = None, show: bool = False
):
    focus = Setup.Focus()
    if xlim is None:
        xlim = focus.x_range
    if ylim is None:
        ylim = focus.y_range  # [335, 700]
    md = Setup.Directories()
    ndir = jp(md.forecasts_dir, "base", "roots_whpa", f"{root}.npy")
    sdir = os.path.dirname(ndir)
    nn = np.load(ndir)
    whpa_plot(
        whpa=nn,
        x_lim=xlim,
        y_lim=ylim,
        labelsize=11,
        alpha=1,
        xlabel="X(m)",
        ylabel="Y(m)",
        cb_title="SD(m)",
        annotation=["B"],
        bkg_field_array=np.flipud(nn[0]),
        color="black",
        cmap="coolwarm",
    )

    # legend = proxy_annotate(annotation=['B'], loc=2, fz=14)
    # plt.gca().add_artist(legend)

    plt.savefig(
        jp(sdir, f"{root}_SD.png"), dpi=300, bbox_inches="tight", transparent=False
    )
    if show:
        plt.show()
    plt.close()
Example #36
def use_bldsup(build_support_dir="bldsup"):
    """Specify a local build support directory for build specific extensions.

    use_plugin(name) and import will look for python modules in BUILD_SUPPORT_DIR.

    WARNING: The BUILD_SUPPORT_DIR must exist and must have an __init__.py file in it.
    """
    assert isdir(build_support_dir
                 ), "use_bldsup('{0}'): The {0} directory must exist!".format(
                     build_support_dir)
    init_file = jp(build_support_dir, "__init__.py")
    assert isfile(
        init_file), "use_bldsup('{0}'): The {1} file must exist!".format(
            build_support_dir, init_file)
    sys.path.insert(0, build_support_dir)
Example #37
def test_set_build_description_call_script_help(capfd):
    # Invoke this in a subprocess to ensure that calling the script works
    # This will not give coverage as it is not traced through the subprocess call
    rc = subprocess.call([sys.executable, jp(_here, '../cli/cli.py'), 'set_build_description', '--help'])
    assert rc == 0

    sout, _ = capfd.readouterr()
    assert '--job-name' in sout
    assert '--build-number' in sout
    assert '--description' in sout
    assert '--direct-url' in sout
    assert '--replace' in sout
    assert '--separator' in sout
    assert '--username' in sout
    assert '--password' in sout
Example #38
def main():
    tta_speed = 0.5  # slow down (i.e. < 1.0)
    samples_per_sec = 16000
    test_fns = sorted(glob('data/one/*.wav'))
    tta_dir = './test/audio/one'
    for fn in tqdm(test_fns):
        basename = bn(fn)
        basename = "sp5-" + basename
        rate, data = wf.read(fn)
        assert len(data) == samples_per_sec
        data = np.float32(data) / 32767
        data = effects.time_stretch(data, tta_speed)
        data = data[-samples_per_sec:]
        out_fn = jp(tta_dir, basename)
        wf.write(out_fn, rate, np.int16(data * 32767))
Example #39
def _verify_description(api, job, build_number, expected):
    if api.api_type == ApiType.MOCK:
        return

    # Read back description and verify
    if api.api_type == ApiType.JENKINS:
        build_url = "/job/" + job.name + '/' + str(build_number)
        dct = api.get_json(build_url, tree="description")
        description = dct['description']

    if api.api_type == ApiType.SCRIPT:
        with codecs.open(jp(job.workspace, 'description.txt'),
                         encoding='utf-8') as ff:
            description = ff.read()

    assert description == expected
Example #40
def load_pred_from_exp(_id, exp_path, n_exp=5):
    """Finds and loads the ``_id`` prediction in ``n_exp`` validation series of ``exp_path`` experiment."""
    pred = None

    for n in range(n_exp):
        try:
            pred = load_pred(_id,
                             jp(exp_path, f'experiment_{n}/test_predictions'))
        except FileNotFoundError:
            pass

    if pred is None:
        raise FileNotFoundError(
            f'There is no such id `{_id}` over {n_exp} experiments.')

    return pred
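A hedged usage sketch; the prediction id and experiment path are placeholders:

# Assumed usage: search the 5 validation folds of one experiment for a single prediction.
pred = load_pred_from_exp('case_001', '/path/to/experiment', n_exp=5)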
Example #41
def load_series(row: pd.Series,
                base_path: PathLike = None,
                orientation: Union[bool, None] = Default,
                scaling: bool = None) -> np.ndarray:
    """
    Loads an image based on its ``row`` in the metadata dataframe.

    If ``base_path`` is not None, PathToFolder is assumed to be relative to it.

    If ``orientation`` is True, the loaded image will be transposed and flipped
    to standard (Coronal, Sagittal, Axial) orientation. If None, the orientation
    will be standardized only if possible (i.e. all the necessary metadata is present).

    Required columns: PathToFolder, FileNames.
    """
    if orientation is Default:
        orientation = None
        warnings.warn(
            'The default value for `orientation` will be changed to `False` in next releases. '
            'Pass orientation=None, if you wish to keep the old behaviour.',
            UserWarning)

    folder, files = row.PathToFolder, row.FileNames.split('/')
    if base_path is not None:
        folder = os.path.join(base_path, folder)
    if contains_info(row, 'InstanceNumbers'):
        files = map(
            itemgetter(1),
            sorted(zip_equal(split_floats(row.InstanceNumbers), files)))

    x = np.stack([dcmread(jp(folder, file)).pixel_array for file in files],
                 axis=-1)

    if scaling and not contains_info(row, 'RescaleSlope', 'RescaleIntercept'):
        raise ValueError('Not enough information for scaling.')
    if scaling is not False and contains_info(row, 'RescaleSlope'):
        x = x * row.RescaleSlope
    if scaling is not False and contains_info(row, 'RescaleIntercept'):
        x = x + row.RescaleIntercept

    if orientation is None:
        orientation = contains_info(row, *ORIENTATION)
    if orientation:
        from .spatial import normalize_orientation
        x = normalize_orientation(x, row)

    return x
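A minimal usage sketch; the metadata row follows the columns named in the docstring (PathToFolder, FileNames) and all values are placeholders:

import pandas as pd

# Assumed usage: build a one-row metadata record and load the series it describes.
row = pd.Series({'PathToFolder': 'study1/series1', 'FileNames': 'im0.dcm/im1.dcm/im2.dcm'})
volume = load_series(row, base_path='/data/dicoms', orientation=None)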
Example #42
def gen_pyx(odir):
    gtr_files = glob.glob(jp(os.path.abspath('generator'), '*')) + ['./setup.py']
    src_files = [os.path.abspath('build/daal4py_cpp.h'),
                 os.path.abspath('build/daal4py_cpp.cpp'),
                 os.path.abspath('build/daal4py_cy.pyx')]
    if all(os.path.isfile(x) for x in src_files):
        src_files.sort(key=lambda x: os.path.getmtime(x))
        gtr_files.sort(key=lambda x: os.path.getmtime(x), reverse=True)
        if os.path.getmtime(src_files[0]) > os.path.getmtime(gtr_files[0]):
            print('Generated files are all newer than generator code. Skipping code generation')
            return

    from generator.gen_daal4py import gen_daal4py
    odir = os.path.abspath(odir)
    if not os.path.isdir(odir):
        os.mkdir(odir)
    gen_daal4py(daal_root, odir, d4p_version, no_dist=no_dist, no_stream=no_stream)
Example #43
def train_svm():
    # 1. Load the feature vectors of 4000 labeled mails
    data_train = loadmat(jp(dn(__file__), 'data', 'spamTrain.mat'))
    X = data_train['X']
    y = data_train['y'].ravel()

    # 2. Train a linear SVM model
    log.info(
        'Start training the Linear SVM classifier model with %d entries...',
        len(y))
    classifier_model = svm.LinearSVC()
    classifier_model.fit(X, y)

    log.info('Training done !')
    evaluate_model(classifier_model)

    return classifier_model
Example #44
    def test__optimize_omit_module_files(self):
        module_files = ["/a/b/c/d/x.py",
                        "/a/b/c/d/y.py",
                        "/a/x/z.py",
                        "/a/b/o.py"
                        ]

        self.assertEqual(_optimize_omit_module_files(module_files, ["/a/b/c/d/x.py",
                                                                    "/a/b/c/d/y.py"]),
                         [jp("/a/b/c", "*")])

        self.assertEqual(_optimize_omit_module_files(module_files, ["/a/z.py",
                                                                    "/a/b/o.py"]),
                         ["/a/b/o.py", "/a/z.py"])

        self.assertEqual(_optimize_omit_module_files(module_files, ["/a/b/c/d/x.py"]),
                         ["/a/b/c/d/x.py"])
Example #45
def get_build_options():
    include_dir_plat = [
        os.path.abspath('./src'),
        os.path.abspath('./onedal'),
        dal_root + '/include',
    ]
    # FIXME it is a wrong place for this dependency
    if not no_dist:
        include_dir_plat.append(mpi_root + '/include')
    using_intel = os.environ.get('cc', '') in ['icc', 'icpc', 'icl', 'dpcpp']
    eca = [
        '-DPY_ARRAY_UNIQUE_SYMBOL=daal4py_array_API',
        '-DD4P_VERSION="' + d4p_version + '"', '-DNPY_ALLOW_THREADS=1'
    ]
    ela = []

    if using_intel and IS_WIN:
        include_dir_plat.append(
            jp(os.environ.get('ICPP_COMPILER16', ''), 'compiler', 'include'))
        eca += ['-std=c++17', '-w', '/MD']
    elif not using_intel and IS_WIN:
        eca += ['-wd4267', '-wd4244', '-wd4101', '-wd4996', '/std:c++17']
    else:
        eca += [
            '-std=c++17',
            '-w',
        ]  # '-D_GLIBCXX_USE_CXX11_ABI=0']

    # Security flags
    eca += get_sdl_cflags()
    ela += get_sdl_ldflags()

    if IS_MAC:
        eca.append('-stdlib=libc++')
        ela.append('-stdlib=libc++')
        ela.append("-Wl,-rpath,{}".format(daal_lib_dir))
        ela.append("-Wl,-rpath,@loader_path/../..")
    elif IS_WIN:
        ela.append('-IGNORE:4197')
    elif IS_LIN and not any(x in os.environ and '-g' in os.environ[x]
                            for x in ['CPPFLAGS', 'CFLAGS', 'LDFLAGS']):
        ela.append('-s')
    if IS_LIN:
        ela.append("-fPIC")
        ela.append("-Wl,-rpath,$ORIGIN/../..")
    return eca, ela, include_dir_plat
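A small sketch of how the returned tuple might be consumed; the extension name and source file are illustrative assumptions:

from setuptools import Extension

# Assumed usage: feed the collected compiler/linker options into an extension definition.
eca, ela, include_dirs = get_build_options()
ext = Extension('_example_ext', sources=['src/example.cpp'],
                include_dirs=include_dirs, extra_compile_args=eca, extra_link_args=ela)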
Example #46
def crtomo_file_shortener(f1, f2):
    """CRTOMO cannot deal with long folder path, especially not with spaces within it..."""
    fs = f2

    if f2:  # If file is different than None

        # Remove file name from path
        exefolder = f1.replace(jp("\\", path_leaf(f1)), "")

        if exefolder in f2:
            # CRtomo can not deal with long paths
            fs = ".\\" + f2.replace(exefolder, "")
        else:
            fs = f2
            if len(f2) == len(path_leaf(f2)):
                fs = ".\\" + f2
    return fs
Example #47
    def read(self):
        """
        Walk through each directory in the root dir and read in C++ headers.
        Creating a namespace dictionary. Of course, it needs to go through every header file to find out
        what namespace it is affiliated with. Once done, we have a dictionary where the key is the namespace
        and the values are namespace class objects. These objects carry all information as extracted by parse.py.
        """
        print('reading headers from ' + self.include_root)
        for (dirpath, dirnames, filenames) in os.walk(self.include_root):
            for filename in filenames:
                if filename.endswith(
                        '.h') and not 'neural_networks' in dirpath and not any(
                            filename.endswith(x)
                            for x in cython_interface.ignore_files):
                    fname = jp(dirpath, filename)
                    #print('reading ' +  fname)
                    with open(fname, "r") as header:
                        parsed_data = parse_header(header,
                                                   cython_interface.ignores)

                    ns = cleanup_ns(fname, parsed_data['ns'])
                    # Now let's update the namespace; more than one file might contribute to the same ns
                    if ns:
                        if ns not in self.namespace_dict:
                            self.namespace_dict[ns] = namespace(ns)
                        pns = get_parent(ns)
                        if pns not in self.namespace_dict:
                            self.namespace_dict[pns] = namespace(pns)
                        if ns != 'daal':
                            self.namespace_dict[pns].children.add(ns)
                        self.namespace_dict[ns].includes = self.namespace_dict[
                            ns].includes.union(parsed_data['includes'])
                        self.namespace_dict[ns].steps = self.namespace_dict[
                            ns].steps.union(parsed_data['steps'])
                        self.namespace_dict[ns].classes.update(
                            parsed_data['classes'])
                        self.namespace_dict[ns].enums.update(
                            parsed_data['enums'])
                        self.namespace_dict[ns].typedefs.update(
                            parsed_data['typedefs'])
                        self.namespace_dict[ns].headers.append(
                            fname.replace(self.include_root, '').lstrip('/'))
                        if parsed_data['need_methods']:
                            self.namespace_dict[ns].need_methods = True
Example #48
def test_json_strip_prefix(api_type):
    with api_select.api(__file__, api_type, login=True) as api:
        flow_name = api.flow_job()
        api.job('j1', max_fails=0, expect_invocations=1, expect_order=1)
        api.job('j2', max_fails=0, expect_invocations=1, expect_order=2)
        api.job('j3', max_fails=0, expect_invocations=1, expect_order=3)
        api.job('j4', max_fails=0, expect_invocations=1, expect_order=3)
        api.job('j5', max_fails=0, expect_invocations=1, expect_order=3)
        api.job('j6', max_fails=0, expect_invocations=1, expect_order=3)
        api.job('j7_unchecked',
                max_fails=0,
                expect_invocations=1,
                invocation_delay=0,
                exec_time=40,
                expect_order=None,
                unknown_result=True)
        api.job('j8_unchecked',
                max_fails=0,
                expect_invocations=1,
                invocation_delay=0,
                exec_time=40,
                expect_order=None,
                unknown_result=True)
        api.job('j9',
                max_fails=0,
                expect_invocations=1,
                expect_order=4,
                exec_time=5)

        ctrl1 = _flow(api, True, flow_graph_dir(flow_name))

        # Test pretty printing
        json_file = jp(flow_graph_dir(flow_name), "pretty.json")
        ctrl1.json(json_file, indent=4)
        with open(json_file) as jf:
            _assert_json(jf.read().strip(), _pretty_json, api.api_type)

        # Test default compact json
        with open(ctrl1.json_file) as jf:
            _assert_json(jf.read().strip(), _compact_json, api.api_type)

        # Test return json
        json = ctrl1.json(None)
        _assert_json(json, _compact_json, api.api_type)
Example #49
def gen_daal4py(daalroot,
                outdir,
                version,
                warn_all=False,
                no_dist=False,
                no_stream=False):
    global no_warn
    if warn_all:
        no_warn = {}
    orig_path = jp(daalroot, 'include')
    assert os.path.isfile(jp(orig_path, 'algorithms', 'algorithm.h')) and os.path.isfile(jp(orig_path, 'algorithms', 'model.h')),\
           "Path/$DAALROOT '"+orig_path+"' doesn't seem host DAAL headers. Please provide correct daalroot."
    head_path = jp("build", "include")
    algo_path = jp(head_path, "algorithms")
    rmtree(head_path, ignore_errors=True)
    copytree(orig_path, head_path)
    for (dirpath, dirnames, filenames) in os.walk(algo_path):
        for filename in filenames:
            call("clang-format -i " + jp(dirpath, filename), shell=True)
    iface = cython_interface(algo_path)
    iface.read()
    print('Generating sources...')
    cpp_h, cpp_cpp, pyx_file = iface.hlapi(iface.version, no_dist, no_stream)

    # 'ridge_regression', parametertype is a template without any need
    with open(jp(outdir, 'daal4py_cpp.h'), 'w') as f:
        f.write(cpp_h)
    with open(jp(outdir, 'daal4py_cpp.cpp'), 'w') as f:
        f.write(cpp_cpp)

    with open(jp('src', 'gettree.pyx'), 'r') as f:
        pyx_gettree = f.read()

    with open(jp(outdir, 'daal4py_cy.pyx'), 'w') as f:
        f.write(pyx_file)
        f.write(pyx_gettree)
Example #50
def exp2prc_df(exp_path, n_val=5, specific_ids=None):
    """Constructs a pandas DataFrame with PRC data from all predictions in ``exp_path``."""
    dfs = []
    for n in range(n_val):
        prc_path = jp(exp_path, f'experiment_{n}', 'test_metrics',
                      'prc_records.json')
        prc_dicts = load_json(prc_path)

        for _id, records in prc_dicts.items():
            if specific_ids is not None and _id not in specific_ids:
                continue
            for d in records:
                d['id'] = _id
            dfs.append(pd.DataFrame.from_records(records))

    df = pd.concat(dfs)
    return df
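
A hedged usage sketch; the experiment path and the ids are placeholders, and `load_json` is assumed to be importable as in the surrounding module.

# Hypothetical call; exp_path and the ids below are placeholders.
df = exp2prc_df('results/unet_baseline', n_val=5, specific_ids={'case_01', 'case_02'})
print(df['id'].value_counts())  # number of PRC records collected per id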
Example #51
0
    def file_mapper(path):
        path = nc(path)
        best_candidate = None
        for p in normalized_paths:
            if path == p:
                return path

            if path.startswith(p):
                candidate = relpath(path, p)
                if not best_candidate or len(candidate) < len(best_candidate):
                    best_candidate = candidate

        if best_candidate:
            final_candidate = jp(source_path, best_candidate)
            if exists(final_candidate):
                return final_candidate
            return best_candidate
        else:
            return path
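
Because `file_mapper` closes over `normalized_paths` and `source_path` from an enclosing function that is not shown, here is a self-contained sketch of the same mapping logic; the factory name and all paths are placeholders.

from os.path import exists, join as jp, normcase as nc, relpath

def make_file_mapper(normalized_paths, source_path):
    """Return a mapper that rebases paths seen under normalized_paths onto source_path."""
    def file_mapper(path):
        path = nc(path)
        best_candidate = None
        for p in normalized_paths:
            if path == p:
                return path
            if path.startswith(p):
                candidate = relpath(path, p)
                if not best_candidate or len(candidate) < len(best_candidate):
                    best_candidate = candidate
        if best_candidate:
            final_candidate = jp(source_path, best_candidate)
            return final_candidate if exists(final_candidate) else best_candidate
        return path
    return file_mapper

# Placeholder paths: a file seen under the build tree maps back to the source tree
# when the corresponding file exists there; otherwise the shortest candidate wins.
mapper = make_file_mapper([nc("/builds/project/src"), nc("/builds/project")],
                          nc("/workspace/project/src"))
print(mapper("/builds/project/src/pkg/mod.py"))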
Example #52
0
def test_json_no_strip_prefix(api_type):
    with api_select.api(__file__, api_type, login=True) as api:
        flow_name = api.flow_job()
        api.job('j1', max_fails=0, expect_invocations=1, expect_order=1)
        api.job('j2', max_fails=0, expect_invocations=1, expect_order=2)
        api.job('j3', max_fails=0, expect_invocations=1, expect_order=3)
        api.job('j4', max_fails=0, expect_invocations=1, expect_order=3)
        api.job('j5', max_fails=0, expect_invocations=1, expect_order=3)
        api.job('j6', max_fails=0, expect_invocations=1, expect_order=3)
        api.job('j7_unchecked',
                max_fails=0,
                expect_invocations=1,
                invocation_delay=0,
                exec_time=40,
                expect_order=None,
                unknown_result=True)
        api.job('j8_unchecked',
                max_fails=0,
                expect_invocations=1,
                invocation_delay=0,
                exec_time=40,
                expect_order=None,
                unknown_result=True)
        api.job('j9',
                max_fails=0,
                expect_invocations=1,
                expect_order=4,
                exec_time=5)

        ctrl1 = _flow(api, False, flow_graph_dir(flow_name))

        # Test pretty printing with no stripping of top level prefix
        json_file = jp(flow_graph_dir(flow_name), "verbose_pretty.json")
        ctrl1.json(json_file, indent=4)
        with open(json_file) as jf:
            got_json = jf.read().strip()
            expect_json = _pretty_json.replace(
                'strip_prefix', 'no_strip_prefix').replace(
                    'name": "',
                    'name": "jenkinsflow_test__json_no_strip_prefix__')
            _assert_json(got_json, expect_json, api.api_type)
Example #53
0
    def test_should_install_dependency_without_version(self, *_):
        dependency = Dependency("spam")

        install_dependencies(self.logger,
                             self.project,
                             dependency,
                             self.pyb_env,
                             "install_batch",
                             constraints_file_name="constraint_file")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA + [
                "install", "-c",
                nc(jp(self.pyb_env.env_dir, "constraint_file")), "spam"
            ],
            cwd=ANY,
            env=ANY,
            error_file_name=ANY,
            outfile_name=ANY,
            shell=False,
            no_path_search=True)
Example #54
0
    def export_01(self, features: list = None):
        """
        Gives a list of point set names to be saved in sgems binary format and saves them to the result directory
        :param features: Names of features to export
        """

        if (not isinstance(features, list)) and (features is not None):
            features = [features]
        if features is None:
            features = self.dataframe.columns.values[3:]

        for pp in features:
            # Extract x, y, z, values
            subframe = self.dataframe[["x", "y", "z", pp]]
            ps_name = jp(self.res_dir, pp)  # Path of binary file
            write_point_set(ps_name, subframe)  # Write binary file
            if (
                pp not in self.parent.object_file_names
            ):  # Adding features name to load them within sgems
                self.parent.object_file_names.append(pp)
            logger.info(f"Feature {pp} exported to binary file")
Example #55
0
def get_onedal_version(dal_root):
    """Parse oneDAL version strings"""

    header_version = jp(dal_root, 'include', 'services',
                        'library_version_info.h')
    version = ""

    major, minnor = "", ""
    with open(header_version, 'r') as header:
        for elem in header:
            if '#define __INTEL_DAAL__' in elem:
                match = re.match(r'#define __INTEL_DAAL__ (\d+)', elem)
                if match:
                    major = match.group(1)

            if '#define __INTEL_DAAL_MINOR__' in elem:
                match = re.match(r'#define __INTEL_DAAL_MINOR__ (\d+)', elem)
                if match:
                    minnor = match.group(1)
    version = int(major) * 10000 + int(minnor) * 100
    return version
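
As a worked example of the encoding above, with header values assumed purely for illustration:

# If library_version_info.h defined __INTEL_DAAL__ 2021 and __INTEL_DAAL_MINOR__ 3,
# get_onedal_version would return 2021 * 10000 + 3 * 100 == 20210300.
assert 2021 * 10000 + 3 * 100 == 20210300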
Example #56
0
def convert_folder(log_dir):
    """
    Function to convert folder with log files into one `pd.DataFrame`
    _ _ _ _ _ _ 
    Parameters:
    `log_dir` - path to the directory with log files extracted, str
   
    _ _ _ _ _ _ 
    Output:
    `data` - converted dataframe, `pd.DataFrame`
    
    _ _ _ _ _ _
    Note: check the `*.log` file with possibly incorrected files
    
    """
    for ix, file_path in tqdm(enumerate(os.listdir(log_dir))):
        df = log_to_dataframe(jp(log_dir, file_path))
        if ix == 0:
            data = df
        else:
            data = pd.concat([data, df], axis=0, ignore_index=True)

    return data
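
A hedged usage sketch, assuming `log_to_dataframe` is defined in the same module and that 'logs/' (a placeholder) holds the extracted files:

# Hypothetical call; 'logs/' is a placeholder directory of extracted log files.
data = convert_folder('logs/')
print(data.shape)
data.to_csv('logs_combined.csv', index=False)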
Example #57
0
def deform(input_file, output_file, ref_file):
    """This function finds a deformable transformation using dramms. It calls
    the dramms program through the commmand line.

    Parameters:
    input_file (str): absolute path of the MRI file we want to register. It
                      must have been registered firstly using an affine or
                      rigid transformation.
    output_file (str): absolute path of the registered MRI file name. In this
                       path the transformation file is also saved with file
                       extension .nii.gz.
    ref_file (str): absolute path of the MRI file used as reference for the
                    registration (atlas space.)

    Returns:
    -----------
    """
    out_fol = dn(output_file)
    output_def = jp(out_fol, 'def_transf.nii.gz')

    command = abs_dramms + '/dramms -S ' + input_file + ' -T ' + ref_file + \
        ' -O ' + output_file + ' -D ' + output_def + ' -w 1 -a 0 -v -v'
    os.system(command)
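
A hedged call sketch, assuming dramms is installed and `abs_dramms` points at its installation directory; all paths are placeholders.

# Hypothetical invocation; every path below is a placeholder.
deform('/data/sub-01/anat_affine.nii.gz',
       '/data/sub-01/anat_deformed.nii.gz',
       '/atlas/template.nii.gz')
# The deformation field is written next to the output as def_transf.nii.gz.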
Example #58
0
    def flow_job(self, name=None, params=None):
        """
        Creates a flow job
        For running a demo/test flow script as a Jenkins job.
        Requires jenkinsflow to be copied to 'pseudo_install_dir' and all jobs to be loaded beforehand (e.g. test.py has been run).
        Returns the job name.
        """
        name = '0flow_' + name if name else '0flow'
        job_name = (self.job_name_prefix or '') + name
        # TODO Handle script api
        if self.api_type == ApiType.SCRIPT:
            return job_name

        #  Note: Use -B to avoid permission problems with .pyc files created from commandline test
        if self.func_name:
            script = "export PYTHONPATH=" + test_tmp_dir + "\n"
            script += test_cfg.skip_job_load_sh_export_str() + "\n"
            # script += "export " + ApiType.JENKINS.env_name() + "=true\n"  # pylint: disable=no-member
            # Supply dummy args for the py.test fixtures
            dummy_args = ','.join(['0' for _ in range(self.func_num_params)])
            script += sys.executable + " -Bc &quot;import sys; from jenkinsflow.test." + self.file_name.replace(
                '.py', ''
            ) + " import *; sys.exit(test_" + self.func_name + "(" + dummy_args + "))&quot;"
        else:
            script = sys.executable + " -B " + jp(pseudo_install_dir, 'demo',
                                                  self.file_name)
        self._jenkins_job(job_name,
                          exec_time=0.5,
                          params=params,
                          script=script,
                          print_env=False,
                          create_job=None,
                          always_load=False,
                          num_builds_to_keep=4,
                          final_result_use_cli=False,
                          set_build_descriptions=())
        return job_name
Example #59
0
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

import sys
from os.path import dirname, join as jp, normcase as nc

# This is only necessary in PyBuilder sources for bootstrap
build_sources = nc(jp(dirname(__file__), "src/main/python"))
if build_sources not in sys.path:
    sys.path.insert(0, build_sources)

from pybuilder import bootstrap
from pybuilder.core import Author, init, use_plugin

bootstrap()

use_plugin("python.core")
use_plugin("python.pytddmon")
use_plugin("python.distutils")
use_plugin("python.install_dependencies")

use_plugin("copy_resources")
use_plugin("filter_resources")
Example #60
0
import pandas as pd
from tqdm import tqdm
import os
from os.path import join as jp
from shutil import copy

sub1 = pd.read_csv('submission_098_leftloud_tta_all_labels.csv')  # 87% PLB
sub2 = pd.read_csv('submission_096_leftloud_tta_all_labels.csv')  # 87% PLB
sub3 = pd.read_csv('submission_091_leftloud_tta_all_labels.csv')  # 88% PLB

consistent = ((sub1.label == sub2.label) & (sub1.label == sub3.label))
print("All: ", sub1.shape[0], " consistent: ", consistent.sum())

for i in tqdm(range(sub1.shape[0])):
    fn = sub1.loc[i, 'fname']
    if fn != sub2.loc[i, 'fname'] or fn != sub3.loc[i, 'fname']:
        print("Fatal error")
        break

    if consistent[i]:
        label = sub1.loc[i, 'label']
        dst_dir = jp('data', 'pseudo', 'audio', label)
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        dst_fn = jp(dst_dir, fn)
        src_fn = jp('data', 'test', 'audio', fn)
        copy(src_fn, dst_fn, follow_symlinks=False)
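
An optional follow-up sketch that counts the pseudo-labelled clips per class, using the directory layout assumed by the copy loop above:

# Count pseudo-labelled clips per class after the copy loop has finished.
pseudo_root = jp('data', 'pseudo', 'audio')
for label in sorted(os.listdir(pseudo_root)):
    print(label, len(os.listdir(jp(pseudo_root, label))))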