Example #1
def _copy_contents(dst_dir, contents):
    items = {"dirs": set(), "files": set()}

    for path in contents:
        if isdir(path):
            items['dirs'].add(path)
        elif isfile(path):
            items['files'].add(path)

    dst_dir_name = basename(dst_dir)

    if dst_dir_name == "src" and len(items['dirs']) == 1:
        copytree(list(items['dirs']).pop(), dst_dir, symlinks=True)
    else:
        makedirs(dst_dir)
        for d in items['dirs']:
            copytree(d, join(dst_dir, basename(d)), symlinks=True)

    if not items['files']:
        return

    if dst_dir_name == "lib":
        dst_dir = mkdtemp(dir=dst_dir)  # mkdtemp already returns the full path

    for f in items['files']:
        copyfile(f, join(dst_dir, basename(f)))
Example #2
    def _setup_region_dict(self, name, reg, outsubdir, systematic, ntuple,
                           basedir):
        regdic = reg.get_config_dict()
        modedir = regdic['hists'].lower()
        regdic['name'] = name
        if outsubdir:
            systdir = outsubdir
        else:
            if systematic == 'NONE':
                systdir = 'baseline'
            else:
                systdir = systematic.lower()
        full_out_dir = join(basedir, modedir, systdir)
        if not isdir(full_out_dir):
            if self.make_dirs:
                make_dir_if_none(full_out_dir)
            else:
                raise IOError(99, "no dir", full_out_dir)
        histname = '{}.h5'.format(basename(splitext(ntuple)[0]))
        full_out_path = join(full_out_dir, histname)
        if isfile(full_out_path):
            if self.rerun:
                os.remove(full_out_path)
            else:
                return None
        regdic['output_name'] = full_out_path
        regdic['systematic'] = systematic

        return regdic
Example #3
    def handle_finish(self):
        self.info(bold('dumping search index... '), nonl=True)
        self.indexer.prune(self.env.all_docs)
        searchindexfn = path.join(self.outdir, self.searchindex_filename)
        # first write to a temporary file, so that if dumping fails,
        # the existing index won't be overwritten
        f = open(searchindexfn + '.tmp', 'wb')
        try:
            self.indexer.dump(f, self.indexer_format)
        finally:
            f.close()
        movefile(searchindexfn + '.tmp', searchindexfn)
        self.info('done')

        self.info(bold('dumping object inventory... '), nonl=True)
        f = open(path.join(self.outdir, INVENTORY_FILENAME), 'w')
        try:
            f.write('# Sphinx inventory version 1\n')
            f.write('# Project: %s\n' % self.config.project.encode('utf-8'))
            f.write('# Version: %s\n' % self.config.version)
            for modname, info in self.env.modules.iteritems():
                f.write('%s mod %s\n' % (modname, self.get_target_uri(info[0])))
            for refname, (docname, desctype) in self.env.descrefs.iteritems():
                f.write('%s %s %s\n' % (refname, desctype,
                                        self.get_target_uri(docname)))
        finally:
            f.close()
        self.info('done')
Example #4
    def build_geckolib(self, jobs=None, verbose=False, release=False):
        self.set_use_stable_rust()
        self.ensure_bootstrapped()

        ret = None
        opts = []
        if jobs is not None:
            opts += ["-j", jobs]
        if verbose:
            opts += ["-v"]
        if release:
            opts += ["--release"]

        env = self.build_env(is_build=True)
        env["CARGO_TARGET_DIR"] = path.join(self.context.topdir, "target", "geckolib").encode("UTF-8")

        build_start = time()
        with cd(path.join("ports", "geckolib")):
            ret = call(["cargo", "build"] + opts, env=env, verbose=verbose)
        elapsed = time() - build_start

        # Generate Desktop Notification if elapsed-time > some threshold value
        notify_build_done(elapsed)

        print("GeckoLib build completed in %s" % format_duration(elapsed))

        return ret
Example #5
    def handle_page(self, pagename, ctx, templatename='page.html',
                    outfilename=None, event_arg=None):
        ctx['current_page_name'] = pagename
        sidebarfile = self.config.html_sidebars.get(pagename)
        if sidebarfile:
            ctx['customsidebar'] = sidebarfile

        if not outfilename:
            outfilename = path.join(self.outdir,
                                    os_path(pagename) + self.out_suffix)

        self.app.emit('html-page-context', pagename, templatename,
                      ctx, event_arg)

        ensuredir(path.dirname(outfilename))
        f = open(outfilename, 'wb')
        try:
            self.implementation.dump(ctx, f, 2)
        finally:
            f.close()

        # if there is a source file, copy the source file for the
        # "show source" link
        if ctx.get('sourcename'):
            source_name = path.join(self.outdir, '_sources',
                                    os_path(ctx['sourcename']))
            ensuredir(path.dirname(source_name))
            copyfile(self.env.doc2path(pagename), source_name)
Example #6
    def _preprocess(self, library_name, file_name, preprocessors):
        """
        Preprocess file_name within library_name using explicit preprocessors
        if preprocessors is None then use implicit globally defined processors
        """
        # @TODO dependency checking etc...

        if preprocessors is None:
            preprocessors = [self._location_preprocessor, self._check_preprocessor]
            preprocessors = [p for p in preprocessors if p is not None]
            preprocessors = self._external_preprocessors + preprocessors

        if len(preprocessors) == 0:
            return file_name

        code = ostools.read_file(file_name)
        for preprocessor in preprocessors:
            code = preprocessor.run(code, basename(file_name))

        pp_file_name = join(self._preprocessed_path, library_name, basename(file_name))

        idx = 1
        while ostools.file_exists(pp_file_name):
            LOGGER.debug("Preprocessed file exists '%s', adding prefix", pp_file_name)
            pp_file_name = join(self._preprocessed_path,
                                library_name, "%i_%s" % (idx, basename(file_name)))
            idx += 1

        ostools.write_file(pp_file_name, code)
        return pp_file_name
Example #7
def module_names(path, recursive=False):
    """
    Return a list of modules which can be imported from *path*.

    :arg path: a directory to scan.
    :type path: string
    :arg recursive: Also return submodule names for packages.
    :type recursive: bool
    :return: a list of string pairs (module_name, module_file).
    :rtype: list
    """

    from os.path import join, isfile

    modules = []

    for filename in sorted(_os.listdir(path)):
        if filename == "modules":
            pass  # XXX, hard coded exception.
        elif filename.endswith(".py") and filename != "__init__.py":
            fullpath = join(path, filename)
            modules.append((filename[0:-3], fullpath))
        elif "." not in filename:
            directory = join(path, filename)
            fullpath = join(directory, "__init__.py")
            if isfile(fullpath):
                modules.append((filename, fullpath))
                if recursive:
                    for mod_name, mod_path in module_names(directory, True):
                        modules.append(("%s.%s" % (filename, mod_name),
                                        mod_path,
                                        ))

    return modules
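
A minimal usage sketch for module_names (the directory path is a made-up example); it prints the (module_name, module_file) pairs the docstring promises:

for mod_name, mod_file in module_names("/path/to/addons", recursive=True):
    print("%s -> %s" % (mod_name, mod_file))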
Example #8
def cat_counter_references(counter_references=None, target_dir=curdir,
                           path_to_bowtie2='bowtie2',
                           logger=None, **kwargs):
    if counter_references is None:
        return
    try:
        makedirs(target_dir, mode=0o755)
    except OSError:
        pass
    debug('Validating counter-references and building counter-reference index')
    valid_references = validate_references(references=counter_references,
                                           target_dir=target_dir,
                                           path_to_bowtie2=path_to_bowtie2,
                                           logger=logger,
                                           environ_key=
                                           'SOT_DEFAULT_COUNTER_REFERENCES')
    crefs_fa = join(target_dir, 'counter_references.fa')
    with open(crefs_fa, 'w') as fa_handle:
        for ref in valid_references:
            Popen([path_to_bowtie2 + '-inspect', ref], stdout=fa_handle).wait()
    crefs_index = join(target_dir, 'counter_references')
    args = [path_to_bowtie2 + '-build', crefs_fa, crefs_index]
    P = Popen(args, stderr=PIPE)
    stderr = P.communicate()[1]
    if stderr.startswith('Error'):
        critical(stderr)
        critical('No counter-references will be used.')
    return crefs_index
Example #9
def walk_prefix(prefix, ignore_predefined_files=True, windows_forward_slashes=True):
    """
    Return the set of all files in a given prefix directory.
    """
    res = set()
    prefix = abspath(prefix)
    ignore = {'pkgs', 'envs', 'conda-bld', 'conda-meta', '.conda_lock',
              'users', 'LICENSE.txt', 'info', 'conda-recipes', '.index',
              '.unionfs', '.nonadmin'}
    binignore = {'conda', 'activate', 'deactivate'}
    if sys.platform == 'darwin':
        ignore.update({'python.app', 'Launcher.app'})
    for fn in os.listdir(prefix):
        if ignore_predefined_files and fn in ignore:
            continue
        if isfile(join(prefix, fn)):
            res.add(fn)
            continue
        for root, dirs, files in os.walk(join(prefix, fn)):
            should_ignore = ignore_predefined_files and root == join(prefix, 'bin')
            for fn2 in files:
                if should_ignore and fn2 in binignore:
                    continue
                res.add(relpath(join(root, fn2), prefix))
            for dn in dirs:
                path = join(root, dn)
                if islink(path):
                    res.add(relpath(path, prefix))

    if on_win and windows_forward_slashes:
        return {path.replace('\\', '/') for path in res}
    else:
        return res
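
A minimal usage sketch for walk_prefix (the prefix path is an assumption); it lists every file found under an installation prefix, relative to that prefix:

for relative_path in sorted(walk_prefix('/opt/miniconda3')):
    print(relative_path)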
Example #10
    def getdeps(self):
        """Download and Extract Sources"""
        for source in self.sources:
            self.log.info("")
            self.log.info("#####################################################")

            # Skip anything already extracted
            extractdir = abspath(join(DepSource.RootExtractDir, source.destsubdir))
            if exists(extractdir):
                self.log.warn("Deps Subdir: " + source.destsubdir + " already exists, skipping")
                continue

            extracted = False
            downloaded = source.download()
            if not downloaded:
                self.log.error("Download Failed")
            else:
                extracted = source.extract()

            # Remove the archive file
            if source.destsubdir != "atmel-asf":
                source.remove_archivefile()

        # Re-jig the directories for those that need it
        for source in self.sources:
            source.movetoparent_multiple()

        # Check for ASF Sources
        if not exists(join(self.DepsDirectory, "atmel-asf")):
            self.log.warn("There was no Atmel ASF Archive file found")
            self.log.warn("asf is not required but you can manually download the below file for the Atmel Source")
            self.log.warn("http://www.atmel.com/tools/avrsoftwareframework.aspx?tab=overview")
            self.log.warn("So far this is only used for porting mbed to sam based mcu's")
        return
Example #11
def find_bowtie2_index(r, path_to_bowtie2='bowtie2'):
    """check for bowtie2 index as given.
    return True if found, else return False
    """
    args = [path_to_bowtie2 + '-inspect', '-v', '-s', r]
    debug(' '.join(args))
    P = Popen(args, stdout=open(devnull, 'w'), stderr=PIPE, cwd=mkdtemp())
    stderr = P.communicate()[1].splitlines()
    if not stderr[0].startswith('Could not locate'):
        for line in stderr:
            if line.startswith('Opening'):
                index_bt2 = line[(1 + line.find('"')):line.rfind('"')]
                index_basename = index_bt2[0:index_bt2.find('.1.bt2')]
                return index_basename
    for d in [getcwd(), os.path.split(path_to_bowtie2)[0],
              join(os.path.split(path_to_bowtie2)[0], 'indexes')]:
        rprime = join(d, r)
        args = [path_to_bowtie2 + '-inspect', '-v', '-s', rprime]
        debug(' '.join(args))
        P = Popen(args, stdout=open(devnull, 'w'), stderr=PIPE, cwd=mkdtemp())
        stderr = P.communicate()[1].splitlines()
        if not stderr[0].startswith('Could not locate'):
            for line in stderr:
                if line.startswith('Opening'):
                    index_bt2 = line[(1 + line.find('"')):line.rfind('"')]
                    index_basename = index_bt2[0:index_bt2.find('.1.bt2')]
                    return index_basename
    return None
Example #12
def main_menu():
    global screen

    pygame.mixer.music.load(path.join(sound_folder, "menu.ogg"))
    pygame.mixer.music.play(-1)

    title = pygame.image.load(path.join(img_dir, "main.png")).convert()
    title = pygame.transform.scale(title, (WIDTH, HEIGHT), screen)

    screen.blit(title, (0,0))
    pygame.display.update()

    while True:
        ev = pygame.event.poll()
        if ev.type == pygame.KEYDOWN:
            if ev.key == pygame.K_RETURN:
                break
            elif ev.key == pygame.K_q:
                pygame.quit()
                quit()
        else:
            draw_text(screen, "Press [ENTER] To Begin", 30, WIDTH/2, HEIGHT/2)
            draw_text(screen, "or [Q] To Quit", 30, WIDTH/2, (HEIGHT/2)+40)
            pygame.display.update()

    #pygame.mixer.music.stop()
    ready = pygame.mixer.Sound(path.join(sound_folder,'getready.ogg'))
    ready.play()
    screen.fill(BLACK)
    draw_text(screen, "GET READY!", 40, WIDTH/2, HEIGHT/2)
    pygame.display.update()
Example #13
def compile(args):
    """
    %prog compile directory

    Extract telomere length and ccn.
    """
    p = OptionParser(compile.__doc__)
    p.set_outfile(outfile="age.tsv")
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    dfs = []
    for folder in args:
        ofolder = os.listdir(folder)

        # telomeres
        subdir = [x for x in ofolder if x.startswith("telomeres")][0]
        subdir = op.join(folder, subdir)
        filename = op.join(subdir, "tel_lengths.txt")
        df = pd.read_csv(filename, sep="\t")
        d1 = df.iloc[0].to_dict()

        # ccn
        subdir = [x for x in ofolder if x.startswith("ccn")][0]
        subdir = op.join(folder, subdir)
        filename = iglob(subdir, "*.ccn.json")[0]
        js = json.load(open(filename))
        d1.update(js)
        df = pd.DataFrame(d1, index=[0])
        dfs.append(df)

    df = pd.concat(dfs, ignore_index=True)
    df.to_csv(opts.outfile, sep="\t", index=False)
Example #14
  def test_upload_cleanup(self):
    dpath = mkdtemp()
    _write = self.client.write

    def write(hdfs_path, *args, **kwargs):
      if 'bar' in hdfs_path:
        raise RuntimeError()
      return _write(hdfs_path, *args, **kwargs)

    try:
      self.client.write = write
      npath = osp.join(dpath, 'hi')
      os.mkdir(npath)
      with open(osp.join(npath, 'foo'), 'w') as writer:
        writer.write('hello!')
      os.mkdir(osp.join(npath, 'bar'))
      with open(osp.join(npath, 'bar', 'baz'), 'w') as writer:
        writer.write('world!')
      try:
        self.client.upload('foo', dpath)
      except RuntimeError:
        ok_(not self._exists('foo'))
      else:
        ok_(False) # This shouldn't happen.
    finally:
      rmtree(dpath)
      self.client.write = _write
Example #15
  def test_upload_with_progress(self):

    def callback(path, nbytes, history=defaultdict(list)):
      history[path].append(nbytes)
      return history

    dpath = mkdtemp()
    try:
      path1 = osp.join(dpath, 'foo')
      with open(path1, 'w') as writer:
        writer.write('hello!')
      os.mkdir(osp.join(dpath, 'bar'))
      path2 = osp.join(dpath, 'bar', 'baz')
      with open(path2, 'w') as writer:
        writer.write('the world!')
      self.client.upload(
        'up',
        dpath,
        chunk_size=4,
        n_threads=1, # Callback isn't thread-safe.
        progress=callback
      )
      eq_(self._read('up/foo'), b'hello!')
      eq_(self._read('up/bar/baz'), b'the world!')
      eq_(
        callback('', 0),
        {path1: [4, 6, -1], path2: [4, 8, 10, -1], '': [0]}
      )
    finally:
      rmtree(dpath)
Example #16
def rpt_compare(args):
    '''
    Analyze package test results by comparing test output with GnuR output.
    Uses either a specific directory, i.e. the 'test' subdirectory of the --testdir argument
    or (default) the latest downloaded results from the --logdir directory
    Return 0 if passed, non-zero if failed
    '''
    parser = ArgumentParser(prog='mx rpt-compare')
    _add_common_args(parser)
    parser.add_argument('--testdir', action='store', help='specific dir containing fastr results')
    parser.add_argument('--pkg', action='store', help='pkg to compare')
    parser.add_argument('--diff', action='store_true', help='execute given diff program on differing outputs')
    parser.add_argument('--difftool', action='store', help='diff tool', default='diff')
    _add_pattern_arg(parser)
    args = _check_verbose(parser.parse_args(args))

    if args.pkg:
        # backwards compatibility
        args.pattern = args.pkg

    gnur = _gather_test_outputs(join(os.getcwd(), "test_gnur"))

    if args.testdir:
        fastr = _gather_test_outputs(join(args.testdir, "test"))
    else:
        fastr = _get_test_outputs(_gather_all_test_outputs(args.logdir))

    rdict = _rpt_compare_pkgs(gnur, fastr, args.verbose, args.pattern, args.diff, args.difftool)
    for _, rc in rdict.iteritems():
        if rc == 1:
            return 1
    return 0
Example #17
def test_fetch_file(url, tmpdir):
    """Test URL retrieval."""
    tempdir = str(tmpdir)
    archive_name = op.join(tempdir, "download_test")
    with catch_logging() as log:
        _fetch_file(url, archive_name, timeout=30., verbose='debug')
    log = log.getvalue()
    assert 'Resuming at' not in log
    with open(archive_name, 'rb') as fid:
        data = fid.read()
    stop = len(data) // 2
    assert 0 < stop < len(data)
    with open(archive_name + '.part', 'wb') as fid:
        fid.write(data[:stop])
    with catch_logging() as log:
        _fetch_file(url, archive_name, timeout=30., verbose='debug')
    log = log.getvalue()
    assert 'Resuming at %s' % stop in log
    with pytest.raises(Exception, match='unknown url type'):
        _fetch_file('NOT_AN_ADDRESS', op.join(tempdir, 'test'), verbose=False)
    resume_name = op.join(tempdir, "download_resume")
    # touch file
    with open(resume_name + '.part', 'w'):
        os.utime(resume_name + '.part', None)
    _fetch_file(url, resume_name, resume=True, timeout=30.,
                verbose=False)
    with pytest.raises(ValueError, match='Bad hash value'):
        _fetch_file(url, archive_name, hash_='a', verbose=False)
    with pytest.raises(RuntimeError, match='Hash mismatch'):
        _fetch_file(url, archive_name, hash_='a' * 32, verbose=False)
Example #18
def setMaster():
    if exists('/etc/hosts0'):
        print '/etc/hosts0 exists'
    else:
        sudo('cp /etc/hosts /etc/hosts0')

    sudo('rm /etc/hosts')
    sudo('cp /etc/hosts0 /etc/hosts')
    put('hosts')
    sudo('cat hosts|sudo tee -a /etc/hosts')
    run('rm hosts')

    run('cat /etc/hosts')

    path1 = '/home/{0}'.format(parm['USER'])
    rsync_project(path1, exclude=['result'])

    path2 = join(path1, basename(realpath('.')))
    path3 = join(path2, parm['programdir'])
    for dst in (path2, path3):
        fi = '{0}/{1}'.format(dst, parm['keyfile'])
        if not exists(fi, use_sudo=True):
            put(parm['keyfile'], dst)
            sudo('chmod 400 {0}'.format(fi))
    execute('genkey')
Example #19
def rpt_list_testdates(args):
    parser = ArgumentParser(prog='mx rpt-list-testdates')
    _add_common_args(parser)
    parser.add_argument('--printdir', action='store_true', help='print directory containing tests')
    _add_pattern_arg(parser)
    args = _check_verbose(parser.parse_args(args))
    fastr = dict()
    local_dirs = get_local_dirs(args.logdir)
    for local_dir in local_dirs:
        resultInfo = ResultInfo(local_dir)
        result_outputs = _gather_test_outputs(join(args.logdir, local_dir, "test"))
        for pkg, _ in result_outputs.iteritems():
            if re.search(args.pattern, pkg) is None:
                continue
            if pkg not in fastr:
                testdates = []
                fastr[pkg] = testdates
            else:
                testdates = fastr[pkg]
            testdates.append(resultInfo)

    for pkg, testdates in fastr.iteritems():
        sortedList = sorted(testdates, reverse=True)
        print pkg
        for resultInfo in sortedList:
            if args.printdir:
                print '  ' + join(args.logdir, resultInfo.localdir)
            else:
                print '  ' + str(resultInfo.date)
Example #20
    def testNoStartDateIfLastLogLineIsDeletedIds(self):
        f = open(join(self.tempdir, 'repository.stats'), 'w')
        f.write('''Started: 2005-01-03 16:10:56, Harvested/Uploaded/Deleted/Total: 1/2/3/4, Done: 2005-01-03 16:11:45, ResumptionToken:
Started: 2005-01-04 16:12:56, Harvested/Uploaded/Deleted/Total: 1/2/3/4, Done: 2005-01-04 16:13:45, ResumptionToken: ^^^oai_dc^45231
Started: 2005-01-06 16:16:56, Harvested/Uploaded/Deleted/Total: 1/2/3/4, Done: 2005-01-06 16:17:45, ResumptionToken:
Started: 2005-01-07 16:18:56, Harvested/Uploaded/Deleted/Total: 0/0/0/0, Done: Deleted all ids
''')
        f.close()

        s = State(self.tempdir, 'repository')
        self.assertEquals(None, s.from_)
        self.assertEquals(None, s.token)

        # and now with 'ids' misspelled as used to be the case
        f = open(join(self.tempdir, 'repository.stats'), 'w')
        f.write('''Started: 2005-01-03 16:10:56, Harvested/Uploaded/Deleted/Total: 1/2/3/4, Done: 2005-01-03 16:11:45, ResumptionToken:
Started: 2005-01-04 16:12:56, Harvested/Uploaded/Deleted/Total: 1/2/3/4, Done: 2005-01-04 16:13:45, ResumptionToken: ^^^oai_dc^45231
Started: 2005-01-06 16:16:56, Harvested/Uploaded/Deleted/Total: 1/2/3/4, Done: 2005-01-06 16:17:45, ResumptionToken:
Started: 2005-01-07 16:18:56, Harvested/Uploaded/Deleted/Total: 0/0/0/0, Done: Deleted all id's
''')
        f.close()

        s = State(self.tempdir, 'repository')
        self.assertEquals(None, s.from_)
        self.assertEquals(None, s.token)
Example #21
def test_io_inverse_operator():
    """Test IO of inverse_operator
    """
    tempdir = _TempDir()
    inverse_operator = read_inverse_operator(fname_inv)
    x = repr(inverse_operator)
    assert_true(x)
    assert_true(isinstance(inverse_operator['noise_cov'], Covariance))
    # just do one example for .gz, as it should generalize
    _compare_io(inverse_operator, '.gz')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        inv_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_inverse_operator(inv_badname, inverse_operator)
        read_inverse_operator(inv_badname)
    assert_naming(w, 'test_inverse.py', 2)

    # make sure we can write and read
    inv_fname = op.join(tempdir, 'test-inv.fif')
    args = (10, 1. / 9., 'dSPM')
    inv_prep = prepare_inverse_operator(inverse_operator, *args)
    write_inverse_operator(inv_fname, inv_prep)
    inv_read = read_inverse_operator(inv_fname)
    _compare(inverse_operator, inv_read)
    inv_read_prep = prepare_inverse_operator(inv_read, *args)
    _compare(inv_prep, inv_read_prep)
    inv_prep_prep = prepare_inverse_operator(inv_prep, *args)
    _compare(inv_prep, inv_prep_prep)
Example #22
def _check_line_endings():
    """Check all files in the repository for CR characters"""
    if sys.platform == 'win32':
        print('Skipping line endings check on Windows')
        sys.stdout.flush()
        return
    print('Running line endings check... ')
    sys.stdout.flush()
    report = []
    root_dir, dev = _get_root_dir()
    if not dev:
        root_dir = op.join(root_dir, 'vispy')
    for dirpath, dirnames, filenames in os.walk(root_dir):
        for fname in filenames:
            if op.splitext(fname)[1] in ('.pyc', '.pyo', '.so', '.dll'):
                continue
            # Get filename
            filename = op.join(dirpath, fname)
            relfilename = op.relpath(filename, root_dir)
            # Open and check
            try:
                with open(filename, 'rb') as fobj:
                    text = fobj.read().decode('utf-8')
            except UnicodeDecodeError:
                continue  # Probably a binary file
            crcount = text.count('\r')
            if crcount:
                lfcount = text.count('\n')
                report.append('In %s found %i/%i CR/LF' %
                              (relfilename, crcount, lfcount))

    # Process result
    if len(report) > 0:
        raise RuntimeError('Found %s files with incorrect endings:\n%s'
                           % (len(report), '\n'.join(report)))
Example #23
def get_soup(*path):
	from bs4 import BeautifulSoup
	assert exists(join(*path))
	index_html = None
	with open(join(*path),"rb") as f:
	    index_html = f.read().decode("utf-8")
	return BeautifulSoup(index_html,"html5lib")
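
A minimal usage sketch for get_soup (the path pieces are assumptions); the function joins its arguments into a single path before parsing:

soup = get_soup("output", "index.html")
print(soup.find("title"))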
Example #24
def snapshot(source, destination, name=None):
    """Snapshot one directory to another. Specify names to snapshot small, named differences."""
    source = source + sep
    destination = destination + sep

    if not path.isdir(source):
        raise RuntimeError("source is not a directory")

    if path.exists(destination):
        if not path.isdir(destination):
            raise RuntimeError("destination is not a directory")

        if name is None:
            raise RuntimeError("can't snapshot base snapshot if destination exists")

    snapdir = path.join(destination, ".snapdir")
    
    if path.exists(path.join(source, ".snapdir")):
        raise RuntimeError("snapdir exists in source directory")

    if name is None:
        check_call(["rsync", "--del", "-av", source, destination])
        makedirs(snapdir)
    else:
        if not path.exists(snapdir):
            raise RuntimeError("No snapdir in destination directory")

        check_call(["rsync", "--del", "-av", "--only-write-batch={}".format(path.join(snapdir, name)), source, destination])
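
A minimal usage sketch for snapshot (both paths are assumptions): the first call creates the base copy plus its .snapdir, the second records a small named delta via rsync's --only-write-batch:

snapshot("/data/project", "/backups/project")             # base snapshot
snapshot("/data/project", "/backups/project", name="v2")  # named incremental batch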
Example #25
def test_populate_areas():
	from webpages.populate import populate, save_expander
	
	populate(save_expander,prep_site_config("w9",**{"browser": "desktop"}))
	assert exists(join(pages_test_root,"output","index.html"))

	soup = get_soup(join(pages_test_root,"output","index.html"))
	a1 = soup.find(id="a1")
	a2 = soup.find(id="a2")
	config = eval_config_script(soup("script")[2].string)

	# The elements with parts content and matter
	assert a1.contents[0].strip() == "top bit"
	assert a1.contents[1].string.strip() == "section one"
	assert a1.contents[3].string.strip() == "section two"
	assert a1["class"].split() == [u"splash-area-inactive", u"upper-area-inactive", u"lower-area-inactive"]
	assert soup.find("section",id="s1")["class"].split() == [u"in-splash-area", u"in-splash-order-0", u"in-upper-area", u"in-upper-order-0", u"in-upper-order-last"]
	assert soup.find("section",id="s1")["role"] == "deck"
	assert soup.find("section",id="s2")["class"].split() == [u"in-splash-area", u"in-splash-order-1", u"in-lower-area", u"in-lower-order-0", u"in-lower-order-last", u"in-splash-order-last"]

	assert config["a1"] == {"area-names": ["splash","upper", "lower"], "charset": "utf-8", "layouter": "area-stage"}
	assert config["s2"] == {"area-names": ["splash","lower"], "charset": "utf-8", "laidout": "area-member"}
	assert config["s1"] == {"area-names": ["splash","upper"], "charset": "utf-8", "laidout": "area-member"}

	# The elements without parts, simply inline in HTML
	assert soup.find(id="a2")

	assert config["s4"] == {"area-names": ["second"], "laidout": "area-member"}
	assert config["s3"] == {"area-names": ["first"], "laidout": "area-member"}
	assert config["a2"] == {"area-names": ["first", "second"], "layouter": "area-stage"}
Example #26
def get_conf_path(filename=None):
    """Return absolute path for configuration file with specified filename"""
    # Define conf_dir
    if PYTEST:
        import py
        from _pytest.tmpdir import get_user
        conf_dir = osp.join(str(py.path.local.get_temproot()),
                            'pytest-of-{}'.format(get_user()),
                            SUBFOLDER)
    elif sys.platform.startswith('linux'):
        # This makes us follow the XDG standard to save our settings
        # on Linux, as it was requested on Issue 2629
        xdg_config_home = os.environ.get('XDG_CONFIG_HOME', '')
        if not xdg_config_home:
            xdg_config_home = osp.join(get_home_dir(), '.config')
        if not osp.isdir(xdg_config_home):
            os.makedirs(xdg_config_home)
        conf_dir = osp.join(xdg_config_home, SUBFOLDER)
    else:
        conf_dir = osp.join(get_home_dir(), SUBFOLDER)

    # Create conf_dir
    if not osp.isdir(conf_dir):
        if PYTEST:
            os.makedirs(conf_dir)
        else:
            os.mkdir(conf_dir)
    if filename is None:
        return conf_dir
    else:
        return osp.join(conf_dir, filename)
Example #27
def setup_python3():
    # Taken from "distribute" setup.py
    from distutils.filelist import FileList
    from distutils import dir_util, file_util, util, log
    from os.path import join
    import sys  # needed for the sys.path.insert below

    tmp_src = join("build", "src")
    log.set_verbosity(1)
    fl = FileList()
    for line in open("MANIFEST.in"):
        if not line.strip():
            continue
        fl.process_template_line(line)
    dir_util.create_tree(tmp_src, fl.files)
    outfiles_2to3 = []
    for f in fl.files:
        outf, copied = file_util.copy_file(f, join(tmp_src, f), update=1)
        if copied and outf.endswith(".py"):
            outfiles_2to3.append(outf)

    util.run_2to3(outfiles_2to3)

    # arrange setup to use the copy
    sys.path.insert(0, tmp_src)

    return tmp_src
Example #28
def make_image_path(outdoc, src):
    filename = outdoc.name
    basedir = join(dirname(filename), dirname(src))
    if not exists(basedir):
        makedirs(basedir)
    path = join(dirname(filename), src)
    return path
Example #29
def test_source_space_from_label():
    """Test generating a source space from volume label."""
    tempdir = _TempDir()
    aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
    label_names = get_volume_labels_from_aseg(aseg_fname)
    volume_label = label_names[int(np.random.rand() * len(label_names))]

    # Test pos as dict
    pos = dict()
    pytest.raises(ValueError, setup_volume_source_space, 'sample', pos=pos,
                  volume_label=volume_label, mri=aseg_fname)

    # Test no mri provided
    pytest.raises(RuntimeError, setup_volume_source_space, 'sample', mri=None,
                  volume_label=volume_label)

    # Test invalid volume label
    pytest.raises(ValueError, setup_volume_source_space, 'sample',
                  volume_label='Hello World!', mri=aseg_fname)

    src = setup_volume_source_space('sample', subjects_dir=subjects_dir,
                                    volume_label=volume_label, mri=aseg_fname,
                                    add_interpolator=False)
    assert_equal(volume_label, src[0]['seg_name'])

    # test reading and writing
    out_name = op.join(tempdir, 'temp-src.fif')
    write_source_spaces(out_name, src)
    src_from_file = read_source_spaces(out_name)
    _compare_source_spaces(src, src_from_file, mode='approx')
Example #30
    def handle(self, **options):

        for party_slug in sorted(os.listdir(ppc_data_directory)):
            json_directory = join(
                ppc_data_directory,
                party_slug
            )
            for leafname in sorted(os.listdir(json_directory)):
                if not leafname.endswith('.json'):
                    continue
                filename = join(json_directory, leafname)
                image = re.sub(r'\.json$', '-cropped.png', filename)
                if not exists(image):
                    image = None
                print '==============================================================='
                print "filename:", filename
                with open(filename) as f:
                    ppc_data = json.load(f)
                ppc_data['party_slug'] = party_slug
                ppc_data['party_object'] = party_slug_to_popit_party[party_slug]
                ppc_data['constituency_object'] = get_constituency_from_name(
                    ppc_data['constituency']
                )
                if options['check']:
                    continue
                self.handle_person(ppc_data, image)
Example #31
    def _wait_until_wake_word(self, source, sec_per_buffer):
        """Listen continuously on source until a wake word is spoken

        Args:
            source (AudioSource):  Source producing the audio chunks
            sec_per_buffer (float):  Fractional number of seconds in each chunk
        """
        num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE *
                               source.SAMPLE_WIDTH)

        silence = get_silence(num_silent_bytes)

        # bytearray to store audio in
        byte_data = silence

        buffers_per_check = self.SEC_BETWEEN_WW_CHECKS / sec_per_buffer
        buffers_since_check = 0.0

        # Max bytes for byte_data before audio is removed from the front
        max_size = self.sec_to_bytes(self.SAVED_WW_SEC, source)
        test_size = self.sec_to_bytes(self.TEST_WW_SEC, source)

        said_wake_word = False

        # Rolling buffer to track the audio energy (loudness) heard on
        # the source recently.  An average audio energy is maintained
        # based on these levels.
        energies = []
        idx_energy = 0
        avg_energy = 0.0
        energy_avg_samples = int(5 / sec_per_buffer)  # avg over last 5 secs
        counter = 0

        while not said_wake_word and not self._stop_signaled:
            if self._skip_wake_word():
                break
            chunk = self.record_sound_chunk(source)

            energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
            if energy < self.energy_threshold * self.multiplier:
                self._adjust_threshold(energy, sec_per_buffer)

            if len(energies) < energy_avg_samples:
                # build the average
                energies.append(energy)
                avg_energy += float(energy) / energy_avg_samples
            else:
                # maintain the running average and rolling buffer
                avg_energy -= float(energies[idx_energy]) / energy_avg_samples
                avg_energy += float(energy) / energy_avg_samples
                energies[idx_energy] = energy
                idx_energy = (idx_energy + 1) % energy_avg_samples

                # maintain the threshold using average
                if energy < avg_energy * 1.5:
                    if energy > self.energy_threshold:
                        # bump the threshold to just above this value
                        self.energy_threshold = energy * 1.2

            # Periodically output energy level stats.  This can be used to
            # visualize the microphone input, e.g. a needle on a meter.
            if counter % 3:
                with open(self.mic_level_file, 'w') as f:
                    f.write("Energy:  cur=" + str(energy) + " thresh=" +
                            str(self.energy_threshold))
            counter += 1

            # At first, the buffer is empty and must fill up.  After that
            # just drop the first chunk bytes to keep it the same size.
            needs_to_grow = len(byte_data) < max_size
            if needs_to_grow:
                byte_data += chunk
            else:  # Remove beginning of audio and add new chunk to end
                byte_data = byte_data[len(chunk):] + chunk

            buffers_since_check += 1.0
            self.wake_word_recognizer.update(chunk)
            if buffers_since_check > buffers_per_check:
                buffers_since_check -= buffers_per_check
                chopped = byte_data[-test_size:] \
                    if test_size < len(byte_data) else byte_data
                audio_data = chopped + silence
                said_wake_word = \
                    self.wake_word_recognizer.found_wake_word(audio_data)

                # Save positive wake words as appropriate
                if said_wake_word:
                    audio = None
                    mtd = None
                    if self.save_wake_words:
                        # Save wake word locally
                        audio = self._create_audio_data(byte_data, source)
                        mtd = self._compile_metadata()
                        if not isdir(self.saved_wake_words_dir):
                            os.mkdir(self.saved_wake_words_dir)
                        module = self.wake_word_recognizer.__class__.__name__

                        fn = join(
                            self.saved_wake_words_dir,
                            '_'.join([str(mtd[k])
                                      for k in sorted(mtd)]) + '.wav')
                        with open(fn, 'wb') as f:
                            f.write(audio.get_wav_data())

                    if self.config['opt_in'] and not self.upload_disabled:
                        # Upload wake word for opt_in people
                        Thread(
                            target=self._upload_wake_word,
                            daemon=True,
                            args=[
                                audio
                                or self._create_audio_data(byte_data, source),
                                mtd or self._compile_metadata()
                            ]).start()
Example #32
import os.path as osp
import sys

def add_path(path):
  if path not in sys.path: sys.path.insert(0, path)

this_dir = osp.dirname(__file__)

# Add LIB_PATH to PYTHONPATH
lib_path = osp.abspath( osp.join(this_dir, '..') )
add_path(lib_path)
Example #33
"""
Programmatically build the gunicorn test suite from the specs.
"""

from os import listdir
from os.path import dirname, join, realpath, splitext, basename

spec_dir = realpath(join(dirname(__file__), "specs/"))
valid_spec_dir = join(spec_dir, "requests", "valid")
output_file = join(spec_dir, "gunicorn.lisp")
test_file = realpath(join(dirname(__file__), "gunicorn-tests.lisp"))

prelude="""
(in-package :nclack-tests)

;; Testing for the conversion between HTTP request to the Clack 'environ'
;; representation.
(defsuite (gunicorn-request-suite :in root-suite)
    (run-child-tests))
(in-suite gunicorn-request-suite)

"""

test_template = """
(deftest test-{name} ()
      (let ((created-request
             (with-open-file (input-stream #P"{http_request_path}"
                              :direction :input)
                (nclack:make-request input-stream)))
            (pattern-request {clack_file_contents}))
Example #34
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    #### Validate flags
    if FLAGS.save_steps is not None:
        FLAGS.iterations = min(FLAGS.iterations, FLAGS.save_steps)

    if FLAGS.do_predict:
        predict_dir = FLAGS.predict_dir
        if not tf.gfile.Exists(predict_dir):
            tf.gfile.MakeDirs(predict_dir)

    processors = {
        "mnli_matched": MnliMatchedProcessor,
        "mnli_mismatched": MnliMismatchedProcessor,
        'sts-b': StsbProcessor,
        'imdb': ImdbProcessor,
        "yelp5": Yelp5Processor
    }

    if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
        raise ValueError(
            "At least one of `do_train`, `do_eval, `do_predict` or "
            "`do_submit` must be True.")

    if not tf.gfile.Exists(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)

    task_name = FLAGS.task_name.lower()

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    label_list = processor.get_labels() if not FLAGS.is_regression else None

    sp = spm.SentencePieceProcessor()
    sp.Load(FLAGS.spiece_model_file)

    def tokenize_fn(text):
        text = preprocess_text(text, lower=FLAGS.uncased)
        return encode_ids(sp, text)

    run_config = model_utils.configure_tpu(FLAGS)

    model_fn = get_model_fn(
        len(label_list) if label_list is not None else None)

    spm_basename = os.path.basename(FLAGS.spiece_model_file)

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    if FLAGS.use_tpu:
        estimator = tf.contrib.tpu.TPUEstimator(
            use_tpu=FLAGS.use_tpu,
            model_fn=model_fn,
            config=run_config,
            train_batch_size=FLAGS.train_batch_size,
            predict_batch_size=FLAGS.predict_batch_size,
            eval_batch_size=FLAGS.eval_batch_size)
    else:
        estimator = tf.estimator.Estimator(model_fn=model_fn,
                                           config=run_config)

    if FLAGS.do_train:
        train_file_base = "{}.len-{}.train.tf_record".format(
            spm_basename, FLAGS.max_seq_length)
        train_file = os.path.join(FLAGS.output_dir, train_file_base)
        tf.logging.info("Use tfrecord file {}".format(train_file))

        train_examples = processor.get_train_examples(FLAGS.data_dir)
        np.random.shuffle(train_examples)
        tf.logging.info("Num of train samples: {}".format(len(train_examples)))

        file_based_convert_examples_to_features(train_examples, label_list,
                                                FLAGS.max_seq_length,
                                                tokenize_fn, train_file,
                                                FLAGS.num_passes)

        train_input_fn = file_based_input_fn_builder(
            input_file=train_file,
            seq_length=FLAGS.max_seq_length,
            is_training=True,
            drop_remainder=True)

        estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_steps)

    if FLAGS.do_eval or FLAGS.do_predict:
        if FLAGS.eval_split == "dev":
            eval_examples = processor.get_dev_examples(FLAGS.data_dir)
        else:
            eval_examples = processor.get_test_examples(FLAGS.data_dir)

        tf.logging.info("Num of eval samples: {}".format(len(eval_examples)))

    if FLAGS.do_eval:
        # TPU requires a fixed batch size for all batches, therefore the number
        # of examples must be a multiple of the batch size, or else examples
        # will get dropped. So we pad with fake examples which are ignored
        # later on. These do NOT count towards the metric (all tf.metrics
        # support a per-instance weight, and these get a weight of 0.0).
        #
        # Modified in XL: We also adopt the same mechanism for GPUs.
        while len(eval_examples) % FLAGS.eval_batch_size != 0:
            eval_examples.append(PaddingInputExample())

        eval_file_base = "{}.len-{}.{}.eval.tf_record".format(
            spm_basename, FLAGS.max_seq_length, FLAGS.eval_split)
        eval_file = os.path.join(FLAGS.output_dir, eval_file_base)

        file_based_convert_examples_to_features(eval_examples, label_list,
                                                FLAGS.max_seq_length,
                                                tokenize_fn, eval_file)

        assert len(eval_examples) % FLAGS.eval_batch_size == 0
        eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

        eval_input_fn = file_based_input_fn_builder(
            input_file=eval_file,
            seq_length=FLAGS.max_seq_length,
            is_training=False,
            drop_remainder=True)

        # Filter out all checkpoints in the directory
        steps_and_files = []
        filenames = tf.gfile.ListDirectory(FLAGS.model_dir)

        for filename in filenames:
            if filename.endswith(".index"):
                ckpt_name = filename[:-6]
                cur_filename = os.path.join(FLAGS.model_dir, ckpt_name)
                global_step = int(cur_filename.split("-")[-1])
                tf.logging.info("Add {} to eval list.".format(cur_filename))
                steps_and_files.append([global_step, cur_filename])
        steps_and_files = sorted(steps_and_files, key=lambda x: x[0])

        # Decide whether to evaluate all ckpts
        if not FLAGS.eval_all_ckpt:
            steps_and_files = steps_and_files[-1:]

        eval_results = []
        for global_step, filename in sorted(steps_and_files,
                                            key=lambda x: x[0]):
            ret = estimator.evaluate(input_fn=eval_input_fn,
                                     steps=eval_steps,
                                     checkpoint_path=filename)

            ret["step"] = global_step
            ret["path"] = filename

            eval_results.append(ret)

            tf.logging.info("=" * 80)
            log_str = "Eval result | "
            for key, val in sorted(ret.items(), key=lambda x: x[0]):
                log_str += "{} {} | ".format(key, val)
            tf.logging.info(log_str)

        key_name = "eval_pearsonr" if FLAGS.is_regression else "eval_accuracy"
        eval_results.sort(key=lambda x: x[key_name], reverse=True)

        tf.logging.info("=" * 80)
        log_str = "Best result | "
        for key, val in sorted(eval_results[0].items(), key=lambda x: x[0]):
            log_str += "{} {} | ".format(key, val)
        tf.logging.info(log_str)

    if FLAGS.do_predict:
        eval_file_base = "{}.len-{}.{}.predict.tf_record".format(
            spm_basename, FLAGS.max_seq_length, FLAGS.eval_split)
        eval_file = os.path.join(FLAGS.output_dir, eval_file_base)

        file_based_convert_examples_to_features(eval_examples, label_list,
                                                FLAGS.max_seq_length,
                                                tokenize_fn, eval_file)

        pred_input_fn = file_based_input_fn_builder(
            input_file=eval_file,
            seq_length=FLAGS.max_seq_length,
            is_training=False,
            drop_remainder=False)

        predict_results = []
        with tf.gfile.Open(
                os.path.join(predict_dir, "{}.tsv".format(task_name)),
                "w") as fout:
            fout.write("index\tprediction\n")

            for pred_cnt, result in enumerate(
                    estimator.predict(input_fn=pred_input_fn,
                                      yield_single_examples=True,
                                      checkpoint_path=FLAGS.predict_ckpt)):
                if pred_cnt % 1000 == 0:
                    tf.logging.info(
                        "Predicting submission for example: {}".format(
                            pred_cnt))

                logits = [float(x) for x in result["logits"].flat]
                predict_results.append(logits)

                if len(logits) == 1:
                    label_out = logits[0]
                elif len(logits) == 2:
                    if logits[1] - logits[0] > FLAGS.predict_threshold:
                        label_out = label_list[1]
                    else:
                        label_out = label_list[0]
                elif len(logits) > 2:
                    max_index = np.argmax(np.array(logits, dtype=np.float32))
                    label_out = label_list[max_index]
                else:
                    raise NotImplementedError

                fout.write("{}\t{}\n".format(pred_cnt, label_out))

        predict_json_path = os.path.join(predict_dir,
                                         "{}.logits.json".format(task_name))

        with tf.gfile.Open(predict_json_path, "w") as fp:
            json.dump(predict_results, fp, indent=4)
Example #35
TEST_SOLVERS_DIR
    absolute path to the directory holding the solvers
"""

import sys
import os
import os.path as osp
import unittest
from contextlib import contextmanager
import numpy as np

# Environment variables.

_HERE = osp.realpath(osp.dirname(__file__))
PROJECT_BASE = osp.abspath(osp.join(_HERE, '..', '..'))

TEST_DATA_DIR = osp.join(PROJECT_BASE, 'tests', 'data')
assert osp.isdir(
    TEST_DATA_DIR), "The test data dir does not exist: '%s'" % TEST_DATA_DIR

TEST_SOLVERS_DIR = os.environ.get('TYMPAN_SOLVERDIR')
if not TEST_SOLVERS_DIR:
    for d in ('pluginsd', 'plugins'):
        TEST_SOLVERS_DIR = osp.join(PROJECT_BASE, d)
        if osp.isdir(TEST_SOLVERS_DIR):
            break
    else:
        raise RuntimeError("The test solver plugins dir wasn't found")

TEST_PROBLEM_DIR = osp.join(TEST_DATA_DIR, 'projects-panel')
Example #36
    def load_model(self):
        if isfile(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME)):
            self.model.load(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
            print('[+] Model loaded from ' + SAVE_MODEL_FILENAME)
Example #37
    def save_model(self):
        self.model.save(join(SAVE_DIRECTORY, SAVE_MODEL_FILENAME))
        print('[+] Model trained and saved at ' + SAVE_MODEL_FILENAME)
Example #38
    def load_data(self):
        self.game_folder = path.dirname(__file__)
        self.img_folder = path.join(self.game_folder, 'Images')
        self.map_folder = path.join(self.game_folder, 'maps')
        self.music_folder = path.join(self.game_folder, 'music')
        self.snd_folder = path.join(self.game_folder, 'snd')
        self.player_img_front = pg.image.load(path.join(self.img_folder, 'Main Character Front.png')).convert_alpha()
        self.player_img_back = pg.image.load(path.join(self.img_folder, 'Main Character Back.png')).convert_alpha()
        self.player_img_left = pg.image.load(path.join(self.img_folder, 'Main Character Left.png')).convert_alpha()
        self.player_img_right = pg.image.load(path.join(self.img_folder, 'Main Character Right.png')).convert_alpha()
        self.collect_sound = pg.mixer.Sound(path.join(self.snd_folder, 'Collect_Ore_Chime.wav'))
        self.collect_sound.set_volume(0.3)
        self.place_sound = pg.mixer.Sound(path.join(self.snd_folder, 'Ore_Drop_Place.wav'))
        self.place_sound.set_volume(0.5)
        pg.mixer.music.load(path.join(self.music_folder, 'Stone.ogg'))
Example #39
from conftest import BASE_TEMPLATE_DIR
from elasticapm.conf.constants import TRANSACTION
from tests.utils.compat import middleware_setting

try:
    # Django 1.10+
    from django.urls import reverse
except ImportError:
    from django.core.urlresolvers import reverse


# Testing Django 1.8+ backends
TEMPLATES = (
    {"BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [BASE_TEMPLATE_DIR]},
    {"BACKEND": "django.template.backends.jinja2.Jinja2", "DIRS": [join(BASE_TEMPLATE_DIR, "jinja2")]},
)


def test_template_rendering(instrument, django_elasticapm_client, client):
    with override_settings(
        **middleware_setting(django.VERSION, ["elasticapm.contrib.django.middleware.TracingMiddleware"])
    ):
        client.get(reverse("render-heavy-template"))
        client.get(reverse("render-heavy-template"))
        client.get(reverse("render-heavy-template"))

    transactions = django_elasticapm_client.events[TRANSACTION]

    assert len(transactions) == 3
    spans = django_elasticapm_client.spans_for_transaction(transactions[0])
Example #40
    def update(self):
        # update portion of the game loop
        self.all_sprites.update()
        self.camera.update(self.player)
        if self.next_level:
            self.playing = False
        green_hit = pg.sprite.spritecollide(self.player, self.green, False)
        if green_hit:
            if self.map == self.imgs['stone']:
                if not self.green_collected:
                    status = 'Green ore collected.'
                    print(status)
                    self.collect_sound.play()
                    self.green_collected = True
            elif self.map == self.imgs['lava']:
                if not self.green_placed:
                    status = 'Green ore placed.'
                    print(status)
                    self.place_sound.play()
                    self.green_placed = True

        purple_hit = pg.sprite.spritecollide(self.player, self.purple, False)
        if purple_hit:
            if self.map == self.imgs['stone']:
                if not self.purple_collected:
                    status = 'Purple ore collected.'
                    print(status)
                    self.collect_sound.play()
                    self.purple_collected = True
            elif self.map == self.imgs['lava']:
                if not self.purple_placed:
                    status = 'Purple ore placed.'
                    print(status)
                    self.place_sound.play()
                    self.purple_placed = True

        orange_hit = pg.sprite.spritecollide(self.player, self.orange, False)
        if orange_hit:
            if self.map == self.imgs['stone']:
                if not self.orange_collected:
                    status = 'Orange ore collected.'
                    print(status)
                    self.collect_sound.play()
                    self.orange_collected = True
            elif self.map == self.imgs['lava']:
                if not self.orange_placed:
                    status = 'Orange ore placed.'
                    print(status)
                    self.place_sound.play()
                    self.orange_placed = True

        yellow_hit = pg.sprite.spritecollide(self.player, self.yellow, False)
        if yellow_hit:
            if self.map == self.imgs['stone']:
                if not self.yellow_collected:
                    status = 'Yellow ore collected.'
                    print(status)
                    self.collect_sound.play()
                    self.yellow_collected = True
            elif self.map == self.imgs['lava']:
                if not self.yellow_placed:
                    status = 'Yellow ore placed.'
                    print(status)
                    self.place_sound.play()
                    self.yellow_placed = True

        bossfight = pg.sprite.spritecollide(self.player, self.bossfight, False)
        if bossfight:
            if self.map == self.imgs['stone']:
                if self.yellow_collected and self.orange_collected and self.purple_collected and self.green_collected:
                    status = 'Next Level!'
                    print(status)
                    self.next_level = True
                else:
                    status = 'Collect all the ores!'
                    print(status)
            if self.map == self.imgs['lava']:
                if self.yellow_placed and self.orange_placed and self.purple_placed and self.green_placed:
                    status = 'Boss Fight!'
                    print(status)
                    self.bf = True
                else:
                    status = 'Place all the ores!'
                    print(status)

        if self.next_level:
            self.show_load_screen()
            self.new()
            pg.mixer.music.fadeout(1000)
            pg.mixer.music.load(path.join(self.music_folder, 'Lava.ogg'))
            pg.mixer.music.play(-1, 0.0)
            pg.mixer.music.set_volume(0.25)
            self.next_level = False

        if self.bf:
            exec(open('main_game_loop.py').read())
            self.quit()
Example #41
#!/usr/bin/env python3

import argparse
import os
from os import path
import sys
import json
import paramiko

CONFIG_PATH = path.join(path.dirname(path.realpath(__file__)), '..', '..','config')
SUBMITTY_CONFIG_PATH = path.join(CONFIG_PATH, 'submitty.json')
AUTOGRADING_WORKERS_PATH = path.join(CONFIG_PATH, 'autograding_workers.json')

with open(os.path.join(CONFIG_PATH, 'submitty_users.json')) as open_file:
    OPEN_JSON = json.load(open_file)
DAEMON_UID = OPEN_JSON['daemon_uid']
DAEMON_USER = OPEN_JSON['daemon_user']

with open(SUBMITTY_CONFIG_PATH) as open_file:
    SUBMITTY_CONFIG = json.load(open_file)
INSTALL_DIR = SUBMITTY_CONFIG['submitty_install_dir']

if os.path.isfile(AUTOGRADING_WORKERS_PATH):
  with open(AUTOGRADING_WORKERS_PATH, 'r') as open_file:
    WORKERS = json.load(open_file)
else:
  WORKERS = None

EXIT_CODES = {
  'inactive'  : 0,
  'active'    : 1,
Example #42
import pytest
import numpy as np
import os.path as osp
import sisl
from sisl import Hamiltonian, DynamicalMatrix, DensityMatrix
from sisl import EnergyDensityMatrix
from sisl.io.siesta import *


pytestmark = [pytest.mark.io, pytest.mark.siesta]
_dir = osp.join('sisl', 'io', 'siesta')


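# Round-trip a tight-binding Hamiltonian through a Siesta NetCDF file and check it survives unchanged.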
def test_nc1(sisl_tmp, sisl_system):
    f = sisl_tmp('gr.nc', _dir)
    tb = Hamiltonian(sisl_system.gtb)
    tb.construct([sisl_system.R, sisl_system.t])
    tb.write(ncSileSiesta(f, 'w'))

    ntb = ncSileSiesta(f).read_hamiltonian()

    # Assert they are the same
    assert np.allclose(tb.cell, ntb.cell)
    assert np.allclose(tb.xyz, ntb.xyz)
    tb.finalize()
    assert np.allclose(tb._csr._D[:, 0], ntb._csr._D[:, 0])
    assert sisl_system.g.atom.equal(ntb.atom, R=False)


def test_nc2(sisl_tmp, sisl_system):
    f = sisl_tmp('grS.nc', _dir)
Example #43
vim-flake8 https://github.com/nvie/vim-flake8
vim-pug https://github.com/digitaltoad/vim-pug
lightline.vim https://github.com/itchyny/lightline.vim
vim-abolish https://github.com/tpope/tpope-vim-abolish
mru.vim https://github.com/vim-scripts/mru.vim
rust.vim https://github.com/rust-lang/rust.vim
tabular https://github.com/godlygeek/tabular
tlib_vim https://github.com/tomtom/tlib_vim
vim-airline https://github.com/vim-airline/vim-airline
vim-ingo-library https://github.com/inkarkat/vim-ingo-library
vim-mark https://github.com/inkarkat/vim-mark
""".strip()

GITHUB_ZIP = '%s/archive/master.zip'

SOURCE_DIR = path.join(path.dirname(__file__), 'sources_non_forked')


def download_extract_replace(plugin_name, zip_path, temp_dir, source_dir):
    temp_zip_path = path.join(temp_dir, plugin_name)

    # Download and extract file in temp dir
    req = requests.get(zip_path)
    with open(temp_zip_path, 'wb') as f:
        f.write(req.content)

    zip_f = zipfile.ZipFile(temp_zip_path)
    zip_f.extractall(temp_dir)

    plugin_temp_path = path.join(temp_dir, '%s-master' % plugin_name)
Example #44
def test_AlTi_setup(AlTi):
    """Test the setup of the vacancy database.
    """
    assert not AlTi.collections['vacancy'].steps['Vacancy'].is_setup()

    AlTi.setup()

    dbs = "Vacancy/vacancy.Vacancy/Al14Ti6"

    folders = {
        "__files__": ["compute.pkl", "vuids.pkl", "jobfile.sh", "index.json"],
        "V.1": {
            "__files__": ["INCAR", "PRECALC", "POSCAR"]
        },
        "V.2": {
            "__files__": ["INCAR", "PRECALC", "POSCAR"]
        },
        "V.3": {
            "__files__": ["INCAR", "PRECALC", "POSCAR"]
        },
        "V.4": {
            "__files__": ["INCAR", "PRECALC", "POSCAR"]
        },
        "V.5": {
            "__files__": ["INCAR", "PRECALC", "POSCAR"]
        },
        "V.6": {
            "__files__": ["INCAR", "PRECALC", "POSCAR"]
        },
        "V.7": {
            "__files__": ["INCAR", "PRECALC", "POSCAR"]
        }
    }

    from matdb.utility import compare_tree
    dbfolder = path.join(AlTi.root, dbs)
    compare_tree(dbfolder, folders)

    assert AlTi.collections['vacancy'].steps['Vacancy'].is_setup()

    # test the vuid and index creation for the entire database.
    assert path.isfile(
        path.join(AlTi.root, "Vacancy/vacancy.Vacancy/vuids.pkl"))
    assert path.isfile(
        path.join(AlTi.root, "Vacancy/vacancy.Vacancy/index.json"))

    vac = AlTi.collections['vacancy'].steps['Vacancy']
    assert len(vac.index) == 50
    assert len(vac.vuids) == 50
    assert vac.ready()

    # We need to fake some VASP output so that we can cleanup the
    # database and get the rset

    src = relpath(
        "./tests/data/Pd/complete/OUTCAR__DynMatrix_phonon_Pd_dim-27.00")

    dbfolder = path.join(AlTi.root, dbs)
    for j in range(1, 51):
        vdir = path.join(dbfolder, "V.{}".format(j))
        # Link the canned OUTCAR in, and mirror each POSCAR as its CONTCAR.
        symlink(src, path.join(vdir, "OUTCAR"))
        symlink(path.join(vdir, "POSCAR"), path.join(vdir, "CONTCAR"))

    assert len(vac.atoms_paths()) == 50
    assert len(vac.rset()) == 50
Example #45
        UserAuthorizationResource
    )
    from moov_backend.api.v1.views.driver import (
        DriverResource, DriverConfirmRideResouce
    )
    from moov_backend.api.v1.views.profile_page import BasicInfoResource
    from moov_backend.api.v1.views.transaction import (
        TransactionResource, AllTransactionsResource
    )
    from moov_backend.api.v1.views.free_ride import FreeRideResource
    from moov_backend.api.v1.views.notification import NotificationResource
    from moov_backend.api.v1.views.forgot_password import ForgotPasswordResource
    from moov_backend.api.v1.views.school import SchoolResource
    

dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)


def create_flask_app(environment):
    app = Flask(__name__, instance_relative_config=True, static_folder=None, template_folder='./api/emails/templates')
    app.config.from_object(app_configuration[environment])
    app.config['BUNDLE_ERRORS'] = True

    try:
        from api import models
    except ImportError:
        from moov_backend.api import models

    # to allow cross origin resource sharing
    CORS(app)
Example #46
 def __init__(self, opts):
     super().__init__()
     self.options = opts
     self.list_of_contents = []
     self.tempdir = (
         "/tmp"
         if platform.system() == "Darwin"
         else tempfile.gettempdir()
     )
     self.data_dir = "{0}{1}{2}".format(
         abspath(normpath(self.options.source)),
         sep,
         "data" + sep,
     )
     self.content_dir = "{0}{1}{2}".format(
         abspath(normpath(self.options.source)),
         sep,
         "content" + sep + "en" + sep,
     )
     self.data_integrations_dir = (
         join(self.data_dir, "integrations") + sep
     )
     self.data_service_checks_dir = (
         join(self.data_dir, "service_checks") + sep
     )
     self.content_integrations_dir = (
         join(self.content_dir, "integrations") + sep
     )
     self.extract_dir = "{0}".format(
         join(self.tempdir, "extracted") + sep
     )
     self.integration_datafile = "{0}{1}{2}".format(
         abspath(normpath(self.options.source)),
         sep,
         "integrations.json",
     )
     self.regex_h1 = re.compile(
         r"^#{1}(?!#)(.*)", re.MULTILINE
     )
     self.regex_h1_replace = re.compile(
         r"^(#{1})(?!#)(.*)", re.MULTILINE
     )
     self.regex_metrics = re.compile(
         r"(#{3} Metrics\n)([\s\S]*this integration.|[\s\S]*this check.)([\s\S]*)(#{3} Events\n)",
         re.DOTALL,
     )
     self.regex_service_check = re.compile(
         r"(#{3} Service Checks\n)([\s\S]*does not include any service checks at this time.)([\s\S]*)(#{2} Troubleshooting\n)",
         re.DOTALL,
     )
     self.regex_fm = re.compile(
         r"(?:-{3})(.*?)(?:-{3})(.*)", re.DOTALL
     )
     self.regex_source = re.compile(
         r"(\S*FROM_DISPLAY_NAME\s*=\s*\{)(.*?)\}",
         re.DOTALL,
     )
     self.datafile_json = []
     self.pool_size = 5
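     # Per-integration rules: create a standalone page, merge into another integration's page, or discard.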
     self.integration_mutations = OrderedDict(
         {
             "hdfs": {
                 "action": "create",
                 "target": "hdfs",
                 "remove_header": False,
                 "fm": {
                     "is_public": True,
                     "kind": "integration",
                     "integration_title": "Hdfs",
                     "short_description": "Track cluster disk usage, volume failures, dead DataNodes, and more.",
                 },
             },
             "mesos": {
                 "action": "create",
                 "target": "mesos",
                 "remove_header": False,
                 "fm": {
                     "aliases": [
                         "/integrations/mesos_master/",
                         "/integrations/mesos_slave/",
                     ],
                     "is_public": True,
                     "kind": "integration",
                     "integration_title": "Mesos",
                     "short_description": "Track cluster resource usage, master and slave counts, tasks statuses, and more.",
                 },
             },
             "activemq_xml": {
                 "action": "merge",
                 "target": "activemq",
                 "remove_header": False,
             },
             "cassandra_nodetool": {
                 "action": "merge",
                 "target": "cassandra",
                 "remove_header": False,
             },
             "gitlab_runner": {
                 "action": "merge",
                 "target": "gitlab",
                 "remove_header": False,
             },
             "hdfs_datanode": {
                 "action": "merge",
                 "target": "hdfs",
                 "remove_header": False,
             },
             "hdfs_namenode": {
                 "action": "merge",
                 "target": "hdfs",
                 "remove_header": False,
             },
             "mesos_master": {
                 "action": "merge",
                 "target": "mesos",
                 "remove_header": True,
             },
             "mesos_slave": {
                 "action": "merge",
                 "target": "mesos",
                 "remove_header": False,
             },
             "kafka_consumer": {
                 "action": "merge",
                 "target": "kafka",
                 "remove_header": False,
             },
             "kube_dns": {
                 "action": "discard",
                 "target": "none",
                 "remove_header": False,
             },
             "kube_proxy": {
                 "action": "discard",
                 "target": "none",
                 "remove_header": False,
             },
             "kubernetes_state": {
                 "action": "discard",
                 "target": "none",
                 "remove_header": False,
             },
             "system_core": {
                 "action": "discard",
                 "target": "system",
                 "remove_header": False,
             },
             "system_swap": {
                 "action": "discard",
                 "target": "system",
                 "remove_header": False,
             },
             "hbase_regionserver": {
                 "action": "merge",
                 "target": "hbase_master",
                 "remove_header": False,
             },
         }
     )
     self.initial_integration_files = glob.glob(
         "{}*.md".format(self.content_integrations_dir)
     )
     makedirs(self.data_integrations_dir, exist_ok=True)
     makedirs(
         self.data_service_checks_dir, exist_ok=True
     )
     makedirs(
         self.content_integrations_dir, exist_ok=True
     )
Example #47
setup(name='AccessControl',
      version='3.0.2',
      url='http://pypi.python.org/pypi/AccessControl',
      license='ZPL 2.1',
      description="Security framework for Zope2.",
      author='Zope Foundation and Contributors',
      author_email='*****@*****.**',
      long_description=open('README.txt').read() + '\n' +
                       open('CHANGES.txt').read(),
      packages=find_packages('src'),
      package_dir={'': 'src'},
      ext_modules=[Extension(
            name='AccessControl.cAccessControl',
            include_dirs=['include', 'src'],
            sources=[join('src', 'AccessControl', 'cAccessControl.c')],
            depends=[join('include', 'ExtensionClass', 'ExtensionClass.h'),
                     join('include', 'Acquisition', 'Acquisition.h')]),
      ],
      install_requires=[
        'Acquisition',
        'DateTime',  # optional dependency of RestrictedPython
        'ExtensionClass',
        'Persistence',
        'Record',
        'RestrictedPython',
        'transaction',
        'zExceptions',
        'ZODB3',
        'zope.component',
        'zope.configuration',
Example #48
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
from io import open
from pretty_format_json import *

here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()

gh_repo = "https://github.com/weaming/pretty-format-json"

setup(
    name="pretty-format-json",  # Required
    version=version,  # Required
    description="Pretty print JSON containing Python-style comments and string literals.",  # Required
    long_description=long_description,  # Optional
    long_description_content_type="text/markdown",  # Optional (see note above)
    url=gh_repo,  # Optional
    author="weaming",  # Optional
    author_email="*****@*****.**",  # Optional
    packages=find_packages(exclude=["contrib", "docs", "tests"]),  # Required
    keywords="json format",  # Optional
    install_requires=["oyaml", "data-process>=0.3.*"],  # Optional
    entry_points={  # Optional
        "console_scripts": [
            "pretty_format_json=pretty_format_json.format:main",
            "yaml_json=pretty_format_json.yaml_json:main",
            "csv_json=pretty_format_json.csv_json:main",
        ]
Example #49
import setuptools

# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), "r") as readme_file:
    long_description = readme_file.read()

setuptools.setup(
    name='minerva-cli',
    version='0.0.3',
    author="Juha Ruokonen",
    description='Minerva Command Line Interface',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/labsyspharm/minerva-cli",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.7',
    install_requires=[
        "boto3>=1.14.44",
        "requests",
        "tabulate",
        "minerva-lib==0.0.5",
        "tqdm"
    ],
    dependency_links=[
Example #50
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""

import sys
import imp
from os.path import dirname, join, abspath, exists
from optparse import OptionParser

# Figure out where we're installed
BIN_DIR = dirname(abspath(__file__))
ROOT_DIR = dirname(BIN_DIR)
CONF_DIR = join(ROOT_DIR, 'conf')
default_relayrules = join(CONF_DIR, 'relay-rules.conf')

# Make sure that carbon's 'lib' dir is in the $PYTHONPATH if we're running from
# source.
LIB_DIR = join(ROOT_DIR, 'lib')
sys.path.insert(0, LIB_DIR)

try:
  from twisted.internet import epollreactor
  epollreactor.install()
except ImportError:
  pass

from twisted.internet import stdio, reactor, defer
from twisted.protocols.basic import LineReceiver
Example #51
try:
    QEMU = environ["MSP430_QEMU"]
except KeyError:
    print("""MSP430_QEMU environment variable should point to Qemu binaries. \
Ex. /home/user/qemu/bin/qemu-system-msp430.
Qemu MSP430 available on github: \
https://github.com/dimas3452/qemu/tree/target-msp430""")
    raise

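# Assemble the test rig: an MSP430 RSP debug client, Qemu and gdbserver
# backends, the MSP430 cross-compiler, and the host gcc used as the oracle.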
c2t_cfg = C2TConfig(
    rsp_target=DebugClient(march="msp430f5x",
                           new_rsp=get_new_rsp(regs=list("r%d" % i
                                                         for i in range(16)),
                                               pc="r0",
                                               regsize=16),
                           test_timeout=30.,
                           sp="r1"),
    qemu=DebugServer(
        Run(executable=QEMU,
            args="-M msp430f5x -S -gdb tcp:localhost:{port} -nographic "
            "-kernel {bin}")),
    gdbserver=DebugServer(
        Run(executable="/usr/bin/gdbserver", args="localhost:{port} {bin}")),
    target_compiler=TestBuilder(
        Run(executable=join(TOOLCHAIN, "bin", "msp430-elf-gcc"),
            args="-I{0} -L{0} -mmcu=msp430f5xx_6xxgeneric -g -O0"
            " -o {{bin}} {{src}}".format(join(SUPPORT, "include")))),
    oracle_compiler=TestBuilder(
        Run(executable="/usr/bin/gcc", args="-g -O0 {src} -o {bin} -no-pie")))
Example #52
# This kludge is necessary for horrible reasons: see comment below and
# http://stackoverflow.com/q/19919905/447288
#
class build_ext(_build_ext):
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        __builtins__.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())


here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='pyzcm',

    # Versions should comply with PEP440.  For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='1.0.0',
    description='Python Zcash CPU/GPU miner',
    long_description=long_description,

    # The project's main homepage.
    url='https://github.com/honzik666/pyzcm',
Example #53
    def test(self,
             params,
             render_mode=DEFAULT_RENDER_MODE,
             release=False,
             faster=False,
             no_progress=False,
             self_test=False):
        suites = OrderedDict([
            ("tidy", {
                "kwargs": {
                    "faster": faster,
                    "no_progress": no_progress,
                    "self_test": self_test
                },
                "include_arg": "include"
            }),
            ("wpt", {
                "kwargs": {
                    "release": release
                },
                "paths": [
                    path.abspath(
                        path.join("tests", "wpt", "web-platform-tests")),
                    path.abspath(path.join("tests", "wpt", "mozilla"))
                ],
                "include_arg":
                "include"
            }),
            ("css", {
                "kwargs": {
                    "release": release
                },
                "paths":
                [path.abspath(path.join("tests", "wpt", "css-tests"))],
                "include_arg": "include"
            }),
            ("unit", {
                "kwargs": {},
                "paths": [path.abspath(path.join("tests", "unit"))],
                "include_arg": "test_name"
            }),
            ("compiletest", {
                "kwargs": {
                    "release": release
                },
                "paths": [path.abspath(path.join("tests", "compiletest"))],
                "include_arg": "test_name"
            })
        ])

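        # Map each suite's path prefixes back to its name so that path
        # arguments on the command line can be routed to the right suite.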
        suites_by_prefix = {
            path: k
            for k, v in suites.iteritems() if "paths" in v
            for path in v["paths"]
        }

        selected_suites = OrderedDict()

        if params is None:
            params = suites.keys()

        for arg in params:
            found = False
            if arg in suites and arg not in selected_suites:
                selected_suites[arg] = []
                found = True

            elif os.path.exists(path.abspath(arg)):
                abs_path = path.abspath(arg)
                for prefix, suite in suites_by_prefix.iteritems():
                    if abs_path.startswith(prefix):
                        if suite not in selected_suites:
                            selected_suites[suite] = []
                        selected_suites[suite].append(arg)
                        found = True
                        break

            if not found:
                print("%s is not a valid test path or suite name" % arg)
                return 1

        test_start = time()
        for suite, tests in selected_suites.iteritems():
            props = suites[suite]
            kwargs = props.get("kwargs", {})
            if tests:
                kwargs[props["include_arg"]] = tests

            Registrar.dispatch("test-%s" % suite,
                               context=self.context,
                               **kwargs)

        elapsed = time() - test_start

        print("Tests completed in %0.2fs" % elapsed)
Example #54
        xyz2 = cpv.add(xyz2, diff)

    xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2)

    obj = [cgo.CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + [
        cgo.CONE
    ] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + [1.0, 0.0]
    return obj


dirpath = tempfile.mkdtemp()
zip_dir = 'out.zip'
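# Extract the bundled session archive into a scratch directory for PyMOL to load.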
with zipfile.ZipFile(zip_dir) as hs_zip:
    hs_zip.extractall(dirpath)

cmd.load(join(dirpath, "protein.pdb"), "protein")
cmd.show("cartoon", "protein")

if dirpath:
    f = join(dirpath, "label_threshold_10.mol2")
else:
    f = "label_threshold_10.mol2"

cmd.load(f, 'label_threshold_10')
cmd.hide('everything', 'label_threshold_10')
cmd.label("label_threshold_10", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)

if dirpath:
    f = join(dirpath, "label_threshold_14.mol2")
Example #55
    def test_css(self, **kwargs):
        self.ensure_bootstrapped()
        run_file = path.abspath(path.join("tests", "wpt", "run_css.py"))
        return self.wptrunner(run_file, **kwargs)
Example #56
def main(args=None):
    # read the input ontology file into a graph
    parser = argparse.ArgumentParser()
    overarching_group = parser.add_mutually_exclusive_group()
    inputs_group = overarching_group.add_mutually_exclusive_group()

    inputs_group.add_argument(
        "-i",
        "--inputfile",
        help="The RDF ontology file you wish to generate HTML for. "
        "Must be in either Turtle, RDF/XML, JSON-LD or N-Triples formats indicated by the file type extensions:"
        "{} respectively.".format(", ".join(RDF_FILE_EXTENSIONS)),
        type=lambda x: is_valid_file(parser, x),
    )

    inputs_group.add_argument(
        "-u",
        "--url",
        help="The RDF ontology you wish to generate HTML for, online. "
        "Must be an absolute URL that can be resolved to RDF, preferably via Content Negotiation.",
    )

    overarching_group.add_argument(
        "-lp",
        "--listprofiles",
        help=
        "This flag, if used, must be the only flag supplied. It will cause the program to list all the ontology"
        " documentation profiles that it supports, indicating both their URI and their short token for use with the"
        " -p (--profile) flag when creating HTML documentation",
        action="store_true",
    )

    parser.add_argument(
        "-p",
        "--profile",
        help=
        "A profile - a specified information model - for an ontology. You can indicate this via the profile's URI"
        "or its token. The list of profiles - URIs and their corresponding tokens - supported by pyLODE, can be "
        "found by running the program with the flag -lp or --listprofiles.",
        default="ontdoc",
    )

    parser.add_argument(
        "-c",
        "--css",
        help=
        "Whether (true) or not (false) to copy the default CSS file to the output directory.",
        choices=["true", "false"],
        default="false",
    )

    parser.add_argument(
        "-xc",
        "--excludecss",
        help=
        "Whether (true) or not (false) to exclude the standard pyLODE CSS content from an HTML file output.",
        choices=["true", "false"],
        default="false",
    )

    parser.add_argument(
        "-q",
        "--getcuriesonline",
        help=
        "Whether (true) or not (false) to look up CURIEs for unknown namespaces using http://prefix.cc.",
        choices=["true", "false"],
        default="false",
    )

    parser.add_argument(
        "-o",
        "--outputfile",
        help=
        "A name you wish to assign to the output file. Will be postfixed with .html/.md if not already added. If "
        "no output file is given, output will be printed to screen",
        default=None)

    parser.add_argument(
        "-f",
        "--outputformat",
        help="The output format of the documentation.",
        choices=["html", "md"],
        default="html",
    )

    parser.add_argument("-v",
                        "--version",
                        help="The version of this copy of pyLODE.",
                        action="store_true")

    args = parser.parse_args()

    if args.listprofiles:
        print(MakeDocco.list_profiles())
        exit()
    elif args.version:
        from pylode import __version__
        print(__version__)
        exit()
    elif args.inputfile or args.url:
        if args.excludecss == "true":
            exclude_css = True
        else:
            exclude_css = False

        # args are present so getting RDF from input file or uri into an rdflib Graph
        if args.inputfile:
            h = MakeDocco(input_data_file=args.inputfile,
                          outputformat=args.outputformat,
                          profile=args.profile,
                          exclude_css=exclude_css)
        elif args.url:
            h = MakeDocco(input_uri=args.url,
                          outputformat=args.outputformat,
                          profile=args.profile,
                          exclude_css=exclude_css)
        else:
            # we have neither an input file nor a URL supplied
            parser.error(
                "Either an input file or a url is required to access the ontology's RDF"
            )

        if args.profile:
            if not MakeDocco.is_supported_profile(args.profile):
                parser.error(
                    "The profile you requested, '{}', is not supported. "
                    "Please choose from those given by List Profiles (-lp)".
                    format(args.profile))
    else:
        parser.error(
            "An ontology source (-i/--inputfile or -u/--url) is required")

    # here we have a parsed graph from either a local file or a URI

    # set up output directories and resources
    main_dir = path.dirname(path.realpath(__file__))
    style_dir = path.join(main_dir, "style")
    if h.publication_dir == "":
        h.publication_dir = "."
    os.makedirs(h.publication_dir, exist_ok=True)

    # include CSS
    if args.css == "true":
        shutil.copyfile(path.join(style_dir, "pylode.css"),
                        path.join(h.publication_dir, "style.css"))

    if args.outputfile is not None:
        output_file_name = (args.outputfile if args.outputfile else "doc.html")

        if args.outputformat == "html":
            if not output_file_name.endswith(".html"):
                output_file_name += ".html"
        elif args.outputformat == "md":
            if not output_file_name.endswith(".md"):
                output_file_name += ".md"

        # generate the HTML doc
        h.document(destination=output_file_name)

        print("Finished. {} documentation in {}".format(
            args.profile, output_file_name))
    else:
        print(h.document())
Example #57
def all_cnf_files(folder):
    return [
        join(folder, f) for f in listdir(folder)
        if isfile(join(folder, f)) and f.endswith(".cnf")
    ]
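
# Usage sketch (the "benchmarks" folder name is hypothetical):
#   cnf_paths = all_cnf_files("benchmarks")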
Example #58
    def update_css(self, **kwargs):
        self.ensure_bootstrapped()
        run_file = path.abspath(path.join("tests", "wpt", "update_css.py"))
        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)
        return run_globals["update_tests"](**kwargs)
Example #59
        # self.pytest_args = '--pyargs iminuit'
        # self.pytest_args = ['--strict', '--verbose', '--tb=long', 'tests']
        self.pytest_args = ''

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        import shlex
        errno = pytest.main(shlex.split(self.pytest_args))
        del sys.exitfunc  # needed to avoid a bug caused by IPython's exitfunc
        sys.exit(errno)


# Static linking
cwd = dirname(__file__)
minuit_src = glob(join(cwd, 'Minuit/src/*.cxx'))
minuit_header = join(cwd, 'Minuit/inc')

# We follow the recommendation how to distribute Cython modules:
# http://docs.cython.org/src/reference/compilation.html#distributing-cython-modules
try:
    from Cython.Build import cythonize

    USE_CYTHON = True  # TODO: add command line option?
except ImportError:
    print('Cython is not available ... using pre-generated C file.')
    USE_CYTHON = False

ext = '.pyx' if USE_CYTHON else '.cpp'

libiminuit = Extension(
Example #60
def all_dimacs_files(folder):
    return [
        join(folder, f) for f in listdir(folder)
        if isfile(join(folder, f)) and f.endswith(".dimacs")
    ]