Example #1
import os
import sys
from os.path import dirname


def pytest_sessionstart(session):
    """
    Set up the test environment.

    Sets DJANGO_SETTINGS_MODULE and sets up a test database.

    """
    sys.path.append(dirname(dirname(__file__)))
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "treeherder.settings")
    from django.conf import settings
    from django.test.runner import DiscoverRunner

    # we don't actually let Django run the tests, but we need to use some
    # methods of its runner for setup/teardown of dbs and some other things
    session.django_runner = DiscoverRunner()
    # this provides templates-rendered debugging info and locmem mail storage
    session.django_runner.setup_test_environment()

    settings.DATABASES["default"]["TEST_NAME"] = "test_treeherder"

    # this makes celery calls synchronous, useful for unit testing
    settings.CELERY_ALWAYS_EAGER = True
    settings.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True

    # Don't attempt to submit bug associations to Elasticsearch.
    settings.MIRROR_CLASSIFICATIONS = False

    # Reconfigure pulse to operate on default vhost of rabbitmq
    settings.PULSE_URI = settings.BROKER_URL
    settings.PULSE_EXCHANGE_NAMESPACE = 'test'
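A natural companion hook, sketched here as an assumption rather than taken from the original project, would tear the environment back down at session end (DiscoverRunner provides teardown_test_environment()):

def pytest_sessionfinish(session, exitstatus):
    # Hypothetical counterpart to pytest_sessionstart() above (not from the
    # original source): undo the test-environment setup.
    runner = getattr(session, "django_runner", None)
    if runner is not None:
        runner.teardown_test_environment()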
Example #2
    def onOpenImageDir(self):
        self.cbar.clearContours()
        self.viewer.clearRects()
        self.viewer.clearPolygons()

        idir = self.imageDir.text()
        ofile = self.dataFile.text()
        if isdir(idir):
            path = dirname(idir)
        elif isfile(ofile):
            path = dirname(ofile)
        else:
            path = expanduser("~")

        # TODO use getOpenFileNames instead
        idir = QFileDialog.getExistingDirectory(self, "Select an image directory", path)
        # cancel button
        if not idir:
            return

        self.imageDir.setText(idir)

        scanner = FileScanner(self.structType.currentText(), idir)
        self._files = scanner()

        if not self._files:
            QMessageBox.warning(self, "Error", "No files found")
            return

        try:
            self._dimUpdate(self._files[0].file)
            self._initialImage()
        except IOError:
            # Ignore an unreadable first image; leave the viewer empty.
            pass
Example #3
import unittest


def desitemplate_test_suite():
    """Returns unittest.TestSuite of desitemplate tests"""
    from os.path import dirname
    desitemplate_dir = dirname(dirname(__file__))
    # print(desitemplate_dir)
    return unittest.defaultTestLoader.discover(desitemplate_dir,
        top_level_dir=dirname(desitemplate_dir))
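If run as a script, a minimal driver (illustrative, not part of the original module) could execute the discovered suite:

if __name__ == '__main__':
    import unittest
    unittest.TextTestRunner(verbosity=2).run(desitemplate_test_suite())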
Example #4
def download_file(url, name, root_destination='~/data/', zipfile=False,
                  replace=False):
    """Download a file from dropbox, google drive, or a URL.

    This will download a file and store it in a '~/data/' folder,
    creating directories if need be. It will also work for zip
    files, in which case it will unzip all of the files to the
    desired location.

    Parameters
    ----------
    url : string
        The url of the file to download. This may be a dropbox
        or google drive "share link", or a regular URL. If it
        is a share link, then it should point to a single file and
        not a folder. To download folders, zip them first.
    name : string
        The name / path of the file for the downloaded file, or
        the folder to zip the data into if the file is a zipfile.
    root_destination : string
        The root folder where data will be downloaded.
    zipfile : bool
        Whether the URL points to a zip file. If yes, it will be
        unzipped to root_destination + name.
    replace : bool
        If True and the URL points to a single file, overwrite the
        old file if possible.
    """
    # Make sure we have directories to dump files
    home = op.expanduser('~')
    tmpfile = home + '/tmp/tmp'
    if not op.isdir(home + '/data/'):
        print('Creating data folder...')
        os.makedirs(home + '/data/')

    if not op.isdir(home + '/tmp/'):
        print('Creating tmp folder...')
        os.makedirs(home + '/tmp/')

    download_path = _convert_url_to_downloadable(url)

    # Now save to the new destination
    out_path = root_destination.replace('~', home) + name
    if not op.isdir(op.dirname(out_path)):
        print('Creating path {} for output data'.format(op.dirname(out_path)))
        os.makedirs(op.dirname(out_path))

    if zipfile is True:
        _fetch_file(download_path, tmpfile)
        myzip = ZipFile(tmpfile)
        myzip.extractall(out_path)
        os.remove(tmpfile)
    else:
        if len(name) == 0:
            raise ValueError('Cannot overwrite the root data directory')
        if replace is False and op.exists(out_path):
            raise ValueError('Path {} exists, use `replace=True` to '
                             'overwrite'.format(out_path))
        _fetch_file(download_path, out_path)
    print('Successfully moved file to {}'.format(out_path))
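A hedged usage sketch; the URL and names below are illustrative placeholders, not from the original source:

# Unzip a remote archive into ~/data/my_dataset/.
download_file('https://example.com/my_dataset.zip', 'my_dataset/', zipfile=True)

# Fetch a single file, overwriting any previous copy.
download_file('https://example.com/labels.csv', 'labels.csv', replace=True)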
Example #5
File: html.py Project: 89sos98/main
    def handle_page(self, pagename, ctx, templatename='page.html',
                    outfilename=None, event_arg=None):
        ctx['current_page_name'] = pagename
        sidebarfile = self.config.html_sidebars.get(pagename)
        if sidebarfile:
            ctx['customsidebar'] = sidebarfile

        if not outfilename:
            outfilename = path.join(self.outdir,
                                    os_path(pagename) + self.out_suffix)

        self.app.emit('html-page-context', pagename, templatename,
                      ctx, event_arg)

        ensuredir(path.dirname(outfilename))
        f = open(outfilename, 'wb')
        try:
            self.implementation.dump(ctx, f, 2)
        finally:
            f.close()

        # if there is a source file, copy the source file for the
        # "show source" link
        if ctx.get('sourcename'):
            source_name = path.join(self.outdir, '_sources',
                                    os_path(ctx['sourcename']))
            ensuredir(path.dirname(source_name))
            copyfile(self.env.doc2path(pagename), source_name)
Example #6
def find_module(mod):
    '''find the .ko file for kernel module named mod.
    Searches the $RTE_SDK/$RTE_TARGET directory, the kernel
    modules directory and finally under the parent directory of
    the script '''
    # check $RTE_SDK/$RTE_TARGET directory
    if 'RTE_SDK' in os.environ and 'RTE_TARGET' in os.environ:
        path = "%s/%s/kmod/%s.ko" % (os.environ['RTE_SDK'],\
                                     os.environ['RTE_TARGET'], mod)
        if exists(path):
            return path

    # check using depmod
    try:
        depmod_out = check_output(["modinfo", "-n", mod], \
                                  stderr=subprocess.STDOUT).lower()
        if "error" not in depmod_out:
            path = depmod_out.strip()
            if exists(path):
                return path
    except subprocess.CalledProcessError:  # if modinfo can't find the module, it fails, so continue
        pass

    # check for a copy based off current path
    tools_dir = dirname(abspath(sys.argv[0]))
    if (tools_dir.endswith("tools")):
        base_dir = dirname(tools_dir)
        find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
        if len(find_out) > 0: #something matched
            path = find_out.splitlines()[0]
            if exists(path):
                return path
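For illustration, a hypothetical call site (igb_uio is the DPDK driver this helper appears intended to locate):

ko_path = find_module('igb_uio')
if ko_path:
    print('Found kernel module at %s' % ko_path)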
Example #7
from os import makedirs
from os.path import dirname, exists, join


def make_image_path(outdoc, src):
    filename = outdoc.name
    basedir = join(dirname(filename), dirname(src))
    if not exists(basedir):
        makedirs(basedir)
    path = join(dirname(filename), src)
    return path
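A quick behavioral sketch, assuming outdoc is any object with a .name attribute (the stand-in class below is not from the original code):

class _FakeDoc(object):
    name = 'out/report.html'

# Creates out/figures/ if needed and returns 'out/figures/plot.png'.
print(make_image_path(_FakeDoc(), 'figures/plot.png'))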
Example #8
def get_grammar(build_mode: bool):
    data = []
    grammar_file = path.join(path.dirname(path.dirname(__file__)), 'grammar.xml')
    if not path.isfile(grammar_file):
        if build_mode:
            raise FileNotFoundError("Grammar file %s not found" % grammar_file)
        else:
            print("Grammar page is NOT included. No file ", grammar_file)
            return []

    grammar_xml = ElementTree(file=grammar_file).getroot()
    for grammar_set in grammar_xml:
        result_set = {
            'file-name': grammar_set.attrib['file-name'],
            'content': []
        }
        for node in grammar_set:
            if node.tag == 'doc':
                result_set['content'].append({
                    'type': 'comment',
                    'content': customized_markdown(node.text)
                })
            elif node.tag == 'item':
                result_set['content'].append(_get_item_content(node))
        data.append(result_set)
    return data
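A hedged usage note: outside build mode the function degrades gracefully, so a hypothetical caller can do:

grammar_data = get_grammar(build_mode=False)  # returns [] if grammar.xml is missing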
Example #9
    def load_names(cls, db=None):
        """Populate the locale_label table."""
        from os.path import dirname, join
        db = db or cls.default_db
        fname = join(dirname(dirname(__file__)),
                     'nlp/data/language-names.json')
        with open(fname) as f:
            names = json.load(f)
        locales = {x[0] for x in names}.union({x[1] for x in names})
        Locale.bulk_get_or_create(locales, db)
        db.flush()
        Locale.reset_cache()
        db.execute("lock table %s in exclusive mode" % cls.__table__.name)
        existing = set(db.query(
            cls.named_locale_id, cls.locale_id_of_label).all())
        c = Locale.locale_collection
        values = []
        for (lcode, tcode, name) in names:
            l, t = c[lcode], c[tcode]
            if (l, t) not in existing:
                values.append({
                    'named_locale_id': l,
                    'locale_id_of_label': t,
                    'name': name})
        if values:
            db.execute(cls.__table__.insert().values(values))
            db.flush()
Example #10
def start():
    """Start the CherryPy application server."""

    setupdir = dirname(dirname(__file__))
    curdir = os.getcwd()

    # First look on the command line for a desired config file,
    # if it's not on the command line, then look for 'setup.py'
    # in the current directory. If there, load configuration
    # from a file called 'dev.cfg'. If it's not there, the project
    # is probably installed and we'll look first for a file called
    # 'prod.cfg' in the current directory and then for a default
    # config file called 'default.cfg' packaged in the egg.
    if len(sys.argv) > 1:
        configfile = sys.argv[1]
    elif exists(join(setupdir, "setup.py")):
        configfile = join(setupdir, "dev.cfg")
    elif exists(join(curdir, "prod.cfg")):
        configfile = join(curdir, "prod.cfg")
    else:
        try:
            configfile = pkg_resources.resource_filename(
                pkg_resources.Requirement.parse("gordonweb"),
                "config/default.cfg")
        except pkg_resources.DistributionNotFound:
            raise ConfigurationError("Could not find default configuration.")

    turbogears.update_config(configfile=configfile,
        modulename="gordonweb.config")

    from gordonweb.controllers import Root

    turbogears.start_server(Root())
Example #11
    def test_get_set_sample_network(self):
        ndex = nc.Ndex(host= tt.TESTSERVER, username=tt.testUser1, password=tt.testUserpasswd, debug=True)

        with open(path.join(path.abspath(path.dirname(__file__)),example_network_1), 'r') as file_handler:
            network_in_cx = file_handler.read()

        # test save_cx_stream_as_new_network
        test_network_1_uri = ndex.save_cx_stream_as_new_network(network_in_cx)
        self.assertTrue(test_network_1_uri.startswith(tt.TESTSERVER + ndex_network_resource))

        network_UUID = str(test_network_1_uri.split("/")[-1])

        time.sleep(20)

        with open(path.join(path.abspath(path.dirname(__file__)),sample_network), 'r') as file_handler:
            sample_cx = file_handler.read()
        ndex.set_network_sample(network_UUID, sample_cx)

        time.sleep(3)
        # get network summary with the new properties
        sample_from_server = ndex.get_sample_network(network_UUID)
        putJson = json.loads(sample_cx)
        self.assertTrue(len(putJson) == len(sample_from_server))


        # test delete_network
        del_network_return = ndex.delete_network(network_UUID)
        self.assertTrue(del_network_return == '')
Example #12
import json
from glob import glob
from os import chdir
from os.path import abspath, dirname

import pystache


def main():
    chdir(dirname(dirname(abspath(__file__))))

    books = []
    for _file in glob('meta/*.json'):
        with open(_file) as _f:
            meta = json.load(_f)
            books.append({
                'slug': _file[5:-5],
                'title': 'Unnamed Book',
                'author': 'Anonymous',
            })
            for _meta in meta:
                if _meta['key'] == 'dc.creator':
                    books[-1]['author'] = _meta['value']
                if _meta['key'] == 'dc.title':
                    books[-1]['title'] = _meta['value']
                if _meta['key'] == 'dc.language' and _meta['value'][0:2] == 'de':
                    books[-1]['language'] = 'german'

    # render the template
    with open("src/index.mustache") as _f:
        template = _f.read()
    result = pystache.render(template.encode("utf-8"), {
        'books': books,
    })
    with open("docs/index.html", "w") as _f:
        _f.write(result)
Example #13
    def test_permissions_warnings(self):
        """Make sure letsencrypt-auto properly warns about permissions problems."""
        # This test assumes that only the parent of the directory containing
        # letsencrypt-auto (usually /tmp) may have permissions letsencrypt-auto
        # considers insecure.
        with temp_paths() as (le_auto_path, venv_dir):
            le_auto_path = abspath(le_auto_path)
            le_auto_dir = dirname(le_auto_path)
            le_auto_dir_parent = dirname(le_auto_dir)
            install_le_auto(self.NEW_LE_AUTO, le_auto_path)

            run_letsencrypt_auto = partial(
                run_le_auto, le_auto_path, venv_dir,
                le_auto_args_str='--install-only --no-self-upgrade',
                PIP_FIND_LINKS=join(tests_dir(), 'fake-letsencrypt', 'dist'))
            # Run letsencrypt-auto once with current permissions to avoid
            # potential problems when the script tries to write to temporary
            # directories.
            run_letsencrypt_auto()

            le_auto_dir_mode = stat(le_auto_dir).st_mode
            le_auto_dir_parent_mode = S_IMODE(stat(le_auto_dir_parent).st_mode)
            try:
                # Make letsencrypt-auto happy with the current permissions
                chmod(le_auto_dir, S_IRUSR | S_IXUSR)
                sudo_chmod(le_auto_dir_parent, 0o755)

                self._test_permissions_warnings_about_path(le_auto_path, run_letsencrypt_auto)
                self._test_permissions_warnings_about_path(le_auto_dir, run_letsencrypt_auto)
            finally:
                chmod(le_auto_dir, le_auto_dir_mode)
                sudo_chmod(le_auto_dir_parent, le_auto_dir_parent_mode)
Example #14
def get_elem_configs(*args,**kw):
    datareqs,processors,outputs = [],[],[]

    kw = udict(kw)
    base = dirname(kw['__FILE'])

    # ... to acquire data
    d = kw.xget('DATAREQ')
    if d:
        pd = _i(d,base,options.process_path)
        if pd:
            datareqs.append(pd)
        else:
            logger.warn('Data Request file "%s" does not exist', d)

    # ... to process them
    d = kw.xget('PROCESSORS')
    if d:
        pd = _i(d,base,options.process_path)
        if pd:
            processors.append(pd)
        else:
            logger.warn('Processor Conf file "%s" does not exist', d)
            
    # Job label: use the section name if there is no LABEL key
    l = kw.xget('LABEL',args[0].upper())

    #
    _os = kw.xget_list('OUTPUT')
    if _os:
        for d in _os:
            pd = _i(d,base,options.process_path)
            if pd:
                outputs.append(pd)
            else:
                logger.warn('Output Conf file "%s" does not exist', d)
    
        for o in outputs:
            cfg = cfg2hash(o)
            jcfg = cfg.xget(l.upper())
            if jcfg:
                base = dirname(jcfg.xget('__FILE'))
                d = jcfg.xget('DATAREQ')
                pd = _i(d,base,options.process_path)
                if pd:
                    datareqs.append(pd)
                else:
                    logger.warn('*Data Request file "%s" does not exist', d)

                d = jcfg.xget('PROCESSOR')
                if d:
                    pd = _i(d,base,options.process_path)
                    if pd:
                        processors.append(pd)
                    else:
                        logger.warn('*Processor Conf file "%s" does not exist', d)


    return datareqs,processors,outputs
Example #15
    def _load(self):
        log.debug("reading `%s'", self.path)
        sys.path.insert(0, dirname(self.path))
        try:
            source = open(self.path).read()
            code = compile(source, self.path, 'exec')
            globs = {
                '__builtins__': sys.modules['__builtin__'],
                '__file__': str(self.path),
                '__name__': splitext(basename(self.path))[0],
                '_mk_makefile_': self,
                #TODO: is _mk_ns_ necessary?
                '_mk_ns_': self.ns,
            }
            # executing the compiled module body populates `globs`; the
            # makefile's module docstring then lives in globs['__doc__']
            exec(code, globs)
        finally:
            sys.path.remove(dirname(self.path))

        self.doc = globs.get('__doc__')
        default_tasks = [t for t in self.tasks.values() if t.default]
        if not default_tasks:
            self.default_task = None
        elif len(default_tasks) == 1:
            self.default_task = default_tasks[0]
        else:
            raise IllegalMakefileError("more than one default task: %s"
                                       % ', '.join(map(str, default_tasks)))
Example #16
        def __init__(self):

            # http://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
            self.app_dir = join(getenv('XDG_DATA_HOME', expanduser('~/.local/share')), appname)
            if not isdir(self.app_dir):
                makedirs(self.app_dir)

            self.plugin_dir = join(self.app_dir, 'plugins')
            if not isdir(self.plugin_dir):
                mkdir(self.plugin_dir)

            self.home = expanduser('~')

            self.respath = dirname(__file__)

            self.filename = join(getenv('XDG_CONFIG_HOME', expanduser('~/.config')), appname, '%s.ini' % appname)
            if not isdir(dirname(self.filename)):
                makedirs(dirname(self.filename))

            self.config = RawConfigParser()
            try:
                self.config.readfp(codecs.open(self.filename, 'r', 'utf-8'))
            except Exception:  # missing or unreadable config; start fresh
                self.config.add_section('config')

            if not self.get('outdir') or not isdir(self.get('outdir')):
                self.set('outdir', expanduser('~'))
Example #17
	def test_write1(self):
		fname = '___testwrite1__99001324.scn'
		G = scn.read(path.join(path.dirname(__file__),'test1.scn'),directed=False)
		
		self.assertEqual(G.node_data('a')[0],'1')
		self.assertEqual(G.node_data('a')[1],'1')
		
		# write all attributes back
		scn.write(	G,fname,num_node_props=2,num_edge_props=2,
					node_data_fxn=lambda idx,nobj,data: None if data is None else tuple([a for a in data]),
					edge_data_fxn=lambda idx,n1,n2,data: tuple([a for a in data]))
		G = scn.read(fname,directed=False)
		
		self.assertEqual(len(G),5)
		self.assertEqual(G.size(),5)
		
		self.assertNotEqual(G.node_data('a'),None)
		self.assertEqual(G.node_data('a')[0],'1')
		self.assertEqual(G.edge_data('a','b')[0],'X')
		
		# write with no edge attributes
		G = scn.read(path.join(path.dirname(__file__),'test1.scn'),directed=False)
		scn.write(	G,fname,num_node_props=2,num_edge_props=2,
					node_data_fxn=lambda idx,nobj,data: None if data is None else tuple([a for a in data]),
					edge_data_fxn=lambda idx,n1,n2,data: None)
		G = scn.read(fname,directed=False)
		
		self.assertEqual(len(G),5)
		self.assertEqual(G.size(),5)
		
		self.assertNotEqual(G.node_data('a'),None)
		self.assertEqual(G.node_data('a')[0],'1')
		self.assertEqual(G.edge_data('a','b'),None)
		
		os.remove(fname)
Example #18
    def train(self, sf_pickle = ''):
        # load precomputed descriptors and target values
        self.train_descs = np.loadtxt(dirname(__file__) + '/NNScore/train_descs.csv', delimiter=',', dtype=float)
        self.train_target = np.loadtxt(dirname(__file__) + '/NNScore/train_target.csv', delimiter=',', dtype=float)
        self.test_descs = np.loadtxt(dirname(__file__) + '/NNScore/test_descs.csv', delimiter=',', dtype=float)
        self.test_target = np.loadtxt(dirname(__file__) + '/NNScore/test_target.csv', delimiter=',', dtype=float)

        n_dim = (~((self.train_descs == 0).all(axis=0) | (self.train_descs.min(axis=0) == self.train_descs.max(axis=0)))).sum()

        # number of networks to sample; the original implementation used 1000, but 100 gives results that are good enough.
        n = 1000
        trained_nets = Parallel(n_jobs=self.n_jobs)(delayed(_parallel_helper)(neuralnetwork([n_dim,5,1]), 'fit', self.train_descs, self.train_target, train_alg='tnc', maxfun=1000) for i in xrange(n))
        # get 20 best
        best_idx = np.array([net.score(self.test_descs, self.test_target.flatten()) for net in trained_nets]).argsort()[::-1][:20]
        self.model = ensemble_model([trained_nets[i] for i in best_idx])

        r2 = self.model.score(self.test_descs, self.test_target)
        r = np.sqrt(r2)
        print 'Test set: R**2:', r2, ' R:', r

        r2 = self.model.score(self.train_descs, self.train_target)
        r = np.sqrt(r2)
        print 'Train set: R**2:', r2, ' R:', r

        if sf_pickle:
            return self.save(sf_pickle)
        else:
            return self.save('NNScore.pickle')
Example #19
File: app.py Project: lihuaijun/vj4
  def __init__(self):
    super(Application, self).__init__(
        handler_factory=functools.partial(web.RequestHandlerFactory, access_log=None),
        debug=options.options.debug)
    globals()[self.__class__.__name__] = lambda: self  # singleton

    # Initialize components.
    translation_path = path.join(path.dirname(__file__), 'locale')
    locale.load_translations(translation_path)
    self.loop.run_until_complete(asyncio.gather(tools.ensure_all_indexes(), bus.init()))
    smallcache.init()

    # Load views.
    from vj4.view import contest
    from vj4.view import discussion
    from vj4.view import home
    from vj4.view import judge
    from vj4.view import main
    from vj4.view import problem
    from vj4.view import record
    from vj4.view import training
    from vj4.view import user
    from vj4.view import i18n
    if options.options.static:
      self.router.add_static('/', path.join(path.dirname(__file__), '.uibuild'), name='static')
Example #20
def get_json_model_Ydata(json_model_fn, level='Run', verbose=VERB['none']):
    """
    Reads a JSON model, then searches base_dir and returns the data
    or set of data to which the model should be applied.

    """

    # json file like .../models/something.json,
    basedir_to_search = osp.dirname(osp.dirname(json_model_fn))
    if verbose <= VERB['info']:
        print('base dir', basedir_to_search)
        print('json_model_fn', json_model_fn)

    dict_level = get_json_dict(json_model_fn, level)

    if level == 'Run':
        returned_list = get_runs_data(basedir_to_search, dict_level)

    else:
        raise NotImplementedError("Level {} not yet implemented".format(level))

#    if level == 'Session':
#        returned_list = get_sessions_data(basedir_to_search, dict_level)

    return returned_list
Example #21
	def to_xml(self):
		q = Element('dictionary')
		q.attrib["value"] = basename(dirname(self.dct))
		
		r = SubElement(q, "revision", 
					value=str(self._svn_revision(dirname(self.dct))),
					timestamp=datetime.utcnow().isoformat(),
					checksum=self._checksum(open(self.dct, 'rb').read()))
		
		s = SubElement(r, 'corpus')
		s.attrib["value"] = basename(self.fn)
		s.attrib["checksum"] = self._checksum(open(self.fn, 'rb').read())
		
		SubElement(r, 'percent').text = "%.2f" % self.get_coverage()
		SubElement(r, 'total').text = str(len(self.get_words()))
		SubElement(r, 'known').text = str(len(self.get_known_words()))
		SubElement(r, 'unknown').text = str(len(self.get_unknown_words()))
		
		wrx = re.compile(r"\^(.*)/")
		s = SubElement(r, 'top')
		for word, count in self.get_top_unknown_words():
			SubElement(s, 'word', count=str(count)).text = wrx.search(word).group(1)
		
		s = SubElement(r, 'system')
		SubElement(s, 'time').text = "%.4f" % self.timer
		
		return ("coverage", etree.tostring(q))
Example #22
    def build_all(self):
        filtered_archs = self.filtered_archs
        print("Build {} for {} (filtered)".format(
            self.name,
            ", ".join([x.arch for x in filtered_archs])))
        for arch in filtered_archs:
            self.build(arch)

        name = self.name
        if self.library:
            print("Create lipo library for {}".format(name))
            if not name.startswith("lib"):
                name = "lib{}".format(name)
            static_fn = join(self.ctx.dist_dir, "lib", "{}.a".format(name))
            ensure_dir(dirname(static_fn))
            print("Lipo {} to {}".format(self.name, static_fn))
            self.make_lipo(static_fn)
        if self.libraries:
            print("Create multiple lipo for {}".format(name))
            for library in self.libraries:
                static_fn = join(self.ctx.dist_dir, "lib", basename(library))
                ensure_dir(dirname(static_fn))
                print("  - Lipo-ize {}".format(library))
                self.make_lipo(static_fn, library)
        print("Install include files for {}".format(self.name))
        self.install_include()
        print("Install frameworks for {}".format(self.name))
        self.install_frameworks()
        print("Install sources for {}".format(self.name))
        self.install_sources()
        print("Install {}".format(self.name))
        self.install()
Example #23
    def test_tree_determinism(self):
        """Check that the tree is drawn identically upon receiving the same
        dataset with no parameter changes."""
        n_tries = 10
        # Make sure the tree are deterministic for iris
        scene_nodes = []
        for _ in range(n_tries):
            self.send_signal(self.widget.Inputs.tree, self.signal_data)
            scene_nodes.append([n.pos() for n in self.get_visible_squares()])
        for node_row in zip(*scene_nodes):
            self.assertTrue(
                self._check_all_same(node_row),
                "The tree was not drawn identically in the %d times it was "
                "sent to widget after receiving the iris dataset." % n_tries
            )

        # Make sure trees are deterministic with data where some variables have
        # the same entropy
        data_same_entropy = Table(path.join(
            path.dirname(path.dirname(path.dirname(__file__))), "tests",
            "datasets", "same_entropy.tab"))
        data_same_entropy = TreeLearner()(data_same_entropy)
        scene_nodes = []
        for _ in range(n_tries):
            self.send_signal(self.widget.Inputs.tree, data_same_entropy)
            scene_nodes.append([n.pos() for n in self.get_visible_squares()])
        for node_row in zip(*scene_nodes):
            self.assertTrue(
                self._check_all_same(node_row),
                "The tree was not drawn identically in the %d times it was "
                "sent to widget after receiving a dataset with variables with "
                "same entropy." % n_tries
            )
Example #24
    def loadController(self, filename):
        fName = splitext(filename)
        if fName[1] == ".py":
            self.execfile(filename)
            for x in dir():
                if isinstance(eval(x), type):
                    if issubclass(eval(x), IRobotController) and x != "IRobotController":
                        for i in inspect.getmembers(__import__(x)):
                            if inspect.isclass(i[1]):
                                if issubclass(i[1], IRobotController) and i[0] != "IRobotController":
                                    r = i[1]()
                                    return r
                    elif hasattr(eval(x), 'controlRobot'):
                        pcw = PolledControllerWrapper(eval(x)())
                        for k, v in locals().items():
                            if hasattr(v, '__module__'):
                                if hasattr(sys.modules[v.__module__], '__file__'):
                                    if dirname(sys.modules[v.__module__].__file__) not in [getcwd(), ""]:
                                        globals()[k] = v
                            elif k in sys.modules.keys():
                                if hasattr(sys.modules[k], '__file__'):
                                    if dirname(sys.modules[k].__file__) not in [getcwd(), ""]:
                                        globals()[k] = v
                        return pcw
        else:
            raise RuntimeError("File given is not a Python file")
Example #25
def scraper(config_file, queue):
    try:
        # START
        sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
        short_name = get_output_json_name(config_file)
        output_file = os.path.join(OUTPUT_JSON_PATH, short_name)
        suspect_urls_log = os.path.join(LOGS_PATH, short_name.replace(".json", "_error_urls.log"))
        log_file = os.path.join(LOGS_PATH, short_name.replace(".json", ".log"))
        limit = LIMIT if LIMIT else -1
        #print "Starting : %s " % (config_file)
        command = "scrapy crawl web_scraper -a config_file=%s -s LOG_FILE=%s -a suspect_urls_log=%s -o %s -a summary=default -a limit=%s " % (
            config_file, log_file, suspect_urls_log, output_file, limit)
        print(command + "\n")
        if S3:
            command = "%s -a compress=%s -a s3=%s" % (command, COMPRESS_PATH, S3)
        collector = StatsCollector(config_file=config_file, output_file=output_file)
        queue.put(collector)
        call(command, shell=True)
        # get data generate from sub process
        summary_path = collector.default_path()
        with open(summary_path) as summary_file:
            json_value = json.load(summary_file)
            collector = StatsCollector.load_from_json(json_value=json_value)
        queue.put(collector)

    except Exception:
        traceback.print_exc()
        print("Error when processing the job for %s" % config_file)
Example #26
    def setUp(self):
        # Every test needs access to the request factory.
        self.url = reverse('receive_inbound_email')
        self.factory = RequestFactory()
        self.parser = MailgunRequestParser()
        self.test_upload_txt = path.join(path.dirname(__file__), 'test_files/test_upload_file.txt')
        self.test_upload_png = path.join(path.dirname(__file__), 'test_files/test_upload_file.jpg')
Example #27
def getIcons(filename=None):
    """Creates wxBitmaps ``self.icon`` and ``self.iconAdd`` based on the the image.
    The latter has a plus sign added over the top.

    png files work best, but anything that wx.Image can import should be fine
    """
    icons = {}
    if filename is None:
        filename = join(dirname(abspath(__file__)), 'base.png')
        
    # get the low-res version first
    im = Image.open(filename)
    icons['24'] = pilToBitmap(im, scaleFactor=0.5)
    icons['24add'] = pilToBitmap(im, scaleFactor=0.5)
    # try to find a 128x128 version
    filename128 = filename[:-4]+'128.png'
    if False: # TURN OFF FOR NOW os.path.isfile(filename128):
        im = Image.open(filename128)
    else:
        im = Image.open(filename)
    icons['48'] = pilToBitmap(im)
    # add the plus sign
    add = Image.open(join(dirname(abspath(__file__)), 'add.png'))
    im.paste(add, [0, 0, add.size[0], add.size[1]], mask=add)
    # im.paste(add, [im.size[0]-add.size[0], im.size[1]-add.size[1],
    #               im.size[0], im.size[1]], mask=add)
    icons['48add'] = pilToBitmap(im)

    return icons
Example #28
def main():
    """Main method.
    """
    global TCloc,volume,keyspace,keylen,nptot,permutation,passphrase,done,nperm
    if(len(sys.argv) < 3):
        print "Usage: johnny.py [path to TrueCrypt volume] [set of possible first characters] [set of possible second characters] ...\n"
        print "Recover a TrueCrypt password based on a rememberd mnemonic. Pass johnny.py a space-delineated list of strings each"
        print "representing the set of possible characters for each character in the password. If your passphrase contains a space, use quotes ;-)"
        sys.exit(0)
    TCloc = ((subprocess.Popen(['which','truecrypt'],stdout=subprocess.PIPE)).communicate())[0][0:-1]
    volume=sys.argv[1]
    keyspace = sys.argv[2:]
    keylen = len(keyspace)
    nptot = reduce(lambda x, y: x*y,[len(chars) for chars in keyspace])
    permutation = ['' for x in keyspace]
    done = 0
    nperm = 0

    if (not path.exists(path.dirname(volume))):
        print('File ' + volume + ': no such file or directory.')
    elif (not path.exists(volume)):
        print('File ' + volume + ': not found in ' + path.dirname(volume))
    else:
        passfile = open('pass.txt','w')
        print("Trying each of the %d possible keys..." % nptot)
        if tryKey(0):
            print('Successfully determined TrueCrypt password: ' + passphrase)
        else:
            print('Did not succeed in finding TrueCrypt password in specified keyspace.')
Example #29
def filter_move(path_old, path_new):
    if dirname(path_old) == dirname(path_new):
        label = "rename"
        path_new = basename(path_new)
    else:
        label = "move"
    return "%s %s to %s" % (T.green(label), T.underline(path_old), T.underline(path_new)), 0
Example #30
    def _do_discovery(self, argv, Loader=None):
        """Upstream _do_discovery doesn't find our load_tests() functions."""

        loader = TestLoader() if Loader is None else Loader()
        topdir = abspath(dirname(dirname(__file__)))
        tests = loader.discover(join(topdir, 'numba/tests'), '*.py', topdir)
        self.test = unittest.TestSuite(tests)
Example #31
    def output_file_path(self):
        """path of file where output variables will be written"""
        path = str(self._serializer.data[KEY_OUTPUT_FILE])
        if not pth.isabs(path):
            path = pth.normpath(pth.join(pth.dirname(self._conf_file), path))
        return path
Example #32
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <*****@*****.**>
#          Joan Massich <*****@*****.**>
#
# License: BSD Style.

import os
import os.path as op
import numpy as np
from distutils.version import LooseVersion

from ...utils import (_fetch_file, verbose, _TempDir, _check_pandas_installed,
                      _on_missing)
from ..utils import _get_path

AGE_SLEEP_RECORDS = op.join(op.dirname(__file__), 'age_records.csv')
TEMAZEPAM_SLEEP_RECORDS = op.join(op.dirname(__file__),
                                  'temazepam_records.csv')

TEMAZEPAM_RECORDS_URL = 'https://physionet.org/physiobank/database/sleep-edfx/ST-subjects.xls'  # noqa: E501
TEMAZEPAM_RECORDS_URL_SHA1 = 'f52fffe5c18826a2bd4c5d5cb375bb4a9008c885'

AGE_RECORDS_URL = 'https://physionet.org/physiobank/database/sleep-edfx/SC-subjects.xls'  # noqa: E501
AGE_RECORDS_URL_SHA1 = '0ba6650892c5d33a8e2b3f62ce1cc9f30438c54f'

sha1sums_fname = op.join(op.dirname(__file__), 'SHA1SUMS')


def _fetch_one(fname, hashsum, path, force_update, base_url):
    # Fetch the file
    url = base_url + '/' + fname
Example #33
# SPDX-License-Identifier: MIT

"""A setuptools based setup module.

See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""

from setuptools import setup, find_packages

# To use a consistent encoding
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="adafruit-circuitpython-sht4x",
    use_scm_version=True,
    setup_requires=["setuptools_scm"],
    description="Python library for Sensirion SHT4x temperature and humidity sensors",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    # The project's main homepage.
    url="https://github.com/adafruit/Adafruit_CircuitPython_SHT4x",
    # Author details
Example #34
# GUI for sekiro-corrupt-save-fix
# https://github.com/z0gSh1u/sekiro-corrupt-save-fix

import os.path as path
dirname__ = path.dirname(path.abspath(__file__))

from tkinter import *
from tkinter import filedialog, ttk, messagebox
from fix import main as fix_main

root = Tk()
root.title('Sekiro Corrupt Save Fix')

sl2_path = ''
slot_index = 0

cbo_slot = ttk.Combobox(root)
lbl_path = Label(root, {
    'text': 'No S0000.sl2 selected.',
    'fg': 'red'
})
lbl_status = Label(root, {'text': 'Nothing now.'})
lbl_status_text = ''


def ask_open_file():
    global sl2_path, lbl_path
    temp_path = filedialog.askopenfilename(title='Open S0000.sl2',
                                           filetypes=[('sl2 file', '*.sl2')])
    if len(temp_path) > 0:
        sl2_path = temp_path
Example #35
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.

from __future__ import print_function, absolute_import
import json
import os.path as osp
from jupyterlab_server.process import which
from jupyterlab_server.process_app import ProcessApp

HERE = osp.dirname(osp.realpath(__file__))

class NodeApp(ProcessApp):

    def get_command(self):
        """Get the command and kwargs to run.
        """
        # Run the node script with command arguments.
        config = dict(baseUrl=self.connection_url, token=self.token)

        with open(osp.join(HERE, 'config.json'), 'w') as fid:
            json.dump(config, fid)

        # cmd = [which('node'),
        #        'index.js', '--jupyter-config-data=./config.json']
        cmd = ["time"]
        return cmd, dict(cwd=HERE)


if __name__ == '__main__':
    NodeApp.launch_instance()
Example #36
import boto3
import re
import sys
import json
import botocore
import os
from rllab.misc import console
from string import Template
import os.path as osp

CHESTER_DIR = osp.dirname(__file__)
ACCESS_KEY = os.environ["AWS_ACCESS_KEY"]
ACCESS_SECRET = os.environ["AWS_ACCESS_SECRET"]
S3_BUCKET_NAME = os.environ["RLLAB_S3_BUCKET"]

ALL_REGION_AWS_SECURITY_GROUP_IDS = {}
ALL_REGION_AWS_KEY_NAMES = {}

CONFIG_TEMPLATE = Template("""
import os.path as osp
import os

PROJECT_PATH = osp.abspath(osp.join(osp.dirname(__file__), '..'))

AWS_NETWORK_INTERFACES = []

MUJOCO_KEY_PATH = osp.expanduser("~/.mujoco")

USE_GPU = False

USE_TF = True
Example #37
    def _generate_examples(self, label_images: Union[str, dict]):
        """Generate example for each image in the dict."""

        temp_dir = mkdtemp(prefix=self.name)

        if isinstance(label_images, str):
            assert path.isdir(label_images)
            print("label_images:", label_images, ";")
            (
                self._split_examples,
                labels,
            ) = tfds.folder_dataset.image_folder._get_split_label_images(
                path.dirname(label_images)
            )
            self.info.features["label"].names = sorted(labels)
            split_dict = tfds.core.SplitDict(self.name)

            label_images = {label: [] for label in self.info.features["label"].names}

            for split_name, examples in self._split_examples.items():
                split_dict.add(
                    tfds.core.SplitInfo(
                        name=split_name,
                        shard_lengths=[len(examples)],
                    )
                )

                # TODO: do this in a generator so it doesn't fill memory
                for example in examples:
                    label_images[example.label].append(example.image_path)
            self.info.update_splits_if_different(split_dict)

        for label, image_paths in label_images.items():
            for image_path in image_paths:
                key = posixpath.sep.join((label, posixpath.basename(image_path)))

                temp_image_filename = os.path.join(
                    temp_dir,
                    key.replace(posixpath.sep, "_").replace(os.path.sep, "_"),
                )

                if BaseImageLabelFolder.session._closed:
                    BaseImageLabelFolder.session = tf.compat.v1.Session()
                    BaseImageLabelFolder.session.__enter__()

                image_decoded = tf.image.decode_jpeg(
                    tf.io.read_file(image_path), channels=3 if self.rgb else 1
                )
                resized = tf.image.resize(image_decoded, self.resolution)
                enc = tf.image.encode_jpeg(
                    tf.cast(resized, tf.uint8),
                    "rgb" if self.rgb else "grayscale",
                    quality=100,
                    chroma_downsampling=False,
                )
                fwrite = tf.io.write_file(tf.constant(temp_image_filename), enc)
                result = BaseImageLabelFolder.session.run(fwrite)

                yield key, {
                    "image/filename": temp_image_filename,
                    "image": temp_image_filename,
                    "label": label,
                }

        print("resolved all files, now you should delete: {!r}".format(temp_dir))
        if not BaseImageLabelFolder.session._closed:
            BaseImageLabelFolder.session.__exit__(None, None, None)
Example #38
# SPDX-License-Identifier: Apache-2.0

import os
import sys
import unittest
import keras_segmentation
from os.path import dirname, abspath

sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_image
img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')


class TestSegNet(unittest.TestCase):

    def setUp(self):
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    def test_segnet(self):
        # From https://github.com/divamgupta/image-segmentation-keras/blob/master/keras_segmentation/models/segnet.py
        model = keras_segmentation.models.segnet.segnet(101)
        res = run_image(model, self.model_files, img_path, target_size=(416, 608))
        self.assertTrue(*res)

    def test_vgg_segnet(self):
        # From https://github.com/divamgupta/image-segmentation-keras/blob/master/keras_segmentation/models/segnet.py
        model = keras_segmentation.models.segnet.vgg_segnet(101)
Example #39
File: setup.py Project: 0xmjk/qgrid
from __future__ import print_function
from setuptools import setup, find_packages, Command
from setuptools.command.sdist import sdist
from setuptools.command.build_py import build_py
from setuptools.command.egg_info import egg_info
from subprocess import check_call
import os
import sys
import platform
from os.path import (
    join, dirname, abspath, exists
)

here = dirname(abspath(__file__))
node_root = join(here, 'js')
is_repo = exists(join(here, '.git'))

npm_path = os.pathsep.join([
    join(node_root, 'node_modules', '.bin'),
    os.environ.get('PATH', os.defpath),
])

from distutils import log
log.set_verbosity(log.DEBUG)
log.info('setup.py entered')
log.info('$PATH=%s' % os.environ['PATH'])

LONG_DESCRIPTION = 'An Interactive Grid for Sorting and Filtering DataFrames in Jupyter Notebook'

def js_prerelease(command, strict=False):
    """decorator for building minified js/css prior to another command"""
Example #40
    def input_file_path(self):
        """path of file with input variables of the problem"""
        path = str(self._serializer.data[KEY_INPUT_FILE])
        if not pth.isabs(path):
            path = pth.normpath(pth.join(pth.dirname(self._conf_file), path))
        return path
Example #41
def slices(heatmap_image,
	bg_image='/usr/share/mouse-brain-atlases/dsurqec_40micron_masked.nii',
	contour_image='',
	heatmap_threshold=3,
	contour_threshold=3,
	auto_figsize=False,
	invert=False,
	contour_alpha=0.9,
	contour_color='g',
	cmap='autumn_r',
	dimming=0.,
	figure_title='',
	force_reverse_slice_order=True,
	legend=False,
	aspect='portrait',
	save_as='',
	ratio=3/4.,
	slice_spacing=0.4,
	style='light',
	title_color='#BBBBBB',
	position_hspace=0.0,
	positive_only=False,
	negative_only=False,
	):
	"""
	Plot coronal `bg_image` slices at a given spacing, and overlay a heatmap (and optionally a contour) from NIfTI files.

	Parameters
	----------

	heatmap_image : str
		Path to an overlay image to be printed as a heatmap.
	bg_image : str, optional
		Path to the NIfTI image to draw in grayscale as the plot background.
		This would commonly be some sort of brain template.
	contour_image : str, optional
		Path to an overlay image to be printed as a contour.
	heatmap_threshold : float, optional
		Value at which to threshold the heatmap_image.
	contour_threshold : float, optional
		Value at which to threshold the contour_image.
	auto_figsize : boolean, optional
		Whether to automatically determine the size of the figure.
	invert : boolean, optional
		Whether to automatically invert data matrix values (useful if the image consists of negative values, e.g. when dealing with negative contrast agent CBV scans).
	contour_alpha : float, optional
		Alpha (transparency) with which to draw the contour image.
	contour_color : str, optional
		Color with which to draw the contour image.
	cmap : str, optional
		Colormap with which to draw the heatmap image.
	dimming : float, optional
		Dimming factor, generally between -2 and 2 (-2 increases contrast, 2 decreases it).
		This parameter is passed directly to `nilearn.plotting.plot_anat()`
		Set to 'auto', to use nilearn automagick dimming.
	figure_title : str, optional
		Title for the figure.
	force_reverse_slice_order : bool, optional
		Whether to force the reversal of the slice order.
		This can be done to enforce a visual presentation without having to modify the underlying data (i.e. visualize neurological-order slices in radiological order).
		This option should generally be avoided, ideally one would not obfuscate the data orientation when plotting.
	legend : string, optional
		The legend text.
	aspect : list or {'landscape', 'portrait'}, optional
		Either a list of 2 integers giving the desired number of rows and columns (in this order), or a string, which is either 'landscape' or 'portrait', and which prompts the function to auto-determine the best number of rows and columns given the number of slices and the `scale` attribute.
	save_as : str, optional
		Path under which to save the output figure.
	ratio : float, optional
		The desired ratio between the number of columns and the number of rows in the desired figure layout.
	slice_spacing : float
		Slice spacing in mm.
	style : {'light', 'dark', ''}, optional
		Default SAMRI styling which to apply, set to an empty string to apply no styling and leave it to the environment matplotlibrc.
	title_color : string, optional
		String specifying the desired color for the title.
		This needs to be specified in-function, because the matplotlibrc styling standard does not provide for title color specification [matplotlibrc_title]

	References
	----------

	.. [matplotlibrc_title] https://stackoverflow.com/questions/30109465/matplotlib-set-title-color-in-stylesheet
	"""

	plotting_module_path = path.dirname(path.realpath(__file__))
	if style=='light':
		black_bg=False
		anatomical_cmap = 'binary'
		style_path = path.join(plotting_module_path,'contour_slices.conf')
		plt.style.use([style_path])
	elif style=='dark':
		black_bg=True
		anatomical_cmap = 'binary_r'
		style_path = path.join(plotting_module_path,'contour_slices_dark.conf')
		plt.style.use([style_path])
	else:
		anatomical_cmap = 'binary'
		black_bg=False

	bg_image = path.abspath(path.expanduser(bg_image))
	bg_img = nib.load(bg_image)
	if bg_img.header['dim'][0] > 3:
		bg_img = collapse(bg_img)

	slice_order_is_reversed = 0
	heatmap_image = path.abspath(path.expanduser(heatmap_image))
	heatmap_img = nib.load(heatmap_image)
	heatmap_data = heatmap_img.get_data()
	if heatmap_img.header['dim'][0] > 3:
		img = collapse(heatmap_img)
	if contour_image:
		contour_image = path.abspath(path.expanduser(contour_image))
		contour_img = nib.load(contour_image)
		if contour_img.header['dim'][0] > 3:
			contour_img = collapse(contour_img)
		# We apply thresholding here, rather than when drawing the contours, to ensure the same contour color in all slices.
		# This is possibly a bug in nilearn.
		contour_img = from_img_threshold(contour_img, contour_threshold)

	# We threshold based on the entire data matrix, rather than just the active slice.
	slice_row = heatmap_img.affine[1]
	subthreshold_start_slices = 0
	while True:
		for i in np.arange(heatmap_data.shape[1]):
			my_slice = heatmap_data[:,i,:]
			if math.isnan(my_slice.max()) or my_slice.max() < heatmap_threshold:
				subthreshold_start_slices += 1
			else:
				break
		break
	subthreshold_end_slices = 0
	while True:
		for i in np.arange(heatmap_data.shape[1])[::-1]:
			my_slice = heatmap_data[:,i,:]
			if math.isnan(my_slice.max()) or my_slice.max() < heatmap_threshold:
				subthreshold_end_slices += 1
			else:
				break
		break
	slice_thickness = (slice_row[0]**2+slice_row[1]**2+slice_row[2]**2)**(1/2)
	best_guess_negative = abs(min(slice_row[0:3])) > abs(max(slice_row[0:3]))
	slices_number = heatmap_data.shape[list(slice_row).index(max(slice_row))]
	img_min_slice = slice_row[3] + subthreshold_start_slices*slice_thickness
	img_max_slice = slice_row[3] + (slices_number-subthreshold_end_slices)*slice_thickness
	bounds = [img_min_slice,img_max_slice]
	if best_guess_negative:
		slice_order_is_reversed += 1
	else:
		slice_order_is_reversed -= 1

	min_slice = min(bounds)
	max_slice = max(bounds)
	cut_coords = np.arange(min_slice, max_slice, slice_spacing)
	if slice_order_is_reversed > 0:
		cut_coords = cut_coords[::-1]
	if force_reverse_slice_order:
		cut_coords = cut_coords[::-1]

	linewidth = rcParams['lines.linewidth']

	cut_coord_length = len(cut_coords)
	if legend:
		cut_coord_length += 1
	try:
		nrows, ncols = aspect
	except ValueError:
		if aspect == "portrait":
			ncols = np.ceil((cut_coord_length*ratio)**(1/2))
			nrows = np.ceil(cut_coord_length/ncols)
		elif aspect == "landscape":
			nrows = np.ceil((cut_coord_length*ratio)**(1/2))
			ncols = np.ceil(cut_coord_length/nrows)
	# We adjust the relevant rcParams here, because they need to be set before drawing to take effect.
	if legend and cut_coord_length == ncols*(nrows-1)+1:
		rcParams['figure.subplot.bottom'] = np.max([rcParams['figure.subplot.bottom']-0.05,0.])

	if auto_figsize:
		figsize = np.array(rcParams['figure.figsize'])
		figsize_scales = figsize/np.array([float(ncols),float(nrows)])
		figsize_scale = figsize_scales.min()
		fig, ax = plt.subplots(figsize=(ncols*figsize_scale,nrows*figsize_scale),
				nrows=int(nrows), ncols=int(ncols),
				)
	else:
		figsize = np.array(rcParams['figure.figsize'])
		fig, ax = plt.subplots(
				nrows=int(nrows), ncols=int(ncols),
				)
	flat_axes = list(ax.flatten())

	if cmap and heatmap_image:
		cax, kw,vmin,vmax,cmap = _draw_colorbar(heatmap_image,ax,
			threshold=heatmap_threshold,
			aspect=40,
			fraction=0.05,
			anchor=(0,-0.5),
			pad=0.05,
			panchor=(10.0, 0.5),
			shrink=0.99,
			cut_coords = cut_coords,
			positive_only = positive_only,
			negative_only = negative_only,
			cmap=cmap,
			really_draw=True,
			)
	if positive_only:
		vmin = 0
	elif negative_only:
		vmax = 0
	for ix, ax_i in enumerate(flat_axes):
		try:
			display = nilearn.plotting.plot_anat(bg_img,
				axes=ax_i,
				display_mode='y',
				cut_coords=[cut_coords[ix]],
				annotate=False,
				black_bg=black_bg,
				dim=dimming,
				cmap=anatomical_cmap,
				)
		except IndexError:
			ax_i.axis('off')
		else:
			display.add_overlay(heatmap_img,
				threshold=heatmap_threshold,
				cmap=cmap,
				vmin = vmin,vmax = vmax,
				)
			if contour_image:
				display.add_contours(contour_img,
					alpha=contour_alpha,
					levels=[0.8],
					linewidths=linewidth,
					)
			ax_i.set_xlabel('{} label'.format(ix))
			slice_title = '{0:.2f}mm'.format(cut_coords[ix])
			text = ax_i.text(0.5,position_hspace,
				slice_title,
				horizontalalignment='center',
				fontsize=rcParams['font.size'],
				)
	if legend:
		for ix, img in enumerate(imgs):
			insertion_legend, = plt.plot([],[], color=colors[ix], label=legend)
		if cut_coord_length == ncols*(nrows-1)+1:
			plt.legend(loc='upper left',bbox_to_anchor=(-0.1, -0.3))
		else:
			plt.legend(loc='lower left',bbox_to_anchor=(1.1, 0.))

	if figure_title:
		fig.suptitle(figure_title, color=title_color)

	if save_as:
		save_as = path.abspath(path.expanduser(save_as))
		save_dir,_ = os.path.split(save_as)
		try:
			os.makedirs(save_dir)
		except FileExistsError:
			pass
		plt.savefig(save_as)
		plt.close()
Example #42
File: setup.py Project: 0xmjk/qgrid
from os.path import abspath, dirname, join


def read_requirements(basename):
    reqs_file = join(dirname(abspath(__file__)), basename)
    with open(reqs_file) as f:
        return [req.strip() for req in f.readlines()]
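In setup.py this is typically consumed by setup() (the file name below is illustrative):

install_requires = read_requirements('requirements.txt')
# setup(..., install_requires=install_requires)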
Example #43
import pygame
import math
from os import path
from konfigurationen import *

alle_figuren = pygame.sprite.Group()
alle_knoepfe = pygame.sprite.Group()
alle_bilder = pygame.sprite.Group()
alle_bloons = pygame.sprite.Group()

spiel_ordner = path.dirname(__file__)
bilder_ordner = path.join(spiel_ordner, "Bilder")
extras_ordner = path.join(bilder_ordner, "Extras")
bloons_ordner = path.join(bilder_ordner, "Bloons")

px_pro_frame = px_pro_sekunde / FPS

class Knopf(pygame.sprite.Sprite):
    def __init__(self, konfiguration):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(path.join(extras_ordner, konfiguration["datei"])).convert()
        self.rect = self.image.get_rect()
        self.rect.topleft = konfiguration["position"]
        if "groesse" in konfiguration.keys():
            self.image = pygame.transform.scale(self.image, konfiguration["groesse"])
        if "colorkey" in konfiguration.keys():
            self.image.set_colorkey(konfiguration["colorkey"])
        # self.aktion = konfiguration["aktion"]
        self.gehe_zu_bildschirm = konfiguration["gehe zu bildschirm"]
        alle_figuren.add(self)
        alle_knoepfe.add(self)
Example #44
def contour_slices(bg_image, file_template,
	auto_figsize=False,
	invert=False,
	alpha=[0.9],
	colors=['r','g','b'],
	dimming=0.,
	figure_title='',
	force_reverse_slice_order=True,
	legend_template='',
	levels_percentile=[80],
	linewidths=(),
	ratio='portrait',
	save_as='',
	scale=0.4,
	slice_spacing=0.5,
	substitutions=[{},],
	style='light',
	title_color='#BBBBBB',
	):
	"""
	Plot coronal `bg_image` slices at a given spacing, and overlay contours from a list of NIfTI files.

	Parameters
	----------

	bg_image : str
		Path to the NIfTI image to draw in grayscale as the plot background.
		This would commonly be some sort of brain template.
	file_template : str
		String template giving the path to the overlay stack.
		To create multiple overlays, this template will iteratively be substituted with each of the substitution dictionaries in the `substitutions` parameter.
	auto_figsize : boolean, optional
		Whether to automatically determine the size of the figure.
	invert : boolean, optional
		Whether to automatically invert data matrix values (useful if the image consists of negative values, e.g. when dealing with negative contrast agent CBV scans).
	alpha : list, optional
		List of floats, specifying with how much alpha to draw each contour.
	colors : list, optional
		List of colors in which to plot the overlays.
	dimming : float, optional
		Dimming factor, generally between -2 and 2 (-2 increases contrast, 2 decreases it).
		This parameter is passed directly to `nilearn.plotting.plot_anat()`
		Set to 'auto', to use nilearn automagick dimming.
	figure_title : str, optional
		Title for the figure.
	force_reverse_slice_order : bool, optional
		Whether to force the reversal of the slice order.
		This can be done to enforce a visual presentation without having to modify the underlying data (i.e. visualize neurological-order slices in radiological order).
		This option should generally be avoided, ideally one would not obfuscate the data orientation when plotting.
	legend_template : string, optional
		String template which can be formatted with the dictionaries contained in the `substitutions` parameter.
		The resulting strings will give the legend text.
	levels_percentile : list, optional
		List of integers, specifying at which percentiles of each overlay to draw contours.
	linewidths : tuple, optional
		Tuple of desired contour line widths (one per substitution).
	ratio : list or {'landscape', 'portrait'}, optional
		Either a list of 2 integers giving the desired number of rows and columns (in this order), or a string, which is either 'landscape' or 'portrait', and which prompts the function to auto-determine the best number of rows and columns given the number of slices and the `scale` attribute.
	save_as : str, optional
		Path under which to save the output figure.
		The string may contain formatting fields from the first dictionary in the `substitutions` variable.
	scale : float, optional
		The expected ratio of the slice height divided by the sum of the slice height and width.
		This somewhat complex metric controls the row and column distribution of slices in the 'landscape' and 'portrait' plotting shapes.
	slice_spacing : float, optional
		Slice spacing in mm.
	substitutions : list of dicts, optional
		A list of dictionaries, with keys including all substitution keys found in the `file_template` parameter, and values giving the desired substitution values, which point the `file_template` string template to existing files to be included in the overlay stack.
		Such a dictionary is best obtained via `samri.utilities.bids_substitution_iterator()`.
	style : {'light', 'dark', ''}, optional
		Default SAMRI styling which to apply, set to an empty string to apply no styling and leave it to the environment matplotlibrc.
	title_color : string, optional
		String specifying the desired color for the title.
		This needs to be specified in-function, because the matplotlibrc styling standard does not provide for title color specification [matplotlibrc_title]_.

	References
	----------

	.. [matplotlibrc_title] https://stackoverflow.com/questions/30109465/matplotlib-set-title-color-in-stylesheet
	"""

	if len(substitutions) == 0:
		print('ERROR: You have specified a substitution dictionary of length 0. There needs to be at least one set of substitutions. If your string contains no formatting fields, please pass a list containing an empty dictionary to the `substitutions` parameter (this is also its default value).')

	plotting_module_path = path.dirname(path.realpath(__file__))
	if style=='light':
		black_bg=False
		anatomical_cmap = 'binary'
		style_path = path.join(plotting_module_path,'contour_slices.conf')
		plt.style.use([style_path])
	elif style=='dark':
		black_bg=True
		anatomical_cmap = 'binary_r'
		style_path = path.join(plotting_module_path,'contour_slices_dark.conf')
		plt.style.use([style_path])
	else:
		anatomical_cmap = 'binary'
		black_bg=False

	bg_image = path.abspath(path.expanduser(bg_image))
	bg_img = nib.load(bg_image)
	if bg_img.header['dim'][0] > 3:
		bg_data = bg_img.get_data()
		ndim = 0
		for i in range(len(bg_img.header['dim'])-1):
			current_dim = bg_img.header['dim'][i+1]
			if current_dim == 1:
				break
			ndim += 1
		bg_img.header['dim'][0] = ndim
		bg_img.header['pixdim'][ndim+1:] = 0
		bg_data = bg_data.T[0].T
		bg_img = nib.nifti1.Nifti1Image(bg_data, bg_img.affine, bg_img.header)

	imgs = []
	bounds = []
	levels = []
	slice_order_is_reversed = 0
	for substitution in substitutions:
		filename = file_template.format(**substitution)
		filename = path.abspath(path.expanduser(filename))
		img = nib.load(filename)
		data = img.get_data()
		if img.header['dim'][0] > 3:
			img = collapse(img)
		if invert:
			data = -data
			img = nib.nifti1.Nifti1Image(data, img.affine, img.header)
		# We should only be looking at the percentile of the entire data matrix, rather than just the active slice.
		for level_percentile in levels_percentile:
			level = np.percentile(data,level_percentile)
			levels.append(level)
		slice_row = img.affine[1]
		# Count how many slices at the start and at the end of the stack stay
		# below the lowest contour level.
		subthreshold_start_slices = 0
		for i in np.arange(data.shape[1]):
			my_slice = data[:,i,:]
			if my_slice.max() < min(levels):
				subthreshold_start_slices += 1
			else:
				break
		subthreshold_end_slices = 0
		for i in np.arange(data.shape[1])[::-1]:
			my_slice = data[:,i,:]
			if my_slice.max() < min(levels):
				subthreshold_end_slices += 1
			else:
				break
		slice_thickness = (slice_row[0]**2+slice_row[1]**2+slice_row[2]**2)**(1/2)
		best_guess_negative = abs(min(slice_row[0:3])) > abs(max(slice_row[0:3]))
		slices_number = data.shape[list(slice_row).index(max(slice_row))]
		img_min_slice = slice_row[3] + subthreshold_start_slices*slice_thickness
		img_max_slice = slice_row[3] + (slices_number-subthreshold_end_slices)*slice_thickness
		bounds.extend([img_min_slice,img_max_slice])
		if best_guess_negative:
			slice_order_is_reversed += 1
		else:
			slice_order_is_reversed -= 1
		imgs.append(img)

	if len(alpha) == 1:
		alpha = alpha * len(imgs)
	min_slice = min(bounds)
	max_slice = max(bounds)
	cut_coords = np.arange(min_slice, max_slice, slice_spacing)
	if slice_order_is_reversed > 0:
		cut_coords = cut_coords[::-1]
	if force_reverse_slice_order:
		cut_coords = cut_coords[::-1]

	if not linewidths:
		linewidths = (rcParams['axes.linewidth'],)*len(imgs)

	if len(cut_coords) > 3:
		cut_coord_length = len(cut_coords)
		if legend_template:
			cut_coord_length += 1
		try:
			nrows, ncols = ratio
		except ValueError:
			if ratio == "portrait":
				ncols = np.floor(cut_coord_length**scale)
				nrows = np.ceil(cut_coord_length/float(ncols))
			elif ratio == "landscape":
				nrows = np.floor(cut_coord_length**(scale))
				ncols = np.ceil(cut_coord_length/float(nrows))
		# we adjust the respective rc.Param here, because it needs to be set before drawing to take effect
		if legend_template and cut_coord_length == ncols*(nrows-1)+1:
			rcParams['figure.subplot.bottom'] = np.max([rcParams['figure.subplot.bottom']-0.05,0.])

		if auto_figsize:
			figsize = np.array(rcParams['figure.figsize'])
			figsize_scales = figsize/np.array([float(ncols),float(nrows)])
			figsize_scale = figsize_scales.min()
			fig, ax = plt.subplots(figsize=(ncols*figsize_scale,nrows*figsize_scale),
					nrows=int(nrows), ncols=int(ncols),
					)
		else:
			fig, ax = plt.subplots(
					nrows=int(nrows), ncols=int(ncols),
					)
		flat_axes = list(ax.flatten())
		for ix, ax_i in enumerate(flat_axes):
			try:
				display = nilearn.plotting.plot_anat(bg_img,
					axes=ax_i,
					display_mode='y',
					cut_coords=[cut_coords[ix]],
					annotate=False,
					black_bg=black_bg,
					dim=dimming,
					cmap=anatomical_cmap,
					)
			except IndexError:
				ax_i.axis('off')
			else:
				for img_ix, img in enumerate(imgs):
					color = colors[img_ix]
					display.add_contours(img,
							alpha=alpha[img_ix],
							colors=[color],
							levels=levels[img_ix],
							linewidths=(linewidths[img_ix],),
							)

		if legend_template:
			for ix, img in enumerate(imgs):
				insertion_legend, = plt.plot([],[], color=colors[ix], label=legend_template.format(**substitutions[ix]))
			if cut_coord_length == ncols*(nrows-1)+1:
				plt.legend(loc='upper left',bbox_to_anchor=(-0.1, -0.3))
			else:
				plt.legend(loc='lower left',bbox_to_anchor=(1.1, 0.))
	else:
		display = nilearn.plotting.plot_anat(bg_img,
			display_mode='y',
			cut_coords=cut_coords,
			black_bg=black_bg,
			)
		for ix, img in enumerate(imgs):
			color = colors[ix]
			display.add_contours(img, levels=levels, colors=[color])

	if figure_title:
		fig.suptitle(figure_title, color=title_color)

	if save_as:
		save_as = save_as.format(**substitutions[0])
		save_as = path.abspath(path.expanduser(save_as))
		save_dir,_ = os.path.split(save_as)
		os.makedirs(save_dir, exist_ok=True)
		plt.savefig(save_as,
			#facecolor=fig.get_facecolor(),
			)
		plt.close()
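A hedged usage sketch for contour_slices; every path, template field and substitution value below is hypothetical and only illustrates the calling convention described in the docstring (which suggests building the dictionaries via samri.utilities.bids_substitution_iterator()):

substitutions = [
	{'subject': '5691', 'session': 'ofM'},  # hypothetical BIDS-style fields
	{'subject': '5703', 'session': 'ofM'},
]
contour_slices(
	'~/ni_data/templates/template.nii.gz',  # hypothetical background template
	'~/ni_data/data/sub-{subject}/ses-{session}/stat_map.nii.gz',  # hypothetical overlay stack template
	substitutions=substitutions,
	levels_percentile=[95],
	colors=['r','g'],
	legend_template='{subject}/{session}',
	slice_spacing=0.45,
	save_as='contours_{subject}.png',
	)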
Example #45
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date    : 2018-05-02 16:48:19
# @Author  : Hao Li ([email protected])
# @Link    : https://github.com/SAmmer0
# @Version : $Id$

from sys import path as sys_path
from os.path import dirname

from tdtools import trans_date
from pitdata.const import CALCULATION_FOLDER_PATH, DataType
from pitdata import DataDescription
if CALCULATION_FOLDER_PATH not in sys_path:
    sys_path.append(dirname(CALCULATION_FOLDER_PATH))
from quotes_template import quotes_factory


dd = DataDescription(quotes_factory('TurnoverValue'), trans_date('2018-05-02'),
                     DataType.PANEL_NUMERIC, dep=['UNIVERSE'], desc='成交金额(元)')  # desc: turnover value (yuan)
Example #46
0
from os import path
from tardis.default_settings.storage import DEFAULT_STORAGE_BASE_DIR

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = DEFAULT_STORAGE_BASE_DIR

# Used by "django collectstatic"
STATIC_ROOT = path.abspath(path.join(path.dirname(__file__), '../..', 'static'))

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
# ADMIN_MEDIA_PREFIX = STATIC_URL + '/admin/'
def get_admin_media_path():
    import pkgutil
    package = pkgutil.get_loader("django.contrib.admin")
    return path.join(package.filename, 'static', 'admin')

ADMIN_MEDIA_STATIC_DOC_ROOT = get_admin_media_path()

STATICFILES_DIRS = (
    ('admin', ADMIN_MEDIA_STATIC_DOC_ROOT),
)

# Use cachable copies of static files
STATICFILES_STORAGE = \
    'django.contrib.staticfiles.storage.CachedStaticFilesStorage'

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
Example #47
0
from os import path

def local_path(filename):
    """Resolve `filename` relative to this module's directory, not the CWD."""
    return path.join(
        path.abspath(path.dirname(__file__)),
        filename)
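Usage note: because the base directory is derived from __file__, the result is stable no matter where the interpreter was launched from. A brief sketch with a hypothetical filename:

# Resolves next to this module rather than the current working directory.
config_path = local_path('settings.json')  # hypothetical file
with open(config_path) as f:
    raw = f.read()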
Example #48
0
def expand_modules(files_or_modules, black_list, black_list_re):
    """take a list of files/modules/packages and return the list of tuple
    (file, module name) which have to be actually checked
    """
    result = []
    errors = []
    for something in files_or_modules:
        if basename(something) in black_list:
            continue
        if _basename_in_blacklist_re(basename(something), black_list_re):
            continue
        if exists(something):
            # this is a file or a directory
            try:
                modname = ".".join(modutils.modpath_from_file(something))
            except ImportError:
                modname = splitext(basename(something))[0]
            if isdir(something):
                filepath = join(something, "__init__.py")
            else:
                filepath = something
        else:
            # suppose it's a module or package
            modname = something
            try:
                filepath = modutils.file_from_modpath(modname.split("."))
                if filepath is None:
                    continue
            except (ImportError, SyntaxError) as ex:
                # The SyntaxError is a Python bug and should be
                # removed once we move away from imp.find_module: http://bugs.python.org/issue10588
                errors.append({"key": "fatal", "mod": modname, "ex": ex})
                continue

        filepath = normpath(filepath)
        modparts = (modname or something).split(".")

        try:
            spec = modutils.file_info_from_modpath(modparts, path=sys.path)
        except ImportError:
            # Might not be acceptable, don't crash.
            is_namespace = False
            is_directory = isdir(something)
        else:
            is_namespace = modutils.is_namespace(spec)
            is_directory = modutils.is_directory(spec)

        if not is_namespace:
            result.append({
                "path": filepath,
                "name": modname,
                "isarg": True,
                "basepath": filepath,
                "basename": modname,
            })

        has_init = (
            not (modname.endswith(".__init__") or modname == "__init__")
            and basename(filepath) == "__init__.py")

        if has_init or is_namespace or is_directory:
            for subfilepath in modutils.get_module_files(
                    dirname(filepath), black_list, list_all=is_namespace):
                if filepath == subfilepath:
                    continue
                if _basename_in_blacklist_re(basename(subfilepath),
                                             black_list_re):
                    continue

                modpath = _modpath_from_file(subfilepath, is_namespace)
                submodname = ".".join(modpath)
                result.append({
                    "path": subfilepath,
                    "name": submodname,
                    "isarg": False,
                    "basepath": filepath,
                    "basename": modname,
                })
    return result, errors
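A hedged sketch of calling expand_modules, assuming the pylint/astroid helpers imported by the surrounding module are available; the arguments are hypothetical, and each result entry is a dict with the path/name/isarg/basepath/basename keys built above:

import re

black_list = ["CVS"]                   # basenames to skip outright
black_list_re = [re.compile(r"^\.")]   # hypothetical: skip dotfiles
modules, errors = expand_modules(["mypackage", "scripts/tool.py"],
                                 black_list, black_list_re)
for entry in modules:
    print(entry["name"], "->", entry["path"])
for error in errors:
    print("fatal:", error["mod"], error["ex"])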
Example #49
0
def cli(indir,
        odir=None, 
        tmp_dir=None,
        reformatted_name=True,
        force=False,
        # force_prokka=False,
        prokka_p=" `which prokka`",
        thread=5,
        all_ids=None,
        thread_per_prokka=0,
        dry_run=False,
        ):
    """
    It would use the downloaded protein first.
    If it doesn't exist, it will perform prokka to predict genes.
    :param indir: ./genbank
    :param odir: ./modified_data/genome_protein_files/
    :param tmp_dir: ./modified_data/prokka_o/
    :param reformatted_name: reformatted the output file or not
    :param force: overlap the output file or not
    :param force_prokka: deprecated
    :param prokka_p: the exec path of prokka. Default using `which prokka` to retrieve it path
    :return:
    """

    # process the directory
    if odir is None:
        odir = './genome_protein_files'
    if tmp_dir is None:
        tmp_dir = join(odir, 'tmp')
        # tmp_dir = join(odir, 'tmp')
    if not exists(tmp_dir):
        os.makedirs(tmp_dir, exist_ok=True)

    tqdm.write(f'iterating the {indir}')

    all_dir = [_ for _ in tqdm(glob(join(indir, '**', 'GC*', '*.fna.gz')))]

    if all_ids:
        all_dir = [_ for _ in all_dir if basename(dirname(_)) in all_ids]
        found = [basename(dirname(_)) for _ in all_dir]
        not_found = list(set(all_ids).difference(set(found)))
        if not_found:
            tqdm.write(f"{len(not_found)} are not found!")


    tqdm.write("gunzip fna file and collect jobs")
    jobs = []
    jobs2 = []
    for p_dir in tqdm(all_dir):
        p_dir = dirname(p_dir)
        p_files = glob(join(p_dir, '*.faa.gz'))
        ofile = join(odir, basename(p_dir)) + '.faa'
        if exists(ofile) and not force:
            # if the output faa already exists and force is not set, skip it
            continue
        # if not p_files:
        # there are no protein files,
        # so use prokka to predict genes
        fna_file = glob(join(p_dir, '*.fna.gz'))[0]
        new_fna = fna_file.replace('.gz', '')
        if not exists(new_fna):
            run_cmd(f'gunzip -d -c {fna_file} > {new_fna}')
        sample_name = basename(dirname(fna_file))
        prokka_cmd = get_faa_from_prokka_r(infile=new_fna,
                                           odir=tmp_dir,
                                           sample_name=sample_name,
                                           prokka_p=prokka_p,
                                           return_cmd=True,
                                           thread_per_prokka=thread_per_prokka,
                                           )
        if exists(prokka_cmd):
            # output is a file instead of cmd.
            prokka_ofile = prokka_cmd
            jobs2.append(f"ln -s `realpath {prokka_ofile}` {ofile}")
            continue
        else:
            jobs.append(prokka_cmd)

        # collect prokka output file
        prokka_ofile = f"{tmp_dir}/{sample_name}/{sample_name}.faa"
        jobs2.append(f'ln -s `realpath {prokka_ofile}` {ofile}')

        # if p_file.endswith('.gz') and exists(prokka_ofile):
        #     run_cmd(f'gunzip -d -c {p_file} >{ofile}')
        # elif  exists(prokka_ofile):
        #     run_cmd(f'cat {p_file} >{ofile}')
        # else:
        #     # print(p_file,ofile)
        #     pass
    if dry_run:
        with open('./cmds','w') as f1:
            f1.write('\n'.join(jobs))
        return
    else:
        tqdm.write('run prokka')
        with mp.Pool(processes=thread) as tp:
            r = list(tqdm(tp.imap(run_cmd, jobs), total=len(jobs)))

    tqdm.write('run soft link to save space')
    for j in tqdm(jobs2):
        run_cmd(j)
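A hedged invocation sketch for the cli function above; in the original project it is likely wrapped as a command-line entry point, and all paths and accessions here are hypothetical:

cli(indir='./genbank',                  # downloaded assemblies, one GC* directory each
    odir='./modified_data/genome_protein_files',
    tmp_dir='./modified_data/prokka_o',
    thread=5,
    all_ids={'GCA_000005845.2'},        # hypothetical assembly accession
    dry_run=True,                       # only write the prokka commands to ./cmds
    )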
Example #50
0
def testdata_expts(
    defaultdb='testdb1',
    default_acfgstr_name_list=['default:qindex=0:10:4,dindex=0:20'],
    default_test_cfg_name_list=['default'],
    a=None,
    t=None,
    p=None,
    qaid_override=None,
    daid_override=None,
    initial_aids=None,
    use_cache=None,
    dbdir=None,
    ibs=None,
):
    r"""
    Use this if you want data from an experiment.
    Command line interface to quickly get testdata for test_results.

    Command line flags can be used to specify db, aidcfg, pipecfg, qaid
    override, daid override (and maybe initial aids).


    CommandLine:
        python -m wbia.init.main_helpers testdata_expts

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.other.dbinfo import *  # NOQA
        >>> import wbia
        >>> ibs, testres = wbia.testdata_expts(defaultdb='pz_mtest',
        >>>                                     a='timectrl:qsize=2',
        >>>                                     t='invar:ai=[false],ri=false',
        >>>                                     use_cache=False)
        >>> print('testres = %r' % (testres,))
    """
    if ut.VERBOSE:
        logger.info('[main_helpers] testdata_expts')
    import wbia
    from wbia.expt import harness

    if a is not None:
        default_acfgstr_name_list = a
    if t is not None and p is None:
        p = t
    if p is not None:
        default_test_cfg_name_list = p

    if isinstance(default_acfgstr_name_list, six.string_types):
        default_acfgstr_name_list = [default_acfgstr_name_list]
    if isinstance(default_test_cfg_name_list, six.string_types):
        default_test_cfg_name_list = [default_test_cfg_name_list]

    # from wbia.expt import experiment_helpers
    if dbdir is not None:
        dbdir = ut.truepath(dbdir)
    if ibs is None:
        ibs = wbia.opendb(defaultdb=defaultdb, dbdir=dbdir)
    acfg_name_list = ut.get_argval(
        ('--aidcfg', '--acfg', '-a'), type_=list, default=default_acfgstr_name_list
    )
    test_cfg_name_list = ut.get_argval(
        ('-t', '-p'), type_=list, default=default_test_cfg_name_list
    )
    daid_override = ut.get_argval(
        ('--daid-override', '--daids-override'), type_=list, default=daid_override
    )
    qaid_override = ut.get_argval(
        ('--qaid', '--qaids-override', '--qaid-override'),
        type_=list,
        default=qaid_override,
    )

    # Hack a cache here
    use_bulk_cache = not ut.get_argflag(('--nocache', '--nocache-hs'))
    use_bulk_cache &= ut.is_developer()
    if use_cache is not None:
        use_bulk_cache &= use_cache
    use_bulk_cache &= False
    # use_bulk_cache = True
    if use_bulk_cache:
        from os.path import dirname

        cache_dir = ut.ensuredir((dirname(ut.get_module_dir(wbia)), 'BULK_TESTRES'))
        _cache_wrp = ut.cached_func('testreslist', cache_dir=cache_dir)
        _load_testres = _cache_wrp(harness.run_expt)
    else:
        _load_testres = harness.run_expt

    testres = _load_testres(
        ibs,
        acfg_name_list,
        test_cfg_name_list,
        qaid_override=qaid_override,
        daid_override=daid_override,
        initial_aids=initial_aids,
        use_cache=use_cache,
    )
    # testres = test_result.combine_testres_list(ibs, testres_list)

    if ut.VERBOSE:
        logger.info(testres)
    return ibs, testres
"""
created on:2017/10/6
author:DilicelSten
target:从薛之谦的50首歌词中切词并统计词频制作词云
finished on:2017/10/6
"""
import jieba.analyse
import os
from os import path
import jieba
import matplotlib.pyplot as plt
from scipy.misc import imread
from wordcloud import WordCloud,ImageColorGenerator


d = path.dirname(__file__)  # directory of this script

all_words = []
ipath = '../lyrics/'
lyrics = ''

stopwords = [line.strip().decode('gbk') for line in open('stop_words.txt').readlines()]
for filename in os.listdir(ipath):
    # print (filename)
    with open(ipath + filename, 'r') as f:
        lyrics += f.read().decode('utf-8')
# print (lyrics)
result = jieba.analyse.textrank(lyrics, topK=1000, withWeight=True)  # TextRank-based extraction of the top 1000 keywords
# print result[0][0]
keywords = dict()
for i in result:
Example #52
0
from setuptools import setup
from os import path

this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='wordcloud_fa',
    packages=['wordcloud_fa'],
    version='0.1.8',
    license='MIT',
    description='A wrapper for wordcloud module for creating persian (and other rtl languages) word cloud.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Mohammadreza Alihoseiny',
    author_email='*****@*****.**',
    url='https://github.com/alihoseiny/word_cloud_fa',
    download_url='https://github.com/alihoseiny/word_cloud_fa/archive/V0.1.8.tar.gz',
    keywords=['wordcloud', 'word cloud', 'Farsi', 'persian', 'Iran', 'nlp', 'National Language Processing',
              'text processing', 'data visualization'],
    install_requires=[
        'numpy>=1.18.1',
        'pillow>=7.0.0',
        'matplotlib>=3.1.2',
        'arabic_reshaper',
        'python-bidi>=0.4.2',
        'wordcloud==1.7.0',
        'hazm'
    ],
    package_data={'wordcloud_fa': ['stopwords', 'Fonts/font.ttf']},
Example #53
0
    TextLogErrorMetadata,
    User,
)
from treeherder.perf.models import (
    IssueTracker,
    PerformanceAlert,
    PerformanceAlertSummary,
    PerformanceDatum,
    PerformanceFramework,
    PerformanceSignature,
    PerformanceTag,
)
from treeherder.services.pulse.exchange import get_exchange

IS_WINDOWS = "windows" in platform.system().lower()
SAMPLE_DATA_PATH = join(dirname(__file__), 'sample_data')


def pytest_addoption(parser):
    parser.addoption(
        "--runslow", action="store_true", help="run slow tests",
    )


def pytest_runtest_setup(item):
    """
    Per-test setup.
    - Add an option to run those tests marked as 'slow'
    - Clear the django cache between runs
    """
Example #54
0
if sys.version_info[0] == 3:
    test_requires.append("bump2version>=1.0,<2.0")

if is_coap_supported():
    install_requires.append('aiocoap[linkheader]==0.4a1')

if is_mqtt_supported():
    install_requires.append('hbmqtt>=0.9.4,<1.0')
    install_requires.append('websockets>=8.0,<9.0')

if is_dnssd_supported():
    install_requires.append('zeroconf>=0.21.3,<0.22.0')
    test_requires.append('aiozeroconf==0.1.8')

this_dir = path.abspath(path.dirname(__file__))

with open(path.join(this_dir, 'README.md')) as fh:
    long_description = fh.read()

setup(
    name='wotpy',
    version=__version__,
    description='Python implementation of a W3C WoT Runtime and the WoT Scripting API',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='wot iot gateway fog w3c',
    author='Andres Garcia Mangas',
    author_email='*****@*****.**',
    url='https://github.com/agmangas/wot-py',
    classifiers=[
Example #55
0
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)

    cfg.merge_from_dict(args.cfg_options)

    # Load output_config from cfg
    output_config = cfg.get('output_config', {})
    if args.out:
        # Overwrite output_config from args.out
        output_config = Config._merge_a_into_b(
            dict(out=args.out), output_config)

    # Load eval_config from cfg
    eval_config = cfg.get('eval_config', {})
    if args.eval:
        # Overwrite eval_config from args.eval
        eval_config = Config._merge_a_into_b(
            dict(metrics=args.eval), eval_config)
    if args.eval_options:
        # Add options from args.eval_options
        eval_config = Config._merge_a_into_b(args.eval_options, eval_config)

    assert output_config or eval_config, \
        ('Please specify at least one operation (save or eval the '
         'results) with the argument "--out" or "--eval"')

    dataset_type = cfg.data.test.type
    if output_config.get('out', None):
        out = output_config['out']
        # make sure the dirname of the output path exists
        mmcv.mkdir_or_exist(osp.dirname(out))
        _, suffix = osp.splitext(out)
        if dataset_type == 'AVADataset':
            assert suffix[1:] == 'csv', ('For AVADataset, the format of the '
                                         'output file should be csv')
        else:
            assert suffix[1:] in file_handlers, (
                'The format of the output '
                'file should be json, pickle or yaml')

    # set cudnn benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.model.get('test_cfg') is None and cfg.get('test_cfg') is None:
        cfg.model.setdefault('test_cfg',
                             dict(average_clips=args.average_clips))
    else:
        # You can set average_clips during testing; it will override the
        # original setting.
        if args.average_clips is not None:
            if cfg.model.get('test_cfg') is not None:
                cfg.model.test_cfg.average_clips = args.average_clips
            else:
                cfg.test_cfg.average_clips = args.average_clips

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # The flag is used to register module's hooks
    cfg.setdefault('module_hooks', [])

    # build the dataloader
    dataset = build_dataset(cfg.data.test, dict(test_mode=True))
    dataloader_setting = dict(
        videos_per_gpu=cfg.data.get('videos_per_gpu', 1),
        workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
        dist=distributed,
        shuffle=False)
    dataloader_setting = dict(dataloader_setting,
                              **cfg.data.get('test_dataloader', {}))
    data_loader = build_dataloader(dataset, **dataloader_setting)

    # build the model and load checkpoint
    model = build_model(
        cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))

    register_module_hooks(model.backbone, cfg.module_hooks)

    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')

    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    rank, _ = get_dist_info()
    if rank == 0:
        if output_config.get('out', None):
            out = output_config['out']
            print(f'\nwriting results to {out}')
            dataset.dump_results(outputs, **output_config)
        if eval_config:
            eval_res = dataset.evaluate(outputs, **eval_config)
            for name, val in eval_res.items():
                print(f'{name}: {val:.04f}')
Example #56
0
            print(e.message)
            self.assertEqual(0, 1, 'Cannot add input data to datastream')

    def tearDown(self):  # teardown
        for ds in self.created_datastreams:
            try:
                self.fclient.delete_datastream(ds)
            except Exception as e:
                print(e.message)

    pass


if __name__ == '__main__':
    if __package__ is None:
        import sys
        from os import path
        sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
        from falkonryclient import schemas as Schemas
        from falkonryclient import client as FClient
    else:
        from ..falkonryclient import schemas as Schemas
        from ..falkonryclient import client as FClient
    unittest.main(testRunner=xmlrunner.XMLTestRunner(output='out'),
                  failfast=False,
                  buffer=False,
                  catchbreak=False)
else:
    from falkonryclient import schemas as Schemas
    from falkonryclient import client as FClient
Example #57
0
    # '-DNDEBUG',
    "-std=c++11",
    "-xc++",
    "-I",
    "/usr/include/eigen3",
    "-I",
    "/usr/local/include",
    "-I",
    "/usr/local/cuda/include",
]
if "ROS_DISTRO" in os.environ:
    ros_base = "/opt/ros/%(ROS_DISTRO)s" % os.environ
    BASE_FLAGS += ["-I", ros_base + "/include"]

SOURCE_EXTENSIONS = [
    ".cpp",
    ".cxx",
    ".cc",
    ".c",
]

HEADER_EXTENSIONS = [".h", ".hxx", ".hpp", ".hh"]

DIR_OF_THIS_SCRIPT = p.abspath(p.dirname(__file__))


def Settings(**kwargs):
    return {
        "flags": BASE_FLAGS,
    }
Example #58
0
def swig_import_helper():
    from os.path import dirname
    import imp
    fp = None
    try:
        fp, pathname, description = imp.find_module('_itkHistogramToEntropyImageFilterPython', [dirname(__file__)])
    except ImportError:
        import _itkHistogramToEntropyImageFilterPython
        return _itkHistogramToEntropyImageFilterPython
    if fp is not None:
        try:
            _mod = imp.load_module('_itkHistogramToEntropyImageFilterPython', fp, pathname, description)
        finally:
            fp.close()
        return _mod
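The helper above relies on the long-deprecated imp module (removed in Python 3.12). A hedged sketch of an importlib-based equivalent follows; this is not ITK's actual shipped code, and the '.so' suffix is an assumption (real builds use platform-specific suffixes from importlib.machinery.EXTENSION_SUFFIXES):

import importlib.util
from os.path import dirname, exists, join

def swig_import_helper():
    # Look for the compiled SWIG extension next to this file first.
    so_path = join(dirname(__file__), '_itkHistogramToEntropyImageFilterPython.so')  # suffix is an assumption
    if not exists(so_path):
        # Fall back to a regular import from sys.path.
        import _itkHistogramToEntropyImageFilterPython
        return _itkHistogramToEntropyImageFilterPython
    spec = importlib.util.spec_from_file_location(
        '_itkHistogramToEntropyImageFilterPython', so_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module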
Example #59
0
#!/usr/bin/env python3

import os
import os.path
import subprocess
import signal
from os.path import dirname,abspath
import sys

kind = sys.argv[1]
pwd = os.getcwd()
nwd = dirname(dirname(dirname(abspath(__file__))))
print(nwd)
os.chdir(nwd)

##babble
#####################################################################
# Run diff and after it run cost_function
subprocess.run(["./tmp/"+kind+"/Remove_punctuation $(pwd)/tmp/"+kind+"/clean_signal $(pwd)/"+kind+"/my_output/clean_signal "+kind],stdout=subprocess.PIPE, shell=True)
######################################################################
subprocess.run(["./tmp/"+kind+"/Remove_punctuation $(pwd)/tmp/"+kind+"/processed_noised_signal $(pwd)/"+kind+"/my_output/processed_noised_signal "+kind],stdout=subprocess.PIPE, shell=True)
######################################################################
subprocess.run(["./tmp/"+kind+"/Remove_punctuation $(pwd)/tmp/"+kind+"/noised_signal $(pwd)/"+kind+"/my_output/noised_signal "+kind],stdout=subprocess.PIPE, shell=True)

os.chdir(pwd)

Example #60
0
import os
import sys

from os.path import join, dirname
from dotenv import load_dotenv
from optparse import IndentedHelpFormatter, OptionGroup, OptionParser

dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)

try:
    import maka.inquirer as inquirer
except ImportError:
    import inspect
    CURRENT_DIR = os.path.dirname(
        os.path.abspath(inspect.getfile(inspect.currentframe())))
    PARENT_DIR = os.path.dirname(CURRENT_DIR)
    os.sys.path.insert(0, PARENT_DIR)
    import inquirer


def main():
    """
    The method called when running this script
    """
    usage = """similarity.py --s1 "this is a test" --s2 "that was a test"
A command-line tool to test similarity to Microsoft's Academic Knowledge."""

    fmt = IndentedHelpFormatter(max_help_position=50, width=100)
    parser = OptionParser(usage=usage, formatter=fmt)
    group = OptionGroup(