Example #1
from os.path import abspath, relpath

def get_path_id(cur, path, workdir=None):
    path = abspath(path)
    if workdir:
        workdir = abspath(workdir)
        path = relpath(path, workdir)
    # parameterized query (sqlite3 paramstyle) avoids SQL injection via crafted path names
    cur.execute('SELECT Id FROM PATHs WHERE Path = ?', (path,))
    return cur.fetchall()[0][0]
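A minimal usage sketch for the helper above, assuming a SQLite database with a PATHs(Id, Path) table; the schema and inserted row are invented for illustration:

import sqlite3

con = sqlite3.connect(':memory:')
cur = con.cursor()
cur.execute('CREATE TABLE PATHs (Id INTEGER PRIMARY KEY, Path TEXT)')
cur.execute('INSERT INTO PATHs (Path) VALUES (?)', ('src/main.py',))

# with workdir given, the absolute path is converted back to a
# workdir-relative one before the lookup
print(get_path_id(cur, 'src/main.py', workdir='.'))  # -> 1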
Example #2
import os

def relpath(path, start=os.path.curdir):
    """Return a relative version of a path (ntpath backport from Python 2.6)."""
    # note: splitunc exists only in ntpath on Python 2; this backport targets
    # Windows platforms whose os.path lacks relpath
    from os.path import sep, curdir, join, abspath, commonprefix, \
         pardir, splitunc

    if not path:
        raise ValueError("no path specified")
    start_list = abspath(start).split(sep)
    path_list = abspath(path).split(sep)
    if start_list[0].lower() != path_list[0].lower():
        unc_path, rest = splitunc(path)
        unc_start, rest = splitunc(start)
        if bool(unc_path) ^ bool(unc_start):
            raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                             % (path, start))
        else:
            raise ValueError("path is on drive %s, start on drive %s"
                             % (path_list[0], start_list[0]))
    # Work out how much of the filepath is shared by start and path.
    for i in range(min(len(start_list), len(path_list))):
        if start_list[i].lower() != path_list[i].lower():
            break
    else:
        i += 1

    rel_list = [pardir] * (len(start_list) - i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
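Since Python 2.6 the same behaviour ships in the standard library as os.path.relpath; a quick sanity check with made-up POSIX paths:

import os.path

print(os.path.relpath('/home/user/project/src', '/home/user/docs'))  # ../project/src
print(os.path.relpath('/home/user/docs', '/home/user/docs'))         # .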
Example #3
    def test_get_set_sample_network(self):
        ndex = nc.Ndex(host=tt.TESTSERVER, username=tt.testUser1, password=tt.testUserpasswd, debug=True)

        with open(path.join(path.abspath(path.dirname(__file__)),example_network_1), 'r') as file_handler:
            network_in_cx = file_handler.read()

        # test save_cx_stream_as_new_network
        test_network_1_uri = ndex.save_cx_stream_as_new_network(network_in_cx)
        self.assertTrue(test_network_1_uri.startswith(tt.TESTSERVER + ndex_network_resource))

        network_UUID = str(test_network_1_uri.split("/")[-1])

        time.sleep(20)

        with open(path.join(path.abspath(path.dirname(__file__)),sample_network), 'r') as file_handler:
            sample_cx = file_handler.read()
        ndex.set_network_sample(network_UUID, sample_cx)

        time.sleep(3)
        # get network summary with the new properties
        sample_from_server = ndex.get_sample_network(network_UUID)
        putJson = json.loads(sample_cx)
        self.assertTrue(len(putJson) == len(sample_from_server))


        # test delete_network
        del_network_return = ndex.delete_network(network_UUID)
        self.assertTrue(del_network_return == '')
Example #4
def check_mask_coverage(epi, brainmask):
    from os.path import abspath
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nilearn import plotting
    from nipype.interfaces.nipy.preprocess import Trim

    trim = Trim()
    trim.inputs.in_file = epi
    trim.inputs.end_index = 1
    trim.inputs.out_file = 'epi_vol1.nii.gz'
    trim.run()
    epi_vol = abspath('epi_vol1.nii.gz')

    maskcheck_filename='maskcheck.png'
    display = plotting.plot_anat(epi_vol, display_mode='ortho',
                                 draw_cross=False,
                                 title = 'brainmask coverage')
    display.add_contours(brainmask,levels=[.5], colors='r')
    display.savefig(maskcheck_filename)
    display.close()
    maskcheck_file = abspath(maskcheck_filename)

    return maskcheck_file
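Functions like this, with their imports inside the body and a plain path returned, are written so they can be wrapped in a nipype Function interface; a hedged sketch, with the input file names invented:

from nipype.interfaces.utility import Function

maskcheck = Function(input_names=['epi', 'brainmask'],
                     output_names=['maskcheck_file'],
                     function=check_mask_coverage)
maskcheck.inputs.epi = 'sub01_bold.nii.gz'        # hypothetical inputs
maskcheck.inputs.brainmask = 'sub01_mask.nii.gz'
# res = maskcheck.run()  # res.outputs.maskcheck_file is the abspath of maskcheck.png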
Example #5
def results_table(path_dict):
    """ Return precalculated results images for subject info in `path_dict`

    Parameters
    ----------
    path_dict : dict
        containing key 'rootdir'

    Returns
    -------
    rtab : dict
        dict with keys given by run directories for this subject, values being a
        list with filenames of effect and sd images.
    """
    # Which runs correspond to this design type?
    rootdir = path_dict['rootdir']
    runs = filter(lambda f: isdir(pjoin(rootdir, f)),
                  ['results_%02d' % i for i in range(1,5)] )

    # Find out which contrasts have t-statistics,
    # storing the filenames for reading below

    results = {}

    for rundir in runs:
        rundir = pjoin(rootdir, rundir)
        for condir in listdir(rundir):
            fname_effect = abspath(pjoin(rundir, condir, 'effect.nii'))
            fname_sd = abspath(pjoin(rundir, condir, 'sd.nii'))
            if exists(fname_effect) and exists(fname_sd):
                results.setdefault(condir, []).append([fname_effect,
                                                       fname_sd])
    return results
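A usage sketch, assuming the layout the function expects (rootdir/results_01 ... results_04, each with per-contrast subdirectories holding effect.nii and sd.nii); the subject path is made up:

path_dict = {'rootdir': '/data/fiac/subj01'}  # hypothetical
for condir, images in results_table(path_dict).items():
    for fname_effect, fname_sd in images:
        print(condir, fname_effect, fname_sd)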
Example #6
    def suite_for_path(self, path_arg):
        abs_path = path.abspath(path_arg)
        if os.path.exists(abs_path):
            for prefix, suite in TEST_SUITES_BY_PREFIX.items():
                if abs_path.startswith(prefix):
                    return suite
        return None
Example #7
def find_pylintrc():
    """search the pylint rc file and return its path if found, else None
    """
    # is there a pylint rc file in the current directory ?
    if exists("pylintrc"):
        return abspath("pylintrc")
    if isfile("__init__.py"):
        curdir = abspath(os.getcwd())
        while isfile(join(curdir, "__init__.py")):
            curdir = abspath(join(curdir, ".."))
            if isfile(join(curdir, "pylintrc")):
                return join(curdir, "pylintrc")
    if "PYLINTRC" in os.environ and exists(os.environ["PYLINTRC"]):
        pylintrc = os.environ["PYLINTRC"]
    else:
        user_home = expanduser("~")
        if user_home == "~" or user_home == "/root":
            pylintrc = ".pylintrc"
        else:
            pylintrc = join(user_home, ".pylintrc")
            if not isfile(pylintrc):
                pylintrc = join(user_home, ".config", "pylintrc")
    if not isfile(pylintrc):
        if isfile("/etc/pylintrc"):
            pylintrc = "/etc/pylintrc"
        else:
            pylintrc = None
    return pylintrc
Example #8
    def _is_under_root(self, full_path):
        """Guard against arbitrary file retrieval."""
        if (path.abspath(full_path) + path.sep)\
                .startswith(path.abspath(self.root) + path.sep):
            return True
        else:
            return False
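The appended path.sep is what turns a string-prefix test into a directory-containment test; without it, a sibling directory that shares the prefix slips through. A small demonstration with invented POSIX paths:

from os import path

root = '/srv/files'
evil = '/srv/files-secret/key.pem'
print(path.abspath(evil).startswith(path.abspath(root)))  # True  -- naive prefix check is fooled
print((path.abspath(evil) + path.sep)
      .startswith(path.abspath(root) + path.sep))         # False -- separator-suffixed check holds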
Example #9
def check_filepath(filepath):
	if not filepath:
		return None
	
	base_filename = basename(filepath)
	abs_dirname = dirname(abspath(filepath)) if base_filename else abspath(filepath)
	return join(abs_dirname, base_filename)
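The effect is to normalise the argument against the current working directory while passing empty input through as None; for example, assuming the working directory is /home/user:

print(check_filepath('notes/todo.txt'))  # /home/user/notes/todo.txt
print(check_filepath('notes/'))          # /home/user/notes/  (no basename, trailing sep kept)
print(check_filepath(None))              # None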
Example #10
def build_chm_doc(libname):
    """Return CHM documentation file (on Windows only), which is copied under 
    {PythonInstallDir}\Doc, hence allowing Spyder to add an entry for opening 
    package documentation in "Help" menu. This has no effect on a source 
    distribution."""
    args = ''.join(sys.argv)
    if os.name == 'nt' and ('bdist' in args or 'build' in args):
        try:
            import sphinx  # analysis:ignore
        except ImportError:
            print('Warning: `sphinx` is required to build documentation',
                  file=sys.stderr)
            return
        hhc_base = r'C:\Program Files%s\HTML Help Workshop\hhc.exe'
        for hhc_exe in (hhc_base % '', hhc_base % ' (x86)'):
            if osp.isfile(hhc_exe):
                break
        else:
            print('Warning: `HTML Help Workshop` is required to build CHM '\
                  'documentation file', file=sys.stderr)
            return
        doctmp_dir = 'doctmp'
        subprocess.call('sphinx-build -b htmlhelp doc %s' % doctmp_dir,
                        shell=True)
        atexit.register(shutil.rmtree, osp.abspath(doctmp_dir))
        fname = osp.abspath(osp.join(doctmp_dir, '%s.chm' % libname))
        subprocess.call('"%s" %s' % (hhc_exe, fname), shell=True)
        if osp.isfile(fname):
            return fname
        else:
            print('Warning: CHM building process failed', file=sys.stderr)
Example #11
def inside(dir, name):
    """
    检查给定的目录中是否有给定的文件名。
    """
    dir = abspath(dir)
    name = abspath(name)
    return name.startswith(join(dir, ''))
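Because join(dir, '') appends a trailing separator, sibling names cannot fool the prefix test, and a directory does not count as inside itself; a quick check with invented POSIX paths:

print(inside('/srv/app', '/srv/app/static/logo.png'))  # True
print(inside('/srv/app', '/srv/application/x.png'))    # False: sibling, not child
print(inside('/srv/app', '/srv/app'))                  # False: a dir is not inside itself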
Example #12
def fixate():
    "puts activation code to usercustomize.py for user"
    print_message('Fixate')
    import site
    userdir = site.getusersitepackages()
    if not userdir:
        raise PundleException('Cannot fixate: user has no site-packages directory')
    try:
        makedirs(userdir)
    except OSError:
        pass
    template = FIXATE_TEMPLATE.replace('op.dirname(__file__)', "'%s'" % op.abspath(op.dirname(__file__)))
    usercustomize_file = op.join(userdir, 'usercustomize.py')
    print_message('Will edit %s file' % usercustomize_file)
    if op.exists(usercustomize_file):
        content = open(usercustomize_file).read()
        if '# pundle user customization start' in content:
            regex = re.compile(r'\n# pundle user customization start.*# pundle user customization end\n', re.DOTALL)
            content, res = regex.subn(template, content)
            open(usercustomize_file, 'w').write(content)
        else:
            open(usercustomize_file, 'a').write(template)
    else:
        open(usercustomize_file, 'w').write(template)
    link_file = op.join(userdir, 'pundle.py')
    if op.lexists(link_file):
        print_message('Removing existing link to pundle')
        os.unlink(link_file)
    print_message('Create link to pundle %s' % link_file)
    os.symlink(op.abspath(__file__), link_file)
    print_message('Complete')
Example #13
def getIcons(filename=None):
    """Creates wxBitmaps ``self.icon`` and ``self.iconAdd`` based on the the image.
    The latter has a plus sign added over the top.

    png files work best, but anything that wx.Image can import should be fine
    """
    icons = {}
    if filename is None:
        filename = join(dirname(abspath(__file__)), 'base.png')
        
    # get the low-res version first
    im = Image.open(filename)
    icons['24'] = pilToBitmap(im, scaleFactor=0.5)
    icons['24add'] = pilToBitmap(im, scaleFactor=0.5)
    # try to find a 128x128 version
    filename128 = filename[:-4]+'128.png'
    if False: # TURN OFF FOR NOW os.path.isfile(filename128):
        im = Image.open(filename128)
    else:
        im = Image.open(filename)
    icons['48'] = pilToBitmap(im)
    # add the plus sign
    add = Image.open(join(dirname(abspath(__file__)), 'add.png'))
    im.paste(add, [0, 0, add.size[0], add.size[1]], mask=add)
    # im.paste(add, [im.size[0]-add.size[0], im.size[1]-add.size[1],
    #               im.size[0], im.size[1]], mask=add)
    icons['48add'] = pilToBitmap(im)

    return icons
Example #14
    def __init__(self, pdf_page, svg_filename, png_filename):
        self._svg_filename = abspath(svg_filename)
        self._png_filename = abspath(png_filename) if png_filename else None
        self._context, self._surface = self._get_context(
            svg_filename, *pdf_page.get_size())

        pdf_page.render(self._context)
Example #15
    def check(self):
        """ Checks parameters and paths
        """

        if 'TITLE' not in PAR:
            setattr(PAR, 'TITLE', unix.basename(abspath('..')))

        if 'SUBTITLE' not in PAR:
            setattr(PAR, 'SUBTITLE', unix.basename(abspath('.')))

        # check parameters
        if 'NTASK' not in PAR:
            setattr(PAR, 'NTASK', 1)

        if 'NPROC' not in PAR:
            setattr(PAR, 'NPROC', 1)

        if 'VERBOSE' not in PAR:
            setattr(PAR, 'VERBOSE', 1)

        # check paths
        if 'GLOBAL' not in PATH:
            setattr(PATH, 'GLOBAL', join(abspath('.'), 'scratch'))

        if 'LOCAL' not in PATH:
            setattr(PATH, 'LOCAL', '')

        if 'SUBMIT' not in PATH:
            setattr(PATH, 'SUBMIT', unix.pwd())

        if 'OUTPUT' not in PATH:
            setattr(PATH, 'OUTPUT', join(PATH.SUBMIT, 'output'))

        if 'SYSTEM' not in PATH:
            setattr(PATH, 'SYSTEM', join(PATH.GLOBAL, 'system'))
Example #16
    def test_profile_with_no_profiles_in_files(self):
        conf_file = path.join(path.dirname(path.abspath(__file__)), 'resources', 'conf', 'conf.ini')
        conf_dist_file = path.join(path.dirname(path.abspath(__file__)), 'resources', 'conf', 'conf_dist.ini')
        profiles_dir = path.join(path.dirname(path.abspath(__file__)), 'resources', 'conf', 'profiles')
        conf = Conf(conf_file, conf_dist_file, profiles_dir)

        self.assertEqual(len(conf.get_profiles()), 1)
Example #17
def syncfiles(args):
    if len(args) < 3:
        printtofile('not enough args, need: files src_host dest_hosts')
        return
    dest_hosts = []
    src_host = ''
    files = ast.literal_eval(args[2])
    if len(args) >= 4:
        src_host = args[3]
    if len(args) >= 5:
        dest_hosts = ast.literal_eval(args[4])
    if len(dest_hosts) == 0:
        if len(src_host) == 0:
            dest_hosts = primary_host + replicas_host
        else:
            dest_hosts = replicas_host

    for file in files:
        fullfilepath = ''
        if src_host == '':
            fullfilepath = base_dir + '/' + path.abspath(file).replace(path.abspath(local_base), '')
            printtofile('local to remote : ' + fullfilepath)
        else:
            fullfilepath = base_dir + '/' + file
        for dest_host in dest_hosts:
            dest_host_path = loginuser + '@' + dest_host + ':' + fullfilepath
            if src_host == '':
                # using local file
                os.system(scp_local + file + ' ' + dest_host_path)
            else:
                os.system(scp_remote + src_host + ':' + fullfilepath + ' ' + dest_host_path)
    printtofile('finished.')
Example #18
    def test_discover_with_init_modules_that_fail_to_import(self):
        vfs = {abspath('/foo'): ['my_package'],
               abspath('/foo/my_package'): ['__init__.py', 'test_module.py']}
        self.setup_import_issue_package_tests(vfs)
        import_calls = []
        def _get_module_from_name(name):
            import_calls.append(name)
            raise ImportError("Cannot import Name")
        loader = unittest.TestLoader()
        loader._get_module_from_name = _get_module_from_name
        suite = loader.discover(abspath('/foo'))

        self.assertIn(abspath('/foo'), sys.path)
        self.assertEqual(suite.countTestCases(), 1)
        # Errors loading the suite are also captured for introspection.
        self.assertNotEqual([], loader.errors)
        self.assertEqual(1, len(loader.errors))
        error = loader.errors[0]
        self.assertTrue(
            'Failed to import test module: my_package' in error,
            'missing error string in %r' % error)
        test = list(list(suite)[0])[0] # extract test from suite
        with self.assertRaises(ImportError):
            test.my_package()
        self.assertEqual(import_calls, ['my_package'])

        # Check picklability
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            pickle.loads(pickle.dumps(test, proto))
Example #19
	def fetch_unpackaged(self, build, to_dir, target):
		'''Retrieves the unpackaged artefacts for a particular build.
		
		:param build: the build to fetch
		:param to_dir: directory that will hold all the unpackaged build trees
		'''
		LOG.info('Fetching Forge templates for %s into "%s"' % (build["id"], to_dir))
		self._authenticate()
		
		filenames = []
		if not path.isdir(to_dir):
			LOG.debug('Creating output directory "%s"' % to_dir)
			os.mkdir(to_dir)
			
		with lib.cd(to_dir):
			location = build['file_output']

			filename = urlsplit(location).path.split('/')[-1]
			
			LOG.debug('writing %s to %s' % (location, path.abspath(filename)))
			self._get_file(
				location,
				write_to_path=filename,
				progress_bar_title=target,
			)
			
			self._handle_unpackaged(target, filename)
			
			filenames.append(path.abspath(target))
		
		LOG.debug('Fetched build into "%s"' % '", "'.join(filenames))
		return filenames
Example #20
    def run(self):
        sFiles=self.parser.getArgs()[0:-1]
        dFile=path.abspath(self.parser.getArgs()[-1])

        for s in sFiles:
            sName=path.abspath(s)
            dName=dFile
            
            if len(s) > 1:
                print(f.name + "Source file", sName, f.reset)
            try:
                source=ParsedParameterFile(sName,
                                           backup=False,
                                           debug=self.opts.debugParser,
                                           listLengthUnparsed=self.opts.longlist,
                                           noBody=self.opts.noBody,
                                           noHeader=self.opts.noHeader,
                                           boundaryDict=self.opts.boundaryDict,
                                           listDict=self.opts.listDict,
                                           listDictWithHeader=self.opts.listDictWithHeader)
            except IOError as e:
                self.warning("Problem with file",sName,":",e)
                continue
            except PyFoamParserError as e:
                self.warning("Parser problem with",sName,":",e)
                continue
Example #21
File: misc.py Project: certik/conda
def launch(fn, prefix=config.root_dir, additional_args=None):
    info = install.is_linked(prefix, fn[:-8])
    if info is None:
        return None

    if not info.get('type') == 'app':
        raise Exception('Not an application: %s' % fn)

    # prepend the bin directory to the path
    fmt = r'%s\Scripts;%s' if sys.platform == 'win32' else '%s/bin:%s'
    env = {'PATH': fmt % (abspath(prefix), os.getenv('PATH'))}
    # copy existing environment variables, but not anything with PATH in it
    for k, v in iteritems(os.environ):
        if 'PATH' not in k:
            env[k] = v
    # allow updating environment variables from metadata
    if 'app_env' in info:
        env.update(info['app_env'])

    # call the entry command
    args = info['app_entry'].split()
    args = [a.replace('${PREFIX}', prefix) for a in args]
    arg0 = find_executable(args[0], env['PATH'])
    if arg0 is None:
        raise Exception('Executable not found: %s' % args[0])
    args[0] = arg0

    cwd = abspath(expanduser('~'))
    if additional_args:
        args.extend(additional_args)
    return subprocess.Popen(args, cwd=cwd, env=env)
Example #22
    def test_get_shared_files(self):
        document = CacheDocument()
        document.add_repository(TEST_DIR)
        document.expand_dir(abspath("data"))
        document.expand_dir(abspath(os.path.join("data", "subdir1")))
        document.share_files(abspath("data"),
                             [os.sep.join([TEST_DIR, "data", ".path"]),
                              os.sep.join([TEST_DIR, "data", ".svn"]),
                              os.sep.join([TEST_DIR, "data", "date.txt"]),
                              os.sep.join([TEST_DIR, "data", "emptydir"]),
                              os.sep.join([TEST_DIR, "data", "profiles"]),
                              os.sep.join([TEST_DIR, "data", "subdir1", ".svn"]),
                              os.sep.join([TEST_DIR, "data", "subdir1", "subsubdir"])],
                             False)
        document.share_files(abspath("data"),
                             [os.sep.join([TEST_DIR, "data"]),
                              os.sep.join([TEST_DIR, "data", ".path"]),
                              os.sep.join([TEST_DIR, "data", "date.txt"]),
                              os.sep.join([TEST_DIR, "data", "routage"]),
                              os.sep.join([TEST_DIR, "data", "subdir1"]),
                              os.sep.join([TEST_DIR, "data", "subdir1", "TOtO.txt"]),
                              os.sep.join([TEST_DIR, "data", "subdir1", "date.doc"])],
                             True)
        shared_files = [file_container.get_path() for file_container
                        in document.get_shared_files()[TEST_DIR]]
        shared_files.sort()
        self.assertEquals(shared_files, [os.sep.join([TEST_DIR, "data", ".path"]),
                                         os.sep.join([TEST_DIR, "data", "02_b_1280x1024.jpg"]),
                                         os.sep.join([TEST_DIR, "data", "Python-2.3.5.zip"]),
                                         os.sep.join([TEST_DIR, "data", "arc en ciel 6.gif"]),
                                         os.sep.join([TEST_DIR, "data", "date.txt"]),
                                         os.sep.join([TEST_DIR, "data", "pywin32-203.win32-py2.3.exe"]),
                                         os.sep.join([TEST_DIR, "data", "routage"]),
                                         os.sep.join([TEST_DIR, "data", "subdir1", "TOtO.txt"]),
                                         os.sep.join([TEST_DIR, "data", "subdir1", "date.doc"])])
Example #23
def copy_strip():
    """Copy idle.html to idlelib/help.html, stripping trailing whitespace.

    Files with trailing whitespace cannot be pushed to the hg cpython
    repository.  For 3.x (on Windows), help.html is generated, after
    editing idle.rst in the earliest maintenance version, with
      sphinx-build -bhtml . build/html
      python_d.exe -c "from idlelib.help import copy_strip; copy_strip()"
    After refreshing TortoiseHG workshop to generate a diff,
    check  both the diff and displayed text.  Push the diff along with
    the idle.rst change and merge both into default (or an intermediate
    maintenance version).

    When the 'earliest' version gets its final maintenance release,
    do an update as described above, without editing idle.rst, to
    rebase help.html on the next version of idle.rst.  Do not worry
    about version changes as version is not displayed.  Examine other
    changes and the result of Help -> IDLE Help.

    If maintenance and default versions of idle.rst diverge, and
    merging does not go smoothly, then consider generating
    separate help.html files from separate idle.htmls.
    """
    src = join(abspath(dirname(dirname(dirname(__file__)))),
               'Doc', 'build', 'html', 'library', 'idle.html')
    dst = join(abspath(dirname(__file__)), 'help.html')
    with open(src, 'rb') as inn,\
         open(dst, 'wb') as out:
        for line in inn:
            out.write(line.rstrip() + b'\n')
    print('idle.html copied to help.html')
Example #24
def test_get_home_dir_7():
    """Using HOMESHARE, os=='nt'."""

    os.name = 'nt'
    env["HOMESHARE"] = abspath(HOME_TEST_DIR)
    home_dir = path.get_home_dir()
    nt.assert_equal(home_dir, abspath(HOME_TEST_DIR))
Example #25
    def iter_files(self):
        """Yield unique real paths for the configured files and directories."""
        seen_paths_ = []
        files = self.filepaths
        dirs = self.dirpaths
        exclude_dirs = self.ignored_dirs

        for filepath in files:
            pth = path.realpath(path.abspath(filepath))
            if pth not in seen_paths_:
                seen_paths_.append(pth)
                yield pth

        for dirpath in dirs:
            dirpath = path.abspath(dirpath)
            for dirpath, dirnames, filenames in walk(dirpath):
                ## remove excluded dirs
                for dir in [dir for dir in exclude_dirs if dir in dirnames]:
                    self.log.debug('Ignoring dir: {0}'.format(dir))
                    dirnames.remove(dir)

                for filepath in filenames:
                    pth = path.join(dirpath, filepath)
                    pth = path.realpath(path.abspath(pth))
                    if pth not in seen_paths_:
                        seen_paths_.append(pth)
                        yield pth
Example #26
def main():
  """The main function."""
  options = ParseOptions()
  required_flags = ["version_file", "output", "input", "build_dir",
                    "gen_out_dir", "auto_updater_dir", "build_type"]
  for flag in required_flags:
    if getattr(options, flag) is None:
      logging.error("--%s is not specified." % flag)
      exit(-1)

  version = mozc_version.MozcVersion(options.version_file)

  # \xC2\xA9 is the copyright mark in UTF-8
  copyright_message = '\xC2\xA9 %d Google Inc.' % _COPYRIGHT_YEAR
  long_version = version.GetVersionString()
  short_version = version.GetVersionInFormat('@MAJOR@.@MINOR@.@BUILD@')
  variables = {
      'MOZC_VERSIONINFO_MAJOR': version.GetVersionInFormat('@MAJOR@'),
      'MOZC_VERSIONINFO_MINOR': version.GetVersionInFormat('@MINOR@'),
      'MOZC_VERSIONINFO_LONG': long_version,
      'MOZC_VERSIONINFO_SHORT': short_version,
      'MOZC_VERSIONINFO_FINDER':
        'Google Japanese Input %s, %s' % (long_version, copyright_message),
      'GEN_OUT_DIR': path.abspath(options.gen_out_dir),
      'BUILD_DIR': path.abspath(options.build_dir),
      'AUTO_UPDATER_DIR': path.abspath(options.auto_updater_dir),
      'MOZC_DIR': path.abspath(path.join(os.getcwd(), ".."))
      }

  open(options.output, 'w').write(
      _RemoveDevOnlyLines(
          _ReplaceVariables(open(options.input).read(), variables),
          options.build_type))
Example #27
def convert_disk_image(args):
    """Convert disk to another format."""
    filename = op.abspath(args.file.name)
    output = op.abspath(args.output)

    os.environ['LIBGUESTFS_CACHEDIR'] = os.getcwd()
    if args.verbose:
        os.environ['LIBGUESTFS_DEBUG'] = '1'

    if args.zerofree:
        guestfish_zerofree(filename)

    for fmt in args.formats:
        if fmt in (tar_formats + disk_formats):
            output_filename = "%s.%s" % (output, fmt)
            if output_filename == filename:
                continue
            logger.info("Creating %s" % output_filename)
            try:
                if fmt in tar_formats:
                    tar_convert(filename, output_filename,
                                args.tar_excludes,
                                args.tar_compression_level)
                else:
                    qemu_convert(filename, fmt, output_filename)
            except ValueError as exp:
                logger.error("Error: %s" % exp)
Example #28
    def test_reset_files(self):
        """reset files"""
        for document in self.documents:
            document.share_file(abspath("data"), True)
            self.assertEquals(document.get_container(abspath("data"))._shared, True)
            document.reset_files()
            self.assertEquals(document.get_files(), {})
Example #29
def rev_get_dataset_root(path):
    """Return the root of an existent dataset containing a given path

    The root path is returned in the same absolute or relative form
    as the input argument. If no associated dataset exists, or the
    input path doesn't exist, None is returned.

    If `path` is a symlink or something other than a directory, the
    root dataset containing its parent directory will be reported.
    If none can be found and a symlink at `path` points to a
    dataset, `path` itself will be reported as the root.
    """
    suffix = '.git'
    altered = None
    if op.islink(path) or not op.isdir(path):
        altered = path
        path = op.dirname(path)
    apath = op.abspath(path)
    # while we can still go up
    while op.split(apath)[1]:
        if op.exists(op.join(path, suffix)):
            return path
        # new test path in the format we got it
        path = op.normpath(op.join(path, os.pardir))
        # no luck, next round
        apath = op.abspath(path)
    # if we applied dirname() at the top, we give it another go with
    # the actual path, if it was itself a symlink, it could be the
    # top-level dataset itself
    if altered and op.exists(op.join(altered, suffix)):
        return altered

    return None
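A self-contained check of the upward search, building a throwaway tree with a fake .git marker (all names invented):

import os
import os.path as op
import tempfile

base = tempfile.mkdtemp()
os.makedirs(op.join(base, 'ds', '.git'))
os.makedirs(op.join(base, 'ds', 'code', 'deep'))

print(rev_get_dataset_root(op.join(base, 'ds', 'code', 'deep')))  # .../ds
print(rev_get_dataset_root(base))                                 # None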
Example #30
    def _assertContent(self, doc):
        """check validity of content"""
        self.assertEquals(u"atao", doc.get_pseudo())
        self.assertEquals(u"Mr", doc.get_title())
        self.assertEquals(u"manu", doc.get_firstname())
        self.assertEquals(u"breton", doc.get_lastname())
        self.assertEquals(QUESTION_MARK(), doc.get_photo())
        self.assertEquals(u"*****@*****.**", doc.get_email())
        self.assertEquals({'City': u'', 'color': u'blue', 'Country': u'',
                           'Favourite Book': u'', 'homepage': u'manu.com',
                           'Favourite Movie': u'', 'Studies': u''},
                          doc.get_custom_attributes())
        # assert correct file structure
        files = doc.get_files()
        self.assertEquals(TEST_DIR in files, True)
        self.assertEquals(files[TEST_DIR][os.sep.join([abspath("data"), "date.txt"])]._tag, "tagos")
        self.assertEquals(files[TEST_DIR][abspath("data")]._shared, False)
        self.assertEquals(files[TEST_DIR][os.sep.join([abspath("data"), "routage"])]._shared, True)
        self.assertEquals(files[TEST_DIR][os.sep.join([abspath("data"), "emptydir"])]._shared, True)
        self.assertEquals(files[TEST_DIR][os.sep.join([abspath("data"), "subdir1"])]._shared, True)
        self.assertEquals(files[TEST_DIR][os.sep.join([abspath("data"), "subdir1", ""])]._shared, True)
        self.assertEquals(files[TEST_DIR][os.sep.join([abspath("data"), "subdir1", "subsubdir"])]._shared, False)
        self.assertEquals(files[TEST_DIR][os.sep.join([abspath("data"), "subdir1", "subsubdir", "null"])]._shared, True)
        self.assertEquals(files[TEST_DIR][os.sep.join([abspath("data"), "subdir1", "subsubdir", "dummy.txt"])]._shared, True)
        self.assertEquals(os.sep.join([abspath("data"), "subdir1", "subsubdir", "default.solipsis"]) in files[TEST_DIR], False)
        # peers
        peers = doc.get_peers()
        self.assertEquals(PROFILE_BRUCE in peers, True)
        self.assertEquals(peers[PROFILE_BRUCE].state, PeerDescriptor.FRIEND)
        self.assertEquals(peers[PROFILE_BRUCE].connected, False)
Example #31
def make_nsi(info, dir_path):
    "Creates the tmp/main.nsi from the template file"
    name = info['name']
    download_dir = info['_download_dir']
    dists = info['_dists']
    py_name, py_version, unused_build = filename_dist(dists[0]).rsplit('-', 2)
    assert py_name == 'python'
    arch = int(info['_platform'].split('-')[1])

    # these appear as __<key>__ in the template, and get escaped
    replace = {
        'NAME': name,
        'VERSION': info['version'],
        'VIPV': make_VIProductVersion(info['version']),
        'COMPANY': info.get('company', 'Unknown, Inc.'),
        'ARCH': '%d-bit' % arch,
        'PY_VER': py_version[:3],
        'PYVERSION': py_version,
        'PYVERSION_JUSTDIGITS': ''.join(py_version.split('.')),
        'OUTFILE': info['_outpath'],
        'LICENSEFILE': abspath(info.get('license_file',
                               join(NSIS_DIR, 'placeholder_license.txt'))),
        'DEFAULT_PREFIX': info.get(
            'default_prefix',
            join('%LOCALAPPDATA%', 'Continuum', name.lower())
        ),
    }
    for key, fn in [('HEADERIMAGE', 'header.bmp'),
                    ('WELCOMEIMAGE', 'welcome.bmp'),
                    ('ICONFILE', 'icon.ico'),
                    ('INSTALL_PY', '.install.py'),
                    ('URLS_FILE', 'urls'),
                    ('URLS_TXT_FILE', 'urls.txt'),
                    ('POST_INSTALL', 'post_install.bat'),
                    ('CONDA_HISTORY', join('conda-meta', 'history')),
                    ('INDEX_CACHE', 'cache')]:
        replace[key] = join(dir_path, fn)
    for key in replace:
        replace[key] = str_esc(replace[key])

    data = read_nsi_tmpl()
    ppd = ns_platform(info['_platform'])
    ppd['add_to_path_default'] = info.get('add_to_path_default', None)
    ppd['register_python_default'] = info.get('register_python_default', None)
    data = preprocess(data, ppd)
    data = fill_template(data, replace)

    cmds = pkg_commands(download_dir, dists, py_version,
                        bool(info.get('keep_pkgs')),
                        bool(info.get('attempt_hardlinks')))

    # division by 10^3 instead of 2^10 is deliberate here. gives us more room
    approx_pkgs_size_kb = int(
        math.ceil(info.get('_approx_pkgs_size', 0) / 1000))

    # these are unescaped (and unquoted)
    for key, value in [
        ('@NAME@', name),
        ('@NSIS_DIR@', NSIS_DIR),
        ('@BITS@', str(arch)),
        ('@PKG_COMMANDS@', '\n    '.join(cmds)),
        ('@WRITE_CONDARC@', '\n    '.join(add_condarc(info))),
        ('@MENU_PKGS@', ' '.join(info.get('menu_packages', []))),
        ('@SIZE@', str(approx_pkgs_size_kb)),
        ]:
        data = data.replace(key, value)

    nsi_path = join(dir_path, 'main.nsi')
    with open(nsi_path, 'w') as fo:
        fo.write(data)
    # Copy all the NSIS header files (*.nsh)
    for fn in os.listdir(NSIS_DIR):
        if fn.endswith('.nsh'):
            shutil.copy(join(NSIS_DIR, fn),
                        join(dir_path, fn))

    print('Created %s file' % nsi_path)
    return nsi_path
Example #32
Enue.points.points >> ibd_first.Enu.Ee
ctheta.points.points >> ibd_first.Enu.ctheta

Enue.points.points >> ibd_first.jacobian.Ee
ibd_first.Enu.Enu >> ibd_first.jacobian.Enu
ctheta.points.points >> ibd_first.jacobian.ctheta

Evis_edges = C.Points(np.linspace(0.0, 12.0, 1201, dtype='d'))

#
# Do some plots
#

if args.output:
    pp = PdfPages(abspath(args.output))
#  Initialize figure
fig, (ax, ax_ratio) = P.subplots(nrows=2, ncols=1)
ax.minorticks_on()
ax.grid()

ax.set_xlabel(r'$E_\nu$, MeV')
ax.set_ylabel(r'$\sigma$')
ax.set_title(r"IBD cross section vs $E_\nu$ in zero order")

ax_ratio.set_xlabel(r'$E_\nu$, MeV')
ax_ratio.set_ylabel(r'Ratio')
# Get evis and cross section
enue = __enu
xsec_zero_order = ibd.xsec.xsec.data()
Example #33
import os.path as op
from setuptools import setup, find_packages
from oardocker import VERSION

here = op.abspath(op.dirname(__file__))


def read(fname):
    ''' Return the file content. '''
    with open(op.join(here, fname)) as f:
        return f.read()


setup(
    name='oar-docker',
    author='Salem Harrache',
    author_email='*****@*****.**',
    version=VERSION,
    url='https://oar.imag.fr/wiki:oar-docker',
    install_requires=[
        'Click',
        'docker',
        'sh',
        'tabulate',
        'arrow',
    ],
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    description='Manage a small OAR development cluster with docker.',
    long_description=read('README.rst') + '\n\n' + read('CHANGES.rst'),
Example #34
import sys
import argparse
import importlib
import os.path as op
import pylint
from pylint.lint import Run

try:
    ModuleNotFoundError
except NameError:
    ModuleNotFoundError = ImportError   # pylint: disable=redefined-builtin

try:
    from vainupylinter.print_logger import PrintLogger, redirect_stdout
except ModuleNotFoundError:
    from print_logger import PrintLogger, redirect_stdout

sys.path.append(op.abspath("."))

def parse_args(args):
    """Handle inputs"""
    parser = argparse.ArgumentParser()
    parser.description = 'File specific pylint analysis.'
    parser.add_argument(
        nargs='*',
        type=str,
        dest='fnames',
        help='The files to pylint. Pylinting is performed only to *.py files.'
    )
    parser.add_argument(
        '-r', '--rcfile',
        type=str,
        default='',
Example #35
import sys
import asyncio
import aiohttp
from os import path
from logging import Logger

sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))


class APIError(Exception):
    """Class that encapsules errors returned by the VirusTotal API."""
    @classmethod
    def from_dict(cls, dict_error):
        """errors returned by the VirusTotal API from dict"""
        return cls(dict_error['code'], dict_error.get('message'))

    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.print_error()


    def print_error(self):
        print(f"{self.code} {self.message}")


class VirusTotalClient:
    """Client for interacting with VirusTotal."""

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self._session = None
Example #36
"""
Private variables for constructing the app entry point are
prefixed with an underscore (e.g. _NAME) and are to be used inside this module.
"""

from os.path import join, dirname, abspath, basename


_HEADER = 'Package Database'
_NAME = basename(dirname(__file__))

_INSTALLED_APPS = (
    'iris.%s' % _NAME,
)

_TEMPLATE_DIRS = (
    abspath(join(dirname(__file__), 'templates')),
)

_URLPATTERNS = {
    'baseurl': r'^app/%s/' % _NAME,
    'patterns': 'iris.%s.urls' % _NAME,
    'apiurl': r'^api/%s/' % _NAME,
    'apipatterns': 'iris.%s.apiurls' % _NAME,
}

_INTRO = """IRIS Package Database.  Contains metadata about packages,
such as maintainers, developers, reviewers, versioning."""

APPINFO = {
    'header': _HEADER,
    'name': _NAME,
Example #37
import os

import envparse

from os.path import abspath, dirname, join

from django.utils.log import DEFAULT_LOGGING

envparse.env.read_envfile()

from .apps import *  # noqa

DEBUG = False
ADMINS = MANAGER = ()

BASE_DIR = os.environ.get(
    'DJANGO_BASE_DIR',
    dirname(dirname(dirname(dirname(abspath(__file__))))),
)

ALLOWED_HOSTS = ['*']

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
        'KEY_PREFIX': 'freenodejobs',
    }
}

TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'DIRS': [
Example #38
def _bundle_extensions(objs: Sequence[Model | Document],
                       resources: Resources) -> List[ExtensionEmbed]:
    names: Set[str] = set()
    bundles: List[ExtensionEmbed] = []

    extensions = [".min.js", ".js"] if resources.minified else [".js"]

    for obj in _all_objs(
            objs
    ) if objs is not None else Model.model_class_reverse_map.values():
        if hasattr(obj, "__implementation__"):
            continue
        name = obj.__view_module__.split(".")[0]
        if name == "bokeh":
            continue
        if name in names:
            continue
        names.add(name)
        module = __import__(name)
        this_file = abspath(module.__file__)
        base_dir = dirname(this_file)
        dist_dir = join(base_dir, "dist")

        ext_path = join(base_dir, "bokeh.ext.json")
        if not exists(ext_path):
            continue

        server_prefix = f"{resources.root_url}static/extensions"
        package_path = join(base_dir, "package.json")

        pkg: Pkg | None = None
        if exists(package_path):
            with open(package_path) as io:
                try:
                    pkg = json.load(io)
                except json.decoder.JSONDecodeError:
                    pass

        artifact_path: str
        server_url: str
        cdn_url: str | None = None

        if pkg is not None:
            pkg_name: str | None = pkg.get("name", None)
            if pkg_name is None:
                raise ValueError("invalid package.json; missing package name")
            pkg_version = pkg.get("version", "latest")
            pkg_main = pkg.get("module", pkg.get("main", None))
            if pkg_main is not None:
                cdn_url = f"{_default_cdn_host}/{pkg_name}@{pkg_version}/{pkg_main}"
            else:
                pkg_main = join(dist_dir, f"{name}.js")
            artifact_path = join(base_dir, normpath(pkg_main))
            artifacts_dir = dirname(artifact_path)
            artifact_name = basename(artifact_path)
            server_path = f"{name}/{artifact_name}"
            if not settings.dev:
                sha = hashlib.sha256()
                sha.update(pkg_version.encode())
                vstring = sha.hexdigest()
                server_path = f"{server_path}?v={vstring}"
        else:
            for ext in extensions:
                artifact_path = join(dist_dir, f"{name}{ext}")
                artifacts_dir = dist_dir
                server_path = f"{name}/{name}{ext}"
                if exists(artifact_path):
                    break
            else:
                raise ValueError(
                    f"can't resolve artifact path for '{name}' extension")

        extension_dirs[name] = artifacts_dir
        server_url = f"{server_prefix}/{server_path}"
        embed = ExtensionEmbed(artifact_path, server_url, cdn_url)
        bundles.append(embed)

    return bundles
Example #39
def main(argv):
    try:
        gopts, args = getopt.getopt(argv[1:], "vi:")
    except getopt.GetoptError:
        print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
        return 2
    opts = {}
    for opt, val in gopts:
        if opt == '-i':
            val = abspath(val)
        opts.setdefault(opt, []).append(val)

    if len(args) == 0:
        path = '.'
    elif len(args) == 1:
        path = args[0]
    else:
        print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
        return 2

    verbose = '-v' in opts

    num = 0
    out = io.StringIO()

    # TODO: replace os.walk run with iteration over output of
    #       `svn list -R`.

    for root, dirs, files in os.walk(path):
        if '.hg' in dirs:
            dirs.remove('.hg')
        if 'examplefiles' in dirs:
            dirs.remove('examplefiles')
        if '-i' in opts and abspath(root) in opts['-i']:
            del dirs[:]
            continue
        # XXX: awkward: for the Makefile call: don't check non-package
        #      files for file headers
        in_pygments_pkg = root.startswith('./pygments')
        for fn in files:

            fn = join(root, fn)
            if fn[:2] == './':
                fn = fn[2:]

            if '-i' in opts and abspath(fn) in opts['-i']:
                continue

            ext = splitext(fn)[1]
            checkerlist = checkers.get(ext, None)
            if not checkerlist:
                continue

            if verbose:
                print("Checking %s..." % fn)

            try:
                lines = open(fn, 'rb').read().decode('utf-8').splitlines()
            except (IOError, OSError) as err:
                print("%s: cannot open: %s" % (fn, err))
                num += 1
                continue

            for checker in checkerlist:
                if not in_pygments_pkg and checker.only_pkg:
                    continue
                for lno, msg in checker(fn, lines):
                    print(u"%s:%d: %s" % (fn, lno, msg), file=out)
                    num += 1
    if verbose:
        print()
    if num == 0:
        print("No errors found.")
    else:
        print(out.getvalue().rstrip('\n'))
        print("%d error%s found." % (num, num > 1 and "s" or ""))
    return int(num > 0)
Example #40
"""A setuptools based setup module.

See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""

# To use a consistent encoding
from os import path

# Always prefer setuptools over distutils
from setuptools import setup, find_packages

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
long_description = ''
try:
  import pypandoc

  long_description = pypandoc.convert(source='README.md', to='rst', format='markdown_github')
except (IOError, ImportError):
  long_description = ''

setup(
  name='chandas',

  # Versions should comply with PEP440.  For a discussion on single-sourcing
  # the version across setup.py and the project code, see
  # https://packaging.python.org/en/latest/single_source_version.html
  version='0.0.4',
Example #41
    subject = args.subject
    database = args.database
    verbose = args.verbose

    jsonfile = args.json
    antecfile = args.order
    if args.order is None:
        antecfile = '/home/grg/git/alfa/alfa_dwi_pipeline_antecedency.json'
    if args.json is None:
        jsonfile = '/home/grg/git/alfa/alfa_dwi_pipeline_io_aug2016.json'

    from brainvisa import axon
    axon.initializeProcesses()
    import neuroHierarchy
    db = neuroHierarchy.databases.database(osp.abspath(database))
    #types = collect_types(jsonfile)
    types = json.load(open(antecfile))

    if subject is None:
        subjects = sorted(
            list(
                set([
                    e.get('subject')
                    for e in db.findDiskItems(_type='Any Type')
                ])))  #['10010', '10015']
        ans = raw_input('do it for all subjects? %s' % subjects)
    else:
        dates = get_subject_dates(subject, db, types)

        table = get_subject_table(dates, types)
Example #42
    def add_output_file(self, filename):
        """ register an output to the current activity """
        self.current_activity.register_output(abspath(filename))
        log.debug("added output entity '{}' to activity: '{}'".format(
            filename, self.current_activity.name))
Example #43
#!/usr/bin/env python3

from os.path import abspath
from os.path import dirname
from os.path import join
from os.path import realpath
from sys import path
from unittest import TestCase

root_dir = abspath(join(dirname(realpath(__file__)), '..'))
path.append(join(root_dir, 'src'))

import labelling  # noqa


class EndToEndTest(TestCase):
    algorithm = 'otsu_hue'

    def test_list_algorithms(self):
        algorithms = labelling.list_algorithms()

        self.assertGreater(len(algorithms), 0)
        self.assertIn(self.algorithm, algorithms)

    def test_create_mask(self):
        image = join(root_dir, 'test_image.jpg')
        algorithm = labelling.image_mask_algorithm_type(self.algorithm)
        morph = 0

        mask = labelling.create_mask(image, algorithm, morph)
Example #44
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages

from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    # Application name:
    name="SeafileContentManager",

    # Version number (initial):
    version="0.1.2",

    # Application author details:
    author="Malte Vogl",
    author_email="*****@*****.**",

    # Packages
    packages=find_packages(exclude=('tests', 'example')),

    # Include additional files into the package
    include_package_data=True,

    url='https://github.com/computational-antiquity/SeafileContentManager/',

    # Details

    license='GPLv3',
    description="ContentManager using Seafile's WebAPI",
Example #45
from worker import FCNWorker
from distributed import distribute
import os, sys
import os.path as pt
from sacred import Experiment
from sacred.observers import file_storage
from trainer import Trainer
from mtl_loss import MultiTaskLoss


def expname(script=None):
    if script is None:
        script = sys.argv[0]
    return pt.splitext(pt.basename(script))[0]


ROOT = pt.abspath(pt.dirname(__file__))
EXP_FOLDER = pt.join(ROOT, '../../exp')
exp = Experiment(' Experiment: Jigsaw with mtl_loss')


# contruct the loader which will give the batches like a dataloader
# this function should also divide the data for each process
def make_loader(file,
                batch_size,
                device,
                world_size,
                rank,
                nworkers,
                image_rng=None,
                image_params=None,
                gpu_augmentation=False,
Example #46
0
import locale
import os
import sys
from os import path

locale.setlocale(locale.LC_ALL, '')
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
SOURCE_DIR = path.dirname(path.dirname(path.abspath(__file__))) + '/'

from common.ops import *
from common.optimizer import AdaShift
from common.data_loader import *
from common.logger import Logger

import numpy as np
import tensorflow as tf

cfg = tf.app.flags.FLAGS

tf.app.flags.DEFINE_integer("n", 10, "")
tf.app.flags.DEFINE_string("sDataSet", "cifar10",
                           "cifar10, mnist, flowers,tiny")

tf.app.flags.DEFINE_string("sResultTagA", "case5",
                           "your tag for each test case")
tf.app.flags.DEFINE_string("sResultTagB", "mlp1024",
                           "your tag for each test case")
tf.app.flags.DEFINE_boolean("bLoadCheckpoint", True, "bLoadCheckpoint")

tf.app.flags.DEFINE_string("discriminator", 'discriminator_mlp', "")
Example #47
"""
Django settings for askcompany project.

Generated by 'django-admin startproject' using Django 2.1.1.

For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""

import os
from os.path import abspath, dirname

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = dirname(dirname(dirname(abspath(__file__))))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')&9kh7+8ynwkqc*7=oc=3ro84!=%ey=i$^hi&hdh*roi&30u#r'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # False

ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = [
Example #48
0
import shutil
import sys
import tarfile
from jupyter_core.paths import ENV_JUPYTER_PATH

from ._version import __version__

if sys.platform == 'win32':
    from subprocess import list2cmdline
else:

    def list2cmdline(cmd_list):
        return ' '.join(map(pipes.quote, cmd_list))


here = osp.dirname(osp.abspath(__file__))


def get_app_dir(app_dir=None):
    """Get the configured JupyterLab app directory.
    """
    app_dir = app_dir or os.environ.get('JUPYTERLAB_DIR')
    app_dir = app_dir or pjoin(ENV_JUPYTER_PATH[0], 'lab')
    return os.path.realpath(app_dir)


def run(cmd, **kwargs):
    """Run a command in the given working directory.
    """
    print('> ' + list2cmdline(cmd))
    kwargs.setdefault('shell', sys.platform == 'win32')
Example #49
0
def main():
    if len(sys.argv) != 2:
        print("Usage: plan_packages.py <plan_file>")
        sys.exit(1)

    planned_pkgs = eval(open(sys.argv[1]).read())

    srcdir = "../../src"
    abs_srcdir = abspath(srcdir)

    src_files = []
    for root, _, files in os.walk(abs_srcdir):
        if root.endswith("/preprettify"):
            continue
        for fn in files:
            if fn[-2:] in (".F", ".c", ".cu"):
                src_files.append(path.join(root, fn))

    src_basenames = [basename(fn).rsplit(".", 1)[0] for fn in src_files]
    for bfn in src_basenames:
        if src_basenames.count(bfn) > 1:
            error("Multiple source files with the same basename: " + bfn)

    # parse source files
    parsed_files = dict()
    for fn in src_files:
        parse_file(parsed_files, fn)  # parses also included files
    print("Parsed %d source files" % len(parsed_files))

    # create table mapping fortran module-names to file-names
    mod2fn = dict()
    for fn in src_files:
        for m in parsed_files[fn]["module"]:
            if m in mod2fn:
                error('Multiple declarations of module "%s"' % m)
            mod2fn[m] = fn
    print("Created mod2fn table, found %d modules." % len(mod2fn))

    # check "one module per file"-convention
    for m, fn in mod2fn.items():
        if basename(fn) != m + ".F":
            error("Names of module and file do not match: " + fn)

    # check for circular dependencies
    for fn in parsed_files.keys():
        find_cycles(parsed_files, mod2fn, fn)

    # read existing package manifests
    packages = dict()
    fn2pkg = dict()
    for fn in parsed_files.keys():
        p = normpath(dirname(fn))
        fn2pkg[basename(fn)] = p
        read_manifest(packages, p)

    # update manifests with planned packages
    for pp in planned_pkgs:
        p = abspath(path.join(srcdir, pp["dirname"]))
        if p not in packages:
            packages[p] = {"problems": []}
        packages[p].update(pp)
        if "files" in pp:
            for fn in pp["files"]:
                fn2pkg[fn] = p
        if "requires+" in pp:
            packages[p]["requires"] += pp["requires+"]
        if "requires-" in pp:
            for i in pp["requires-"]:
                while i in packages[p]["requires"]:
                    packages[p]["requires"].remove(i)

    # process the manifests
    for p in packages.keys():
        process_manifest(packages, p)
        find_pkg_cycles(packages, p)
    print("Read %d package manifests" % len(packages))

    # check dependencies against package manifests
    n_deps = 0
    for fn in src_files:
        p = fn2pkg[basename(fn)]
        deps = collect_include_deps(parsed_files, fn)
        deps += [
            mod2fn[m] for m in collect_use_deps(parsed_files, fn)
            if m in mod2fn
        ]
        n_deps += len(deps)
        for d in deps:
            dp = fn2pkg[basename(d)]
            msg = "%32s  ->  %-32s  %-15s " % (
                basename(fn),
                basename(d),
                "[src" + dp[len(abs_srcdir):] + "]",
            )
            if dp not in packages[p]["allowed_deps"]:
                packages[p]["problems"].append(msg +
                                               "(requirement not listed)")
            if dp != p and "public" in packages[dp]:
                if basename(d) not in packages[dp]["public"]:
                    packages[p]["problems"].append(msg + "(file not public)")

    print("Checked %d dependencies\n" % n_deps)

    for p in sorted(packages.keys()):
        if len(packages[p]["problems"]) == 0:
            continue
        assert p.startswith(abs_srcdir)
        print(" " * 10 +
              "====== Forbidden Dependencies in Package src%s =====" %
              p[len(abs_srcdir):])
        print("\n".join(packages[p]["problems"]) + "\n")
Example #50
    def setup_class(cls):
        # todo: add docs for these
        test_folder = path.split(__file__)[0]
        fn = path.abspath(path.join(test_folder, "ice_test.xyz"))
        at_train = ase.io.read(fn, '0')
        at_test = ase.io.read(fn, '1')

        soap_desc = SOAP(species=['H', 'O'], rcut=4., nmax=6, lmax=6, sigma=0.3, periodic=True, average='off')
        desc_train = soap_desc.create(at_train)
        desc_test = soap_desc.create(at_test)

        cls.K_tr = np.dot(desc_train, desc_train.T) ** 2
        cls.K_test = np.dot(desc_test, desc_train.T) ** 2

        cls.K_tr_save = cls.K_tr.copy()
        cls.K_test_save = cls.K_test.copy()

        # references:
        cls.ref_pvec_train = np.array([[-4.06617973e+00, 1.08520804e+00, 3.69840829e-03, -1.44976066e-02],
                                       [-4.05352401e+00, 1.09320822e+00, 1.15028148e-03, 2.74168800e-02],
                                       [-3.80654705e+00, -1.10403286e+00, 1.04877050e-02, -3.63707475e-03],
                                       [-3.81287331e+00, -1.12812425e+00, 1.29987555e-02, -2.38640711e-02],
                                       [-3.80579914e+00, -1.11806303e+00, 1.15386212e-02, -2.80550703e-03],
                                       [-3.81204091e+00, -1.14223341e+00, 1.40256548e-02, -2.29474563e-02],
                                       [-4.07666395e+00, 1.09555792e+00, 4.74882612e-03, -2.29506563e-02],
                                       [-4.05571605e+00, 1.07800587e+00, 1.48251628e-03, 2.24335026e-02],
                                       [-3.79634766e+00, -1.11187590e+00, 1.04707193e-02, 8.98527376e-03],
                                       [-3.79298753e+00, -1.10617575e+00, 9.84540621e-03, 1.78169808e-02],
                                       [-4.07074480e+00, 1.11864192e+00, 1.27063488e-03, -1.80462524e-02],
                                       [-4.06688594e+00, 1.09362545e+00, 2.15794282e-03, -1.96113470e-02],
                                       [-4.05784859e+00, 1.08740112e+00, 1.47447842e-03, 1.17331366e-02],
                                       [-4.05813149e+00, 1.08739296e+00, 1.75614318e-03, 1.31801983e-02],
                                       [-3.79553551e+00, -1.11389621e+00, 1.04074380e-02, 8.63819290e-03],
                                       [-3.79218637e+00, -1.10818775e+00, 9.78369564e-03, 1.74613912e-02],
                                       [8.06281699e+00, 3.61120658e-02, 6.99331053e-01, 1.75912880e-02],
                                       [7.71411056e+00, 1.64718213e-02, -7.04066810e-01, -9.34054057e-02],
                                       [7.60371751e+00, 3.22014366e-03, -7.28918047e-01, -3.78427909e-02],
                                       [8.06310473e+00, 3.72640108e-02, 6.97723902e-01, 1.24539333e-02],
                                       [7.63289763e+00, 9.78096777e-03, -7.43053420e-01, 6.39070502e-02],
                                       [8.13279172e+00, 4.06699694e-02, 7.22168932e-01, -1.01534123e-01],
                                       [8.07660163e+00, 4.08046239e-02, 6.90906415e-01, 7.69453911e-02],
                                       [7.63397127e+00, 9.22403666e-03, -7.41389253e-01, 6.25790719e-02]],
                                      dtype="float32")

        cls.ref_pvec_test = np.array([[-4.23817197e+00, 2.21216879e-01, 1.56505888e-01, -3.56531020e-02],
                                      [-4.20017877e+00, 6.26185600e-02, 1.44533133e-01, 3.37271109e-02],
                                      [-4.28592079e+00, 2.52459297e-01, 1.68965737e-01, 2.46597830e-03],
                                      [-4.21372514e+00, 2.40352917e-01, 1.35345282e-01, -4.19430108e-02],
                                      [-4.22075792e+00, 2.57834641e-01, 1.46428658e-01, -2.54276131e-02],
                                      [-4.20912533e+00, 1.41107424e-01, 1.34349660e-01, -2.35976049e-02],
                                      [-4.23708825e+00, 8.85194416e-02, 1.61491705e-01, 1.15497682e-02],
                                      [-4.21062574e+00, 2.60190268e-01, 1.36750424e-01, 3.25269508e-03],
                                      [-3.98889410e+00, 1.56896778e-01, 7.17538880e-02, 6.39784979e-02],
                                      [-4.00525321e+00, 1.73568460e-01, 8.23704558e-02, 6.53524973e-02],
                                      [-3.65987514e+00, 6.04595020e-02, -8.57224607e-02, 1.05262634e-01],
                                      [-3.74517767e+00, -9.97008707e-01, -4.41414522e-02, -9.64487284e-02],
                                      [-4.10981478e+00, 1.66066458e-01, 1.12060385e-01, 2.31874639e-02],
                                      [-3.76780263e+00, -1.10609767e-02, -6.28042821e-02, 1.17437832e-01],
                                      [-4.04305882e+00, 2.80428956e-01, 8.85085856e-02, 9.05746488e-02],
                                      [-3.79102766e+00, -1.16690220e+00, -9.50532485e-03, -8.32897739e-02],
                                      [-4.13646883e+00, 2.12319898e-01, 1.00052036e-01, 2.64889789e-02],
                                      [-3.79810541e+00, 7.01554115e-02, -5.60348665e-02, 1.05956141e-01],
                                      [-4.05869532e+00, 3.29850364e-01, 9.29060632e-02, 9.15201392e-02],
                                      [-3.78959362e+00, -1.17952566e+00, -3.02953553e-03, -4.44245483e-02],
                                      [-4.16021423e+00, 2.67263726e-01, 1.05038377e-01, 2.16737311e-02],
                                      [-3.80467128e+00, 2.30076951e-01, -6.38924752e-02, 8.91573211e-02],
                                      [-4.03365112e+00, 3.51188385e-01, 7.69169787e-02, 7.47813405e-02],
                                      [-3.62183900e+00, -1.12477840e+00, -2.97227887e-02, 4.24700267e-02],
                                      [9.10146883e+00, 2.08341286e-01, 5.08705116e-01, -1.97210624e-01],
                                      [9.13581787e+00, 2.08796593e-01, 5.24817925e-01, -2.40741773e-01],
                                      [9.04694197e+00, 2.08949917e-01, 3.96804461e-01, -1.81276499e-01],
                                      [9.07470424e+00, 2.05175107e-01, 4.46638811e-01, -1.30784165e-01],
                                      [7.62273127e+00, -5.72024720e-02, 3.68091975e-01, -7.02586774e-02],
                                      [7.90566964e+00, 1.91416005e-02, -2.93423768e-01, -6.13016842e-02],
                                      [8.02886720e+00, 5.98404709e-03, 1.36396991e-01, -2.54598786e-01],
                                      [7.85220135e+00, -2.53487064e-02, 1.26409015e-02, -1.79153278e-01],
                                      [8.13281083e+00, 2.43322996e-02, 1.99101046e-01, -2.28687789e-01],
                                      [7.84935179e+00, -3.14337990e-02, 5.60258262e-02, -1.90401494e-01],
                                      [8.17793845e+00, 2.68832136e-02, 2.76448251e-01, -1.95965960e-01],
                                      [7.61105954e+00, -5.43027123e-02, -4.45044842e-02, -1.51450933e-01]],
                                     dtype="float32")
Example #51
0
import io
import multiprocessing
from itertools import islice
from os.path import abspath, dirname

# (normalize_stream_to_stream, open_potential_gz and treat_chunk are helpers
#  from the same package; pandas is used downstream.)

def from_vcf_stream_to_maf_stream(input_stream,
                                  output_stream,
                                  config_json,
                                  chunk_size=20):
    with normalize_stream_to_stream(input_stream) as input_stream:

        ann_header = None
        for line in input_stream:
            line_string = line.decode("utf-8")
            if line_string.startswith("##INFO=<ID=ANN"):

                def detect_description(part):
                    splitted_part = part.split("=")
                    if splitted_part[0] == "Description":
                        return splitted_part[1].split(":")[1].replace(" ", "")
                    return None

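                # Extract the annotation column names: take the Description
                # value from this ##INFO=<ID=ANN,...> line (the part after the
                # colon, e.g. "Functional annotations: 'A | B | ...'"), strip
                # the quoting, and split the field list on '|'.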
                ann_header = list(
                    filter(
                        None,
                        map(
                            detect_description,
                            list(line_string[7:-1].replace("\"", "").replace(
                                "<", "").replace(">",
                                                 "").split(",")))))[0].replace(
                                                     "'", "").split("|")
            if line_string.startswith("#CHROM"):
                break

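        # `line` still holds the "#CHROM" row that terminated the loop above:
        # drop the leading '#' and the trailing newline, then split it into
        # column names.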
        vcf_header = line.decode("utf-8")[1:-1].split("\t")
        format_index = vcf_header.index("FORMAT")
        individual_headers = vcf_header[format_index:]

        mp_pool = multiprocessing.Pool()
        results = []

        renaming_dictionary = {}
        for elem in config_json["all_renamings"]:
            renaming_dictionary[elem["database"]] = {}
            for rename in elem["database_renamings"]:
                renaming_dictionary[elem["database"]][
                    rename["original_name"]] = rename["new_name"]

        field_dictionary = {}
        for elem in config_json["annotated_fields"]:
            field_dictionary[elem["database"]] = elem["fields"]

        module_path = dirname(dirname(abspath(__file__)))
        path_to_dbsnp = module_path + "/data/cache/dbSNP/All_20180418.vcf.gz"
        dbSNP_dict = {}
        with open_potential_gz(path_to_dbsnp) as dbSNP_database:
            for line in dbSNP_database:
                line_string = line.decode("utf-8")
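                # Classify each dbSNP INFO field from its header metadata:
                # Type=Flag -> boolean, Number=1 -> single value ("unique"),
                # Number=. -> variable count ("multi").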
                if line_string.startswith("##INFO=<ID="):
                    splitted_string = line_string.rstrip(
                        "\n")[len("##INFO=<"):-1].split(",")
                    if splitted_string[2].split("=")[1] == "Flag":
                        dbSNP_dict[splitted_string[0].split("=")[1]] = "bool"
                    elif splitted_string[1].split("=")[1] == "1":
                        dbSNP_dict[splitted_string[0].split("=")[1]] = "unique"
                    elif splitted_string[1].split("=")[1] == ".":
                        dbSNP_dict[splitted_string[0].split("=")[1]] = "multi"

                if line_string.startswith("#CHROM"):
                    break

        field_dictionary["dbSNP_boolean"] = []
        field_dictionary["dbSNP_multi"] = []
        field_dictionary["dbSNP_unique"] = []
        if "dbSNP" in field_dictionary.keys():
            for field in field_dictionary["dbSNP"]:
                if dbSNP_dict[field] == "bool":
                    field_dictionary["dbSNP_boolean"].append(field)
                elif dbSNP_dict[field] == "unique":
                    field_dictionary["dbSNP_unique"].append(field)
                elif dbSNP_dict[field] == "multi":
                    field_dictionary["dbSNP_multi"].append(field)

        field_dictionary.pop("dbSNP", None)

        field_list = [
            "dbClinVar", "dbNSFP", "dbCOSMICCodingMutations",
            "dbCOSMICNonCodingVariants", "dbCOSMICMutantCensus",
            "dbCiVICGeneSummaries", "dbCiVICVariantSummaries",
            "dbCiVICClinicalEvidence", "dbSNP_boolean", "dbSNP_unique",
            "dbSNP_multi"
        ]
        for field in field_list:
            if field not in field_dictionary:
                field_dictionary[field] = []
        field_dictionary["renamings"] = renaming_dictionary

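        # Stream the VCF body in chunks of `chunk_size` lines: islice() yields
        # [] at EOF, which terminates the iter() loop. Each chunk is annotated
        # in a worker process, with field_dictionary unpacked as keyword
        # arguments of treat_chunk.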
        for chunk in iter(lambda: list(islice(input_stream, chunk_size)), []):
            results.append(
                mp_pool.apply_async(treat_chunk,
                                    (chunk, vcf_header, ann_header),
                                    field_dictionary))

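        # Write the first chunk with its column header, then append the
        # remaining chunks without a header, in submission order.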
        current_chunk = results[0].get()
        all_headers = list(current_chunk.columns)
        common_fields = [x for x in all_headers if x not in individual_headers]
        new_order = common_fields + individual_headers
        s = io.StringIO()
        current_chunk[new_order].to_csv(s, sep='\t', na_rep='.', index=False)
        output_stream.write(s.getvalue())
        for i in range(1, len(results)):
            current_chunk = results[i].get()
            all_headers = list(current_chunk.columns)
            common_fields = [
                x for x in all_headers if x not in individual_headers
            ]
            new_order = common_fields + individual_headers
            s = io.StringIO()
            current_chunk[new_order].to_csv(s,
                                            sep='\t',
                                            na_rep='.',
                                            index=False,
                                            header=False)
            output_stream.write(s.getvalue())
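A minimal invocation sketch for the function above (file names and config
contents are assumptions; it expects a binary input stream and a text output
stream):

import json
import sys

with open("annotation_config.json") as fh:   # hypothetical config file
    config = json.load(fh)
with open("sample.vcf.gz", "rb") as vcf_in:  # hypothetical input VCF
    from_vcf_stream_to_maf_stream(vcf_in, sys.stdout, config, chunk_size=20)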
Example #52
0

from os.path import abspath
from os.path import join
from os.path import dirname
import unittest

import pytest

from licensedcode_test_utils import build_tests  # NOQA
from scancode_config import REGEN_TEST_FIXTURES


pytestmark = pytest.mark.scanslow

"""
Data-driven tests using expectations stored in YAML files.
Test functions are attached to test classes at module import time.
"""

TEST_DIR = abspath(join(dirname(__file__), 'data'))


class TestLicenseDataDriven4(unittest.TestCase):
    pass


build_tests(
    join(TEST_DIR, 'datadriven/lic4'),
    clazz=TestLicenseDataDriven4, regen=REGEN_TEST_FIXTURES)
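build_tests above comes from scancode's own test utilities; the sketch below
only illustrates the general pattern it implements (attaching one generated
test method per data file to a TestCase at import time) and is not scancode's
actual code:

import os
import unittest

def attach_file_tests(data_dir, clazz):
    # One generated test method per file found in data_dir.
    for name in sorted(os.listdir(data_dir)):
        full_path = os.path.join(data_dir, name)

        def make_test(path):
            def test(self):
                # A real builder would load the YAML expectations and compare
                # them against the scan result for the paired test file.
                self.assertTrue(os.path.exists(path))
            return test

        setattr(clazz, "test_" + name.replace(".", "_"), make_test(full_path))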
Example #53
0
import os.path as osp
import sys

def add_path(path):
  if path not in sys.path:
    sys.path.insert(0, path)

this_dir = osp.dirname(__file__)

# Add LIB_PATH to PYTHONPATH
lib_path = osp.abspath( osp.join(this_dir, '..') )
add_path(lib_path)
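After add_path runs, imports also resolve against the parent directory; a
quick sanity check (the package name below is a placeholder):

assert lib_path in sys.path
# import mylib  # hypothetical package living directly under lib_path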
Example #54
0
"""
Django settings for PyanoBench project.
"""

from os import path
PROJECT_ROOT = path.dirname(path.abspath(path.dirname(__file__)))

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ALLOWED_HOSTS = ('localhost', )

ADMINS = (
    # ('Your Name', '*****@*****.**'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': path.join(PROJECT_ROOT, 'db.sqlite3'),
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

LOGIN_URL = '/login'
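For reference, PROJECT_ROOT resolves two directory levels above this settings
file (the layout below is assumed, for illustration):

# If this file is /srv/PyanoBench/PyanoBench/settings.py, then
# PROJECT_ROOT == '/srv/PyanoBench' and the SQLite database lands at
# /srv/PyanoBench/db.sqlite3.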
Example #55
0
import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))

from page_obj.scg.scg_def import *
from page_obj.scg.scg_button import *
from page_obj.scg.scg_def_firewall import *
from page_obj.scg.scg_def_sslvpn import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
from page_obj.scg.scg_def_machine_learning import *

test_id = "221870"

# SSLVPN - local users
# Add a new local user
# with all other settings configured correctly
# 1. Click Save
# 2. Click Cancel


def test_c221870(browser):
    try:
        login_web(browser, url=dev1)
        # Delete all existing local users (to avoid conflicts)
        del_all_sslvpn_local_admin_lzy(browser)
        # Add a local user
        add_sslvpn_local_admin_complete_lzy(browser,
                                            name='localadmin1',
Example #56
0
import pandas as pd
from os.path import abspath, dirname

# (split_info, split_ann, clean_dataframe_column, compute_hgvs_id,
#  compute_fields_with_renamings, the add_*_fields helpers and
#  open_potential_gz come from the same package.)

def treat_chunk(chunk,
                vcf_header,
                ann_header,
                input_additional_multifields=None,
                vep_fields=None,
                snpeff_fields=None,
                dbClinVar=None,
                dbNSFP=None,
                dbSNP_boolean=None,
                dbSNP_multi=None,
                dbSNP_unique=None,
                dbCOSMICCodingMutations=None,
                dbCOSMICNonCodingVariants=None,
                dbCOSMICMutantCensus=None,
                dbCiVICGeneSummaries=None,
                dbCiVICVariantSummaries=None,
                dbCiVICClinicalEvidence=None,
                renamings=None):
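    # Every list/dict parameter defaults to None and is filled in below;
    # using None instead of mutable defaults avoids Python's shared
    # mutable-default pitfall when treat_chunk is called many times
    # (e.g. via multiprocessing.Pool.apply_async above).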
    if input_additional_multifields is None:
        input_additional_multifields = [
            "platformnames", "datasetnames", "callsetnames", "callable",
            "filt", "datasetsmissingcall"
        ]
    if vep_fields is None:
        vep_fields = [
            "Allele", "Consequence", "IMPACT", "SYMBOL", "Gene",
            "Feature_type", "Feature", "BIOTYPE", "EXON", "INTRON", "HGVSc",
            "HGVSp", "cDNA_position", "CDS_position", "Protein_position",
            "Amino_acids", "Codons", "Existing_variation", "DISTANCE",
            "STRAND", "FLAGS", "SYMBOL_SOURCE", "HGNC_ID", "SOURCE",
            "HGVS_OFFSET", "HGVSg"
        ]
    if snpeff_fields is None:
        snpeff_fields = [
            "Allele", "Annotation", "Annotation_Impact", "Gene_Name",
            "Gene_ID", "Feature_Type", "Feature_ID", "Transcript_BioType",
            "Rank", "HGVS.c", "HGVS.p", "cDNA.pos/cDNA.length",
            "CDS.pos/CDS.length", "AA.pos/AA.length", "Distance",
            "ERRORS/WARNINGS/INFO"
        ]
    if dbClinVar is None:
        dbClinVar = [
            "CLINVAR_ID", "CLNDISDB", "CLNDISDBINCL", "CLNDN", "CLNSIG",
            "CLNREVSTAT"
        ]
    if dbNSFP is None:
        """
        dbNSFP = ["MutationAssessor_score", "MutationAssessor_pred", "SIFT_score", "SIFT_pred",
                         "FATHMM_score", "FATHMM_pred", "Polyphen2_HDIV_score", "Polyphen2_HDIV_rankscore",
                         "Polyphen2_HDIV_pred", "Polyphen2_HVAR_score", "Polyphen2_HVAR_rankscore",
                         "Polyphen2_HVAR_pred"]
        """
        dbNSFP = [
            "MutationAssessor_score", "MutationAssessor_pred", "SIFT_score",
            "SIFT_pred", "FATHMM_score", "FATHMM_pred", "Polyphen2_HDIV_score",
            "Polyphen2_HDIV_pred", "Polyphen2_HVAR_score",
            "Polyphen2_HVAR_pred"
        ]
    if dbSNP_boolean is None:
        dbSNP_boolean = ["ASP"]
    if dbSNP_multi is None:
        dbSNP_multi = ["TOPMED"]
    if dbSNP_unique is None:
        dbSNP_unique = ["SAO", "RS"]
    if dbCOSMICCodingMutations is None:
        dbCOSMICCodingMutations = ["COSMIC_ID", "LEGACY_ID", "GENE", "CDS"]
    if dbCOSMICNonCodingVariants is None:
        dbCOSMICNonCodingVariants = ["COSMIC_ID", "LEGACY_ID", "GENE", "SNP"]
    if dbCOSMICMutantCensus is None:
        dbCOSMICMutantCensus = ["Primary histology"]
    if dbCiVICGeneSummaries is None:
        dbCiVICGeneSummaries = ["description"]
    if dbCiVICVariantSummaries is None:
        dbCiVICVariantSummaries = [
            "gene", "variant", "civic_variant_evidence_score"
        ]
    if dbCiVICClinicalEvidence is None:
        dbCiVICClinicalEvidence = [
            "drugs", "variant_summary", "disease", "evidence_type",
            "evidence_direction", "evidence_level", "clinical_significance",
            "citation_id"
        ]
    if renamings is None:
        renamings = {
            "dbCOSMICCodingMutations": {
                "GENE": "COSMIC_CODING_MUT_GENE",
                "COSMIC_ID": "COSMIC_ID_MUT",
                "LEGACY_ID": "LEGACY_ID_MUT"
            },
            "dbCOSMICNonCodingVariants": {
                "GENE": "COSMIC_NON_CODING_VARS_GENE",
                "COSMIC_ID": "COSMIC_ID_NON_MUT",
                "LEGACY_ID": "LEGACY_ID_NON_MUT"
            }
        }

    # if len(dbCiVICClinicalEvidence) > 0:
    #    if (not "gene" in dbCiVICVariantSummaries or not "variant" in dbCiVICGeneSummaries) or ()

    format_index = vcf_header.index("FORMAT")
    vcf_fields_header = vcf_header[:format_index]

    info_index = vcf_header.index("INFO")
    vcf_fields_header.pop(info_index)

    input_vcf_dataframe = pd.DataFrame(list(
        map(lambda x: x.decode("utf-8")[:-1].replace(" ", "").split("\t"),
            chunk)),
                                       columns=vcf_header)
    vcf_dataframe_with_info = split_info(input_vcf_dataframe)

    if ann_header is not None:
        vcf_dataframe_with_info_ann = split_ann(vcf_dataframe_with_info,
                                                ann_header)
    else:
        return vcf_dataframe_with_info

    vcf_dataframe_with_info_corrected = clean_dataframe_column(
        vcf_dataframe_with_info_ann,
        cols=input_additional_multifields,
        old_allele_sep="|",
        new_allele_sep="|",
        old_internal_sep=",",
        new_internal_sep="|")
    vcf_dataframe_with_info_corrected = clean_dataframe_column(
        vcf_dataframe_with_info_corrected,
        cols=vep_fields + snpeff_fields,
        old_allele_sep=",",
        new_allele_sep="&",
        old_internal_sep="&",
        new_internal_sep="|")

    vcf_dataframe_with_hgvs_id = compute_hgvs_id(
        vcf_dataframe_with_info_corrected)

    dbCiVICVariantSummaries_fields = compute_fields_with_renamings(
        "dbCiVICVariantSummaries", dbCiVICVariantSummaries, renamings)
    module_path = dirname(dirname(abspath(__file__)))
    variant_summaries_fields_tsv = module_path + "/data/cache/civic/nightly-VariantSummaries.tsv"
    vcf_dataframe_with_civic_variant_summaries = add_civic_variant_summaries_fields(
        vcf_dataframe_with_hgvs_id, variant_summaries_fields_tsv,
        dbCiVICVariantSummaries_fields)

    dbCiVICClinicalEvidence_fields = compute_fields_with_renamings(
        "dbCiVICClinicalEvidence", dbCiVICClinicalEvidence, renamings)

    variant_clinical_evidence_tsv = module_path + "/data/cache/civic/nightly-ClinicalEvidenceSummaries.tsv"
    vcf_dataframe_with_civic_clinical_evidence = add_civic_variant_clinical_evidence_fields(
        vcf_dataframe_with_civic_variant_summaries,
        variant_clinical_evidence_tsv, dbCiVICClinicalEvidence_fields)

    dbCiVICGeneSummaries_fields = compute_fields_with_renamings(
        "dbCiVICGeneSummaries", dbCiVICGeneSummaries, renamings)
    gene_summaries_tsv = module_path + "/data/cache/civic/nightly-GeneSummaries.tsv"
    vcf_dataframe_with_civic_gene_description = add_civic_gene_description_fields(
        vcf_dataframe_with_civic_clinical_evidence, gene_summaries_tsv,
        dbCiVICGeneSummaries_fields)

    dbSNP_multi_fields = compute_fields_with_renamings("dbSNP", dbSNP_multi,
                                                       renamings)
    dbSNP_unique_fields = compute_fields_with_renamings(
        "dbSNP", dbSNP_unique, renamings)
    dbSNP_boolean_fields = compute_fields_with_renamings(
        "dbSNP", dbSNP_boolean, renamings)
    path_to_dbsnp = module_path + "/data/cache/dbSNP/All_20180418.vcf.gz"
    vcf_dataframe_with_dbsnp = add_dbsnp_fields(
        vcf_dataframe_with_civic_gene_description,
        path_to_dbsnp,
        dbsnp_multialt_fields=dbSNP_multi_fields,
        dbsnp_unique_fields=dbSNP_unique_fields,
        dbsnp_boolean_fields=dbSNP_boolean_fields)

    dbNSFP_fields = compute_fields_with_renamings("dbNSFP", dbNSFP, renamings)
    path_to_dbNSFP = module_path + "/data/cache/dbNSFP/dbNSFP4.0a.txt.gz"
    vcf_dataframe_with_dbnsfp = add_dbnsfp_fields(vcf_dataframe_with_dbsnp,
                                                  path_to_dbNSFP,
                                                  dbnsfp_fields=dbNSFP_fields)

    dbClinVar_fields = compute_fields_with_renamings("dbClinVar", dbClinVar,
                                                     renamings)
    path_to_clinvar = module_path + "/data/cache/clinvar/clinvar.vcf.gz"
    vcf_dataframe_with_clinvar = add_clinvar_fields(
        vcf_dataframe_with_dbnsfp,
        path_to_clinvar,
        clinvar_fields=dbClinVar_fields)

    dbCOSMICCodingMutations_fields = compute_fields_with_renamings(
        "dbCOSMICCodingMutations", dbCOSMICCodingMutations, renamings)
    path_to_cosmic_coding_mut = module_path + "/data/cache/cosmic/CosmicCodingMuts.vcf.gz"
    vcf_dataframe_with_cosmic_coding_mut = add_cosmic_vcf_fields(
        vcf_dataframe_with_clinvar,
        path_to_cosmic_coding_mut,
        cosmic_fields=dbCOSMICCodingMutations_fields)

    dbCOSMICNonCodingVariants_fields = compute_fields_with_renamings(
        "dbCOSMICNonCodingVariants", dbCOSMICNonCodingVariants, renamings)
    path_to_cosmic_non_coding_vars = module_path + "/data/cache/cosmic/CosmicNonCodingVariants.vcf.gz"
    vcf_dataframe_with_cosmic_non_coding_mut = add_cosmic_vcf_fields(
        vcf_dataframe_with_cosmic_coding_mut,
        path_to_cosmic_non_coding_vars,
        cosmic_fields=dbCOSMICNonCodingVariants_fields)

    dbCOSMICMutantCensus_fields = compute_fields_with_renamings(
        "dbCOSMICMutantCensus", dbCOSMICMutantCensus, renamings)
    path_to_cosmic_mutant_census = module_path + "/data/cache/cosmic/CosmicMutantExportCensus.tsv.gz"
    with open_potential_gz(path_to_cosmic_mutant_census) as mutant_cens:
        mutant_header = mutant_cens.readline().decode("utf-8").lstrip(
            "#").rstrip("\n").split("\t")
    vcf_dataframe_with_cosmic_mutant_census = add_cosmic_indexed_fields(
        vcf_dataframe_with_cosmic_non_coding_mut,
        path_to_cosmic_mutant_census,
        cosmic_fields=dbCOSMICMutantCensus_fields,
        cosmic_header=mutant_header)
    vcf_dataframe_with_cosmic_mutant_census.drop('hgvsc_id',
                                                 axis=1,
                                                 inplace=True)
    return vcf_dataframe_with_cosmic_mutant_census
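A direct, single-process call sketch mirroring how from_vcf_stream_to_maf_stream
dispatches work to this function (all inputs below are assumptions):

# chunk: list of raw VCF body lines (bytes); headers as parsed upstream.
# df = treat_chunk(chunk, vcf_header, ann_header, **field_dictionary)
# df.to_csv(sys.stdout, sep='\t', na_rep='.', index=False)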
Example #57
0
import json
import tables, warnings
from datetime import datetime
from itertools import chain
from collections import OrderedDict
from contextlib import contextmanager
from os import path
import numpy as np
from deepdish import io as dio
from . import six, afni

from .paraproc import format_duration
from .paraproc import cmd_for_exec, cmd_for_disp, run
from .paraproc import PooledCaller, SharedMemoryArray

package_dir = path.abspath(path.dirname(__file__))


def has_ants():
    pass


def has_N4():
    pass


def has_hcp_retino_docker():
    try:
        # (assumption: paraproc's run() raises RuntimeError when goal_pattern
        #  is not found in the command output)
        run("docker image list", goal_pattern='nben/neuropythy')
        return True
    except RuntimeError:
        return False
Example #58
0
    def pathDiagrama(self, nom):
        # Method excerpt: returns the absolute path of the '<nom>.png' diagram
        # under self.pathEnquesta (assumes: from os.path import abspath).
        path = self.pathEnquesta + nom + '.png'
        return abspath(path)
Example #59
0
# Config-module excerpt (Fast/Faster R-CNN style); assumes:
#   import os.path as osp
#   import numpy as np
#   from easydict import EasyDict as edict
#   __C = edict()

# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1./16.
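# (At test time proposal boxes are scaled by DEDUP_BOXES and rounded, so boxes
#  that land on the same feature-map cell collapse to one; 1/16 matches the
#  feature stride of the backbones listed above.)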

# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])

# For reproducibility
__C.RNG_SEED = 3

# A small number that's used many times
__C.EPS = 1e-14

# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))

# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))

# Model directory
__C.MODELS_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'models', 'pascal_voc'))

# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'

# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
__C.LOG_DIR = 'default'

# Use GPU implementation of non-maximum suppression
Example #60
0
import os.path as op
import nipype.interfaces.io as nio  # Data i/o
import nipype.interfaces.utility as util  # utility
import nipype.interfaces.fsl as fsl
import nipype.pipeline.engine as pe  # pypeline engine
from forward.struct import create_structural_mesh_workflow

from forward.datasets import sample
data_path = sample.data_path(name="simnibs")
almi5_path = sample.data_path(name="almi5")

subjects_dir = op.abspath(op.curdir)
# Normally something like:
# subjects_dir = os.environ["SUBJECTS_DIR"]

fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

# The fat-suppressed T1 should be run through freesurfer!
# See http://simnibs.de/documentation/mri_sequences

subject_list = ['almi5']
name = 'SingleImage_'
infosource = pe.Node(
    interface=util.IdentityInterface(fields=['subject_id', 'subjects_dir']),
    name="infosource")
infosource.iterables = ('subject_id', subject_list)
infosource.inputs.subjects_dir = subjects_dir

datasink = pe.Node(interface=nio.DataSink(), name="datasink")
datasink.inputs.base_directory = op.abspath(name + 'structural_datasink')
datasink.inputs.container = 'subject'