Example #1
 def make_file(self, foldername, filename, challenge_id, file_contents):
     try:
         created = False
         fi = self.app.config['SERVER_ROOT']+'sand/'+str(challenge_id)+'/base/'+foldername
         if fi[-1]=='/':
             fi=fi[:-1]
         if(not path.normpath(fi) == fi and foldername != ''):
             return('Stop hacking me! ahum, I mean. There was an error making that file')
         if(not secure_filename(filename) == filename and not '.'+secure_filename(filename) == filename):
             return('Stop hacking me! ahum, I mean. There was an error making that file')
         if( path.exists( path.normpath(fi)+'/'+filename )):
             return('The file already exists')
         if(foldername != '' and not path.exists(fi)):
             mkdir(fi)
             created = True
         fi = path.normpath(fi)+'/'+filename
         if(filename != '' and not path.exists( fi ) ):
             open(fi,'w',encoding='utf-8').close()
             file_contents.save(fi)
             created = True
         if created:
             chmod(fi, S.S_IRWXU | S.S_IRWXG | S.S_IRWXO)
             return('The file has been made')
     except Exception as e:
         self.app.logger.warning('make_file '+str(e))
         return('There was an error making that file')        
Example #2
def get_7zip_path():
    path_7zip = config.get('deploy.7zip_path', None)
    if path_7zip:
        return path_7zip

    sdk_root = normpath(dirname(__file__))
    while not isdir(normpath(join(sdk_root, 'external', '7-Zip'))):
        new_root = normpath(join(sdk_root, '..'))
        if new_root == sdk_root:
            return None
        sdk_root = new_root
        del new_root
    if SYSNAME == 'Linux':
        if MACHINE == 'x86_64':
            path_7zip = join(sdk_root, 'external/7-Zip/bin/linux64/7za')
        else:
            path_7zip = join(sdk_root, 'external/7-Zip/bin/linux32/7za')
    elif SYSNAME == 'Windows':
        path_7zip = join(sdk_root, 'external/7-Zip/bin/win/7za.exe')
    elif SYSNAME == 'Darwin':
        path_7zip = join(sdk_root, 'external/7-Zip/bin/macosx/7za')
    else:
        raise Exception('Unknown OS!')
    if exists(path_7zip):
        return path_7zip
    else:
        return None
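The while loop above is the usual "walk upward until the filesystem root" idiom: normpath(join(d, '..')) climbs one level, and the climb stops once another '..' no longer changes the path. A standalone sketch of the same idiom, here searching for a hypothetical marker directory:

from os.path import isdir, join, normpath

def find_ancestor_containing(start, marker):
    # Walk upward from `start` until a directory containing `marker`
    # is found; give up once normpath(join(d, '..')) stops changing,
    # which means we have reached the filesystem root.
    d = normpath(start)
    while not isdir(join(d, marker)):
        parent = normpath(join(d, '..'))
        if parent == d:
            return None
        d = parent
    return d

# e.g. find_ancestor_containing('.', '.git')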
Example #3
File: sim.py  Project: rcoca/HIV_01
def get_param_set(params):
    """Yield dict, a replicate specific set of parameters.
    There are n_sim (e.g., 100) replicates for each epsilon,
    and each one has its own random seed, sim_name, and outfilename.
    This generator function should yield dicts with **pickleable** elements only,
    so that it can be used with multiprocessing.
    """
    outfolder = params['outfolder']
    outfile_pattern = 'eps??sim??.out'
    search_pattern = path.normpath(path.join(outfolder, outfile_pattern))
    previously_done = glob.glob(search_pattern)
    for n in range(params['n_sim']):
        for i,eps in enumerate(ehg_epsilons):  #ehg_epsilons is a global constant
            assert type(eps) is float
            sim_name = 'eps{0:02d}sim{1:02d}'.format(i,n)
            outfilename = path.normpath(path.join(outfolder, sim_name + '.out'))
            if path.normpath(outfilename) in previously_done:
                print 'Skip simulation ' + sim_name + ' (already completed).'
                continue
            else:
                seed = params['rndseed'], n, int(eps * 10**5)   #replicate(n)-specific seed
                simparams = params.copy()  #fresh copy for each replicate (IMPORTANT!)
                simparams.update(
                    prng = RandomState(seed=seed),
                    #get phi for this simulation (to be passed to `random_pairings` via `coresim`)
                    epsilon = eps,  # `phi` needs this!
                    sim_name = sim_name,
                    outfilename = outfilename,
                    )
                yield simparams
Example #4
 def _add_to_package(self, CurrentPath, LocalBEConfig, 
                     LocalDirs, LocalFiles):
     # If there is no "AddToPackage" in LocalBEConfig
     # or if LocalBEConfig[AddToPackage] is empty, then
     # there is nothing to add.. so just leave right now!
     if AddToPackage not in LocalBEConfig:
         return None
     if not LocalBEConfig[AddToPackage]:
         return None
     # Now, for all the files and directories present 
     # in "AddToPackage", add them to self._add_to_pkg
     for f in LocalBEConfig[AddToPackage]:
         try:
             regex = re.compile(f)
             AddFiles = [x for x in LocalFiles if regex.search(x)]
             for f in AddFiles:
                 logger.debug('Adding File: %s' % f)
                 _f = normpath(join(CurrentPath, f))
                 self._add_to_pkg.append(_f)
             AddDirs = [x for x in LocalDirs if regex.search(x)]
             for d in AddDirs:
                 logger.debug('Adding Directory: %s' % d)
                 _d = normpath(join(CurrentPath, d))
                 self._add_dir_to_package(_d)
         except Exception, e:
             msg = 'Unable to add %s to the Package; Please fix \"%s\" ' \
                   'in %s; Reason [%s]' % (f, AddToPackage, 
                   BEConfigFilename, e)
             logger.error(msg)
             raise UnableToAddPackageFiles(msg)
Example #5
def _import_module(module_name, loaded=None):
    """
    Import the module.

    Import the module and track which modules have been loaded
    so we don't load already loaded modules.
    """

    # Pull in built-in and custom plugin directory
    if module_name.startswith("bh_modules."):
        path_name = join("Packages", "BracketHighlighter", normpath(module_name.replace('.', '/')))
    else:
        path_name = join("Packages", normpath(module_name.replace('.', '/')))
    path_name += ".py"
    if loaded is not None and module_name in loaded:
        module = sys.modules[module_name]
    else:
        module = imp.new_module(module_name)
        sys.modules[module_name] = module
        exec(
            compile(
                sublime.load_resource(sublime_format_path(path_name)),
                module_name,
                'exec'
            ),
            sys.modules[module_name].__dict__
        )
    return module
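The path_name construction above is a small idiom of its own: a dotted module name becomes a relative resource path by swapping dots for slashes and letting normpath pick the platform separators. A tiny sketch with a made-up helper name:

from os.path import join, normpath

def module_to_resource_path(module_name, root='Packages'):
    # 'bh_modules.tags' -> 'Packages/bh_modules/tags.py'
    # (normpath converts the separators on Windows).
    return join(root, normpath(module_name.replace('.', '/'))) + '.py'

print(module_to_resource_path('bh_modules.tags'))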
Example #6
def link(target, lnk, force=False):
    """
    Creates symbolic link 'lnk' pointing to 'target'.
    """

    if system() not in ('Linux', 'Windows', 'MSYS_NT-6.1'):
        print("{} operating system is not supported.".format(system()))
        return

    isdir = False

    lnk = path.normpath(path.expandvars(path.expanduser(lnk)))
    if path.isdir(target):
        isdir = True
    target = path.normpath(path.expandvars(path.expanduser(target)))

    if isdir:
        print("\n{} -> {} : DIR".format(lnk, target))
    else:
        print("\n{} -> {} : FILE".format(lnk, target))

    if path.isdir(lnk) or path.isfile(lnk):
        if not force:
            print("'{}': link exists".format(lnk))
            return
        else:
            remove(lnk)

    if system() in ('Linux', 'MSYS_NT-6.1'):
        Popen(['ln', '-s', target, lnk]).wait()
    elif system() == 'Windows':
        if isdir:
            CreateSymbolicLink(lnk, target, 1)
        else:
            CreateSymbolicLink(lnk, target, 0)
Example #7
def make_mo_gettext():
    """
    Calls 'msgfmt' from GNU gettext to generate object files (.mo) from
    the translation files (.po).

    Note: As this function uses the $PATH variable (with spawnlp) it doesn't
      work under Windows.
    """
    print "Generating gettext mo files:",
    po_files = 'po/*.po'
    mo_base_dir = 'locale/%s/LC_MESSAGES/'
    conv_program = 'msgfmt'

    for lang_file in glob(po_files):
        language = basename(lang_file)[:-3]
        mo_dir = mo_base_dir % language
        print language,
        try:
            makedirs(mo_dir)
        except OSError, inst:
            if inst.strerror != 'File exists':
                print 'Warning: ', inst.filename, inst.strerror, 'ignoring.'
        # normalize path for windows #
        lang_file_norm = normpath(lang_file)
        mo_dir_norm = normpath(mo_dir)
        #
        mo_file = mo_dir_norm + "/quizdrill.mo"
        #print conv_program, lang_file, "-o", mo_file    # debugging
        spawnlp(P_WAIT, conv_program, conv_program, lang_file_norm, "-o", 
                mo_file)
Example #8
 def test_import_file_by_path(self):
     import bytelib as expected
     module = self._import_module(join(LIBDIR, 'bytelib.py'))
     assert_equals(module.__name__, expected.__name__)
     assert_equals(dirname(normpath(module.__file__)),
                   dirname(normpath(expected.__file__)))
     assert_equals(dir(module), dir(expected))
Example #9
def create_apk(build, sdk, output_filename, interactive=True):
	"""Create an APK file from the the contents of development/android.

	:param output_filename: name of the file to which we'll write
	"""
	jre = _get_jre()
	path_info = _find_or_install_sdk(build)

	lib_path = path.normpath(path.join('.template', 'lib'))
	dev_dir = path.normpath(path.join('development', 'android'))
	package_name = _generate_package_name(build)
	
	LOG.info('Creating Android .apk file')

	with temp_file() as zipf_name:
		# Compile XML files into APK
		_create_apk_with_aapt(build, zipf_name, path_info, package_name, lib_path, dev_dir)

		with temp_file() as compressed_zipf_name:
			with zipfile.ZipFile(zipf_name, 'r') as zipf:
				with zipfile.ZipFile(compressed_zipf_name, 'w') as compressed_zipf:
					for name in zipf.namelist():
						compress_type = zipfile.ZIP_STORED
						if name == 'classes.dex':
							compress_type = zipfile.ZIP_DEFLATED
						compressed_zipf.writestr(name, zipf.read(name), compress_type=compress_type)
		
			with temp_file() as signed_zipf_name:
				# Sign APK
				_sign_zipf_debug(lib_path, jre, compressed_zipf_name, signed_zipf_name)
				
				# Align APK
				_align_apk(path_info, signed_zipf_name, output_filename)
Example #10
def collect_static():    
    """ Collect multiple static files (LESS, JS) into one. Compile LESS. """        
    # Needs refactoring
    # LESS
    for result in assets.CSS:
        output_css = path.join(STATIC_ROOT, 'compiled', '%s.min.css' % result)
        output_css = open(output_css, 'w')
        for file in assets.CSS[result]:
            if 'less' in file:
                file = 'static/compiled/%s.css' % path.basename(path.normpath(file))
            else:
                file = file + '.css'
            css = path.join(SITE_ROOT, file)
            file = open(css, 'r')
            output_css.write(file.read())
        output_css.close()
    # JS
    for result in assets.JS:
        output_js = path.join(STATIC_ROOT, 'compiled', '%s.min.js' % result)
        output_js = open(output_js, 'w')
        for file in assets.JS[result]:
            if 'coffee' in file:
                file = 'static/compiled/%s.js' % path.basename(path.normpath(file))
            else:
                file = file + '.js'
            js = path.join(SITE_ROOT, file)
            file = open(js, 'r')
            output_js.write(file.read())
        output_js.close()
Example #11
 def addLoc(self, position, objType = None,
             mViewImag=None, pViewImag = None, itemName = None):
     """Helper Function used when loading objects into data for use in the
     person and map view."""
     
     if mViewImag:
         mViewImagPath = normpath("images/"+mViewImag)
     else: mViewImagPath = None
     
     if pViewImag:
         pViewImagPath = normpath("images/"+pViewImag)
     else: pViewImagPath = None
     
     locObj = Loc(position, objType, mViewImag = mViewImagPath, pViewImag = pViewImagPath)
     
     if objType == 'char':
         self.character = locObj
     else:
         self.places.append(locObj)
     
     if objType == 'landmark' and itemName:
         self.landmarks[itemName] = locObj
         
     if objType == 'overlay' and itemName:
         self.overlays[itemName] = locObj
         
     return
Example #12
def normalize_filenames(*iterator):
    """
    Normalize the given filenames, joining loose path parts into full paths.
    """
    filenames = []
    fileparts = []
    for item in iterator:
        # special treatment for items that have filename and pathname members
        if hasattr(item, "filename") and hasattr(item, "pathname"):
            filenames.append(fs.normpath(fs.join(item.pathname, item.filename)))
        elif isinstance(item, list) or isinstance(item, tuple):
            normalized = normalize_filenames(*item)
            if isinstance(normalized, list) or isinstance(normalized, tuple):
                filenames[len(filenames) :] = normalized
            else:
                filenames.append(normalized)
        else:
            # item could be a file already
            if fs.isfile(item):
                filenames.append(item)
            else:
                fileparts.append(item)
    if len(fileparts) > 0:
        filenames.append(fs.normpath(fs.join(*fileparts)))
    if len(filenames) == 1:
        return filenames[0]
    elif len(filenames) > 1:
        return filenames
    else:
        return None
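A short usage sketch of the function above, showing the input shapes it accepts (loose path parts, existing files, and nested lists). The paths are invented, and `fs` is assumed to be os.path as in the surrounding module:

print(normalize_filenames('/var/log', '..', 'tmp', 'a.txt'))
# -> '/var/tmp/a.txt'        (loose parts are joined, then normalized)

print(normalize_filenames(['/etc/hosts', ['/var/log', 'syslog']]))
# -> ['/etc/hosts', '/var/log/syslog']
#    ('/etc/hosts' is kept as-is because it already exists as a file;
#     the nested list is flattened into the result)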
Example #13
File: utils.py  Project: kosqx/lilyplayer
def makepath(path):

    """ creates missing directories for the given path and
        returns a normalized absolute version of the path.

    - if the given path already exists in the filesystem
      the filesystem is not modified.

    - otherwise makepath creates directories along the given path
      using the dirname() of the path. You may append
      a '/' to the path if you want it to be a directory path.
      
    Example:
      file = open(makepath('/tmp/dir/hallo'), 'w')

    from [email protected] 2002/03/18
    """

    from os import makedirs
    from os.path import normpath,dirname,exists,abspath

    dpath = normpath(dirname(path))
    if not exists(dpath):
        makedirs(dpath)
    return normpath(abspath(path))
Example #14
def test_uninstall_overlapping_package(script, data):
    """
    Uninstalling a distribution that adds modules to a pre-existing package
    should only remove those added modules, not the rest of the existing
    package.

    See: GitHub issue #355 (pip uninstall removes things it didn't install)
    """
    parent_pkg = data.packages.join("parent-0.1.tar.gz")
    child_pkg = data.packages.join("child-0.1.tar.gz")

    result1 = script.pip('install', parent_pkg, expect_error=False)
    assert join(script.site_packages, 'parent') in result1.files_created, sorted(result1.files_created.keys())
    result2 = script.pip('install', child_pkg, expect_error=False)
    assert join(script.site_packages, 'child') in result2.files_created, sorted(result2.files_created.keys())
    assert normpath(join(script.site_packages, 'parent/plugins/child_plugin.py')) in result2.files_created, sorted(result2.files_created.keys())
    #the import forces the generation of __pycache__ if the version of python supports it
    script.run('python', '-c', "import parent.plugins.child_plugin, child")
    result3 = script.pip('uninstall', '-y', 'child', expect_error=False)
    assert join(script.site_packages, 'child') in result3.files_deleted, sorted(result3.files_deleted.keys())
    assert normpath(join(script.site_packages, 'parent/plugins/child_plugin.py')) in result3.files_deleted, sorted(result3.files_deleted.keys())
    assert join(script.site_packages, 'parent') not in result3.files_deleted, sorted(result3.files_deleted.keys())
    # Additional check: uninstalling 'child' should return things to the
    # previous state, without unintended side effects.
    assert_all_changes(result2, result3, [])
Example #15
def symlink_conda(prefix, root_dir, shell=None):
    # do not symlink root env - this clobbers activate incorrectly.
    # prefix should always be longer than, or outside the root dir.
    if normcase(normpath(prefix)) in normcase(normpath(root_dir)):
        return

    if shell is None:
        shell = "bash.msys"

    if on_win:
        where = 'Scripts'
    else:
        where = 'bin'

    if on_win:
        if shell in ["cmd.exe", "powershell.exe"]:
            symlink_fn = functools.partial(win_conda_bat_redirect, shell=shell)
        else:
            symlink_fn = functools.partial(win_conda_unix_redirect, shell=shell)
    else:
        symlink_fn = os.symlink

    if not isdir(join(prefix, where)):
        os.makedirs(join(prefix, where))
    symlink_conda_hlp(prefix, root_dir, where, symlink_fn)
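The guard at the top of symlink_conda relies on normcase(normpath(...)), so the containment test ignores separator style and, on Windows, letter case. A small illustration with invented paths:

from os.path import normcase, normpath

root_dir = r'C:\Miniconda3'
prefix = 'c:/miniconda3/'          # the root env itself, spelled differently
# On Windows both collapse to the same lowercase, backslash-separated form,
# so the substring test is True and the function returns without symlinking.
# (On POSIX normcase is a no-op, so this particular pair prints False.)
print(normcase(normpath(prefix)) in normcase(normpath(root_dir)))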
Example #16
 def test_multi_suite(self):
     data = TestSuiteFactory([join(DATADIR, 'normal.robot'),
                              join(DATADIR, 'pass_and_fail.robot')])
     suite = JsonConverter().convert(data)
     test_convert(suite,
                  source='',
                  relativeSource='',
                  id='s1',
                  name='Normal & Pass And Fail',
                  fullName='Normal & Pass And Fail',
                  doc='',
                  metadata=[],
                  numberOfTests=4,
                  keywords=[],
                  tests=[])
     test_convert(suite['suites'][0],
                  source=normpath(join(DATADIR, 'normal.robot')),
                  relativeSource='',
                  id='s1-s1',
                  name='Normal',
                  fullName='Normal & Pass And Fail.Normal',
                  doc='<p>Normal test cases</p>',
                  metadata=[('Something', '<p>My Value</p>')],
                  numberOfTests=2)
     test_convert(suite['suites'][1],
                  source=normpath(join(DATADIR, 'pass_and_fail.robot')),
                  relativeSource='',
                  id='s1-s2',
                  name='Pass And Fail',
                  fullName='Normal & Pass And Fail.Pass And Fail',
                  doc='<p>Some tests here</p>',
                  metadata=[],
                  numberOfTests=2)
Example #17
def alternate_tags_paths(view, tags_file):
    tags_paths = '%s_search_paths' % tags_file
    search_paths = [tags_file]

    if os.path.exists(tags_paths):
        search_paths.extend(open(tags_paths).read().split('\n'))

    try:
        for (selector, platform), path in setting('extra_tag_paths'):
            if ( view.match_selector(view.sel()[0].begin(), selector) and
                 sublime.platform() == platform ):
                search_paths.append(path)
    except Exception as e:
        print (e)

    if os.path.exists(tags_paths):
        for extrafile in setting('extra_tag_files'):
            search_paths.append(normpath(join(dirname(tags_file), extrafile)))


    # OK, didn't find the .tags file under the viewed file.
    # Let's look in the currently opened folder
    for folder in view.window().folders():
        search_paths.append(normpath(join(folder, '.tags')))
        for extrafile in setting('extra_tag_files'):
            search_paths.append(normpath(join(folder, extrafile)))

    return set(p for p in search_paths if p and os.path.exists(p))
Example #18
 def setUp(self):
     #settings.DEBUG = True
     RateLoader(normpath(join(settings.SITE_ROOT, 'core/fixtures/rate.csv'))).load()
     AccountTypeLoader(normpath(join(settings.SITE_ROOT, 'core/fixtures/accounttype.csv'))).load()
     RateChargeLoader(normpath(join(settings.SITE_ROOT, 'core/fixtures/ratecharge.csv'))).load()
     BillingScheduleLoader(normpath(join(settings.SITE_ROOT, 'core/fixtures/billingschedule.csv'))).load()
     FelizanaLoader(normpath(join(settings.SITE_ROOT, 'core/fixtures/felizana_customer_info_2014-02.csv'))).load()
Example #19
 def test_suite(self):
     test_convert(self.suite,
                  source=normpath(DATADIR),
                  relativeSource='misc',
                  id='s1',
                  name='Misc',
                  fullName='Misc',
                  doc='<p>My doc</p>',
                  metadata=[('1', '<p>2</p>'), ('abc', '<p>123</p>')],
                  numberOfTests=175,
                  tests=[],
                  keywords=[])
     test_convert(self.suite['suites'][0],
                  source=join(normpath(DATADIR), 'dummy_lib_test.robot'),
                  relativeSource='misc/dummy_lib_test.robot',
                  id='s1-s1',
                  name='Dummy Lib Test',
                  fullName='Misc.Dummy Lib Test',
                  doc='',
                  metadata=[],
                  numberOfTests=1,
                  suites=[],
                  keywords=[])
     test_convert(self.suite['suites'][4]['suites'][1]['suites'][-1],
                  source=join(normpath(DATADIR), 'multiple_suites',
                              '02__sub.suite.1', 'second__.Sui.te.2..robot'),
                  relativeSource='misc/multiple_suites/02__sub.suite.1/second__.Sui.te.2..robot',
                  id='s1-s5-s2-s2',
                  name='.Sui.te.2.',
                  fullName='Misc.Multiple Suites.Sub.Suite.1..Sui.te.2.',
                  doc='',
                  metadata=[],
                  numberOfTests=12,
                  suites=[],
                  keywords=[])
Example #20
def load_list_from_folder(folder_path, ext_filter=None, depth=1):
  '''
  load a list of files or folders from a system path

  parameters:
    folder_path: root directory to search
    ext_filter: a string (or list of strings) giving the file extensions of interest
    depth: maximum folder depth to search; when None, all levels are searched
  '''
  folder_path = osp.normpath(folder_path)
  assert isinstance(depth, int) , 'input depth is not correct {}'.format(depth)
  assert ext_filter is None or (isinstance(ext_filter, list) and all(isinstance(ext_tmp, str) for ext_tmp in ext_filter)) or isinstance(ext_filter, str), 'extension filter is not correct'
  if isinstance(ext_filter, str):    # convert to a list
    ext_filter = [ext_filter]

  fulllist = list()
  wildcard_prefix = '*'
  for index in range(depth):
    if ext_filter is not None:
      for ext_tmp in ext_filter:
        curpath = osp.join(folder_path, wildcard_prefix + '.' + ext_tmp)
        fulllist += glob.glob(curpath)
    else:
      curpath = osp.join(folder_path, wildcard_prefix)
      fulllist += glob.glob(curpath)
    wildcard_prefix = osp.join(wildcard_prefix, '*')

  fulllist = [osp.normpath(path_tmp) for path_tmp in fulllist]
  num_elem = len(fulllist)

  return fulllist, num_elem
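For reference, the wildcard the loop above builds grows by one '*' per level, so depth=2 with ext_filter='png' searches folder/*.png and folder/*/*.png. A runnable sketch of just that pattern construction (the folder name is hypothetical):

from os import path as osp

wildcard_prefix = '*'
for level in range(3):
    print(osp.join('/data/images', wildcard_prefix + '.png'))
    wildcard_prefix = osp.join(wildcard_prefix, '*')
# /data/images/*.png
# /data/images/*/*.png
# /data/images/*/*/*.png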
Example #21
def mandb_changed():
    manpath_file = normpath(join(HOME, ".manpath"))
    manpath = ".local/share/man"
    lines = []
    try:
        with open(manpath_file, "r") as f:
            lines = f.readlines()
    except IOError:
        if not config.UpdateManPath:
            return

    has_path = any([manpath in l for l in lines])

    with open(manpath_file, "w") as f:
        if config.UpdateManPath:
            if not has_path:
                lines.append("MANDATORY_MANPATH\t%s\n" % normpath(join(HOME, manpath)))
        else:
            new_lines = []
            for line in lines:
                if manpath not in line:
                    new_lines.append(line)
            lines = new_lines

        for line in lines:
            f.write(line)
Example #22
def construct_db(assets_dir):
  ## Create a database of text labels from the given game assets dir.
  ## The database has the following structure:
  ## {"section": { "label" :
  ##   { "file where it is used" : [list of fields where it is used in that file] } } }
  print("Scanning assets at " + assets_dir)
  db = dict()
  db[""] = dict()
  foi = list()
  endings = tuple(files_of_interest.keys())
  for subdir, dirs, files in walk(assets_dir):
    for thefile in files:
      if thefile.endswith(endings):
        foi.append(normpath(join(subdir, thefile)))
  with Pool() as p:
    r = p.imap_unordered(parseFile, foi)
    for chunk in r:
      for sec, val, fname, path in chunk:
        if sec not in db:
          db[sec] = dict()
        if val not in db[sec]:
          db[sec][val] = dict()
        filename = normpath(relpath(abspath(fname), abspath(assets_dir)))
        if filename not in db[sec][val]:
          db[sec][val][filename] = list()
        if path not in db[sec][val][filename]:
          insort_left(db[sec][val][filename], path)
  return db
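The filename handling above uses a common trio: abspath to anchor both paths, relpath to express the file relative to the assets root, and normpath to tidy the result so the database stays portable. A minimal illustration with invented paths:

from os.path import abspath, normpath, relpath

assets_dir = '/games/starbound/assets'
fname = '/games/starbound/assets/items/./sword.object'
print(normpath(relpath(abspath(fname), abspath(assets_dir))))
# -> 'items/sword.object'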
Example #23
    def query_url_mapping(self, filepath):
        """Searches the environment-wide url mapping (based on the
        urls assigned to each directory in the load path). Returns
        the correct url for ``filepath``.

        Subclasses should be sure that they really want to call this
        method, instead of simply falling back to ``super()``.
        """
        # Build a list of dir -> url mappings
        mapping = list(self.env.url_mapping.items())
        try:
            mapping.append((self.env.directory, self.env.url))
        except EnvironmentError:
            # Rarely, directory/url may not be set. That's ok.
            pass

        # Make sure paths are absolute, normalized, and sorted by length
        mapping = list(map(
            lambda p_u: (path.normpath(path.abspath(p_u[0])), p_u[1]),
            mapping))
        mapping.sort(key=lambda i: len(i[0]), reverse=True)

        needle = path.normpath(filepath)
        for candidate, url in mapping:
            if needle.startswith(candidate):
                # Found it!
                rel_path = filepath[len(candidate)+1:]
                return url_prefix_join(url, rel_path)
        raise ValueError('Cannot determine url for %s' % filepath)
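The lookup above boils down to three steps: normalize every mapped directory, sort the mappings longest-first so the most specific directory wins, then prefix-match the normalized file path. A stripped-down sketch of that matching step (directories, URLs, and the helper name are invented; the real method also consults env.url_mapping and url_prefix_join):

from os import path

def resolve_url(filepath, mapping):
    # mapping: {directory: url}; try the longest (most specific) directory first.
    pairs = sorted(
        ((path.normpath(path.abspath(d)), u) for d, u in mapping.items()),
        key=lambda p: len(p[0]), reverse=True)
    needle = path.normpath(path.abspath(filepath))
    for candidate, url in pairs:
        if needle.startswith(candidate):
            rel = path.relpath(needle, candidate).replace(path.sep, '/')
            return url.rstrip('/') + '/' + rel
    raise ValueError('Cannot determine url for %s' % filepath)

# resolve_url('/srv/static/css/site.css', {'/srv/static': '/static'})
#   -> '/static/css/site.css'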
Example #24
def update(src,dest,old_dest = None, syml = False, logdir = realpath(".")):
    realdest = normpath(join(logdir, dest))
    dest_path = split(realdest)[0]
    realsrc = normpath(join(dest_path,src))
    # The modification time is compared only with the precision of the second
    # to avoid a bug in Python 2.5 + Win32 (Fixed in Python 2.5.1).
    # See:
    #   http://bugs.python.org/issue1671965
    #   http://bugs.python.org/issue1565150
    if (not exists(realdest)) or (int(getmtime(realsrc)) > int(getmtime(realdest))):
        if not isdir(dest_path):
            print "Create dir '%s'"%(dest_path)
            makedirs(dest_path)
        # the destination file is missing or older than the source
        if syml and sys.platform != "win32" :
            if exists(realdest):
                remove(realdest,logdir)
            print "Create Link to '%s' in '%s'"%(src,dest_path)
            os.symlink(src,realdest)
        else:
            print "Copy '%s' -> '%s'"%(src, realdest)
            if exists(realdest):
                # If the destination path exists it is better to remove it before
                # doing the copy (shutil.copystat fails if the destination file
                # is not owned by the current user).
                os.remove(realdest)
            shutil.copy2(realsrc, realdest) # do the copy (cp -p src dest)
Example #25
def check_modules(ext_js):
    """
    Checks requirement of config.js and version.js.
    """
    refact_ext_js = []

    for js in ext_js:
        ex_module = False

        if normpath(join(BASE_DIR, "..", "src", "config.js")) == js:
            refact_ext_js.append(normpath(join(BASE_DIR, "..",
                                                   "config_rel.js")))
            continue

        if normpath(join(BASE_DIR, "..", "src", "version.js")) == js:
            refact_ext_js.append(normpath(join(BASE_DIR, "..",
                                                   "version_rel.js")))
            continue

        for ex in EXCLUSION_MODULES:
            if normpath(join(BASE_DIR, "..", ex)) == js:
                ex_module = True
                break

        if not ex_module:
            refact_ext_js.append(js)

    return refact_ext_js
Example #26
    def make(self):
        for path in _paths_from_path_patterns(['.'],
                        excludes=[".svn", "*.pyc", "TODO.txt"]):

            if abspath(normpath(path)) == abspath(normpath(__file__)):
                continue
            self._dump_xxxs_from_path(path)
Example #27
def test_walk_modules():
    """
    Test _walk_modules, a flattened version of _iter_modules.
    """
    modules = make_test_modules()
    args = MockedCommandLineArgs()

    expected_output = [
        (0, '01_section1',
         0, normpath('test_class/01_section1/01_module1'),
         0, 'lecture1', normpath('test_class/01_section1/01_module1/01_lecture1_title.en.txt'),
         'https://www.coursera.org/api/test-url')]
    collected_output = []

    for module, section, lecture, resource in _walk_modules(
            modules=modules, class_name='test_class',
            path='', ignored_formats=None, args=args):

        collected_output.append(
            (module.index, module.name,
             section.index, section.dir,
             lecture.index, lecture.name, lecture.filename(resource.fmt, resource.title),
             resource.url)
        )

    assert expected_output == collected_output
Example #28
 def test_import_file_by_path(self):
     import module_library as expected
     module = self._import_module(join(LIBDIR, 'module_library.py'))
     assert_equal(module.__name__, expected.__name__)
     assert_equal(dirname(normpath(module.__file__)),
                   dirname(normpath(expected.__file__)))
     assert_equal(dir(module), dir(expected))
Example #29
 def _update_ignore_items(self, CurrentPath, LocalBEConfig, 
                          LocalDirs, LocalFiles):
     for item in LocalBEConfig[IgnoreItems]:
         try:
             regex = re.compile(item)
             IgnoreFiles = [x for x in LocalFiles if regex.search(x)]
             for f in IgnoreFiles:
                 logger.debug('Ignoring File: %s' % f)
                 LocalFiles.remove(f)
                 #
                 # Update the self._files_ignored for statistics
                 _f = normpath(join(CurrentPath, f))
                 self._files_ignored.append(_f)
             IgnoreDirs = [x for x in LocalDirs if regex.search(x)]
             for d in IgnoreDirs:
                 logger.debug('Ignoring Directory: %s' % d)
                 # Update the self._dirs_ignored for statistics
                 _d = normpath(join(CurrentPath, d))
                 self._add_dir_to_ignore_items(_d)
         except Exception, e:
             msg = '%s does not exist; Please fix \"%s\" ' \
                   'in %s; Reason [%s]' % (f, IgnoreItems, 
                   BEConfigFilename, e)
             logger.error(msg)
             raise UnableToIgnoreItem(msg)
Example #30
def updateModFile():
	if not op.isfile(op.normpath(op.join(srcPath, "mod.info"))):
		return

	command = ['git', 'describe', '--long']
	out = subprocess.Popen(command, stdout=subprocess.PIPE)
	(sout, serr) = out.communicate()

	result = re.match(r"v(-?[0-9|\.]+)_(-?[0-9|\.]+)-(-?[0-9|\.]+)", sout)
	dofusVersion = result.group(1)
	version = result.group(2)
	revision = result.group(3)

	command = ['git', 'log', '--tags', '--simplify-by-decoration', '-1', '--pretty=%ai']
	out = subprocess.Popen(command, stdout=subprocess.PIPE)
	(sout, serr) = out.communicate()

	result = re.match(r"(-?[0-9|\.]+)-(-?[0-9|\.]+)-(-?[0-9|\.]+)", sout)
	date = result.group(0)

	with open(op.normpath(op.join(srcPath, "mod.info")), "r") as file:
		data = file.read()
		data = data.replace("${name}", moduleName)
		data = data.replace("${author}", authorName)
		data = data.replace("${dofusVersion}", dofusVersion)
		data = data.replace("${version}", version)
		data = data.replace("${tag}", "v" + dofusVersion + "_" + version)
		data = data.replace("${date}", date)
		data = data.replace("${filename}", moduleName + "_" + dofusVersion + "_" + version)
		data = data.replace("${contributors}", json.dumps(contributorsName))
		with open(op.normpath(op.join(srcPath, "mod.json")), "w") as outFile:
			outFile.write(data)
Example #31
    def from_qm9_pretrained(root, dataset, target):
        if spk is None:
            raise ImportError(
                '`SchNet.from_qm9_pretrained` requires `schnetpack`.')

        assert target >= 0 and target <= 12

        units = [1] * 12
        units[0] = ase.units.Debye
        units[1] = ase.units.Bohr**3
        units[5] = ase.units.Bohr**2

        root = osp.expanduser(osp.normpath(root))
        makedirs(root)
        folder = 'trained_schnet_models'
        if not osp.exists(osp.join(root, folder)):
            path = download_url(SchNet.url, root)
            extract_zip(path, root)
            os.unlink(path)

        name = f'qm9_{qm9_target_dict[target]}'
        path = osp.join(root, 'trained_schnet_models', name, 'split.npz')

        split = np.load(path)
        train_idx = split['train_idx']
        val_idx = split['val_idx']
        test_idx = split['test_idx']

        # Filter the splits to only contain characterized molecules.
        idx = dataset.data.idx
        assoc = idx.new_empty(idx.max().item() + 1)
        assoc[idx] = torch.arange(idx.size(0))

        train_idx = assoc[train_idx[np.isin(train_idx, idx)]]
        val_idx = assoc[val_idx[np.isin(val_idx, idx)]]
        test_idx = assoc[test_idx[np.isin(test_idx, idx)]]

        path = osp.join(root, 'trained_schnet_models', name, 'best_model')

        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            state = torch.load(path, map_location='cpu')

        net = SchNet(hidden_channels=128, num_filters=128, num_interactions=6,
                     num_gaussians=50, cutoff=10.0,
                     atomref=dataset.atomref(target))

        net.embedding.weight = state.representation.embedding.weight

        for int1, int2 in zip(state.representation.interactions,
                              net.interactions):
            int2.mlp[0].weight = int1.filter_network[0].weight
            int2.mlp[0].bias = int1.filter_network[0].bias
            int2.mlp[2].weight = int1.filter_network[1].weight
            int2.mlp[2].bias = int1.filter_network[1].bias
            int2.lin.weight = int1.dense.weight
            int2.lin.bias = int1.dense.bias

            int2.conv.lin1.weight = int1.cfconv.in2f.weight
            int2.conv.lin2.weight = int1.cfconv.f2out.weight
            int2.conv.lin2.bias = int1.cfconv.f2out.bias

        net.lin1.weight = state.output_modules[0].out_net[1].out_net[0].weight
        net.lin1.bias = state.output_modules[0].out_net[1].out_net[0].bias
        net.lin2.weight = state.output_modules[0].out_net[1].out_net[1].weight
        net.lin2.bias = state.output_modules[0].out_net[1].out_net[1].bias

        mean = state.output_modules[0].atom_pool.average
        net.readout = 'mean' if mean is True else 'add'

        dipole = state.output_modules[0].__class__.__name__ == 'DipoleMoment'
        net.dipole = dipole

        net.mean = state.output_modules[0].standardize.mean.item()
        net.std = state.output_modules[0].standardize.stddev.item()

        if state.output_modules[0].atomref is not None:
            net.atomref.weight = state.output_modules[0].atomref.weight
        else:
            net.atomref = None

        net.scale = 1. / units[target]

        return net, (dataset[train_idx], dataset[val_idx], dataset[test_idx])
Example #32
from os import path

local_database = path.normpath(path.join(path.dirname(__file__),"../Database"))+"/"
mainWidget_images = [
    path.normpath(path.join(path.dirname(__file__),"../assets/admin.png")),
    path.normpath(path.join(path.dirname(__file__),"../assets/books.png")),
    path.normpath(path.join(path.dirname(__file__),"../assets/mybooks.png")),
    path.normpath(path.join(path.dirname(__file__),"../assets/setting.png")),
    path.normpath(path.join(path.dirname(__file__),"../assets/plus.png"))
    ]

CSS_LOGIN = path.normpath(path.join(path.dirname(__file__),"../styles/loginpage.css"))
CSS_BOOKPREVIEW = path.normpath(path.join(path.dirname(__file__),"../styles/bookpreview.css"))
CSS_MAINWINDOW = path.normpath(path.join(path.dirname(__file__),"../styles/mainwindow.css"))
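Every constant above repeats the same normpath(join(dirname(__file__), ...)) pattern, so a tiny helper keeps the intent readable; the helper name is made up, and the relative paths are the ones from the snippet:

from os import path

def _resource(relative):
    # Resolve `relative` against this file's directory and collapse the '..'.
    return path.normpath(path.join(path.dirname(__file__), relative))

local_database = _resource("../Database") + "/"
CSS_LOGIN = _resource("../styles/loginpage.css")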
Example #33
def verify_git_repo(git_exe,
                    git_dir,
                    git_url,
                    git_commits_since_tag,
                    debug=False,
                    expected_rev='HEAD'):
    env = os.environ.copy()
    log = utils.get_logger(__name__)

    if debug:
        stderr = None
    else:
        FNULL = open(os.devnull, 'w')
        stderr = FNULL

    if not expected_rev:
        return False

    OK = True

    env['GIT_DIR'] = git_dir
    try:
        # Verify current commit (minus our locally applied patches) matches expected commit
        current_commit = utils.check_output_env([
            git_exe, "log", "-n1", "--format=%H",
            "HEAD" + "^" * git_commits_since_tag
        ],
                                                env=env,
                                                stderr=stderr)
        current_commit = current_commit.decode('utf-8')
        expected_tag_commit = utils.check_output_env(
            [git_exe, "log", "-n1", "--format=%H", expected_rev],
            env=env,
            stderr=stderr)
        expected_tag_commit = expected_tag_commit.decode('utf-8')

        if current_commit != expected_tag_commit:
            return False

        # Verify correct remote url. Need to find the git cache directory,
        # and check the remote from there.
        cache_details = utils.check_output_env([git_exe, "remote", "-v"],
                                               env=env,
                                               stderr=stderr)
        cache_details = cache_details.decode('utf-8')
        cache_dir = cache_details.split('\n')[0].split()[1]

        if not isinstance(cache_dir, str):
            # On Windows, subprocess env can't handle unicode.
            cache_dir = cache_dir.encode(sys.getfilesystemencoding()
                                         or 'utf-8')

        try:
            remote_details = utils.check_output_env(
                [git_exe, "--git-dir", cache_dir, "remote", "-v"],
                env=env,
                stderr=stderr)
        except subprocess.CalledProcessError:
            if sys.platform == 'win32' and cache_dir.startswith('/'):
                cache_dir = utils.convert_unix_path_to_win(cache_dir)
            remote_details = utils.check_output_env(
                [git_exe, "--git-dir", cache_dir, "remote", "-v"],
                env=env,
                stderr=stderr)
        remote_details = remote_details.decode('utf-8')
        remote_url = remote_details.split('\n')[0].split()[1]

        # on windows, remote URL comes back to us as cygwin or msys format.  Python doesn't
        # know how to normalize it.  Need to convert it to a windows path.
        if sys.platform == 'win32' and remote_url.startswith('/'):
            remote_url = utils.convert_unix_path_to_win(git_url)

        if os.path.exists(remote_url):
            # Local filepaths are allowed, but make sure we normalize them
            remote_url = normpath(remote_url)

        # If the current source directory in conda-bld/work doesn't match the user's
        # metadata git_url or git_rev, then we aren't looking at the right source.
        if not os.path.isdir(
                remote_url) and remote_url.lower() != git_url.lower():
            log.debug("remote does not match git_url")
            log.debug("Remote: " + remote_url.lower())
            log.debug("git_url: " + git_url.lower())
            OK = False
    except subprocess.CalledProcessError as error:
        log.debug(
            "Error obtaining git information in verify_git_repo.  Error was: ")
        log.debug(str(error))
        OK = False
    finally:
        if not debug:
            FNULL.close()
    return OK
Example #34
def meta_vars(meta, skip_build_id=False):
    d = {}
    for var_name in ensure_list(meta.get_value('build/script_env', [])):
        if '=' in var_name:
            value = var_name.split('=')[-1]
            var_name = var_name.split('=')[0]
        else:
            value = os.getenv(var_name)
        if value is None:
            warnings.warn(
                "The environment variable '%s' is undefined." % var_name,
                UserWarning)
        else:
            d[var_name] = value
            warnings.warn(
                "The environment variable '%s' is being passed through with value '%s'.  "
                "If you are splitting build and test phases with --no-test, please ensure "
                "that this value is also set similarly at test time." %
                (var_name,
                 "<hidden>" if meta.config.suppress_variables else value),
                UserWarning)

    folder = meta.get_value('source/0/folder', '')
    repo_dir = join(meta.config.work_dir, folder)
    git_dir = join(repo_dir, '.git')
    hg_dir = join(repo_dir, '.hg')

    if not isinstance(git_dir, str):
        # On Windows, subprocess env can't handle unicode.
        git_dir = git_dir.encode(sys.getfilesystemencoding() or 'utf-8')

    git_exe = external.find_executable('git', meta.config.build_prefix)
    if git_exe and os.path.exists(git_dir):
        # We set all 'source' metavars using the FIRST source entry in meta.yaml.
        git_url = meta.get_value('source/0/git_url')

        if os.path.exists(git_url):
            if sys.platform == 'win32':
                git_url = utils.convert_unix_path_to_win(git_url)
            # If git_url is a relative path instead of a url, convert it to an abspath
            git_url = normpath(join(meta.path, git_url))

        _x = False

        if git_url:
            _x = verify_git_repo(git_exe, git_dir, git_url,
                                 meta.config.git_commits_since_tag,
                                 meta.config.debug,
                                 meta.get_value('source/0/git_rev', 'HEAD'))

        if _x or meta.get_value('source/0/path'):
            d.update(get_git_info(git_exe, git_dir, meta.config.debug))

    elif external.find_executable(
            'hg', meta.config.build_prefix) and os.path.exists(hg_dir):
        d.update(get_hg_build_info(hg_dir))

    # use `get_value` to prevent early exit while name is still unresolved during rendering
    d['PKG_NAME'] = meta.get_value('package/name')
    d['PKG_VERSION'] = meta.version()
    d['PKG_BUILDNUM'] = str(meta.build_number() or 0)
    if meta.final and not skip_build_id:
        d['PKG_BUILD_STRING'] = str(meta.build_id())
        d['PKG_HASH'] = meta.hash_dependencies()
    else:
        d['PKG_BUILD_STRING'] = 'placeholder'
        d['PKG_HASH'] = '1234567'
    d['RECIPE_DIR'] = meta.path
    return d
Example #35
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

#######################
# locale configuration #
#######################

LOCALE_PATHS = (normpath(join(DJANGO_ROOT, 'locale')), )

#######################
# Media configuration #
#######################

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = normpath(join(DJANGO_ROOT, 'media'))

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'

##############################
Example #36
 def _trpath(cls, path):
     return op.normpath(op.expanduser(op.expandvars(path)))
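The one-liner above chains three translations in a fixed order: environment variables first, then the user's home directory, then path normalization. A small demonstration of what each stage contributes (the variable name and paths are illustrative):

import os
from os import path as op

os.environ['APP_NAME'] = 'demo'                    # illustrative value
raw = '~/projects/../$APP_NAME/logs'
step1 = op.expandvars(raw)      # '$APP_NAME' -> 'demo'
step2 = op.expanduser(step1)    # '~'         -> the current user's home
step3 = op.normpath(step2)      # collapse '..' and redundant separators
print(step3)                    # e.g. /home/user/demo/logs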
Example #37
class Q7OptionContext(object):
    Q_VAR_NODE = 'NODE'
    Q_VAR_PARENT = 'PARENT'
    Q_VAR_NAME = 'NAME'
    Q_VAR_VALUE = 'VALUE'
    Q_VAR_CGNSTYPE = 'SIDSTYPE'
    Q_VAR_CHILDREN = 'CHILDREN'
    Q_VAR_TREE = 'TREE'
    Q_VAR_PATH = 'PATH'
    Q_VAR_LINKS = 'LINKS'
    Q_VAR_SKIPS = 'SKIPS'
    Q_VAR_RESULT = 'RESULT'
    Q_VAR_USER = '******'
    Q_VAR_SELECTED = 'SELECTED'
    Q_VAR_RESULT_LIST = '__Q7_QUERY_RESULT__'

    Q_SCRIPT_PRE = """
import CGNS.PAT.cgnskeywords as CGK
import CGNS.PAT.cgnsutils as CGU
import CGNS.PAT.cgnslib as CGL
import numpy
"""

    Q_FILE_PRE = """
import CGNS.PAT.cgnskeywords as CGK
import CGNS.PAT.cgnsutils as CGU
import CGNS.PAT.cgnslib as CGL
import CGNS.NAV.moption as CGO
import numpy
"""

    Q_SCRIPT_POST = """
try:
  %s[0]=%s
except NameError:
  %s[0]=None
""" % (Q_VAR_RESULT_LIST, Q_VAR_RESULT, Q_VAR_RESULT_LIST)

    _depends = {
        'MaxDisplayDataSize': ['DoNotDisplayLargeData'],
        'MaxLoadDataSize': ['DoNotLoadLargeArrays'],
        'MaxRecursionLevel': ['RecursiveTreeDisplay'],
    }
    _HasProPackage = True
    CHLoneTrace = False
    QueryNoException = False
    ActivateMultiThreading = False
    NAVTrace = False
    AutoExpand = False
    RecursiveTreeDisplay = False
    OneViewPerTreeNode = False
    ShowTableIndex = True
    RecursiveSIDSPatternsLoad = True
    DoNotDisplayLargeData = False
    CheckOnTheFly = False
    FollowLinksAtLoad = True
    DoNotFollowLinksAtSave = True
    AddCurrentDirInSearch = True
    AddRootDirInSearch = True
    DoNotLoadLargeArrays = True
    ShowSIDSStatusColumn = True
    ForceSIDSLegacyMapping = False
    ForceFortranFlag = True
    FilterCGNSFiles = True
    FilterHDFFiles = True
    FilterOwnFiles = True
    FileUpdateRemovesChildren = True
    TransposeArrayForView = True
    Show1DAsPlain = True
    ShowSIDSColumn = True
    ShowLinkColumn = True
    ShowSelectColumn = True
    ShowUserColumn = True
    ShowCheckColumn = True
    ShowShapeColumn = True
    ShowDataTypeColumn = True
    UserCSS = ""
    SelectionListDirectory = op.normpath(op.join(conf_path, 'selections'))
    QueriesDirectory = op.normpath(op.join(conf_path, 'queries'))
    FunctionsDirectory = op.normpath(op.join(conf_path, 'functions'))
    SnapShotDirectory = op.normpath(op.join(conf_path, 'snapshots'))
    _HistoryFileName = op.normpath(op.join(conf_path, 'historyfile.py'))
    _OptionsFileName = op.normpath(op.join(conf_path, 'optionsfile.py'))
    _QueriesDefaultFile = 'default.py'
    _FunctionsDefaultFile = 'default.py'
    IgnoredMessages = []
    LinkSearchPathList = []
    ProfileSearchPathList = []
    GrammarSearchPathList = []
    ValKeyList = ['sample']
    CGNSFileExtension = ['.cgns', '.adf']
    HDFFileExtension = ['.hdf', '.hdf5']
    OwnFileExtension = ['.cgh']
    MaxLoadDataSize = 1000
    MaxDisplayDataSize = 1000
    MaxRecursionLevel = 7
    ADFConversionCom = 'cgnsconvert'
    TemporaryDirectory = op.normpath(tmp_path)
    _ConvertADFFiles = True
    _ToolName = 'CGNS.NAV'
    _ToolVersion = '%s' % __vid__

    _CopyrightNotice = u"""
Copyright (c) Marc Poinot <br>
Copyright (c) Onera - The French Aerospace Labs<br><br>
<b>all other copyrights and used versions listed below</b>

<hr>
<h3>Contributors (alphabetic order)</h3>
<table>
<tr><td>Florent Cayré</td><td>-SNECMA, France</td></tr>
<tr><td>Alexandre Fayolle</td><td>-LOGILAB, France</td></tr>
<tr><td>Loic Hauchard</td><td>-ONERA (Student INSA Rouen, France)</td></tr>
<tr><td>Elise Hénaux</td><td>-ONERA (Student FIIFO Orsay, France)</td></tr>
<tr><td>Grégory Laheurte</td><td>-ONERA (DSNA/CS2A), France</td></tr>
<tr><td>Pierre-Jacques Legay</td><td>-BERTIN, France</td></tr>
<tr><td>Bill Perkins</td><td>-Pacific Northwest Ntl Lab, U.S.A.</td></tr>
<tr><td>Jérôme Regis</td><td>-STILOG, France</td></tr>
<tr><td>Benoit Rodriguez</td><td>-ONERA (DAAP/H2T), France</td></tr>
<tr><td>Tristan Soubrié</td><td>-ANDHEO, France</td></tr>
<tr><td>Francois Thirifays</td><td>-CENAERO, Belgique</td></tr>
<tr><td>Simon Verley</td><td>-ONERA (DADS/MSAE), France</td></tr>
<tr><td>Ching-Yang Wang</td><td>-U.S.A.</td></tr>
</table>

<h2>Copyrights & Versions</h2>
<hr>
%(pycgnsversion)s<br>
All <b>pyCGNS</b> 
rights reserved in accordance with GPL v2 <br><br>
<b>NO WARRANTY :</b><br>
Check GPL v2 sections 15 and 16
about <font color=red>loss of data or corrupted data</font><br>
<hr>
%(pyqtversion)s<br>
PyQt Copyright (c) Riverbank Computing Limited. <br>
Usage within the terms of the GPL v2.<br>
All Rights Reserved.<br>
<hr>
%(qtversion)s<br>
Qt Copyright (c)<br>
The Qt4 Library is (c) 2011 Nokia Corporation and/or its subsidiary(-ies),
and is licensed under the GNU Lesser General Public License version 2.1
with Nokia Qt LGPL exception version 1.1. <br>
<hr>
%(pythonversion)s<br>
Python Copyright (c)<br>
Copyright (c) 2001-2011 Python Software Foundation.<br>
All Rights Reserved.<br>
<br>
Copyright (c) 2000 BeOpen.com.<br>
All Rights Reserved.<br>
<br>
Copyright (c) 1995-2001 Corporation for National Research Initiatives.<br>
All Rights Reserved.<br>
<br>
Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.<br>
All Rights Reserved.<br>
<br>
<hr>
%(numpyversion)s<br>
Numpy Copyright (c)<br>
Copyright (c) 2005, NumPy Developers<br>
<hr>
%(hdf5version)s<br>
HDF5 Copyright (c)<br>
HDF5 (Hierarchical Data Format 5) Software Library and Utilities<br>
Copyright 2006-2013 by The HDF Group.<br>
NCSA HDF5 (Hierarchical Data Format 5) Software Library and Utilities<br>
Copyright 1998-2006 by the Board of Trustees of the University of Illinois.<br>
<hr>
%(vtkversion)s<br>
VTK Copyright (c)<br>
Copyright (c) 1993-2008 Ken Martin, Will Schroeder, Bill Lorensen <br>
All rights reserved.<br>
<hr>
%(cythonversion)s<br>
Cython Copyright (c)<br>
(c) Copyright 2012, Stefan Behnel, Robert Bradshaw, Dag Sverre Seljebotn,
Greg Ewing, William Stein, Gabriel Gellner, et al.
<hr>
Icons Copyright (c)<br>
All these nice icons are provided or are modified versions of original
icons provided by Mark James (Birmingham, UK).<br>
Please visit his web site: http://www.famfamfam.com/<br>

"""
    #__fv = QFont(QFontDatabase().families()[0]) # Spyder-like but needs a QApplication
    __fv = QFont('Courier new')
    __fc = QFont('Courier new')

    _Label_Font = QFont(__fv)
    _Button_Font = QFont(__fv)
    _Table_Font = QFont(__fc)
    _Edit_Font = QFont(__fc)
    _RName_Font = QFont(__fc)
    _NName_Font = QFont(__fc)
    _NName_Font.setBold(True)

    _Default_Fonts = {
        'Label_Family': _Label_Font.family(),
        'Label_Size': _Label_Font.pointSize(),
        'Label_Bold': False,
        'Label_Italic': False,
        'Table_Family': _Table_Font.family(),
        'Table_Size': _Table_Font.pointSize(),
        'Table_Bold': False,
        'Table_Italic': False,
        'Edit_Family': _Edit_Font.family(),
        'Edit_Size': _Edit_Font.pointSize(),
        'Edit_Bold': False,
        'Edit_Italic': False,
        'Button_Family': _Button_Font.family(),
        'Button_Size': _Button_Font.pointSize(),
        'Button_Bold': False,
        'Button_Italic': False,
        'RName_Family': _RName_Font.family(),
        'RName_Size': _RName_Font.pointSize(),
        'RName_Bold': False,
        'RName_Italic': False,
        'NName_Family': _NName_Font.family(),
        'NName_Size': _NName_Font.pointSize(),
        'NName_Bold': False,
        'NName_Italic': True,
    }

    UserColors = [
        'gray',
        'red',
        'green',
        'blue',
        'orange',
        None,
        None,
        None,
        None,
        None,
    ]

    _ColorList = {
        'cold_grey': (0.5000, 0.5400, 0.5300),
        'dim_grey': (0.4118, 0.4118, 0.4118),
        'grey': (0.7529, 0.7529, 0.7529),
        'light_grey': (0.8275, 0.8275, 0.8275),
        'slate_grey': (0.4392, 0.5020, 0.5647),
        'slate_grey_dark': (0.1843, 0.3098, 0.3098),
        'slate_grey_light': (0.4667, 0.5333, 0.6000),
        'warm_grey': (0.5000, 0.5000, 0.4100),
        'black': (0.0000, 0.0000, 0.0000),
        'ivory_black': (0.1600, 0.1400, 0.1300),
        'lamp_black': (0.1800, 0.2800, 0.2300),
        'alizarin_crimson': (0.8900, 0.1500, 0.2100),
        'brick': (0.6100, 0.4000, 0.1200),
        'coral': (1.0000, 0.4980, 0.3137),
        'coral_light': (0.9412, 0.5020, 0.5020),
        'deep_pink': (1.0000, 0.0784, 0.5765),
        'firebrick': (0.6980, 0.1333, 0.1333),
        'geranium_lake': (0.8900, 0.0700, 0.1900),
        'hot_pink': (1.0000, 0.4118, 0.7059),
        'light_salmon': (1.0000, 0.6275, 0.4784),
        'madder_lake_deep': (0.8900, 0.1800, 0.1900),
        'maroon': (0.6902, 0.1882, 0.3765),
        'pink': (1.0000, 0.7529, 0.7961),
        'pink_light': (1.0000, 0.7137, 0.7569),
        'raspberry': (0.5300, 0.1500, 0.3400),
        'rose_madder': (0.8900, 0.2100, 0.2200),
        'salmon': (0.9804, 0.5020, 0.4471),
        'tomato': (1.0000, 0.3882, 0.2784),
        'beige': (0.6400, 0.5800, 0.5000),
        'brown': (0.5000, 0.1647, 0.1647),
        'brown_madder': (0.8600, 0.1600, 0.1600),
        'brown_ochre': (0.5300, 0.2600, 0.1200),
        'burlywood': (0.8706, 0.7216, 0.5294),
        'burnt_sienna': (0.5400, 0.2100, 0.0600),
        'burnt_umber': (0.5400, 0.2000, 0.1400),
        'chocolate': (0.8235, 0.4118, 0.1176),
        'deep_ochre': (0.4500, 0.2400, 0.1000),
        'flesh': (1.0000, 0.4900, 0.2500),
        'flesh_ochre': (1.0000, 0.3400, 0.1300),
        'gold_ochre': (0.7800, 0.4700, 0.1500),
        'greenish_umber': (1.0000, 0.2400, 0.0500),
        'khaki': (0.9412, 0.9020, 0.5490),
        'khaki_dark': (0.7412, 0.7176, 0.4196),
        'light_beige': (0.9608, 0.9608, 0.8627),
        'peru': (0.8039, 0.5216, 0.2471),
        'rosy_brown': (0.7373, 0.5608, 0.5608),
        'raw_sienna': (0.7800, 0.3800, 0.0800),
        'raw_umber': (0.4500, 0.2900, 0.0700),
        'sepia': (0.3700, 0.1500, 0.0700),
        'sienna': (0.6275, 0.3216, 0.1765),
        'saddle_brown': (0.5451, 0.2706, 0.0745),
        'sandy_brown': (0.9569, 0.6431, 0.3765),
        'tan': (0.8235, 0.7059, 0.5490),
        'van_dyke_brown': (0.3700, 0.1500, 0.0200),
        'cadmium_orange': (1.0000, 0.3800, 0.0100),
        'carrot': (0.9300, 0.5700, 0.1300),
        'dark_orange': (1.0000, 0.5490, 0.0000),
        'mars_orange': (0.5900, 0.2700, 0.0800),
        'mars_yellow': (0.8900, 0.4400, 0.1000),
        'orange': (1.0000, 0.5000, 0.0000),
        'orange_red': (1.0000, 0.2706, 0.0000),
        'yellow_ochre': (0.8900, 0.5100, 0.0900),
        'aureoline_yellow': (1.0000, 0.6600, 0.1400),
        'banana': (0.8900, 0.8100, 0.3400),
        'cadmium_lemon': (1.0000, 0.8900, 0.0100),
        'cadmium_yellow': (1.0000, 0.6000, 0.0700),
        'cadmium_yellow_light': (1.0000, 0.6900, 0.0600),
        'gold': (1.0000, 0.8431, 0.0000),
        'goldenrod': (0.8549, 0.6471, 0.1255),
        'goldenrod_dark': (0.7216, 0.5255, 0.0431),
        'goldenrod_light': (0.9804, 0.9804, 0.8235),
        'goldenrod_pale': (0.9333, 0.9098, 0.6667),
        'light_goldenrod': (0.9333, 0.8667, 0.5098),
        'melon': (0.8900, 0.6600, 0.4100),
        'naples_yellow_deep': (1.0000, 0.6600, 0.0700),
        'yellow': (1.0000, 1.0000, 0.0000),
        'yellow_light': (1.0000, 1.0000, 0.8784),
        'chartreuse': (0.4980, 1.0000, 0.0000),
        'chrome_oxide_green': (0.4000, 0.5000, 0.0800),
        'cinnabar_green': (0.3800, 0.7000, 0.1600),
        'cobalt_green': (0.2400, 0.5700, 0.2500),
        'emerald_green': (0.0000, 0.7900, 0.3400),
        'forest_green': (0.1333, 0.5451, 0.1333),
        'green': (0.0000, 1.0000, 0.0000),
        'green_dark': (0.0000, 0.3922, 0.0000),
        'green_pale': (0.5961, 0.9843, 0.5961),
        'green_yellow': (0.6784, 1.0000, 0.1843),
        'lawn_green': (0.4863, 0.9882, 0.0000),
        'lime_green': (0.1961, 0.8039, 0.1961),
        'mint': (0.7400, 0.9900, 0.7900),
        'olive': (0.2300, 0.3700, 0.1700),
        'olive_drab': (0.4196, 0.5569, 0.1373),
        'olive_green_dark': (0.3333, 0.4196, 0.1843),
        'permanent_green': (0.0400, 0.7900, 0.1700),
        'sap_green': (0.1900, 0.5000, 0.0800),
        'sea_green': (0.1804, 0.5451, 0.3412),
        'sea_green_dark': (0.5608, 0.7373, 0.5608),
        'sea_green_medium': (0.2353, 0.7020, 0.4431),
        'sea_green_light': (0.1255, 0.6980, 0.6667),
        'spring_green': (0.0000, 1.0000, 0.4980),
        'spring_green_medium': (0.0000, 0.9804, 0.6039),
        'terre_verte': (0.2200, 0.3700, 0.0600),
        'viridian_light': (0.4300, 1.0000, 0.4400),
        'yellow_green': (0.6039, 0.8039, 0.1961),
        'aquamarine': (0.4980, 1.0000, 0.8314),
        'aquamarine_medium': (0.4000, 0.8039, 0.6667),
        'cyan': (0.0000, 1.0000, 1.0000),
        'cyan_white': (0.8784, 1.0000, 1.0000),
        'turquoise': (0.2510, 0.8784, 0.8157),
        'turquoise_dark': (0.0000, 0.8078, 0.8196),
        'turquoise_medium': (0.2824, 0.8196, 0.8000),
        'turquoise_pale': (0.6863, 0.9333, 0.9333),
        'alice_blue': (0.9412, 0.9725, 1.0000),
        'blue': (0.0000, 0.0000, 1.0000),
        'blue_light': (0.6784, 0.8471, 0.9020),
        'blue_medium': (0.0000, 0.0000, 0.8039),
        'cadet': (0.3725, 0.6196, 0.6275),
        'cobalt': (0.2400, 0.3500, 0.6700),
        'cornflower': (0.3922, 0.5843, 0.9294),
        'cerulean': (0.0200, 0.7200, 0.8000),
        'dodger_blue': (0.1176, 0.5647, 1.0000),
        'indigo': (0.0300, 0.1800, 0.3300),
        'manganese_blue': (0.0100, 0.6600, 0.6200),
        'midnight_blue': (0.0980, 0.0980, 0.4392),
        'navy': (0.0000, 0.0000, 0.5020),
        'peacock': (0.2000, 0.6300, 0.7900),
        'powder_blue': (0.6902, 0.8784, 0.9020),
        'royal_blue': (0.2549, 0.4118, 0.8824),
        'slate_blue': (0.4157, 0.3529, 0.8039),
        'slate_blue_dark': (0.2824, 0.2392, 0.5451),
        'slate_blue_light': (0.5176, 0.4392, 1.0000),
        'slate_blue_medium': (0.4824, 0.4078, 0.9333),
        'sky_blue': (0.5294, 0.8078, 0.9216),
        'sky_blue_deep': (0.0000, 0.7490, 1.0000),
        'sky_blue_light': (0.5294, 0.8078, 0.9804),
        'steel_blue': (0.2745, 0.5098, 0.7059),
        'steel_blue_light': (0.6902, 0.7686, 0.8706),
        'turquoise_blue': (0.0000, 0.7800, 0.5500),
        'ultramarine': (0.0700, 0.0400, 0.5600),
        'blue_violet': (0.5412, 0.1686, 0.8863),
        'cobalt_violet_deep': (0.5700, 0.1300, 0.6200),
        'magenta': (1.0000, 0.0000, 1.0000),
        'orchid': (0.8549, 0.4392, 0.8392),
        'orchid_dark': (0.6000, 0.1961, 0.8000),
        'orchid_medium': (0.7294, 0.3333, 0.8275),
        'permanent_red_violet': (0.8600, 0.1500, 0.2700),
        'plum': (0.8667, 0.6275, 0.8667),
        'purple': (0.6275, 0.1255, 0.9412),
        'purple_medium': (0.5765, 0.4392, 0.8588),
        'ultramarine_violet': (0.3600, 0.1400, 0.4300),
        'violet': (0.5600, 0.3700, 0.6000),
        'violet_dark': (0.5804, 0.0000, 0.8275),
        'violet_red': (0.8157, 0.1255, 0.5647),
        'violet_red_medium': (0.7804, 0.0824, 0.5216),
        'violet_red_pale': (0.8588, 0.4392, 0.5765),
    }

    _ReservedNames = CGK.cgnsnames
    _ReservedTypes = CGK.cgnstypes
    _SortedTypeList = CGK.sortedtypelist

    _UsualQueries = [

        # ---------------------------------------------------------------------------
        # INDENTATION IS SIGNIFICANT
        # ---------------------------------------------------------------------------

        # --- Search -----------------------------------------------------------
        # last two booleans: Update tree, has args
        ('001. Node name', 'Search by', 'RESULT=(NAME==ARGS[0])', """
Search by
Node name

Search all nodes with the exact NAME as argument.

The argument name need not be a tuple or have quotes;
all of the following values are accepted and would match the NAME <i>ZoneType</i>:

ZoneType
'ZoneType'
('ZoneType',)
""", False, True),
        ('002. Wildcard node name', 'Search by', """import fnmatch
RESULT=fnmatch.fnmatchcase(NAME,ARGS[0])
""", """
Search by
Wildcard node name

Search all nodes with the wildcard NAME as argument.

Warning: the <b>argument name</b> should be quoted:

'BC*' is ok

BC* would fail
""", False, True),
        ('003. Node type', 'Search by', 'RESULT=(SIDSTYPE==ARGS[0])',
         """search all nodes with argument SIDS type.""", False, True),
        ('005. Node value', 'Search by', """
from numpy import *
target=eval(ARGS[0])
if   (VALUE is None and target is None): RESULT=True
elif (VALUE is None)            : RESULT=False
elif (target.dtype!=VALUE.dtype): RESULT=False
elif (target.size!=VALUE.size):   RESULT=False
elif (target.shape!=VALUE.shape): RESULT=False
elif (target.tolist()==VALUE.tolist()): RESULT=True
else:                             RESULT=False
""", """search all nodes with argument value. The compare is performed
using a straightforward '==' and then relies on the Python/Numpy comparison
operator.""", False, True),
        ('010. Node with truncated data', 'Search by',
         'if (PATH in SKIPS): RESULT=PATH',
         """search all nodes with truncated or unread data, for example if you have set
the maximum data argument for the load, or if you release the memory of a
node.""", False, False),
        ('004. Wildcard node type', 'Search by', """
import fnmatch
RESULT=fnmatch.fnmatchcase(SIDSTYPE,ARGS[0])
""", """
Search by
Wildcard node type

Search all nodes with wildcard argument SIDS type.
Warning: the <b>argument type</b> should be quoted:

'Turb*' is ok

Turb* would fail
""", False, True),
        ('011. Non-MT UserDefinedData', 'Search by', """
if (    (SIDSTYPE==CGK.UserDefinedData_ts)
    and (CGU.getValueDataType(NODE)!=CGK.MT)):
  RESULT=True
""", """
Search by
Valued UserDefinedData

Search all <b>UserDefinedData_t</b> nodes with a non-<b>MT</b> data type.

No argument.
""", False, False),
        # -----------------------------------------------------------------------------
        ('012. FamilyName', 'Search by', """
if (SIDSTYPE in [CGK.FamilyName_ts, CGK.AdditionalFamilyName_ts]):
    RESULT=True
""", """
Search by
All <b>FamilyName_t</b> and <b>AdditionalFamilyName_t</b> nodes.
""", False, False),
        # -----------------------------------------------------------------------------
        ('013. FamilyName reference', 'Search by', """
if ((SIDSTYPE in [CGK.FamilyName_ts, CGK.AdditionalFamilyName_ts]) and
    (VALUE.tostring().decode('ascii')==ARGS[0])):
    RESULT=True
""", """
Search by<br>
Reference to a FamilyName<br>
 
Search all <b>FamilyName</b> nodes with the arg string (plain).<br>
The string arg should be a valid Python string such as:<br>
 
'BLADE(1)'<br>
'Surface ext 1A'<br>
'Left/Wing/Flap'<br>
 
""", False, True),
        # -----------------------------------------------------------------------------
        ('014. Zones', 'Search by', """
if (SIDSTYPE in [CGK.Zone_ts]):
    RESULT=True
""", """
Search by
All <b>Zone_t</b> nodes.
""", False, False),
        # -----------------------------------------------------------------------------
        ('015. Zones Structured', 'Search by', """
if (SIDSTYPE in [CGK.Zone_ts]):
    t=CGU.hasChildName(NODE,CGK.ZoneType_s)
    if (t is None or CGU.stringValueMatches(t,CGK.Structured_s)):
      RESULT=True
""", """
Search by
All <b>Zone_t</b> with Structured <b>ZoneType</b> nodes.
""", False, False),
        # -----------------------------------------------------------------------------
        ('016. Zones Unstructured', 'Search by', """
if (SIDSTYPE in [CGK.Zone_ts]):
    t=CGU.hasChildName(NODE,CGK.ZoneType_s)
    if (t is not None and CGU.stringValueMatches(t,CGK.Unstructured_s)):
      RESULT=True
""", """
Search by
All <b>Zone_t</b> with Unstructured <b>ZoneType</b> nodes.
""", False, False),
        # -----------------------------------------------------------------------------
        ('017. BCs', 'Search by', """
if (SIDSTYPE in [CGK.BC_ts]):
    RESULT=True
""", """
Search by
All <b>BC_t</b> nodes.
""", False, False),

        # --- Replace
        # -----------------------------------------------------------------------------
        ('050. Valued UserDefinedData', 'Replace', """
if (     (SIDSTYPE==CGK.UserDefinedData_ts)
     and (CGU.getValueDataType(NODE)!=CGK.MT)):
  NODE[3]=CGK.DataArray_ts
  RESULT=True
""", """
Replace
Valued UserDefinedData
 
Search all <b>UserDefinedData_t</b> nodes with a non-<b>MT</b> data type
and replace them as <b>DataArray_t</b>.""", False, False),
        ('051. Substitute Zone name', 'Replace', """
l1=len(ARGS[0])
if ((SIDSTYPE==CGK.Zone_ts) and (NAME[:l1]==ARGS[0])):
  NODE[0]=ARGS[1]+NODE[0][l1:]
  RESULT=True

if (CGU.getValueDataType(NODE)==CGK.C1):
  v=VALUE.tostring().decode('ascii')
  if (v[:l1]==ARGS[0]):
    v=ARGS[1]+v[l1:]
    NODE[1]=CGU.setStringAsArray(v)
    RESULT=True
""", """
<h1>Replace</h1>
<h2>Substitute Zone name</h2>
<p>
Search all <b>Zone_t</b> nodes with a name pattern, then rename the
zone with the substitution pattern. Any other reference in the tree,
such as a connectivity value, is substituted as well.
<p>
The argument is a tuple with the pattern to find first and
the substitution pattern second. For example:
<pre>
('domain.','zone#')
</pre>
""", True, True),

        # -----------------------------------------------------------------------------
        ('052. FamilySpecified BC type rewrite', 'Replace', """
if ((SIDSTYPE in [CGK.FamilyName_ts, CGK.AdditionalFamilyName_ts]) and
    (VALUE.tostring().decode('ascii')==ARGS[0]) and (PARENT[3]==CGK.BC_ts)):
    PARENT[1]=CGU.setStringAsArray(CGK.FamilySpecified_s)
    RESULT=True
""", """
<h1>Replace</h1>
<h2>FamilySpecified BC type rewrite</h2>
<p>
Search all <b>FamilyName BC</b> nodes with the arg string (plain).<br>
The string arg should be a valid Python string such as:<br>

'BLADE(1)'<br>
'Surface ext 1A'<br>
'Left/Wing/Flap'<br>

<p>
Once found, the parent <b>BC_t</b> value is forced to <b>FamilySpecified</b>

""", True, True),

        # --- Find Elements_t
        ('020. Elements', 'Find Elements_t', """if (SIDSTYPE==CGK.Elements_ts):
  RESULT=True
""", """Find all <b>Elements_t</b> nodes """, False, False),
        ('021. Elements QUAD', 'Find Elements_t',
         """if (SIDSTYPE==CGK.Elements_ts):
  RESULT=VALUE[0] in (CGK.QUAD_4, CGK.QUAD_8, CGK.QUAD_9)
""", """Find all <b>Elements_t</b> nodes of type <b>QUAD</b>""", False, False),
        ('022. Elements TRI', 'Find Elements_t',
         """if (SIDSTYPE==CGK.Elements_ts):
  RESULT=VALUE[0] in (CGK.TRI_3, CGK.TRI_6)
""", """Find all <b>Elements_t</b> nodes of type <b>TRI</b>""", False, False),
        ('023. Elements NGON', 'Find Elements_t',
         """if (SIDSTYPE==CGK.Elements_ts):
  RESULT=VALUE[0] in (CGK.NGON_n,)
""", """Find all <b>Elements_t</b> nodes of type <b>NGON_n</b>""", False,
         False),
        ('024. Elements HEXA', 'Find Elements_t',
         """if (SIDSTYPE==CGK.Elements_ts):
  RESULT=VALUE[0] in (CGK.HEXA_8, CGK.HEXA_20, CGK.HEXA_27)
""", """Find all <b>Elements_t</b> nodes of type <b>HEXA</b>""", False, False),
        ('025. Elements TETRA', 'Find Elements_t',
         """if (SIDSTYPE==CGK.Elements_ts):
  RESULT=VALUE[0] in (CGK.TETRA_4, CGK.TETRA_10)
""", """Find all <b>Elements_t</b> nodes of type <b>TETRA</b>""", False,
         False),

        # --- External Tools
        ('030. Create Cartesian Zone', 'External Tools', """
if (SIDSTYPE==CGK.CGNSTree_ts):
    import Generator.PyTree as G
    z=G.cart((0.,0.,0.), (0.1,0.1,0.2), (10,11,12))
    b=None
    base='BASE'
    if (len(ARGS)>0):
      base=ARGS[0]
      b=CGU.hasChildName(NODE,base)
    if (b is None):
      base=CGU.checkUniqueChildName(NODE,base)
      b=CGL.newCGNSBase(NODE,base,3,3)
    CGU.addChild(b,z)
""", """Example of Cartesian zone creation using Cassiopee.
The first argument is the base name; if omitted, a name is generated.""", True,
         True),
        ('031. Bounding boxes', 'External Tools', """
if (SIDSTYPE==CGK.Zone_ts):
    import Generator as G
    RESULT=G.bbox(NODE)
""", """Example of Bounding box computation using Cassiopee"""),
        ('100. .Solver#Compute children', 'Edit filters', """
if (PARENT[0]=='.Solver#Compute'):
    RESULT=PATH
""", """Selects all children nodes of the .Solver#Compute elsA userdefined node""",
         False, False),
        ('101. ReferenceState children', 'Edit filters', """
if (PARENT[0]=='ReferenceState'):
    RESULT=PATH
""", """Selects all children nodes of the ReferenceState node""", False,
         False),
        ('102. .Solver#Param children', 'Edit filters', """
if (PARENT[0]=='.Solver#Param'):
   RESULT=PATH
""", """Selects all children nodes of the .Solver#Param elsA userdefined node""",
         False, False),
    ]
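    # Layout of the query entries above (inferred from the entries themselves):
    # each tuple appears to be
    #   (name, category, script, HTML doc string, update-tree flag, has-args flag),
    # and the scripts seem to be evaluated with NODE, NAME, VALUE, SIDSTYPE,
    # PARENT, PATH, ARGS, SKIPS and the CGK/CGU/CGL modules pre-bound; a query
    # selects a node by assigning a truthy value to RESULT.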

    # -----------------------------------------------------------------
    @classmethod
    def _setOption(cls, name, value):
        setattr(cls, name, value)

    @classmethod
    def _writeFile(cls, tag, name, udata, filename, prefix=""):
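        # Serializes `udata` into an importable Python module of the form
        # "<name> = {...}" (or "[...]"), which _readFile can later import back.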
        gdate = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        s = u"""# %s - %s - Generated %s\n# coding: utf-8\n%s\nimport PyQt5\n%s=""" % \
            (cls._ToolName, tag, gdate, prefix, name)
        if isinstance(udata, dict):
            s += u"""{\n"""
            for k in udata:
                #          print 'K',k,'V',udata[k],'T',type(udata[k])
                if k[0] != '_':
                    val = '%s' % str(udata[k])
                    if isinstance(udata[k], str):
                        val = u'u"""%s"""' % repr(
                            udata[k]).lstrip("u'").lstrip("'").rstrip(
                                "'")  # Error
                    elif isinstance(udata[k], bytes):
                        val = u'u"""%s"""' % repr(udata[k].decode(
                            'utf-8')).lstrip("u'").lstrip("'").rstrip("'")
                    if not isinstance(k, str):
                        uk = u"u'%s'" % repr(k.decode('utf-8')).lstrip(
                            "u'").lstrip("'").rstrip("'")
                    else:
                        uk = u"u'%s'" % repr(k).lstrip("u'").lstrip(
                            "'").rstrip("'")
                    s += """%s:%s,\n""" % (uk, val)
            s += u"""}\n\n# --- last line\n"""
        elif isinstance(udata, list):
            s += u"""[\n"""
            for k in udata:
                s += u"""%s,\n""" % (k)
            s += u"""]\n\n# --- last line\n"""
        cls._crpath(filename)
        with open(filename, 'w+') as f:
            f.write(s)

    @classmethod
    def _readFile(cls, name, filename):
        dpath = tempfile.mkdtemp()
        if not op.exists(filename):
            return None
        try:
            copyOneFile(filename, '%s/%s.py' % (dpath, name))
        except IOError:
            removeSubDirAndFiles(dpath)
            return None
        sprev = sys.path
        sys.path = [dpath] + sys.path
        try:
            fp, pathname, description = imp.find_module(name)
        except ImportError:
            return None
        try:
            mod = imp.load_module(name, fp, pathname, description)
        finally:
            if fp:
                fp.close()
        removeSubDirAndFiles(dpath)
        sys.path = sprev
        return mod

    @classmethod
    def _crpath(cls, path):
        p = op.dirname(path)
        if op.exists(p):
            return True
        os.makedirs(p)

    @classmethod
    def _trpath(cls, path):
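        # Expands '~' and environment variables before normalizing, e.g. a value
        # like '~/some/file.py' or '$HOME/some/file.py' resolves to an absolute
        # per-user path (assuming HOME is set).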
        return op.normpath(op.expanduser(op.expandvars(path)))

    @classmethod
    def _writeHistory(cls, control):
        filename = cls._trpath(cls._HistoryFileName)
        cls._writeFile('History', 'history', control._history, filename)

    @classmethod
    def _readHistory(cls, control):
        filename = cls._trpath(cls._HistoryFileName)
        m = cls._readFile('history', filename)
        if m is None:
            return None
        try:
            return m.history
        except:
            return None

    @classmethod
    def _writeOptions(cls, control):
        filename = cls._trpath(cls._OptionsFileName)
        cls._writeFile('User options', 'options', control._options, filename)

    @classmethod
    def _readOptions(cls, control):
        filename = cls._trpath(cls._OptionsFileName)
        m = cls._readFile('options', filename)
        if m is None:
            return None
        try:
            return m.options
        except:
            return None

    @classmethod
    def _writeQueries(cls, control, q):
        filename = cls._trpath(
            op.join(cls.QueriesDirectory, cls._QueriesDefaultFile))
        cls._writeFile('User queries', 'queries', q, filename, cls.Q_FILE_PRE)

    @classmethod
    def _readQueries(cls, control):
        filename = cls._trpath(
            op.join(cls.QueriesDirectory, cls._QueriesDefaultFile))
        m = cls._readFile('queries', filename)
        if m is None:
            return None
        try:
            return m.queries
        except:
            return None

    @classmethod
    def _writeFunctions(cls, control, f):
        filename = cls._trpath(
            op.join(cls.FunctionsDirectory, cls._FunctionsDefaultFile))
        cls._writeFile('User functions', 'functions', f, filename,
                       cls.Q_FILE_PRE)

    @classmethod
    def _readFunctions(cls, control):
        filename = cls._trpath(
            op.join(cls.FunctionsDirectory, cls._FunctionsDefaultFile))
        m = cls._readFile('functions', filename)
        if m is None:
            return None
        try:
            return m.Q7UserFunction
        except:
            return None

    def __init__(self):
        pass

    def __getitem__(self, name):
        if name[0] != '_':
            return Q7OptionContext.__dict__[name]
        return None

    def __setitem__(self, name, value):
        if name[0] != '_':
            setattr(Q7OptionContext, name, value)
        return None

    def __iter__(self):
        for o in dir(self):
            if o[0] != '_':
                yield o

    def _nextName(self):
        for o in dir(self):
            if o[0] != '_':
                yield o
示例#38
0
        def run(self):
            #=============
            # CHECK INPUT:
            #=============
            # Check prosite.dat user has unzip prosite.dat.zip
            if not os.path.exists("prosite.dat"):
                warning_massage = "Please, unzip prosite.dat.zip. \nLeave prosite.dat file in the same folder where main.py can be found"
                tkinter.messagebox.showwarning("Warning!", warning_massage)
                return

            # Check gbks, query, id and cov
            warning_dict = {
                'gbk': [
                    "Please, choose at least one gbk file.\n\n",
                    "The following file(s) is/are not in gbk format:\n"
                ],
                'query': [
                    "Please, choose the fasta file with your query/queries.\n\n",
                    "The file with your query is not in fasta format.\n\n"
                ],
                'cov':
                'Please, choose a coverage cut-off.\nRemember to save it once you choose it.\n\n',
                'id':
                'Please, choose an identity cut-off.\nRemember to save it once you choose it.\n\n'
            }
            warning_massage = ""
            # Check gbk(s)
            try:
                if self.gbk_filename == tuple():
                    raise Exception
                isgbk_dict = inputcheck.is_gbk(self.gbk_filename)
                wrong_files = ""
                for key, value in isgbk_dict.items():
                    if not value:
                        wrong_files = wrong_files + "\t· " + basename(
                            normpath(key)) + "\n"
                if wrong_files != "":
                    warning_massage = warning_massage + warning_dict.get(
                        "gbk")[1] + wrong_files + "\n"
            except:
                warning_massage = warning_massage + warning_dict.get("gbk")[0]
            # Check query
            try:
                isfasta = inputcheck.is_fasta(self.query_filename)
                if not isfasta:
                    warning_message = warning_message + warning_dict.get(
                        "query")[1]
            except:
                warning_message = warning_message + warning_dict.get(
                    "query")[0]
            # Check coverage
            try:
                existe = self.cov_var_get
            except:
                warning_massage = warning_massage + warning_dict.get("cov")
            # Check identity
            try:
                existe = self.id_var_get
            except:
                warning_massage = warning_massage + warning_dict.get("id")
            if warning_massage != "":
                tkinter.messagebox.showwarning(
                    "Warning!", warning_massage +
                    "If you need more information.\nPlease, click on help button."
                )
                return
            else:
                tkinter.messagebox.showinfo(
                    "Let´s go!",
                    "Your inputs have been validated.\nPress OK to start the process.\nIt may take a while, PLEASE BE PATIENT.\n\nRemember:\n-Save and close figures to allow the porgram continue.\n-The more files and queries you use, the longer it will take."
                )

        #==========================
        # RUN IF INPUT IS CORRECT
        #==========================
            try:
                gbk_multi_fasta = gbkparser.parse_gbk(
                    self.gbk_filename)  # Pass from gbk format to fasta
                cont = 1  # Loop to go through queries
                for seq_record in SeqIO.parse(self.query_filename, "fasta"):
                    query = "query.fasta"  # This file will contain one query at a time (the next in each iteration)
                    output_handle = open(query, "w")
                    if str(seq_record.id) != "":
                        output_handle.write(
                            ">" + str(seq_record.id) +
                            "\n")  # print identifier of the hit
                    else:
                        output_handle.write(
                            ">Query\n")  # print >Query in case header is empty
                    output_handle.write(str(seq_record.seq) +
                                        "\n")  # print sequence of the hit
                    output_handle.close()
                    hits_sseqid_list, dict_plot_blast = blaster.blast(
                        self.cov_var_get, self.id_var_get)  # Make blast
                    if type(hits_sseqid_list) is str:
                        tkinter.messagebox.showerror(
                            "Error",
                            "Opps! Something went wrong.\n" + hits_sseqid_list
                        )  # Show blast error to user in case the is one
                        os.remove(
                            "query.fasta"
                        )  # We remove files created so that they cannot gum up the future work of the user
                        os.remove("gbk_multi.fasta")
                        return
                    plotter.make_blast(dict_plot_blast)  # Plot Blast
                    hits_fasta = blaster.hits_to_fasta(
                        "query.fasta", gbk_multi_fasta, hits_sseqid_list
                    )  # Pass results of blast to fasta in order to do the tree and look for domains
                    dict_to_plot, max_seq_len, pattern_list = prosite.domain_parser(
                    )  # Search domains
                    muscle.muscle(hits_fasta)  # Make tree
                    if os.path.exists("tree.nwx"):
                        plotter.make_tree(self.gbk_list)  # Plot tree
                    else:  # Muscle only generates a tree file if there are at least 3 sequences; if there are fewer, we tell the user why we cannot show the tree.
                        tkinter.messagebox.showwarning(
                            "Warning",
                            "Zero hits detected.\nIt is not possible to make a tree of one sequence.\nPress OK to see the domains of the sequences."
                        )
                    plotter.make_domains(dict_to_plot, max_seq_len,
                                         pattern_list)  # Plot Domains

                    # Organize data and results files
                    if cont == 1:  # For the first query it's a little bit different: we create the results and data folders
                        time_var = organizer.results_folder(cont, 0)
                    else:
                        organizer.results_folder(cont, time_var)
                    cont = cont + 1
                os.remove(
                    "query.fasta"
                )  # We remove that provisional file, which now contains the last query.
                organizer.data_folder(self.query_list, self.gbk_list, time_var)
            except:  # Presumably this should not happen
                tkinter.messagebox.showerror(
                    "Error",
                    "Opps! Something went wrong.\nRevise your files and make sure you have installed blast and muscle.\nWe do not know exactly why the error ocurred.\nSo please, delete any intermediate file created in the package folder so that they cannot gum up your future work."
                )
示例#39
0
def run_command(cmd,
                dataset=None,
                inputs=None,
                outputs=None,
                expand=None,
                assume_ready=None,
                explicit=False,
                message=None,
                sidecar=None,
                dry_run=False,
                jobs=None,
                extra_info=None,
                rerun_info=None,
                extra_inputs=None,
                rerun_outputs=None,
                inject=False,
                parametric_record=False,
                remove_outputs=False,
                skip_dirtycheck=False,
                yield_expanded=None):
    """Run `cmd` in `dataset` and record the results.

    `Run.__call__` is a simple wrapper over this function. Aside from backward
    compatibility kludges, the only difference is that `Run.__call__` doesn't
    expose all the parameters of this function. The unexposed parameters are
    listed below.

    Parameters
    ----------
    extra_info : dict, optional
        Additional information to dump with the json run record. Any value
        given here will take precedence over the standard run key. Warning: To
        avoid collisions with future keys added by `run`, callers should try to
        use fairly specific key names and are encouraged to nest fields under a
        top-level "namespace" key (e.g., the project or extension name).
    rerun_info : dict, optional
        Record from a previous run. This is used internally by `rerun`.
    extra_inputs : list, optional
        Inputs to use in addition to those specified by `inputs`. Unlike
        `inputs`, these will not be injected into the {inputs} format field.
    rerun_outputs : list, optional
        Outputs, in addition to those in `outputs`, determined automatically
        from a previous run. This is used internally by `rerun`.
    inject : bool, optional
        Record results as if a command was run, skipping input and output
        preparation and command execution. In this mode, the caller is
        responsible for ensuring that the state of the working tree is
        appropriate for recording the command's results.
    parametric_record : bool, optional
        If enabled, substitution placeholders in the input/output specification
        are retained verbatim in the run record. This enables using a single
        run record for multiple different re-runs via individual
        parametrization.
    remove_outputs : bool, optional
        If enabled, all declared outputs will be removed prior to command
        execution, except for paths that are also declared inputs.
    skip_dirtycheck : bool, optional
        If enabled, a check for dataset modifications is unconditionally
        disabled, even if other parameters would indicate otherwise. This
        can be used by callers that already performed analogous verifications
        to avoid duplicate processing.
    yield_expanded : {'inputs', 'outputs', 'both'}, optional
        Include an 'expanded_%s' item in the run result with the expanded list
        of paths matching the inputs and/or outputs specification,
        respectively.


    Yields
    ------
    Result records for the run.
    """
    if not cmd:
        lgr.warning("No command given")
        return

    specs = {
        k: ensure_list(v)
        for k, v in (('inputs', inputs), ('extra_inputs', extra_inputs),
                     ('outputs', outputs))
    }

    rel_pwd = rerun_info.get('pwd') if rerun_info else None
    if rel_pwd and dataset:
        # recording is relative to the dataset
        pwd = op.normpath(op.join(dataset.path, rel_pwd))
        rel_pwd = op.relpath(pwd, dataset.path)
    else:
        pwd, rel_pwd = get_command_pwds(dataset)

    ds = require_dataset(dataset,
                         check_installed=True,
                         purpose='track command outcomes')
    ds_path = ds.path

    lgr.debug('tracking command output underneath %s', ds)

    # skip for callers that already take care of this
    if not (skip_dirtycheck or rerun_info or inject):
        # For explicit=True, we probably want to check whether any inputs have
        # modifications. However, we can't just do is_dirty(..., path=inputs)
        # because we need to consider subdatasets and untracked files.
        # MIH: is_dirty() is gone, but status() can do all of the above!
        if not explicit and ds.repo.dirty:
            yield get_status_dict(
                'run',
                ds=ds,
                status='impossible',
                message=(
                    'clean dataset required to detect changes from command; '
                    'use `datalad status` to inspect unsaved changes'))
            return

    # everything below expects the string-form of the command
    cmd = normalize_command(cmd)
    # pull substitutions from config
    cmd_fmt_kwargs = _get_substitutions(ds)
    # amend with unexpanded dependency/output specifications, which might
    # themselves contain substitution placeholder
    for n, val in specs.items():
        if val:
            cmd_fmt_kwargs[n] = val

    # apply the substitution to the IO specs
    expanded_specs = {
        k: _format_iospecs(v, **cmd_fmt_kwargs)
        for k, v in specs.items()
    }
    # try-except to catch expansion issues in _format_iospecs() which
    # expands placeholders in dependency/output specification before
    # globbing
    try:
        globbed = {
            k: GlobbedPaths(
                v,
                pwd=pwd,
                expand=expand in (
                    # extra_inputs follow same expansion rules as `inputs`.
                    ["both"] +
                    (['outputs'] if k == 'outputs' else ['inputs'])))
            for k, v in expanded_specs.items()
        }
    except KeyError as exc:
        yield get_status_dict(
            'run',
            ds=ds,
            status='impossible',
            message=('input/output specification has an unrecognized '
                     'placeholder: %s', exc))
        return

    if not (inject or dry_run):
        yield from _prep_worktree(ds_path,
                                  pwd,
                                  globbed,
                                  assume_ready=assume_ready,
                                  remove_outputs=remove_outputs,
                                  rerun_outputs=rerun_outputs,
                                  jobs=None)
    else:
        # If an inject=True caller wants to override the exit code, they can do
        # so in extra_info.
        cmd_exitcode = 0
        exc = None

    # prepare command formatting by extending the set of configurable
    # substitutions with the essential components
    cmd_fmt_kwargs.update(
        pwd=pwd,
        dspath=ds_path,
        # Check if the command contains "{tmpdir}" to avoid creating an
        # unnecessary temporary directory in most but not all cases.
        tmpdir=mkdtemp(prefix="datalad-run-") if "{tmpdir}" in cmd else "",
        # the following override any matching non-glob substitution
        # values
        inputs=globbed['inputs'],
        outputs=globbed['outputs'],
    )
    try:
        cmd_expanded = format_command(ds, cmd, **cmd_fmt_kwargs)
    except KeyError as exc:
        yield get_status_dict(
            'run',
            ds=ds,
            status='impossible',
            message=('command has an unrecognized placeholder: %s', exc))
        return

    # amend commit message with `run` info:
    # - pwd if inside the dataset
    # - the command itself
    # - exit code of the command
    run_info = {
        'cmd': cmd,
        # rerun does not handle any prop being None, hence all
        # the `or/else []`
        'chain': rerun_info["chain"] if rerun_info else [],
    }
    # for all following we need to make sure that the raw
    # specifications, incl. any placeholders make it into
    # the run-record to enable "parametric" re-runs
    # ...except when expansion was requested
    for k, v in specs.items():
        run_info[k] = globbed[k].paths \
            if expand in ["both"] + (
                ['outputs'] if k == 'outputs' else ['inputs']) \
            else (v if parametric_record
                  else expanded_specs[k]) or []

    if rel_pwd is not None:
        # only when inside the dataset to not leak information
        run_info['pwd'] = rel_pwd
    if ds.id:
        run_info["dsid"] = ds.id
    if extra_info:
        run_info.update(extra_info)

    if dry_run:
        yield get_status_dict(
            "run [dry-run]",
            ds=ds,
            status="ok",
            message="Dry run",
            run_info=run_info,
            dry_run_info=dict(
                cmd_expanded=cmd_expanded,
                pwd_full=pwd,
                **{k: globbed[k].expand()
                   for k in ('inputs', 'outputs')},
            ))
        return

    if not inject:
        cmd_exitcode, exc = _execute_command(cmd_expanded, pwd)
        run_info['exit'] = cmd_exitcode

    # Re-glob to capture any new outputs.
    #
    # TODO: If a warning or error is desired when an --output pattern doesn't
    # have a match, this would be the spot to do it.
    if explicit or expand in ["outputs", "both"]:
        # also for explicit mode we have to re-glob to be able to save all
        # matching outputs
        globbed['outputs'].expand(refresh=True)
        if expand in ["outputs", "both"]:
            run_info["outputs"] = globbed['outputs'].paths

    # create the run record, either as a string, or written to a file
    # depending on the config/request
    record, record_path = _create_record(run_info, sidecar, ds)

    # abbreviate version of the command for illustrative purposes
    cmd_shorty = _format_cmd_shorty(cmd_expanded)

    # compose commit message
    msg = u"""\
[DATALAD RUNCMD] {}

=== Do not change lines below ===
{}
^^^ Do not change lines above ^^^
"""
    msg = msg.format(message if message is not None else cmd_shorty,
                     '"{}"'.format(record) if record_path else record)

    outputs_to_save = globbed['outputs'].expand_strict() if explicit else None
    if outputs_to_save is not None and record_path:
        outputs_to_save.append(record_path)
    do_save = outputs_to_save is None or outputs_to_save
    msg_path = None
    if not rerun_info and cmd_exitcode:
        if do_save:
            repo = ds.repo
            # must record path to be relative to ds.path to meet
            # result record semantics (think symlink resolution, etc)
            msg_path = ds.pathobj / \
                repo.dot_git.relative_to(repo.pathobj) / "COMMIT_EDITMSG"
            msg_path.write_text(msg)

    expected_exit = rerun_info.get("exit", 0) if rerun_info else None
    if cmd_exitcode and expected_exit != cmd_exitcode:
        status = "error"
    else:
        status = "ok"

    run_result = get_status_dict(
        "run",
        ds=ds,
        status=status,
        # use the abbrev. command as the message to give immediate clarity what
        # completed/errors in the generic result rendering
        message=cmd_shorty,
        run_info=run_info,
        # use the same key that `get_status_dict()` would/will use
        # to record the exit code in case of an exception
        exit_code=cmd_exitcode,
        exception=exc,
        # Provide msg_path and explicit outputs so that, under
        # on_failure='stop', callers can react to a failure and then call
        # save().
        msg_path=str(msg_path) if msg_path else None,
    )
    if record_path:
        # when the record is in a sidecar file, report its ID
        run_result['record_id'] = record
    for s in ('inputs', 'outputs'):
        # this enables callers to further inspect the outputs without
        # performing globbing again. Together with remove_outputs=True
        # these would be guaranteed to be the outcome of the executed
        # command. in contrast to `outputs_to_save` this does not
        # include aux files, such as the run record sidecar file.
        # calling .expand_strict() again is largely reporting cached
        # information
        # (format: relative paths)
        if yield_expanded in (s, 'both'):
            run_result[f'expanded_{s}'] = globbed[s].expand_strict()
    yield run_result

    if do_save:
        with chpwd(pwd):
            for r in Save.__call__(
                    dataset=ds_path,
                    path=outputs_to_save,
                    recursive=True,
                    message=msg,
                    jobs=jobs,
                    return_type='generator',
                    # we want this command and its parameterization to be in full
                    # control about the rendering of results, hence we must turn
                    # off internal rendering
                    result_renderer='disabled',
                    on_failure='ignore'):
                yield r
示例#40
0
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True


"""
MEDIA CONFIGURATION
"""

# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'build', 'media'))

# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'

"""
STATIC FILE CONFIGURATION
"""

# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'build', 'static'))

# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'

# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
示例#41
0
    def __call__(urls,
                 dataset=None,
                 path=None,
                 overwrite=False,
                 archive=False,
                 save=True,
                 message=None):
        from ..downloaders.providers import Providers

        pwd, rel_pwd = get_dataset_pwds(dataset)

        ds = None
        if save or dataset:
            try:
                ds = require_dataset(dataset,
                                     check_installed=True,
                                     purpose='downloading urls')
            except NoDatasetArgumentFound:
                pass

        common_report = {"action": "download_url", "ds": ds}

        urls = assure_list_from_str(urls)

        if len(urls) > 1 and path and not op.isdir(path):
            yield get_status_dict(
                status="error",
                message=(
                    "When specifying multiple urls, --path should point to "
                    "an existing directory. Got %r", path),
                type="file",
                path=path,
                **common_report)
            return

        if dataset:  # A dataset was explicitly given.
            path = op.normpath(op.join(ds.path, path or op.curdir))
        elif save and ds:
            path = op.normpath(op.join(ds.path, rel_pwd, path or op.curdir))
        elif not path:
            path = op.curdir

        # TODO setup fancy ui.progressbars doing this in parallel and reporting overall progress
        # in % of urls which were already downloaded
        providers = Providers.from_config_files()
        downloaded_paths = []
        path_urls = {}
        for url in urls:
            # somewhat "ugly"
            # providers.get_provider(url).get_downloader(url).download(url, path=path)
            # for now -- via sugaring
            try:
                downloaded_path = providers.download(url,
                                                     path=path,
                                                     overwrite=overwrite)
            except Exception as e:
                yield get_status_dict(status="error",
                                      message=exc_str(e),
                                      type="file",
                                      path=path,
                                      **common_report)
            else:
                downloaded_paths.append(downloaded_path)
                path_urls[downloaded_path] = url
                yield get_status_dict(status="ok",
                                      type="file",
                                      path=downloaded_path,
                                      **common_report)

        if downloaded_paths and save and ds is not None:
            msg = message or """\
[DATALAD] Download URLs

URLs:
  {}""".format("\n  ".join(urls))

            for r in ds.add(downloaded_paths, message=msg):
                yield r

            if isinstance(ds.repo, AnnexRepo):
                annex_paths = [
                    p for p, annexed in zip(
                        downloaded_paths,
                        ds.repo.is_under_annex(downloaded_paths)) if annexed
                ]
                if annex_paths:
                    for path in annex_paths:
                        try:
                            # The file is already present. This is just to
                            # register the URL.
                            ds.repo.add_url_to_file(path,
                                                    path_urls[path],
                                                    batch=True)
                        except AnnexBatchCommandError as exc:
                            lgr.warning("Registering %s with %s failed: %s",
                                        path, path_urls[path], exc_str(exc))

                    if archive:
                        from datalad.api import add_archive_content
                        for path in annex_paths:
                            add_archive_content(path,
                                                annex=ds.repo,
                                                delete=True)
 def __init__(self):
     self.mapdir = path.normpath(path.join(path.dirname(__file__), '../maps'))
     self.served_maps = {}
     self.serve_map_service = rospy.Service('~serve_map', RequestMap, self.serve_map)
     self.end_serve_map_service = rospy.Service('~end_serve_map', RequestMap, self.end_serve_map)
     self.save_map_service = rospy.Service('~save_map', RequestMap, self.save_map)
示例#43
0
#!/usr/bin/env python
#usage: ./gdb-lasercake [lasercake arguments...]
#or: ./gdb-lasercake path/to/lasercake/binary [lasercake arguments...]
#
#If the first argument exists as a file, it's treated as the Lasercake
#binary; otherwise "Lasercake" (in this script's directory) is used.

import sys, subprocess
from pipes import quote
from os.path import dirname, join, exists, normpath
scriptdir = normpath(dirname(join('.', __file__)))
lasercake = join(scriptdir, 'Lasercake')  # fallback if neither candidate below exists
for f in ['Lasercake.exe', 'Lasercake']:
	g = join(scriptdir, f)
	if exists(g):
		lasercake = g
args = sys.argv[1:]
if len(args) > 0 and exists(args[0]):
	lasercake = args[0]
	args = args[1:]
gdb_arg_file = join(scriptdir, 'build/gdbarg')
with open(gdb_arg_file, 'w') as f:
	f.write('set args %s\nrun\n' % ' '.join(map(quote, args)))
gdb_args = [lasercake, '-x', gdb_arg_file]
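# Python 2 lacks FileNotFoundError, so it is aliased to OSError below; if plain
# 'gdb' cannot be launched, the script retries with 'ggdb' (a name GNU gdb is
# sometimes installed under, e.g. by MacPorts).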
try: FileNotFoundError
except NameError: FileNotFoundError = OSError
try:
	exit(subprocess.call(['gdb'] + gdb_args))
except FileNotFoundError:
	exit(subprocess.call(['ggdb'] + gdb_args))

示例#44
0
def main():
    parser = ArgumentParser(description="Creates a new ProPresenter playlist.")
    parser.add_argument(
        "title",
        type=str,
        help="The title/path of the playlist. Use '\\n' to specify sub-folders."
    )
    parser.add_argument("items",
                        type=str,
                        nargs='+',
                        help="Paths or names of items to add to the playlist.")
    parser.add_argument(
        "--library",
        type=str,
        help="The name or path to the library where items should be found.")
    parser.add_argument(
        "--parent",
        type=str,
        nargs='*',
        help="The tree path to where the playlist should be created.")
    parser.add_argument("--create-folders",
                        action='store_true',
                        help="If parent playlist folders should be created.")
    parser.add_argument("--overwrite",
                        action='store_true',
                        help="If the target playlist should be overwritten.")
    parser.add_argument(
        "--document",
        type=str,
        help="The name or path of the playlist document to add the new list to."
    )
    args = parser.parse_args()

    library = None
    document = None

    # Set the document library source. Default is the active system library.
    if args.library:
        if not pro6_install:
            print(
                "ERROR: A ProPresenter 6 installation could not be found on this system."
            )
            exit(1)

        title = {k.lower(): k
                 for k in pro6_install.libraries.keys()
                 }.get(args.library.lower())
        if not title:
            print("ERROR: The library '%s' could not be found." % args.library)
            exit(1)

        library = DocumentLibrary(pro6_install.libraries.get(title), title)
        if not args.document:
            document = PlaylistDocument.load(
                path.join(pro6_install.playlist_path, title + ".pro6pl"))
    elif pro6_install:
        library = DocumentLibrary.active
        if not args.document:
            document = PlaylistDocument.active

    if library:
        print("Using library:", library.path)

    # Set the destination document file for the playlist. This defaults to the document associated with the library.
    #   If no library is specified or an install is not found, this parameter is required.
    if args.document:
        document = PlaylistDocument.load(
            path.normpath(path.expanduser(path.expandvars(args.document))))
    elif not document:
        print(
            "ERROR: A ProPresenter 6 installation could not be found on this system and no playlist document "
            "was specified. Use --document='path/to/document.pro6pl' to target one."
        )
        exit(1)

    parent = document.root
    if args.parent:
        # Navigate to where the new playlist should be made.
        for item in args.parent:
            result = parent.find(item)
            if result:
                parent = result
            elif args.create_folders:
                node = PlaylistNode(item, NODE_FOLDER)
                parent.children.append(node)
                print("Created playlist node:", item)
                parent = node
            else:
                print(
                    "ERROR: The specified parent playlist could not be found. "
                    "Set the --create-folders option to create it.")
                exit(1)

    # Check if the playlist already exists.
    playlist = parent.find(args.title)
    if playlist:
        if args.overwrite:
            parent.children.remove(playlist)
            print("Removed existing playlist:", playlist.name)
        else:
            print(
                "ERROR: A playlist with the name '%s' already exists. Use --overwrite to replace it."
                % playlist.name)
            exit(1)

    # Create the new playlist and add it.
    playlist = PlaylistNode(args.title)
    parent.children.append(playlist)

    # Add items to the playlist
    for item in args.items:
        item_path = path.normpath(path.expanduser(path.expandvars(item)))
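        # A bare name (no directory component, no extension) that is not an
        # existing file is treated as a library search term; anything else is
        # added directly as a file path.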
        if not path.isfile(item_path) and path.basename(
                path.splitext(item_path)[0]) == item_path:
            # It's not a file so check the library.
            if library:
                print("Searching library for '%s' ..." % item_path)
                results = library.search(item_path)
                if len(results) > 0:
                    for meta in results:
                        playlist.add_path(meta.path)
                        print("Added library document:", meta.name)
                else:
                    print("ERROR: No results found for '%s' in the library." %
                          item)
                    exit(1)
            else:
                print(
                    "ERROR: No library is available to search for documents.")
                exit(1)
        else:
            playlist.add_path(item_path)
            print("Added file:", item)

    document.write()
    print("Playlist saved to document:", path.basename(document.path))
示例#45
0
        'python-coveralls>=2.5.0',
    ],

    # Test harness:
    cmdclass = {
        'test': TestCommand,
    },

    # Metadata for PyPI:
    author = 'Randall Degges',
    author_email = '*****@*****.**',
    license = 'UNLICENSE',
    url = 'https://github.com/rdegges/python-ipify',
    keywords = 'python api client ipify ip address public ipv4 ipv6 service',
    description = 'The official client library for ipify: A Simple IP Address API.',
    long_description = open(normpath(join(dirname(abspath(__file__)), 'README.rst'))).read(),
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: Public Domain',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
 def __init__(self, opts):
     super().__init__()
     self.options = opts
     self.list_of_contents = []
     self.tempdir = (
         "./integrations_data"
     )
     self.data_dir = "{0}{1}{2}".format(
         abspath(normpath(self.options.source)),
         sep,
         "data" + sep,
     )
     self.content_dir = "{0}{1}{2}".format(
         abspath(normpath(self.options.source)),
         sep,
         "content" + sep + "en" + sep,
     )
     self.data_integrations_dir = (
         join(self.data_dir, "integrations") + sep
     )
     self.data_service_checks_dir = (
         join(self.data_dir, "service_checks") + sep
     )
     self.content_integrations_dir = (
         join(self.content_dir, "integrations") + sep
     )
     self.extract_dir = "{0}".format(
         join(self.tempdir, "extracted") + sep
     )
     self.integration_datafile = "{0}{1}{2}".format(
         abspath(normpath(self.options.source)),
         sep,
         "integrations.json",
     )
     self.regex_h1 = re.compile(
         r"^#{1}(?!#)(.*)", re.MULTILINE
     )
     self.regex_h1_replace = re.compile(
         r"^(#{1})(?!#)(.*)", re.MULTILINE
     )
     self.regex_metrics = re.compile(
         r"(#{3} Metrics\n)([\s\S]*this integration.|[\s\S]*this check.)([\s\S]*)(#{3} Events\n)",
         re.DOTALL,
     )
     self.regex_service_check = re.compile(
         r"(#{3} Service Checks\n)([\s\S]*does not include any service checks at this time.)([\s\S]*)(#{2} Troubleshooting\n)",
         re.DOTALL,
     )
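     # regex_fm splits a page into its '---'-delimited front matter (group 1)
     # and the remaining body (group 2).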
     self.regex_fm = re.compile(
         r"(?:-{3})(.*?)(?:-{3})(.*)", re.DOTALL
     )
     self.regex_source = re.compile(
         r"(\S*FROM_DISPLAY_NAME\s*=\s*\{)(.*?)\}",
         re.DOTALL,
     )
     self.datafile_json = []
     self.pool_size = 5
     self.integration_mutations = OrderedDict(
         {
             "hdfs": {
                 "action": "create",
                 "target": "hdfs",
                 "remove_header": False,
                 "fm": {
                     "is_public": True,
                     "kind": "integration",
                     "integration_title": "Hdfs",
                     "short_description": "Track cluster disk usage, volume failures, dead DataNodes, and more.",
                 },
             },
             "mesos": {
                 "action": "create",
                 "target": "mesos",
                 "remove_header": False,
                 "fm": {
                     "aliases": [
                         "/integrations/mesos_master/",
                         "/integrations/mesos_slave/",
                     ],
                     "is_public": True,
                     "kind": "integration",
                     "integration_title": "Mesos",
                     "short_description": "Track cluster resource usage, master and slave counts, tasks statuses, and more.",
                 },
             },
             "activemq_xml": {
                 "action": "merge",
                 "target": "activemq",
                 "remove_header": False,
             },
             "cassandra_nodetool": {
                 "action": "merge",
                 "target": "cassandra",
                 "remove_header": False,
             },
             "gitlab_runner": {
                 "action": "merge",
                 "target": "gitlab",
                 "remove_header": False,
             },
             "hdfs_datanode": {
                 "action": "merge",
                 "target": "hdfs",
                 "remove_header": False,
             },
             "hdfs_namenode": {
                 "action": "merge",
                 "target": "hdfs",
                 "remove_header": False,
             },
             "mesos_master": {
                 "action": "merge",
                 "target": "mesos",
                 "remove_header": True,
             },
             "mesos_slave": {
                 "action": "merge",
                 "target": "mesos",
                 "remove_header": False,
             },
             "kafka_consumer": {
                 "action": "merge",
                 "target": "kafka",
                 "remove_header": False,
             },
             "kube_dns": {
                 "action": "discard",
                 "target": "none",
                 "remove_header": False,
             },
             "kube_proxy": {
                 "action": "discard",
                 "target": "none",
                 "remove_header": False,
             },
             "kubernetes_state": {
                 "action": "discard",
                 "target": "none",
                 "remove_header": False,
             },
             "system_core": {
                 "action": "discard",
                 "target": "system",
                 "remove_header": False,
             },
             "system_swap": {
                 "action": "discard",
                 "target": "system",
                 "remove_header": False,
             },
             "hbase_regionserver": {
                 "action": "merge",
                 "target": "hbase_master",
                 "remove_header": False,
             },
         }
     )
     self.initial_integration_files = glob.glob(
         "{}*.md".format(self.content_integrations_dir)
     )
     makedirs(self.data_integrations_dir, exist_ok=True)
     makedirs(
         self.data_service_checks_dir, exist_ok=True
     )
     makedirs(
         self.content_integrations_dir, exist_ok=True
     )
SITE_ID = 1

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = False

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = False

# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION


########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))

# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'

# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
    normpath(join(SITE_ROOT, 'static')),
)

# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
示例#48
0
def absify(path, dir):
    """Convert a path to an absolute path."""
    return normpath(join(dir, path))
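# For illustration (on a POSIX system): absify('conf/app.ini', '/srv/project')
# returns '/srv/project/conf/app.ini', while an already-absolute path such as
# absify('/etc/hosts', '/srv/project') stays '/etc/hosts'.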
示例#49
0
def run_command(cmd,
                dataset=None,
                inputs=None,
                outputs=None,
                expand=None,
                explicit=False,
                message=None,
                sidecar=None,
                rerun_info=None,
                rerun_outputs=None):
    rel_pwd = rerun_info.get('pwd') if rerun_info else None
    if rel_pwd and dataset:
        # recording is relative to the dataset
        pwd = normpath(opj(dataset.path, rel_pwd))
        rel_pwd = relpath(pwd, dataset.path)
    else:
        pwd, rel_pwd = get_command_pwds(dataset)

    ds = require_dataset(dataset,
                         check_installed=True,
                         purpose='tracking outcomes of a command')

    # not needed ATM
    #refds_path = ds.path

    # delayed imports
    from datalad.cmd import Runner

    lgr.debug('tracking command output underneath %s', ds)

    if not rerun_info:  # Rerun already takes care of this.
        # For explicit=True, we probably want to check whether any inputs have
        # modifications. However, we can't just do is_dirty(..., path=inputs)
        # because we need to consider subdatasets and untracked files.
        if not explicit and ds.repo.dirty:
            yield get_status_dict('run',
                                  ds=ds,
                                  status='impossible',
                                  message=('unsaved modifications present, '
                                           'cannot detect changes by command'))
            return

    cmd = normalize_command(cmd)

    inputs = GlobbedPaths(inputs, pwd=pwd, expand=expand in ["inputs", "both"])
    if inputs:
        for res in ds.get(inputs.expand(full=True), on_failure="ignore"):
            if res.get("state") == "absent":
                lgr.warning("Input does not exist: %s", res["path"])
            else:
                yield res

    outputs = GlobbedPaths(outputs,
                           pwd=pwd,
                           expand=expand in ["outputs", "both"])
    if outputs:
        for res in _unlock_or_remove(ds, outputs.expand(full=True)):
            yield res

    if rerun_outputs is not None:
        # These are files we need to unlock/remove for a rerun that aren't
        # included in the explicit outputs. Unlike inputs/outputs, these are
        # full paths, so we can pass them directly to unlock.
        for res in _unlock_or_remove(ds, rerun_outputs):
            yield res

    sub_namespace = {
        k.replace("datalad.run.substitutions.", ""): v
        for k, v in ds.config.items("datalad.run.substitutions")
    }
    try:
        cmd_expanded = format_command(cmd,
                                      pwd=pwd,
                                      dspath=ds.path,
                                      inputs=inputs,
                                      outputs=outputs,
                                      **sub_namespace)
    except KeyError as exc:
        yield get_status_dict(
            'run',
            ds=ds,
            status='impossible',
            message=('command has an unrecognized placeholder: %s', exc))
        return

    # we have a clean dataset, let's run things
    exc = None
    cmd_exitcode = None
    runner = Runner(cwd=pwd)
    try:
        lgr.info("== Command start (output follows) =====")
        runner.run(
            cmd_expanded,
            # immediate output
            log_online=True,
            # not yet sure what we should do with the command output
            # IMHO `run` itself should be very silent and let the command talk
            log_stdout=False,
            log_stderr=False,
            expect_stderr=True,
            expect_fail=True,
            # TODO stdin
        )
    except CommandError as e:
        # strip our own info from the exception. The original command output
        # went to stdout/err -- we just have to pass the exit code along in the same way
        exc = e
        cmd_exitcode = e.code

        if rerun_info and rerun_info.get("exit", 0) != cmd_exitcode:
            # we failed in a different way during a rerun.  This can easily
            # happen if we try to alter a locked file
            #
            # TODO add the ability to `git reset --hard` the dataset tree on failure
            # we know that we started clean, so we could easily go back, needs gh-1424
            # to be able to do it recursively
            raise exc

    lgr.info("== Command exit (modification check follows) =====")

    # amend commit message with `run` info:
    # - pwd if inside the dataset
    # - the command itself
    # - exit code of the command
    run_info = {
        'cmd': cmd,
        'exit': cmd_exitcode if cmd_exitcode is not None else 0,
        'chain': rerun_info["chain"] if rerun_info else [],
        'inputs': inputs.paths,
        'outputs': outputs.paths,
    }
    if rel_pwd is not None:
        # only when inside the dataset to not leak information
        run_info['pwd'] = rel_pwd
    if ds.id:
        run_info["dsid"] = ds.id

    record = json.dumps(run_info, indent=1, sort_keys=True, ensure_ascii=False)

    use_sidecar = sidecar or (sidecar is None and ds.config.get(
        'datalad.run.record-sidecar', default=False))

    if use_sidecar:
        # record ID is hash of record itself
        from hashlib import md5
        record_id = md5(record.encode('utf-8')).hexdigest()
        record_dir = ds.config.get('datalad.run.record-directory',
                                   default=op.join('.datalad', 'runinfo'))
        record_path = op.join(ds.path, record_dir, record_id)
        if not op.lexists(record_path):
            # go for compression, even for minimal records not much difference, despite offset cost
            # wrap in list -- there is just one record
            dump2stream([run_info], record_path, compressed=True)

    # compose commit message
    msg = u"""\
[DATALAD RUNCMD] {}

=== Do not change lines below ===
{}
^^^ Do not change lines above ^^^
"""
    msg = msg.format(
        message if message is not None else _format_cmd_shorty(cmd),
        '"{}"'.format(record_id) if use_sidecar else record)
    msg = assure_bytes(msg)

    if not rerun_info and cmd_exitcode:
        msg_path = opj(relpath(ds.repo.repo.git_dir), "COMMIT_EDITMSG")
        with open(msg_path, "wb") as ofh:
            ofh.write(msg)
        lgr.info(
            "The command had a non-zero exit code. "
            "If this is expected, you can save the changes with "
            "'datalad save -r -F%s .'", msg_path)
        raise exc
    else:
        outputs_to_save = outputs.expand(full=True) if explicit else '.'
        if outputs_to_save:
            for r in ds.add(outputs_to_save, recursive=True, message=msg):
                yield r
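
For reference, the sidecar record id used above is just the MD5 hex digest of the JSON-serialized run record, so identical records always map to the same sidecar file. A minimal standalone sketch of that step (the run_info values are hypothetical):

import json
from hashlib import md5

run_info = {            # hypothetical record, mirroring the keys assembled above
    'cmd': 'python analyze.py',
    'exit': 0,
    'chain': [],
    'inputs': ['data/raw.csv'],
    'outputs': ['results/summary.csv'],
}
record = json.dumps(run_info, indent=1, sort_keys=True, ensure_ascii=False)
record_id = md5(record.encode('utf-8')).hexdigest()
print(record_id)  # deterministic: the same record always yields the same id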
Example #50
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--root',
                        required=True,
                        help="Root path of Turbulenz Engine")
    parser.add_argument('--assets-path',
                        required=True,
                        action='append',
                        help="Path to root of source assets")
    parser.add_argument('--build-path',
                        default=path_join('_build', 'assets'),
                        help="Path for intermediate build files")
    parser.add_argument('--install-path',
                        default='staticmax',
                        help="Path to install output assets into")
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument(
        '--imagemagick-convert',
        help="Path to ImageMagick convert executable (enables TGA support)")
    try:
        default_num_threads = multiprocessing.cpu_count()
    except NotImplementedError:
        default_num_threads = 1
    parser.add_argument('-j',
                        '--num-threads',
                        help="Specify how many threads to use for building",
                        default=default_num_threads,
                        type=int)

    args = parser.parse_args(argv[1:])

    assets_paths = [normpath(p) for p in args.assets_path]
    base_build_path = normpath(args.build_path)
    create_dir(base_build_path)
    create_dir(args.install_path)

    tools = Tools(args, base_build_path)

    with open('deps.yaml', 'r') as f:
        asset_build_info = load_yaml(f.read())
        if asset_build_info:
            asset_build_info = [
                AssetInfo(asset_info) for asset_info in asset_build_info
            ]
        else:
            asset_build_info = []

    try:
        with open(path_join(base_build_path, 'sourcehashes.json'), 'r') as f:
            source_list = SourceList(load_json(f.read()), assets_paths)
    except IOError:
        if args.verbose:
            print 'No source hash file'
        source_list = SourceList({}, assets_paths)

    # Ensure all sources are in the source list so that the threads aren't writing to the list
    for a in asset_build_info:
        source_list.get_source(a.path)

    class AssetBuildThread(Thread):
        def __init__(self, asset_list, asset_list_mutex):
            Thread.__init__(self)
            self.asset_list = asset_list
            self.mutex = asset_list_mutex
            self.assets_rebuilt = 0
            self.exit = False
            self.error = None

        def run(self):
            while True:
                if self.exit:
                    return 0
                self.mutex.acquire(True)
                try:
                    # Try and pull the head off the list and if all its dependencies are already built then
                    # build it. This could iterate down the remaining list in case the head isn't buildable but
                    # things later in the list are
                    asset_info = self.asset_list[0]
                    deps = [
                        source_list.get_source(path)
                        for path in asset_info.deps if path != asset_info.path
                    ]
                    if any([not d.built for d in deps]):
                        self.mutex.release()
                        sleep(0.01)
                        continue
                    self.asset_list.pop(0)
                    self.mutex.release()
                except IndexError:
                    self.mutex.release()
                    return 0
                try:
                    rebuild = build_asset(asset_info, source_list, tools,
                                          base_build_path, args.verbose)
                except CalledProcessError as e:
                    self.error = '%s - Tool failed - %s' % (asset_info.path,
                                                            str(e))
                    return 1
                except IOError as e:
                    self.error = str(e)
                    return 1

                if rebuild:
                    self.assets_rebuilt += 1

    num_threads = args.num_threads

    # Sort assets by dependencies
    assets_to_build = []
    while len(assets_to_build) != len(asset_build_info):
        num_assets_sorted = len(assets_to_build)
        for asset in asset_build_info:
            if asset in assets_to_build:
                continue
            for dep in asset.deps:
                if dep != asset.path and dep not in [
                        a.path for a in assets_to_build
                ]:
                    break
            else:
                assets_to_build.append(asset)
        if num_assets_sorted == len(assets_to_build):
            assets_left = [
                a for a in asset_build_info if a not in assets_to_build
            ]
            error('Detected cyclic dependencies between assets within - \n%s' %
                  '\n'.join([a.path for a in assets_left]))
            return 1

    # Create and start threads to build the assets in the sorted dependency list
    asset_threads = []
    asset_list_mutex = Lock()
    for t in xrange(num_threads):
        asset_threads.append(
            AssetBuildThread(assets_to_build, asset_list_mutex))

    for t in xrange(num_threads):
        asset_threads[t].start()

    while any(a.isAlive() for a in asset_threads):
        for t in xrange(num_threads):
            asset_threads[t].join(0.1)
            if not asset_threads[t].isAlive() and asset_threads[t].error:
                # One thread hit an error; ask all the others to finish asap
                for o in xrange(num_threads):
                    asset_threads[o].exit = True

    # Update the stats on number of assets rebuilt
    assets_rebuilt = 0
    for t in xrange(num_threads):
        assets_rebuilt += asset_threads[t].assets_rebuilt

    # Dump the state of the build for partial rebuilds
    with open(path_join(base_build_path, 'sourcehashes.json'), 'w') as f:
        f.write(dump_json(source_list.get_hashes()))

    # Check if any build threads failed and if so exit with an error
    for t in xrange(num_threads):
        if asset_threads[t].error:
            error(asset_threads[t].error)
            return 1

    # Dump the mapping table for the built assets
    print 'Installing assets and building mapping table...'
    mapping = install(asset_build_info, args.install_path)
    with open('mapping_table.json', 'w') as f:
        f.write(dump_json({'urnmapping': mapping}))

    # Cleanup any built files no longer referenced by the new mapping table
    remove_old_build_files(asset_build_info, base_build_path)

    print '%d assets rebuilt' % assets_rebuilt
    print 'Assets build complete'
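
The dependency ordering in main() above is a simple repeated-sweep topological sort: keep appending assets whose dependencies are already placed, and report a cycle when a full sweep makes no progress. A minimal sketch of the same idea, using hypothetical asset records:

def order_by_deps(assets):
    # assets: dict mapping an asset path to the list of paths it depends on
    ordered = []
    while len(ordered) != len(assets):
        progressed = len(ordered)
        for path, deps in assets.items():
            if path in ordered:
                continue
            if all(d == path or d in ordered for d in deps):
                ordered.append(path)
        if progressed == len(ordered):
            raise ValueError('cyclic dependencies among: %s'
                             % [p for p in assets if p not in ordered])
    return ordered

# hypothetical assets: the texture must be built before the mesh, and so on
print(order_by_deps({
    'tex.dds': [],
    'model.mesh': ['tex.dds'],
    'scene.json': ['model.mesh', 'tex.dds'],
}))  # -> ['tex.dds', 'model.mesh', 'scene.json']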
Example #51
0
import hashlib
import logging
import os
import os.path as osp
import sys

cur_dir = osp.dirname(osp.abspath(__file__))
PROJ_ROOT = osp.normpath(osp.join(cur_dir, "../../.."))
sys.path.insert(0, PROJ_ROOT)
import time
from collections import OrderedDict
import mmcv
import numpy as np
from tqdm import tqdm
from transforms3d.quaternions import mat2quat, quat2mat
import random
import ref
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
from lib.pysixd import inout, misc
from lib.utils.mask_utils import binary_mask_to_rle, cocosegm2mask, mask2bbox_xywh
from lib.utils.utils import dprint, iprint, lazy_property


logger = logging.getLogger(__name__)
DATASETS_ROOT = osp.normpath(osp.join(PROJ_ROOT, "datasets"))


class LM_BLENDER_Dataset(object):
    """lm blender data, from pvnet-rendering."""
Example #52
0
if "CODEINTEL_NO_PYXPCOM" in os.environ:
    _xpcom_ = False
else:
    try:
        from xpcom import components
        from xpcom.server import UnwrapObject
        _xpcom_ = True
    except ImportError:
        _xpcom_ = False

#XXX Should only do this hack for non-Komodo local codeintel usage.
#XXX We need to have a better mechanism for rationalizing and sharing
#    common lexer style classes. For now we'll just HACKily grab from
#    Komodo's styles.py. Some of this is duplicating logic in
#    KoLanguageServiceBase.py.
_ko_src_dir = normpath(join(dirname(__file__), *([os.pardir] * 3)))
sys.path.insert(0, join(_ko_src_dir, "schemes"))
try:
    import styles
finally:
    del sys.path[0]
    del _ko_src_dir

#---- general codeintel pragmas

# Allow the CILEs to generate type guesses based on type names (e.g.
# "event" is an Event in JS).
ENABLE_HEURISTICS = True

#---- warnings
Example #53
0
def read_config(program, options, **kwargs):
    """
    Read settings for 'program' from configuration file specified by
    'options["config"]', with missing values provided by 'defaults'.
    """
    settings = Settings()
    settings.update(defaults)

    # Initialize default values if not set yet.
    for name, value in kwargs.items():
        settings.setdefault(name, value)

    graphite_root = kwargs.get("ROOT_DIR")
    if graphite_root is None:
        graphite_root = os.environ.get('GRAPHITE_ROOT')
    if graphite_root is None:
        raise CarbonConfigException("Either ROOT_DIR or GRAPHITE_ROOT "
                                    "needs to be provided.")

    # Default config directory to root-relative, unless overridden by the
    # 'GRAPHITE_CONF_DIR' environment variable.
    settings.setdefault(
        "CONF_DIR",
        os.environ.get("GRAPHITE_CONF_DIR", join(graphite_root, "conf")))
    if options["config"] is None:
        options["config"] = join(settings["CONF_DIR"], "carbon.conf")
    else:
        # Set 'CONF_DIR' to the parent directory of the 'carbon.conf' config
        # file.
        settings["CONF_DIR"] = dirname(normpath(options["config"]))

    # Storage directory can be overridden by the 'GRAPHITE_STORAGE_DIR'
    # environment variable. It defaults to a path relative to GRAPHITE_ROOT
    # for backwards compatibility though.
    settings.setdefault(
        "STORAGE_DIR",
        os.environ.get("GRAPHITE_STORAGE_DIR", join(graphite_root, "storage")))

    def update_STORAGE_DIR_deps():
        # By default, everything is written to subdirectories of the storage dir.
        settings.setdefault("PID_DIR", settings["STORAGE_DIR"])
        settings.setdefault("LOG_DIR",
                            join(settings["STORAGE_DIR"], "log", program))
        settings.setdefault("LOCAL_DATA_DIR",
                            join(settings["STORAGE_DIR"], "whisper"))
        settings.setdefault("WHITELISTS_DIR",
                            join(settings["STORAGE_DIR"], "lists"))

    # Read configuration options from program-specific section.
    section = program[len("carbon-"):]
    config = options["config"]

    if not exists(config):
        raise CarbonConfigException("Error: missing required config %r" %
                                    config)

    settings.readFrom(config, section)
    settings.setdefault("instance", options["instance"])
    update_STORAGE_DIR_deps()

    # If a specific instance of the program is specified, augment the settings
    # with the instance-specific settings and provide sane defaults for
    # optional settings.
    if options["instance"]:
        settings.readFrom(config, "%s:%s" % (section, options["instance"]))
        settings["pidfile"] = (options["pidfile"] or join(
            settings["PID_DIR"], "%s-%s.pid" % (program, options["instance"])))
        settings["LOG_DIR"] = (options["logdir"] or join(
            settings["LOG_DIR"], "%s-%s" % (program, options["instance"])))
    else:
        settings["pidfile"] = (options["pidfile"] or join(
            settings["PID_DIR"], '%s.pid' % program))
        settings["LOG_DIR"] = (options["logdir"] or settings["LOG_DIR"])

    update_STORAGE_DIR_deps()
    return settings
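
A hedged usage sketch for read_config: the program name must start with 'carbon-' (the remainder selects the config section), and the options mapping has to supply at least the 'config', 'instance', 'pidfile', and 'logdir' keys read above. The paths below are hypothetical, and the call assumes a carbon.conf actually exists under the resolved CONF_DIR:

options = {
    "config": None,     # fall back to <CONF_DIR>/carbon.conf
    "instance": None,
    "pidfile": None,
    "logdir": None,
}
settings = read_config("carbon-cache", options, ROOT_DIR="/opt/graphite")
print(settings["LOCAL_DATA_DIR"])  # e.g. /opt/graphite/storage/whisper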
Example #54
0
#  - CORE_PYTHON3_STATUS   : ycm_core is compiled with Python 3 but loaded with
#    Python 2;
#  - CORE_OUTDATED_STATUS  : ycm_core version is outdated.
# Values 1 and 2 are not used because 1 is for general errors and 2 has often a
# special meaning for Unix programs. See
# https://docs.python.org/2/library/sys.html#sys.exit
CORE_COMPATIBLE_STATUS = 0
CORE_UNEXPECTED_STATUS = 3
CORE_MISSING_STATUS = 4
CORE_PYTHON2_STATUS = 5
CORE_PYTHON3_STATUS = 6
CORE_OUTDATED_STATUS = 7

VERSION_FILENAME = 'CORE_VERSION'

ROOT_DIR = p.normpath(p.join(p.dirname(__file__), '..'))
DIR_OF_THIRD_PARTY = p.join(ROOT_DIR, 'third_party')
DIR_PACKAGES_REGEX = re.compile('(site|dist)-packages$')

_logger = logging.getLogger(__name__)


def ExpectedCoreVersion():
    filepath = p.join(ROOT_DIR, VERSION_FILENAME)
    with io.open(filepath, encoding='utf8') as f:
        return int(f.read())


def ImportCore():
    """Imports and returns the ycm_core module. This function exists for easily
  mocking this import in tests."""
Example #55
0
    _check_negationexpression,
    _check_viewsumexpression, 
    substitute_pyomo2casadi,
)
from pyomo.core.base.template_expr import (
    IndexTemplate, 
    _GetItemIndexer,
)

import os
from pyutilib.misc import setup_redirect, reset_redirect
from pyutilib.misc import import_file

from os.path import abspath, dirname, normpath, join
currdir = dirname(abspath(__file__))
exdir = normpath(join(currdir, '..', '..', '..', 'examples', 'dae'))

# We will skip tests unless we have scipy and not running in pypy
scipy_available = scipy_available and not is_pypy


class TestSimulator(unittest.TestCase):
    """
    Class for testing the pyomo.DAE simulator
    """

    def setUp(self):
        """
        Setting up testing model
        """
        self.m = m = ConcreteModel()
Example #56
0
import os
import os.path as osp
import subprocess

COOKIECUTTER_REPO_NAME = 'cookiecutter-pypackage'

par_dir_path = osp.normpath(osp.join(osp.abspath(osp.curdir), osp.pardir))
if osp.basename(par_dir_path) == COOKIECUTTER_REPO_NAME:
    # This was most likely called `cookiecutter .`
    cookiecutter_repo_path = par_dir_path
else:
    # This was most likely called as `cookiecutter git@github.com:geomagical/labtech-wrapper.git`
    # This is the canonical location for the cached cookiecutter template
    cookiecutter_repo_path = osp.join(os.environ['HOME'], '.cookiecutters', COOKIECUTTER_REPO_NAME)

# Obtain Cookiecutter repo path
cookiecutter_hash = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cookiecutter_repo_path)
cookiecutter_hash = cookiecutter_hash.strip().decode('utf-8')

cookiecutter_uri = subprocess.check_output(["git", "config", "--get", "remote.origin.url"], cwd=cookiecutter_repo_path)
cookiecutter_uri = cookiecutter_uri.strip().decode('utf-8')


#######################
# Setting up git repo #
#######################
shell_cmds = [
        """git init""",
        """git remote add origin git@github.com:{{cookiecutter.github_username}}/{{project_slug}}.git""",
        """git add *""",
        """git add .gitignore""",
Example #57
0
                    stdout.write('%s already exists, skipping\n' % n_path)
                else:
                    stdout.write('%s -> %s\n' % (f_path, n_path))
                    copy(f_path, n_path)
            elif f_path.endswith('.ogg'):
                n_path = n_path[:-3] + 'mp3'
                if path.isfile(n_path):
                    stdout.write('%s already exists, skipping\n' % n_path)
                else:
                  try:
                    transcode(f_path, n_path[:-3] + 'mp3')
                  except OSError, inst:
                    stderr.write("Couldn't transcode %s: %s" % (n_path, inst))
            else:
                stdout.write('Skipping unsupported file "%s"\n' % f)
    recurse(root, path.split(path.normpath(root))[-1])


def sig_int_handler(p0, p1):
  """ Make CTRL-C less catastrophic """
  exit(1)

if __name__ == '__main__':

  # TODO: ensure oggdec, ogginfo, lame are available 

  signal(SIGINT, sig_int_handler)

  if len(argv) < 2 or (len(argv) >= 2 and argv[1] in ('-h', '--help', '-?')):
    progname = path.basename(argv[0])
    stdout.write("""
Example #58
0
from __future__ import print_function

import sys
from os import path, getenv
from time import time, sleep
import numpy as np
from collections import deque
import argparse
from pymavlink import mavutil

# if PAPARAZZI_HOME not set, then assume the tree containing this
# file is a reasonable substitute
PPRZ_HOME = getenv(
    "PAPARAZZI_HOME",
    path.normpath(
        path.join(path.dirname(path.abspath(__file__)), '../../../../')))
sys.path.append(PPRZ_HOME + "/var/lib/python")
from pprzlink.ivy import IvyMessagesInterface
from pprzlink.message import PprzMessage

# parse args
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-ac',
                    '--ac_id',
                    dest='ac_id',
                    type=int,
                    help='Aircraft ID to save the RSSI information to')
parser.add_argument('-b',
                    '--ivy_bus',
                    dest='ivy_bus',
Example #59
0
def create_nb_project_main():
    files = list(source_list(SOURCE_DIR, filename_check=is_project_file))
    files_rel = [relpath(f, start=PROJECT_DIR) for f in files]
    files_rel.sort()

    if SIMPLE_PROJECTFILE:
        pass
    else:
        includes, defines = cmake_advanced_info()

        if (includes, defines) == (None, None):
            return

        # for some reason it doesn't give all internal includes
        includes = list(
            set(includes) | set(dirname(f) for f in files if is_c_header(f)))
        includes.sort()

        if 0:
            PROJECT_NAME = "Blender"
        else:
            # be tricky, get the project name from SVN if we can!
            PROJECT_NAME = project_name_get(SOURCE_DIR)

        # --------------- NB specific
        defines = [("%s=%s" % cdef) if cdef[1] else cdef[0]
                   for cdef in defines]
        defines += [
            cdef.replace("#define", "").strip()
            for cdef in cmake_compiler_defines()
        ]

        def file_list_to_nested(files):
            # convert paths to hierarchy
            paths_nested = {}

            def ensure_path(filepath):
                filepath_split = filepath.split(os.sep)

                pn = paths_nested
                for subdir in filepath_split[:-1]:
                    pn = pn.setdefault(subdir, {})
                pn[filepath_split[-1]] = None

            for path in files:
                ensure_path(path)
            return paths_nested

        PROJECT_DIR_NB = join(PROJECT_DIR, "nbproject")
        if not exists(PROJECT_DIR_NB):
            os.mkdir(PROJECT_DIR_NB)

        # SOURCE_DIR_REL = relpath(SOURCE_DIR, PROJECT_DIR)

        f = open(join(PROJECT_DIR_NB, "project.xml"), 'w')

        f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        f.write('<project xmlns="http://www.netbeans.org/ns/project/1">\n')
        f.write('    <type>org.netbeans.modules.cnd.makeproject</type>\n')
        f.write('    <configuration>\n')
        f.write(
            '        <data xmlns="http://www.netbeans.org/ns/make-project/1">\n'
        )
        f.write('            <name>%s</name>\n' % PROJECT_NAME)
        f.write('            <c-extensions>c,m</c-extensions>\n')
        f.write('            <cpp-extensions>cpp,mm</cpp-extensions>\n')
        f.write(
            '            <header-extensions>h,hpp,inl</header-extensions>\n')
        f.write('            <sourceEncoding>UTF-8</sourceEncoding>\n')
        f.write('            <make-dep-projects/>\n')
        f.write('            <sourceRootList>\n')
        f.write('                <sourceRootElem>%s</sourceRootElem>\n' %
                SOURCE_DIR)  # base_root_rel
        f.write('            </sourceRootList>\n')
        f.write('            <confList>\n')
        f.write('                <confElem>\n')
        f.write('                    <name>Default</name>\n')
        f.write('                    <type>0</type>\n')
        f.write('                </confElem>\n')
        f.write('            </confList>\n')
        f.write('        </data>\n')
        f.write('    </configuration>\n')
        f.write('</project>\n')

        f = open(join(PROJECT_DIR_NB, "configurations.xml"), 'w')

        f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        f.write('<configurationDescriptor version="79">\n')
        f.write(
            '  <logicalFolder name="root" displayName="root" projectFiles="true" kind="ROOT">\n'
        )
        f.write('    <df name="blender" root="%s">\n' %
                SOURCE_DIR)  # base_root_rel

        # write files!
        files_rel_local = [
            normpath(relpath(join(CMAKE_DIR, path), SOURCE_DIR))
            for path in files_rel
        ]
        files_rel_hierarchy = file_list_to_nested(files_rel_local)

        # print(files_rel_hierarchy)

        def write_df(hdir, ident):
            dirs = []
            files = []
            for key, item in sorted(hdir.items()):
                if item is None:
                    files.append(key)
                else:
                    dirs.append((key, item))

            for key, item in dirs:
                f.write('%s  <df name="%s">\n' % (ident, key))
                write_df(item, ident + "  ")
                f.write('%s  </df>\n' % ident)

            for key in files:
                f.write('%s<in>%s</in>\n' % (ident, key))

        write_df(files_rel_hierarchy, ident="    ")

        f.write('    </df>\n')

        f.write('    <logicalFolder name="ExternalFiles"\n')
        f.write('                   displayName="Important Files"\n')
        f.write('                   projectFiles="false"\n')
        f.write('                   kind="IMPORTANT_FILES_FOLDER">\n')
        # f.write('      <itemPath>../GNUmakefile</itemPath>\n')
        f.write('    </logicalFolder>\n')

        f.write('  </logicalFolder>\n')
        # default, but this dir is in fact not in blender dir so we can ignore it
        # f.write('  <sourceFolderFilter>^(nbproject)$</sourceFolderFilter>\n')
        f.write(
            '  <sourceFolderFilter>^(nbproject|__pycache__|.*\.py|.*\.html|.*\.blend)$</sourceFolderFilter>\n'
        )

        f.write('  <sourceRootList>\n')
        f.write('    <Elem>%s</Elem>\n' % SOURCE_DIR)  # base_root_rel
        f.write('  </sourceRootList>\n')

        f.write('  <projectmakefile>Makefile</projectmakefile>\n')

        # paths again
        f.write('  <confs>\n')
        f.write('    <conf name="Default" type="0">\n')

        f.write('      <toolsSet>\n')
        f.write(
            '        <remote-sources-mode>LOCAL_SOURCES</remote-sources-mode>\n'
        )
        f.write('        <compilerSet>default</compilerSet>\n')
        f.write('      </toolsSet>\n')
        f.write('      <makefileType>\n')

        f.write('        <makeTool>\n')
        f.write(
            '          <buildCommandWorkingDir>.</buildCommandWorkingDir>\n')
        f.write('          <buildCommand>${MAKE} -f Makefile</buildCommand>\n')
        f.write(
            '          <cleanCommand>${MAKE} -f Makefile clean</cleanCommand>\n'
        )
        f.write('          <executablePath>./bin/blender</executablePath>\n')

        def write_toolinfo():
            f.write('            <incDir>\n')
            for inc in includes:
                f.write('              <pElem>%s</pElem>\n' % inc)
            f.write('            </incDir>\n')
            f.write('            <preprocessorList>\n')
            for cdef in defines:
                f.write('              <Elem>%s</Elem>\n' % cdef)
            f.write('            </preprocessorList>\n')

        f.write('          <cTool>\n')
        write_toolinfo()
        f.write('          </cTool>\n')

        f.write('          <ccTool>\n')
        write_toolinfo()
        f.write('          </ccTool>\n')

        f.write('        </makeTool>\n')
        f.write('      </makefileType>\n')
        # finish makefile info

        f.write('    \n')

        for path in files_rel_local:
            f.write('      <item path="%s"\n' % path)
            f.write('            ex="false"\n')
            f.write('            tool="1"\n')
            f.write('            flavor="0">\n')
            f.write('      </item>\n')

        f.write('      <runprofile version="9">\n')
        f.write('        <runcommandpicklist>\n')
        f.write('        </runcommandpicklist>\n')
        f.write('        <runcommand>%s</runcommand>\n' %
                os.path.join(CMAKE_DIR, "bin/blender"))
        f.write('        <rundir>%s</rundir>\n' % SOURCE_DIR)
        f.write('        <buildfirst>false</buildfirst>\n')
        f.write('        <terminal-type>0</terminal-type>\n')
        f.write('        <remove-instrumentation>0</remove-instrumentation>\n')
        f.write('        <environment>\n')
        f.write('        </environment>\n')
        f.write('      </runprofile>\n')

        f.write('    </conf>\n')
        f.write('  </confs>\n')

        # todo

        f.write('</configurationDescriptor>\n')
Example #60
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-s',
                        "--src",
                        help="source path for the file to be imported")
    parser.add_argument('-u', "--user", help="user name (optional)")
    parser.add_argument('-f',
                        "--folder",
                        help='folder path under user home dir (optional)')
    parser.add_argument('-p', "--passwd_file", help='password file (optional)')

    args = parser.parse_args()

    src_path = None
    username = None
    folder = None
    password_file = None

    if args.src:
        src_path = args.src
    else:
        print("no source file provided")
        return -1
    if not op.isfile(src_path):
        print("no file found")
        return -1
    if not h5py.is_hdf5(src_path):
        print("file must be an HDF5 file")
        return -1

    if args.user:
        username = args.user
    else:
        print("Importing into public")

    if args.passwd_file:
        password_file = args.passwd_file
    else:
        password_file = config.get("password_file")

    if args.folder:
        folder = args.folder
        if op.isabs(folder):
            print("folder path must be relative")
            return -1
        folder = op.normpath(folder)

    print(">source:", src_path)
    print(">username:", username)
    print(">password_file:", password_file)
    print(">folder:", folder)

    hdf5_ext = config.get("hdf5_ext")

    if username:
        userid = getUserId(username, password_file)

        if not userid:
            print("user not found")
            return -1

    tgt_dir = op.join(op.dirname(__file__), config.get("datapath"))
    tgt_dir = op.normpath(tgt_dir)

    if username:
        tgt_dir = op.join(tgt_dir, config.get("home_dir"))
        tgt_dir = op.join(tgt_dir, username)
    toc_file = op.join(tgt_dir, config.get("toc_name"))
    if not op.isfile(toc_file):
        print("toc_file:", toc_file, "not found")
        return -1
    if folder:
        tgt_dir = op.join(tgt_dir, folder)

    if not op.isdir(tgt_dir):
        print("directory:", tgt_dir, "not found, creating")
        makeDirs(tgt_dir)

    tgt_file = op.basename(src_path)
    tgt_file = op.splitext(tgt_file)[0]  # ignore the extension
    tgt_file = url_escape(tgt_file)  # make the filename url compatible
    tgt_file = tgt_file.replace('.', '_')  # replace dots with underscores

    tgt_path = op.join(tgt_dir, tgt_file)
    tgt_path = op.normpath(tgt_path)

    if op.isfile(tgt_path + hdf5_ext):
        print("file already exists")
        return -1

    # determine target domain
    domain = tgt_file
    if folder:
        domain += '.' + folder
    if username:
        domain += '.' + username + '.' + config.get("home_dir")
    domain += "." + config.get("domain")

    # determine the base so that the toc update can be done relative to the base.
    if username:
        base_domain = username + '.' + config.get(
            "home_dir") + '.' + config.get("domain")
    else:
        base_domain = config.get("domain")

    print("domain:", domain)
    # add toc entry
    addTocEntry(toc_file, domain, base_domain)
    # copy file
    tgt_path += hdf5_ext
    shutil.copyfile(src_path, tgt_path)

    return 0
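
The domain string assembled near the end of main() follows a fixed pattern (file, optional folder, optional user home, top-level domain). A small standalone sketch of that composition, with hypothetical defaults standing in for the config values:

def compose_domain(tgt_file, folder=None, username=None,
                   home_dir="home", top_domain="hdfgroup.org"):
    # mirrors the string building in main() above; defaults are assumptions
    domain = tgt_file
    if folder:
        domain += '.' + folder
    if username:
        domain += '.' + username + '.' + home_dir
    return domain + '.' + top_domain

# hypothetical inputs
print(compose_domain("tall", folder="projects", username="alice"))
# -> tall.projects.alice.home.hdfgroup.org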