Example 1
 def xtestStartSolrReally(self):
     tempdir = "/tmp/testSetupSolrConfig"
     isdir(tempdir) and rmtree(tempdir)
     mkdir(tempdir)
     solrDataDir = join(tempdir, 'solr-data')
     solrServer = self._createServer(stateDir=solrDataDir, port=8000, config={"test": {'admin': True}})
     solrServer.start(javaMX="1024M")
Example 2
def valid_dir(d):
    # type: (Dict) -> bool
    dir = d['path']
    if not path.exists(dir):
        return True
    if not path.isdir(dir):
        return False

    if set(['Makefile', 'make.bat']) & set(os.listdir(dir)):  # type: ignore
        return False

    if d['sep']:
        dir = os.path.join('source', dir)
        if not path.exists(dir):
            return True
        if not path.isdir(dir):
            return False

    reserved_names = [
        'conf.py',
        d['dot'] + 'static',
        d['dot'] + 'templates',
        d['master'] + d['suffix'],
    ]
    if set(reserved_names) & set(os.listdir(dir)):  # type: ignore
        return False

    return True
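
Example 2 refuses a target directory that already contains generated files. Below is a minimal, self-contained sketch of the same check; the reserved names are illustrative stand-ins, not Sphinx's actual defaults.

import os
from os import path

def is_safe_target(directory, reserved_names=('conf.py', 'Makefile', 'make.bat')):
    """Return True if `directory` is absent or contains none of `reserved_names`."""
    if not path.exists(directory):
        return True
    if not path.isdir(directory):
        return False
    return not (set(reserved_names) & set(os.listdir(directory)))

print(is_safe_target('docs'))  # True if 'docs' is missing or holds no reserved files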
Example 3
def get_conf_path(filename=None):
    """Return absolute path for configuration file with specified filename"""
    # Define conf_dir
    if PYTEST:
        import py
        from _pytest.tmpdir import get_user
        conf_dir = osp.join(str(py.path.local.get_temproot()),
                            'pytest-of-{}'.format(get_user()),
                            SUBFOLDER)
    elif sys.platform.startswith('linux'):
        # This makes us follow the XDG standard to save our settings
        # on Linux, as it was requested on Issue 2629
        xdg_config_home = os.environ.get('XDG_CONFIG_HOME', '')
        if not xdg_config_home:
            xdg_config_home = osp.join(get_home_dir(), '.config')
        if not osp.isdir(xdg_config_home):
            os.makedirs(xdg_config_home)
        conf_dir = osp.join(xdg_config_home, SUBFOLDER)
    else:
        conf_dir = osp.join(get_home_dir(), SUBFOLDER)

    # Create conf_dir
    if not osp.isdir(conf_dir):
        if PYTEST:
            os.makedirs(conf_dir)
        else:
            os.mkdir(conf_dir)
    if filename is None:
        return conf_dir
    else:
        return osp.join(conf_dir, filename)
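
Example 3 resolves a per-user configuration directory. A hedged sketch of the same XDG-style lookup, with a generic appname standing in for Spyder's SUBFOLDER constant and the pytest branch left out:

import os
import os.path as osp
import sys

def config_dir(appname='myapp'):
    """Resolve and create a per-user config directory (XDG-style on Linux)."""
    if sys.platform.startswith('linux'):
        base = os.environ.get('XDG_CONFIG_HOME') or osp.join(osp.expanduser('~'), '.config')
    else:
        base = osp.expanduser('~')
    conf_dir = osp.join(base, appname)
    if not osp.isdir(conf_dir):
        os.makedirs(conf_dir)
    return conf_dir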
Example 4
def copytree(src, dst, symlinks=False):
	names = listdir(src)
	if os_path.isdir(dst):
		dst = os_path.join(dst, os_path.basename(src))
		if not os_path.isdir(dst):
			mkdir(dst)
	else:
		makedirs(dst)
	for name in names:
		srcname = os_path.join(src, name)
		dstname = os_path.join(dst, name)
		try:
			if symlinks and os_path.islink(srcname):
				linkto = readlink(srcname)
				symlink(linkto, dstname)
			elif os_path.isdir(srcname):
				copytree(srcname, dstname, symlinks)
			else:
				copyfile(srcname, dstname)
		except:
			print "dont copy srcname (no file or link or folder)"
	try:
		st = os_stat(src)
		mode = S_IMODE(st.st_mode)
		if have_chmod:
			chmod(dst, mode)
		if have_utime:
			utime(dst, (st.st_atime, st.st_mtime))
	except:
		print "copy stats for", src, "failed!"
Example 5
def snapshot(source, destination, name=None):
    """Snapshot one directory to another. Specify names to snapshot small, named differences."""
    source = source + sep
    destination = destination + sep

    if not path.isdir(source):
        raise RuntimeError("source is not a directory")

    if path.exists(destination):
        if not path.isdir(destination):
            raise RuntimeError("destination is not a directory")

        if name is None:
            raise RuntimeError("can't snapshot base snapshot if destination exists")

    snapdir = path.join(destination, ".snapdir")
    
    if path.exists(path.join(source, ".snapdir")):
        raise RuntimeError("snapdir exists in source directory")

    if name is None:
        check_call(["rsync", "--del", "-av", source, destination])
        makedirs(snapdir)
    else:
        if not path.exists(snapdir):
            raise RuntimeError("No snapdir in destination directory")

        check_call(["rsync", "--del", "-av", "--only-write-batch={}".format(path.join(snapdir, name)), source, destination])
Example 6
def copy_static_entry(source, targetdir, builder, context={},
                      exclude_matchers=(), level=0):
    # type: (unicode, unicode, Any, Dict, Tuple[Callable, ...], int) -> None
    """[DEPRECATED] Copy a HTML builder static_path entry from source to targetdir.

    Handles all possible cases of files, directories and subdirectories.
    """
    warnings.warn('sphinx.util.copy_static_entry is deprecated for removal',
                  RemovedInSphinx30Warning, stacklevel=2)

    if exclude_matchers:
        relpath = relative_path(path.join(builder.srcdir, 'dummy'), source)
        for matcher in exclude_matchers:
            if matcher(relpath):
                return
    if path.isfile(source):
        copy_asset_file(source, targetdir, context, builder.templates)
    elif path.isdir(source):
        if not path.isdir(targetdir):
            os.mkdir(targetdir)
        for entry in os.listdir(source):
            if entry.startswith('.'):
                continue
            newtarget = targetdir
            if path.isdir(path.join(source, entry)):
                newtarget = path.join(targetdir, entry)
            copy_static_entry(path.join(source, entry), newtarget,
                              builder, context, level=level + 1,
                              exclude_matchers=exclude_matchers)
Example 7
def fs_treesize( root, tree, files_as_nodes=True ):
    """Add 'size' attributes to all nodes.

    Root is the path on which the tree is rooted.

    Tree is a dict representing a node in the filesystem hierarchy.

    Size is cumulative.
    """
    assert isinstance(root, basestring) and isdir(root), repr(root)
    assert isinstance(tree, Node)

    if not tree.size:
        size = 0
        if tree.value:
            for node in tree.value: # for each node in this dir:
                path = join(root, node.name)
                if isdir(path):
                    # subdir, recurse and add size
                    fs_treesize(root, node)
                    size += node.size
                else:
                    # filename, add size
                    try:
                        csize = getsize(path)
                        node.size = csize
                        size += csize
                    except Exception, e:
                        pass  # print >>sys.stderr, "could not get size of %s: %r" % (path, e)
        tree.size = size
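
The cumulative-size idea in Example 7 can be expressed without the custom Node type by walking the tree with os.walk; a rough, standard-library-only equivalent:

import os
from os.path import getsize, islink, join

def tree_size(root):
    """Return the cumulative size in bytes of all regular files under `root`."""
    total = 0
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            full = join(dirpath, name)
            if islink(full):
                continue
            try:
                total += getsize(full)
            except OSError:
                pass  # unreadable entry; skip it, as the original does
    return total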
Example 8
File: add.py Project: hanke/datalad
def _discover_subdatasets_recursively(
        discovered, top, trace, recursion_limit):
    # this beast walks the directory tree from a given `top` directory
    # and discovers valid repos that are scattered around, regardless
    # of whether they are already subdatasets or not
    # `trace` must be a list that has at least one element (the base
    # dataset)
    if recursion_limit is not None and len(trace) > recursion_limit:
        return
    if not isdir(top):
        return
    if not op.islink(top) and GitRepo.is_valid_repo(top):
        if top in discovered:
            # this was found already, assume everything beneath it too
            return
        discovered[top] = dict(
            path=top,
            # and its content
            process_content=True,
            type='dataset',
            parentds=trace[-1])
        # new node in the trace down
        trace = trace + [top]
    for path in listdir(top):
        path = opj(top, path)
        if not isdir(path):
            continue
        # next level down
        _discover_subdatasets_recursively(
            discovered, path, trace, recursion_limit)
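
Example 8 bounds its recursive repository search by the length of the trace. A simplified skeleton of the same walk, with a plain ".git" test standing in for datalad's GitRepo.is_valid_repo (an assumption, not datalad's API):

from os import listdir
from os.path import isdir, join

def find_repos(top, found=None, depth=0, max_depth=5):
    """Collect directories under `top` that contain a .git folder, up to `max_depth`."""
    if found is None:
        found = []
    if depth > max_depth or not isdir(top):
        return found
    if isdir(join(top, '.git')):
        found.append(top)
    for name in listdir(top):
        sub = join(top, name)
        if isdir(sub):
            find_repos(sub, found, depth + 1, max_depth)
    return found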
Example 9
    def __scanDir( self, path ):
        """ Recursive function to scan one dir """
        # The path is expected to end with '/'
        for item in os.listdir( path ):
            if self.shouldExclude( item ):
                continue

            # Exclude symlinks that point to pieces already covered
            # by the other project
            candidate = path + item
            if islink( candidate ):
                realItem = realpath( candidate )
                if isdir( realItem ):
                    if self.isProjectDir( realItem ):
                        continue
                else:
                    if self.isProjectDir( os.path.dirname( realItem ) ):
                        continue

            if isdir( candidate ):
                self.filesList.add( candidate + sep )
                self.__scanDir( candidate + sep )
                continue
            self.filesList.add( candidate )
        return
Example 10
        def __init__(self):
            self.app_dir = join(NSSearchPathForDirectoriesInDomains(NSApplicationSupportDirectory, NSUserDomainMask, True)[0], appname)
            if not isdir(self.app_dir):
                mkdir(self.app_dir)

            self.plugin_dir = join(self.app_dir, 'plugins')
            if not isdir(self.plugin_dir):
                mkdir(self.plugin_dir)

            self.home = expanduser('~')

            self.respath = getattr(sys, 'frozen', False) and normpath(join(dirname(sys.executable), pardir, 'Resources')) or dirname(__file__)

            if not getattr(sys, 'frozen', False):
                # Don't use Python's settings if interactive
                self.bundle = 'uk.org.marginal.%s' % appname.lower()
                NSBundle.mainBundle().infoDictionary()['CFBundleIdentifier'] = self.bundle
            self.bundle = NSBundle.mainBundle().bundleIdentifier()
            self.defaults = NSUserDefaults.standardUserDefaults()
            settings = self.defaults.persistentDomainForName_(self.bundle) or {}
            self.settings = dict(settings)

            # Check outdir exists
            if not self.get('outdir') or not isdir(self.get('outdir')):
                self.set('outdir', NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, True)[0])
Example 11
def get_repository_info(recipe_path):
    """This tries to get information about where a recipe came from.  This is different
    from the source - you can have a recipe in svn that gets source via git."""
    try:
        if exists(join(recipe_path, ".git")):
            origin = check_output_env(["git", "config", "--get", "remote.origin.url"],
                                      cwd=recipe_path)
            rev = check_output_env(["git", "rev-parse", "HEAD"], cwd=recipe_path)
            return "Origin {}, commit {}".format(origin, rev)
        elif isdir(join(recipe_path, ".hg")):
            origin = check_output_env(["hg", "paths", "default"], cwd=recipe_path)
            rev = check_output_env(["hg", "id"], cwd=recipe_path).split()[0]
            return "Origin {}, commit {}".format(origin, rev)
        elif isdir(join(recipe_path, ".svn")):
            info = check_output_env(["svn", "info"], cwd=recipe_path)
            server = re.search("Repository Root: (.*)$", info, flags=re.M).group(1)
            revision = re.search("Revision: (.*)$", info, flags=re.M).group(1)
            return "{}, Revision {}".format(server, revision)
        else:
            return "{}, last modified {}".format(recipe_path,
                                             time.ctime(os.path.getmtime(
                                                 join(recipe_path, "meta.yaml"))))
    except CalledProcessError:
        log.debug("Failed to checkout source in " + recipe_path)
        return "{}, last modified {}".format(recipe_path,
                                             time.ctime(os.path.getmtime(
                                                 join(recipe_path, "meta.yaml"))))
Example 12
File: muso.py Project: dflock/muso
def check_artist_folder(path):
    """
    Check that the given path looks like an Artist folder should:

    - has a folder.jpg, possibly more than one image, but no music.
    - otherwise, only contains folders
    """

    # Initialise the flags first so a path that is not a directory still
    # produces a well-defined result below.
    has_art = False
    has_folder_jpg = False
    only_contains_folders = True

    if isdir(path):
        contents = os.listdir(path)
        files = filter(lambda x: isfile(join(path, x)) and not x.startswith('.'), contents)

        for item in files:
            item_path = join(path, item)
            if is_image_file(item_path):
                has_art = True
                if is_folder_art(item_path):
                    has_folder_jpg = True
            elif not (isdir(item_path) or is_ignored_file(item_path)):
                only_contains_folders = False

    return {
        'ok': has_art and only_contains_folders and has_folder_jpg,
        'has_art': has_art and has_folder_jpg,
        'only_contains_folders': only_contains_folders
    }
Example 13
        def __init__(self):

            # http://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
            self.app_dir = join(getenv('XDG_DATA_HOME', expanduser('~/.local/share')), appname)
            if not isdir(self.app_dir):
                makedirs(self.app_dir)

            self.plugin_dir = join(self.app_dir, 'plugins')
            if not isdir(self.plugin_dir):
                mkdir(self.plugin_dir)

            self.home = expanduser('~')

            self.respath = dirname(__file__)

            self.filename = join(getenv('XDG_CONFIG_HOME', expanduser('~/.config')), appname, '%s.ini' % appname)
            if not isdir(dirname(self.filename)):
                makedirs(dirname(self.filename))

            self.config = RawConfigParser()
            try:
                self.config.readfp(codecs.open(self.filename, 'r', 'utf-8'))
            except:
                self.config.add_section('config')

            if not self.get('outdir') or not isdir(self.get('outdir')):
                self.set('outdir', expanduser('~'))
Example 14
 def scanDir(self,dPath,usages):
     dName=path.basename(dPath)
     if dName[0]==".":
         return
     elif dName in ["lnInclude","Doxygen"]:
         return
     elif dName in ["Make","platform","bin"]:
         for f in listdir(dPath):
             if f[0]==".":
                 continue
             nPath=path.join(dPath,f)
             if path.isdir(nPath):
                 isBin=False
                 for end in ["Opt","Debug","Prof"]:
                     if f.find(end)>0 and (f.find(end)+len(end))==len(f):
                         isBin=True
                 if isBin:
                     sz=diskUsage(nPath)
                     try:
                         usages[f]+=sz
                     except KeyError:
                         usages[f]=sz
                        # print_("Found architecture",f,"in",dPath)
     else:
         try:
             for f in listdir(dPath):
                 nPath=path.join(dPath,f)
                 if path.isdir(nPath) and not path.islink(nPath):
                     self.scanDir(nPath,usages)
         except OSError:
             self.warning("Can't process",dPath)
Example 15
		def iterLocations():
			if platform == 'android':
				# Under Android, the tcl set-up apparently differs from
				# other cross-platform setups. the search algorithm to find the
				# directory that will contain the tclConfig.sh script and the shared libs
				# is not applicable to Android. Instead, immediately return the correct
				# subdirectories to the routine that invokes iterLocations()
				sdl_android_port_path = environ['SDL_ANDROID_PORT_PATH']
				libpath = sdl_android_port_path + '/project/libs/armeabi'
				yield libpath
				tclpath = sdl_android_port_path + '/project/jni/tcl8.5/unix'
				yield tclpath
			else:
				if distroRoot is None or cls.isSystemLibrary(platform):
					if msysActive():
						roots = (msysPathToNative('/mingw32'), )
					else:
						roots = ('/usr/local', '/usr')
				else:
					roots = (distroRoot, )
				for root in roots:
					if isdir(root):
						for libdir in ('lib', 'lib64', 'lib/tcl'):
							libpath = root + '/' + libdir
							if isdir(libpath):
								yield libpath
								for entry in listdir(libpath):
									if entry.startswith('tcl8.'):
										tclpath = libpath + '/' + entry
										if isdir(tclpath):
											yield tclpath
Example 16
def create_env(prefix, specs, clear_cache=True, verbose=True, channel_urls=(),
    override_channels=False):
    '''
    Create a conda environment for the given prefix and specs.
    '''
    if not isdir(config.bldpkgs_dir):
        os.makedirs(config.bldpkgs_dir)
    update_index(config.bldpkgs_dir)
    if specs: # Don't waste time if there is nothing to do
        if clear_cache:
            # remove the cache such that a refetch is made,
            # this is necessary because we add the local build repo URL
            fetch_index.cache = {}
        index = get_index(channel_urls=[url_path(config.croot)] + list(channel_urls),
            prepend=not override_channels)

        warn_on_old_conda_build(index)

        cc.pkgs_dirs = cc.pkgs_dirs[:1]
        actions = plan.install_actions(prefix, index, specs)
        plan.display_actions(actions, index)
        plan.execute_actions(actions, index, verbose=verbose)
    # ensure prefix exists, even if empty, i.e. when specs are empty
    if not isdir(prefix):
        os.makedirs(prefix)
Example 17
def test_create_structure(tmpfolder):
    struct = {"my_file": "Some content",
              "my_folder": {
                  "my_dir_file": "Some other content",
                  "empty_file": "",
                  "file_not_created": None
              },
              "empty_folder": {}}
    expected = {"my_file": "Some content",
                "my_folder": {
                    "my_dir_file": "Some other content",
                    "empty_file": ""
                },
                "empty_folder": {}}
    changed, _ = structure.create_structure(struct, {})

    assert changed == expected
    assert isdir("my_folder")
    assert isdir("empty_folder")
    assert isfile("my_folder/my_dir_file")
    assert isfile("my_folder/empty_file")
    assert not isfile("my_folder/file_not_created")
    assert isfile("my_file")
    assert open("my_file").read() == "Some content"
    assert open("my_folder/my_dir_file").read() == "Some other content"
    assert open("my_folder/empty_file").read() == ""
Example 18
    def check_tbb_paths(self, tbb_path, tbb_fx_binary_path, tbb_profile_path):
        """Update instance variables based on the passed paths.

        TorBrowserDriver can be initialized by passing either
        1) path to TBB directory, or
        2) path to TBB's Firefox binary and profile
        """
        if not (tbb_path or (tbb_fx_binary_path and tbb_profile_path)):
            raise cm.TBDriverPathError("Either TBB path or Firefox profile"
                                       " and binary path should be provided"
                                       " %s" % tbb_path)

        if tbb_path:
            if not isdir(tbb_path):
                raise cm.TBDriverPathError("TBB path is not a directory %s"
                                           % tbb_path)
            tbb_fx_binary_path = join(tbb_path, cm.DEFAULT_TBB_FX_BINARY_PATH)
            tbb_profile_path = join(tbb_path, cm.DEFAULT_TBB_PROFILE_PATH)
        if not isfile(tbb_fx_binary_path):
            raise cm.TBDriverPathError("Invalid Firefox binary %s"
                                       % tbb_fx_binary_path)
        if not isdir(tbb_profile_path):
            raise cm.TBDriverPathError("Invalid Firefox profile dir %s"
                                       % tbb_profile_path)
        self.tbb_path = tbb_path
        self.tbb_profile_path = tbb_profile_path
        self.tbb_fx_binary_path = tbb_fx_binary_path
Example 19
def main_loop(argv=None):
    if argv is None:
        argv = sys.argv

    args = docopt.docopt(get_updated_docstring(), argv=argv[1:],
            version=".".join(map(str, __version__)))

    if not args["--silent"]:
        logcfg.set_loglevel(log, "INFO")
        for h in log.handlers:
            logcfg.set_loglevel(h, "INFO")
    elif args["--verbose"] > 0:
        logcfg.make_verbose()
        log.debug(pf(args))

    ext = args["--extension"]
    recursive = args["--recursive"]

    files_and_folders = []
    files_and_folders.extend(args["<file_or_folder>"])

    for faf in files_and_folders:
        if osp.isfile(faf):
            parse_file(faf, args)
        elif osp.isdir(faf):
            for entry in os.listdir(faf):
                path = osp.join(faf, entry)

                valid_file = osp.isfile(path)\
                    and osp.splitext(path)[-1] == ext\
                    and osp.basename(osp.splitext(path)[0]) != "cfg"
                valid_folder = recursive and osp.isdir(path)

                if valid_file or valid_folder:
                    files_and_folders.append(path)
Example 20
def compile_dir(env, src_path, dst_path, pattern=r'^[^\.].*\..*[^~]$',
                encoding='utf-8', base_dir=None,
                negative_pattern=r'^.*\.swp$'):
  """Compiles a directory of Jinja2 templates to python code.
  Params:
    `env`: a Jinja2 Environment instance.
    `src_path`: path to the source directory.
    `dst_path`: path to the destination directory.
    `encoding`: template encoding.
    `pattern`: a regular expression to match template file names.
    `base_dir`: the base path to be removed from the compiled template
      filename.
  """
  if base_dir is None:
    # In the first call, store the base dir.
    base_dir = src_path

  for filename in listdir(src_path):
    src_name = path.join(src_path, filename)
    dst_name = path.join(dst_path, filename)

    if path.isdir(src_name):
      if not path.isdir(dst_name):
        mkdir(dst_name)
      compile_dir(env, src_name, dst_name, encoding=encoding,
                  base_dir=base_dir)
    elif path.isfile(src_name) and re.match(pattern, filename) and \
          not re.match(negative_pattern, filename):
      compile_file(env, src_name, dst_name, encoding=encoding,
                   base_dir=base_dir)
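
Example 20 mirrors a source tree into a destination tree while compiling matching files. The traversal pattern in isolation, with the Jinja2 compile step replaced by an arbitrary callback (illustrative only, not Jinja2's API):

import re
from os import listdir, mkdir, path

def walk_matching(src_path, dst_path, callback, pattern=r'^[^\.].*$'):
    """Recreate src_path's directory layout under dst_path and invoke
    `callback(src_name, dst_name)` for every file whose name matches `pattern`."""
    for filename in listdir(src_path):
        src_name = path.join(src_path, filename)
        dst_name = path.join(dst_path, filename)
        if path.isdir(src_name):
            if not path.isdir(dst_name):
                mkdir(dst_name)
            walk_matching(src_name, dst_name, callback, pattern)
        elif path.isfile(src_name) and re.match(pattern, filename):
            callback(src_name, dst_name)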
Example 21
def hg_source(source_dict, src_dir, hg_cache, verbose):
    ''' Download a source from Mercurial repo. '''
    if verbose:
        stdout = None
        stderr = None
    else:
        FNULL = open(os.devnull, 'w')
        stdout = FNULL
        stderr = FNULL

    hg_url = source_dict['hg_url']
    if not isdir(hg_cache):
        os.makedirs(hg_cache)
    hg_dn = hg_url.split(':')[-1].replace('/', '_')
    cache_repo = join(hg_cache, hg_dn)
    if isdir(cache_repo):
        check_call_env(['hg', 'pull'], cwd=cache_repo, stdout=stdout, stderr=stderr)
    else:
        check_call_env(['hg', 'clone', hg_url, cache_repo], stdout=stdout, stderr=stderr)
        assert isdir(cache_repo)

    # now clone in to work directory
    update = source_dict.get('hg_tag') or 'tip'
    if verbose:
        print('checkout: %r' % update)

    check_call_env(['hg', 'clone', cache_repo, src_dir], stdout=stdout,
                   stderr=stderr)
    check_call_env(['hg', 'update', '-C', update], cwd=src_dir, stdout=stdout,
                   stderr=stderr)

    if not verbose:
        FNULL.close()

    return src_dir
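
Example 21 keeps a local cache clone and pulls into it on later runs. The same cache-or-clone decision, sketched for git instead of hg (the git commands are standard; everything else is illustrative):

import os
import subprocess
from os.path import isdir, join

def ensure_cached_clone(repo_url, cache_root):
    """Clone `repo_url` into `cache_root` the first time, fetch on later calls."""
    if not isdir(cache_root):
        os.makedirs(cache_root)
    cache_repo = join(cache_root, repo_url.rstrip('/').split('/')[-1])
    if isdir(cache_repo):
        subprocess.check_call(['git', '-C', cache_repo, 'fetch', '--all'])
    else:
        subprocess.check_call(['git', 'clone', repo_url, cache_repo])
    return cache_repo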
Example 22
def copy(src, dst, hardlink=False, keep_symlink=True):
    assert not P.isdir(src), 'Source path must not be a dir'
    assert not P.isdir(dst), 'Destination path must not be a dir'

    if keep_symlink and P.islink(src):
        assert not P.isabs(readlink(src)), 'Cannot copy symlink that points to an absolute path (%s)' % src
        logger.debug('%8s %s -> %s' % ('symlink', src, dst))
        if P.exists(dst):
            assert readlink(dst) == readlink(src), 'Refusing to retarget already-exported symlink %s' % dst
        else:
            symlink(readlink(src), dst)
        return

    if P.exists(dst):
        assert hash_file(src) == hash_file(dst), 'Refusing to overwrite already exported dst %s' % dst
    else:
        if hardlink:
            try:
                link(src, dst)
                logger.debug('%8s %s -> %s' % ('hardlink', src, dst))
                return
            except OSError, o:
                if o.errno != errno.EXDEV: # Invalid cross-device link, not an error, fall back to copy
                    raise

        logger.debug('%8s %s -> %s' % ('copy', src, dst))
        shutil.copy2(src, dst)
Example 23
 def _retrieve_resource(self, uri):
     u"""
     Get the resource specified by the uri if it exists.
     Otherwise, raise an exception.
     """
     self._check_uri(uri)
     p = self._root + uri
     if isdir(p):
         body = ["<p>Directory Listing for "]
         body.append(uri)
         body.append("</p><ul>")
         dirs = []
         files = []
         for res in listdir(p):
             if isdir(p + res):
                 dirs.append(res + b'/')
             else:
                 files.append(res)
         dirs.sort()
         files.sort()
         resources = dirs + files
         for res in resources:
             body.append('<li><a href="{}">{}</a></li>'.format(res, res))
         body.append("</ul>")
         return ("".join(body), "text/html")
     elif isfile(p):
         with open(self._root + uri, 'rb') as resource:
             body = resource.read()
             content_type, content_encoding = mimetypes.guess_type(uri)
         return (body, content_type)
     else:
         raise ResourceNotFound
Example 24
def download_file(url, name, root_destination='~/data/', zipfile=False,
                  replace=False):
    """Download a file from dropbox, google drive, or a URL.

    This will download a file and store it in a `~/data/` folder,
    creating directories if need be. It will also work for zip
    files, in which case it will unzip all of the files to the
    desired location.

    Parameters
    ----------
    url : string
        The url of the file to download. This may be a dropbox
        or google drive "share link", or a regular URL. If it
        is a share link, then it should point to a single file and
        not a folder. To download folders, zip them first.
    name : string
        The name / path of the file for the downloaded file, or
        the folder to zip the data into if the file is a zipfile.
    root_destination : string
        The root folder where data will be downloaded.
    zipfile : bool
        Whether the URL points to a zip file. If yes, it will be
        unzipped to root_destination + name.
    replace : bool
        If True and the URL points to a single file, overwrite the
        old file if possible.
    """
    # Make sure we have directories to dump files
    home = op.expanduser('~')
    tmpfile = home + '/tmp/tmp'
    if not op.isdir(home + '/data/'):
        print('Creating data folder...')
        os.makedirs(home + '/data/')

    if not op.isdir(home + '/tmp/'):
        print('Creating tmp folder...')
        os.makedirs(home + '/tmp/')

    download_path = _convert_url_to_downloadable(url)

    # Now save to the new destination
    out_path = root_destination.replace('~', home) + name
    if not op.isdir(op.dirname(out_path)):
        print('Creating path {} for output data'.format(out_path))
        os.makedirs(op.dirname(out_path))

    if zipfile is True:
        _fetch_file(download_path, tmpfile)
        myzip = ZipFile(tmpfile)
        myzip.extractall(out_path)
        os.remove(tmpfile)
    else:
        if len(name) == 0:
            raise ValueError('Cannot overwrite the root data directory')
        if replace is False and op.exists(out_path):
            raise ValueError('Path {} exists, use `replace=True` to '
                             'overwrite'.format(out_path))
        _fetch_file(download_path, out_path)
    print('Successfully moved file to {}'.format(out_path))
Example 25
    def bootstrap_rustc_docs(self, force=False):
        self.ensure_bootstrapped()
        rust_root = self.config["tools"]["rust-root"]
        docs_dir = path.join(rust_root, "doc")
        if not force and path.exists(docs_dir):
            print("Rust docs already downloaded.", end=" ")
            print("Use |bootstrap-rust-docs --force| to download again.")
            return

        if path.isdir(docs_dir):
            shutil.rmtree(docs_dir)
        docs_name = self.rust_path().replace("rustc-", "rust-docs-")
        docs_url = ("https://static-rust-lang-org.s3.amazonaws.com/dist/rust-docs-nightly-%s.tar.gz"
                    % host_triple())
        tgz_file = path.join(rust_root, 'doc.tar.gz')

        download_file("Rust docs", docs_url, tgz_file)

        print("Extracting Rust docs...")
        temp_dir = path.join(rust_root, "temp_docs")
        if path.isdir(temp_dir):
            shutil.rmtree(temp_dir)
        extract(tgz_file, temp_dir)
        shutil.move(path.join(temp_dir, docs_name.split("/")[1],
                              "rust-docs", "share", "doc", "rust", "html"),
                    docs_dir)
        shutil.rmtree(temp_dir)
        print("Rust docs ready.")
Example 26
    def test_demo_deletion(self):
        """Ensure that demo files are deleted along with submission record"""

        fout = StringIO()
        zf = zipfile.ZipFile(fout, "w")
        zf.writestr("demo.html", """<html></html""")
        zf.writestr("css/main.css", "h1 { color: red }")
        zf.writestr("js/main.js", 'alert("HELLO WORLD");')
        zf.close()

        s = Submission(
            title="Hello world", slug="hello-world", description="This is a hello world demo", creator=self.user
        )

        s.demo_package.save("play_demo.zip", ContentFile(fout.getvalue()))
        s.demo_package.close()
        s.clean()
        s.save()

        s.process_demo_package()

        path = s.demo_package.path.replace(".zip", "")

        ok_(isdir(path))
        ok_(isfile("%s/index.html" % path))
        ok_(isfile("%s/css/main.css" % path))
        ok_(isfile("%s/js/main.js" % path))

        s.delete()

        ok_(not isfile("%s/index.html" % path))
        ok_(not isfile("%s/css/main.css" % path))
        ok_(not isfile("%s/js/main.js" % path))
        ok_(not isdir(path))
Example 27
    def test_censored_demo_files_are_deleted(self):
        """Demo files should be deleted when the demo is censored."""
        fout = StringIO()
        zf = zipfile.ZipFile(fout, "w")
        zf.writestr("demo.html", """<html></html""")
        zf.writestr("css/main.css", "h1 { color: red }")
        zf.writestr("js/main.js", 'alert("HELLO WORLD");')
        zf.close()

        s = Submission(
            title="Hello world", slug="hello-world", description="This is a hello world demo", creator=self.user
        )

        s.demo_package.save("play_demo.zip", ContentFile(fout.getvalue()))
        s.demo_package.close()
        s.clean()
        s.save()

        s.process_demo_package()

        path = s.demo_package.path.replace(".zip", "")

        ok_(isdir(path))
        ok_(isfile(s.demo_package.path))
        ok_(isfile("%s/index.html" % path))
        ok_(isfile("%s/css/main.css" % path))
        ok_(isfile("%s/js/main.js" % path))

        s.censor(url="http://example.com/censored-explanation")

        ok_(not isfile(s.demo_package.path))
        ok_(not isfile("%s/index.html" % path))
        ok_(not isfile("%s/css/main.css" % path))
        ok_(not isfile("%s/js/main.js" % path))
        ok_(not isdir(path))
Example 28
    def __init__(self, env):
        self.pkgname = self.__class__.__name__

        # Yes, it is possible to get a / into a class name.
        # os.path.join fails pathologically there. So catch that specific case.
        assert '/' not in self.pkgname

        self.pkgdir  = P.abspath(P.dirname(inspect.getfile(self.__class__)))
        #info(self.pkgdir)
        self.tarball = None
        self.workdir = None
        self.env = copy.deepcopy(env)
        self.arch = get_platform(self)

        self.env['CPPFLAGS'] = self.env.get('CPPFLAGS', '') + ' -I%(NOINSTALL_DIR)s/include -I%(INSTALL_DIR)s/include' % self.env
        self.env['CXXFLAGS'] = self.env.get('CXXFLAGS', '') + ' -I%(NOINSTALL_DIR)s/include -I%(INSTALL_DIR)s/include' % self.env
        # If we include flags to directories that don't exist, we
        # cause compiler tests to fail.
        if P.isdir(self.env['ISIS3RDPARTY']):
            self.env['LDFLAGS'] = self.env.get('LDFLAGS', '') + ' -L%(ISIS3RDPARTY)s' % self.env
        if P.isdir(self.env['INSTALL_DIR']+'/lib'):
            self.env['LDFLAGS'] = self.env.get('LDFLAGS', '') + ' -L%(INSTALL_DIR)s/lib' % self.env

        # Remove repeated entries in CPPFLAGS, CXXFLAGS, LDFLAGS
        self.env['CPPFLAGS'] = unique_compiler_flags(self.env['CPPFLAGS'])
        self.env['CXXFLAGS'] = unique_compiler_flags(self.env['CXXFLAGS'])
        self.env['CFLAGS']   = unique_compiler_flags(self.env['CFLAGS'])
        self.env['LDFLAGS']  = unique_compiler_flags(self.env['LDFLAGS'])
Example 29
  def __extract_queries_from_test_files(workload, query_names):
    """
    Enumerate all the query files for a workload and extract the query strings.
    If the user has specified a subset of queries to execute, only extract those query
    strings.
    """
    query_regex = None
    if query_names:
      # Build a single regex from all query name regex strings.
      query_regex = r'(?:' + '$)|('.join([name for name in query_names.split(',')]) + '$)'
    workload_base_dir = os.path.join(WORKLOAD_DIR, workload)
    if not isdir(workload_base_dir):
      raise ValueError,\
             "Workload '%s' not found at path '%s'" % (workload, workload_base_dir)

    query_dir = os.path.join(workload_base_dir, 'queries')
    if not isdir(query_dir):
      raise ValueError, "Workload query directory not found at path '%s'" % (query_dir)

    query_map = defaultdict(list)
    for query_file_name in WorkloadRunner.__enumerate_query_files(query_dir):
      LOG.debug('Parsing Query Test File: ' + query_file_name)
      sections = parse_query_test_file(query_file_name)
      test_name = re.sub('/', '.', query_file_name.split('.')[0])[1:]
      # If query_names is not none, only extract user specified queries to
      # the query map.
      if query_names:
        sections = [s for s in sections if re.match(query_regex, s['QUERY_NAME'], re.I)]
      for section in sections:
        query_map[test_name].append((section['QUERY_NAME'],
                                     (section['QUERY'], section['RESULTS'])))
    return query_map
Example 30
def link(target, lnk, force=False):
    """
    Creates symbolic link 'lnk' pointing to 'target'.
    """

    if system() not in ('Linux', 'Windows', 'MSYS_NT-6.1'):
        print("{} operating system is not supported.".format(system()))
        return

    isdir = False

    lnk = path.normpath(path.expandvars(path.expanduser(lnk)))
    if path.isdir(target):
        isdir = True
    target = path.normpath(path.expandvars(path.expanduser(target)))

    if isdir:
        print("\n{} -> {} : DIR".format(lnk, target))
    else:
        print("\n{} -> {} : FILE".format(lnk, target))

    if path.isdir(lnk) or path.isfile(lnk):
        if not force:
            print("'{}': link exists".format(lnk))
            return
        else:
            remove(lnk)

    if system() in ('Linux', 'MSYS_NT-6.1'):
        Popen(['ln', '-s', target, lnk]).wait()
    elif system() == 'Windows':
        if isdir:
            CreateSymbolicLink(lnk, target, 1)
        else:
            CreateSymbolicLink(lnk, target, 0)
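
Example 30 shells out to ln and CreateSymbolicLink. On current Python, os.symlink covers both platforms; a minimal sketch (creating symlinks on Windows may still require elevated privileges or developer mode):

import os
from os import path

def make_link(target, lnk, force=False):
    """Create a symbolic link `lnk` pointing to `target`."""
    if path.islink(lnk) or path.exists(lnk):
        if not force:
            print("'{}': link exists".format(lnk))
            return
        os.remove(lnk)
    # target_is_directory only matters on Windows, where directory links
    # are a distinct kind of reparse point.
    os.symlink(target, lnk, target_is_directory=path.isdir(target))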
Example 31
def file_exists_not_dir(path):
    if IO.exists(path):
        return not IO.isdir(path)
    return False
Example 32
def _check(path):
    return exists(path) and isdir(path) and isfile(path+"/AUTHORS")
Example 33
 def __cleanup(self):
     """Remove old build and dist directories"""
     remove_dir("build")
     if osp.isdir("dist"):
         remove_dir("dist")
     remove_dir(self.target_dir)
Example 34
def get_msvc_dlls(msvc_version, architecture=None, check_architecture=False):
    """Get the list of Microsoft Visual C++ DLLs associated to 
    architecture and Python version, create the manifest file.
    
    architecture: integer (32 or 64) -- if None, take the Python build arch
    python_version: X.Y"""
    current_architecture = 64 if sys.maxsize > 2**32 else 32
    if architecture is None:
        architecture = current_architecture
    assert architecture in (32, 64)

    filelist = []

    msvc_major = msvc_version.split('.')[0]
    msvc_minor = msvc_version.split('.')[1]

    if msvc_major == '9':
        key = "1fc8b3b9a1e18e3b"
        atype = "" if architecture == 64 else "win32"
        arch = "amd64" if architecture == 64 else "x86"
        
        groups = {
                  'CRT': ('msvcr90.dll', 'msvcp90.dll', 'msvcm90.dll'),
#                  'OPENMP': ('vcomp90.dll',)
                  }

        for group, dll_list in groups.items():
            dlls = ''
            for dll in dll_list:
                dlls += '    <file name="%s" />%s' % (dll, os.linesep)
        
            manifest =\
"""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<!-- Copyright (c) Microsoft Corporation.  All rights reserved. -->
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
    <noInheritable/>
    <assemblyIdentity
        type="%(atype)s"
        name="Microsoft.VC90.%(group)s"
        version="%(version)s"
        processorArchitecture="%(arch)s"
        publicKeyToken="%(key)s"
    />
%(dlls)s</assembly>
""" % dict(version=msvc_version, key=key, atype=atype, arch=arch,
           group=group, dlls=dlls)

            vc90man = "Microsoft.VC90.%s.manifest" % group
            open(vc90man, 'w').write(manifest)
            _remove_later(vc90man)
            filelist += [vc90man]
    
            winsxs = osp.join(os.environ['windir'], 'WinSxS')
            vcstr = '%s_Microsoft.VC90.%s_%s_%s' % (arch, group,
                                                    key, msvc_version)
            for fname in os.listdir(winsxs):
                path = osp.join(winsxs, fname)
                if osp.isdir(path) and fname.lower().startswith(vcstr.lower()):
                    for dllname in os.listdir(path):
                        filelist.append(osp.join(path, dllname))
                    break
            else:
                raise RuntimeError("Microsoft Visual C++ %s DLLs version %s "\
                                    "were not found" % (group, msvc_version))

    elif msvc_major == '10':
        namelist = [name % (msvc_major + msvc_minor) for name in 
                    (
                     'msvcp%s.dll', 'msvcr%s.dll',
                     'vcomp%s.dll',
                     )]
        
        windir = os.environ['windir']
        is_64bit_windows = osp.isdir(osp.join(windir, "SysWOW64"))

        # Reminder: WoW64 (*W*indows 32-bit *o*n *W*indows *64*-bit) is a 
        # subsystem of the Windows operating system capable of running 32-bit 
        # applications and is included on all 64-bit versions of Windows
        # (source: http://en.wikipedia.org/wiki/WoW64)
        #
        # In other words, "SysWOW64" contains 32-bit DLL and applications, 
        # whereas "System32" contains 64-bit DLL and applications on a 64-bit 
        # system.
        if architecture == 64:
            # 64-bit DLLs are located in...
            if is_64bit_windows:
                sysdir = "System32"  # on a 64-bit OS
            else:
                # ...no directory to be found!
                raise RuntimeError("Can't find 64-bit DLLs on a 32-bit OS")
        else:
            # 32-bit DLLs are located in...
            if is_64bit_windows:
                sysdir = "SysWOW64"  # on a 64-bit OS
            else:
                sysdir = "System32"  # on a 32-bit OS

        for dllname in namelist:
            fname = osp.join(windir, sysdir, dllname)
            if osp.exists(fname):
                filelist.append(fname)
            else:
                raise RuntimeError("Microsoft Visual C++ DLLs version %s "\
                                   "were not found" % msvc_version)

    else:
        raise RuntimeError("Unsupported MSVC version %s" % msvc_version)

    if check_architecture:
        for path in filelist:
            if path.endswith('.dll'):
                try:
                    arch = get_dll_architecture(path)
                except RuntimeError:
                    return
                if arch != architecture:
                    raise RuntimeError("%s: expecting %dbit, found %dbit"\
                                       % (path, architecture, arch))

    return filelist
Example 35
 def makedirs(self, *parts):
     d = self.join(*parts)
     if not path.exists(d):
         os.makedirs(d)
     assert path.isdir(d) and not path.islink(d)
     return d
Example 36
 def __get_path(self):
     assert path.abspath(self.__path) == self.__path
     assert self.__path.startswith(path.join(tempfile.gettempdir(),
                                             'ipa.tests.'))
     assert path.isdir(self.__path) and not path.islink(self.__path)
     return self.__path
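
Examples 35 and 36 guard directory creation and use with exists/isdir checks and asserts. On Python 3, exist_ok makes the creation idempotent; a short sketch:

import os

def ensure_dir(*parts):
    """Join the parts and create the directory if it is missing."""
    d = os.path.join(*parts)
    # exist_ok=True makes the call idempotent and avoids the race between
    # an isdir()/exists() check and the actual creation.
    os.makedirs(d, exist_ok=True)
    return d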
Example 37
def dir_exists(path):
    if IO.exists(path):
        if IO.isdir(path):
            return True
    return False
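
Examples 31 and 37 wrap exists/isdir checks behind an IO helper. With pathlib the same predicates are one-liners; an equivalent sketch:

from pathlib import Path

def dir_exists(p):
    """True if `p` exists and is a directory."""
    return Path(p).is_dir()

def file_exists_not_dir(p):
    """True if `p` exists and is not a directory."""
    q = Path(p)
    return q.exists() and not q.is_dir()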
Example 38
    def upload_nightly(self, platform, secret_from_taskcluster):
        import boto3

        def get_taskcluster_secret(name):
            url = (
                os.environ.get("TASKCLUSTER_PROXY_URL", "http://taskcluster") +
                "/secrets/v1/secret/project/servo/" + name)
            return json.load(urllib.urlopen(url))["secret"]

        def get_s3_secret():
            aws_access_key = None
            aws_secret_access_key = None
            if secret_from_taskcluster:
                secret = get_taskcluster_secret("s3-upload-credentials")
                aws_access_key = secret["aws_access_key_id"]
                aws_secret_access_key = secret["aws_secret_access_key"]
            return (aws_access_key, aws_secret_access_key)

        def nightly_filename(package, timestamp):
            return '{}-{}'.format(
                timestamp.isoformat() + 'Z',  # The `Z` denotes UTC
                path.basename(package))

        def upload_to_s3(platform, package, timestamp):
            (aws_access_key, aws_secret_access_key) = get_s3_secret()
            s3 = boto3.client('s3',
                              aws_access_key_id=aws_access_key,
                              aws_secret_access_key=aws_secret_access_key)
            BUCKET = 'servo-builds'

            nightly_dir = 'nightly/{}'.format(platform)
            filename = nightly_filename(package, timestamp)
            package_upload_key = '{}/{}'.format(nightly_dir, filename)
            extension = path.basename(package).partition('.')[2]
            latest_upload_key = '{}/servo-latest.{}'.format(
                nightly_dir, extension)

            s3.upload_file(package, BUCKET, package_upload_key)
            copy_source = {
                'Bucket': BUCKET,
                'Key': package_upload_key,
            }
            s3.copy(copy_source, BUCKET, latest_upload_key)

        def update_maven(directory):
            (aws_access_key, aws_secret_access_key) = get_s3_secret()
            s3 = boto3.client('s3',
                              aws_access_key_id=aws_access_key,
                              aws_secret_access_key=aws_secret_access_key)
            BUCKET = 'servo-builds'

            nightly_dir = 'nightly/maven'
            dest_key_base = directory.replace(
                "target/android/gradle/servoview/maven", nightly_dir)
            if dest_key_base[-1] == '/':
                dest_key_base = dest_key_base[:-1]

            # Given a directory with subdirectories like 0.0.1.20181005.caa4d190af...
            for artifact_dir in os.listdir(directory):
                base_dir = os.path.join(directory, artifact_dir)
                if not os.path.isdir(base_dir):
                    continue
                package_upload_base = "{}/{}".format(dest_key_base,
                                                     artifact_dir)
                # Upload all of the files inside the subdirectory.
                for f in os.listdir(base_dir):
                    file_upload_key = "{}/{}".format(package_upload_base, f)
                    print("Uploading %s to %s" %
                          (os.path.join(base_dir, f), file_upload_key))
                    s3.upload_file(os.path.join(base_dir, f), BUCKET,
                                   file_upload_key)

        def update_brew(package, timestamp):
            print("Updating brew formula")

            package_url = 'https://download.servo.org/nightly/macbrew/{}'.format(
                nightly_filename(package, timestamp))
            with open(package) as p:
                digest = hashlib.sha256(p.read()).hexdigest()

            brew_version = timestamp.strftime('%Y.%m.%d')

            with TemporaryDirectory(prefix='homebrew-servo') as tmp_dir:

                def call_git(cmd, **kwargs):
                    subprocess.check_call(['git', '-C', tmp_dir] + cmd,
                                          **kwargs)

                call_git([
                    'clone',
                    'https://github.com/servo/homebrew-servo.git',
                    '.',
                ])

                script_dir = path.dirname(path.realpath(__file__))
                with open(path.join(script_dir,
                                    'servo-binary-formula.rb.in')) as f:
                    formula = f.read()
                formula = formula.replace('PACKAGEURL', package_url)
                formula = formula.replace('SHA', digest)
                formula = formula.replace('VERSION', brew_version)
                with open(path.join(tmp_dir, 'Formula', 'servo-bin.rb'),
                          'w') as f:
                    f.write(formula)

                call_git(['add', path.join('.', 'Formula', 'servo-bin.rb')])
                call_git([
                    '-c',
                    'user.name=Tom Servo',
                    '-c',
                    '[email protected]',
                    'commit',
                    '--message=Version Bump: {}'.format(brew_version),
                ])

                if secret_from_taskcluster:
                    token = get_taskcluster_secret(
                        'github-homebrew-token')["token"]
                else:
                    token = os.environ['GITHUB_HOMEBREW_TOKEN']

                push_url = 'https://{}@github.com/servo/homebrew-servo.git'
                # TODO(aneeshusa): Use subprocess.DEVNULL with Python 3.3+
                with open(os.devnull, 'wb') as DEVNULL:
                    call_git([
                        'push',
                        '-qf',
                        push_url.format(token),
                        'master',
                    ],
                             stdout=DEVNULL,
                             stderr=DEVNULL)

        timestamp = datetime.utcnow().replace(microsecond=0)
        for package in PACKAGES[platform]:
            if path.isdir(package):
                continue
            if not path.isfile(package):
                print("Could not find package for {} at {}".format(
                    platform, package),
                      file=sys.stderr)
                return 1
            upload_to_s3(platform, package, timestamp)

        if platform == 'maven':
            for package in PACKAGES[platform]:
                update_maven(package)

        if platform == 'macbrew':
            packages = PACKAGES[platform]
            assert (len(packages) == 1)
            update_brew(packages[0], timestamp)

        return 0
Example 39
def seedFiles(inputFile, numSeeds, seed=None, outputDir=None, link=False,
              digits=10):
    """
    Copy input file multiple times with unique seeds.

    Parameters
    ----------
    inputFile: str
        Path to input file
    numSeeds: int
        Number of files to create
    seed: int
        Optional argument to set the seed of the builtin random
        number generator
    outputDir: str
        Path to desired output directory. Files will be copied here.
        If the folder does not exist, try to make the directory. Assumes path
        relative to directory that contains the input file
    link: bool
        If True, do not copy the full file. Instead, create a new file
        with 'include <inputFile>' and the new seed declaration.
    digits: int
        Average number of digits for random seeds

    See Also
    --------
    :py:mod:`random`
    :py:func:`random.seed()`
    :py:func:`random.getrandbits()`

    """
    if '~' in inputFile:
        inputFile = os.path.expanduser(inputFile)

    if not path.exists(inputFile):
        error('Input file {} does not exist'.format(inputFile))
        return

    if numSeeds < 1:
        error('Require positive number of files to create')
        return

    if digits < 1:
        error('Require positive number of digits in random seeds')
    bits = int((digits - OFFSET) / SLOPE)

    random.seed(seed)

    inputPath = path.abspath(path.join(os.getcwd(), inputFile))
    inputRoot = path.dirname(inputPath)

    if outputDir is not None:
        fPrefix = path.abspath(path.join(inputRoot, outputDir))
        if not path.isdir(fPrefix):
            debug('Creating directory at {}'.format(fPrefix))
            os.mkdir(fPrefix)
    else:
        fPrefix = inputRoot

    fileFmt = path.join(fPrefix, _makeFileFmt(inputFile))

    writeFunc = _include if link else _copy
    writeFunc(inputPath, numSeeds, fileFmt, bits, digits)

    return
Example 40
def response_path(path):
    """
    This method should return appropriate content and a mime type.

    If the requested path is a directory, then the content should be a
    plain-text listing of the contents with mimetype `text/plain`.

    If the path is a file, it should return the contents of that file
    and its correct mimetype.

    If the path does not map to a real location, it should raise an
    exception that the server can catch to return a 404 response.

    Ex:
        response_path('/a_web_page.html') -> (b"<html><h1>North Carolina...",
                                            b"text/html")

        response_path('/images/sample_1.png')
                        -> (b"A12BCF...",  # contents of sample_1.png
                            b"image/png")

        response_path('/') -> (b"images/, a_web_page.html, make_type.py,...",
                             b"text/plain")

        response_path('/a_page_that_doesnt_exist.html') -> Raises a NameError

    """

    # TODO: Raise a NameError if the requested content is not present
    # under webroot.

    # TODO: Fill in the appropriate content and mime_type give the path.
    # See the assignment guidelines for help on "mapping mime-types", though
    # you might need to create a special case for handling make_time.py
    #
    # If the path is "make_time.py", then you may OPTIONALLY return the
    # result of executing `make_time.py`. But you need only return the
    # CONTENTS of `make_time.py`.

    content = b"not implemented"
    mime_type = b"not implemented"
    full_path = 'webroot' + path

    try:
        if isfile(full_path):
            mime_type = mimetypes.guess_type(full_path)[0].encode()

            with open(full_path, 'rb') as in_file:
                content = in_file.read()
        elif isdir(full_path):
            mime_type = b"text/plain"

            content = '\n'.join([f for f in listdir(full_path) if f]).encode()

        else:
            raise NameError

    except:
        print('file not found')
        raise NameError

    return content, mime_type
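
Example 40 is an assignment skeleton for mapping a request path to content and a mime type. A condensed sketch of the intended dispatch (the webroot handling and error signalling are assumptions, not the assignment's required API):

import mimetypes
import os
from os.path import isdir, isfile, join

def serve_path(webroot, uri):
    """Return (content_bytes, mime_type_bytes) for a path below `webroot`,
    or raise NameError if nothing exists there."""
    full_path = join(webroot, uri.lstrip('/'))
    if isfile(full_path):
        mime = mimetypes.guess_type(full_path)[0] or 'application/octet-stream'
        with open(full_path, 'rb') as f:
            return f.read(), mime.encode()
    if isdir(full_path):
        listing = '\n'.join(sorted(os.listdir(full_path)))
        return listing.encode(), b'text/plain'
    raise NameError(full_path)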
Example 41
def main():
    try:
        config_file = sys.argv[1]
        outdir = sys.argv[2]
        outputtxt = sys.argv[3]
    except IndexError:
        print("Wrong number of command line ", sys.exc_info()[0])
        raise

    helper.read_config(config_file, None, None, outdir)

    bitcode = es.LLVM_OBJ
    llvmopt = es.LLVM_OPT
    libmackeopt = es.LIB_MACKEOPT
    aflcovered = path.join(outdir, "afl_out/covered_functions.txt")
    kleecovered = path.join(outdir, "klee_out/covered_funcs.txt")
    #kleecovered = path.dirname(es.LLVM_OBJ)+"/covered_funcs.txt"
    #aflcovered = path.dirname(es.AFL_BINARY)+"/"+es.AFL_RESULTS_FOLDER+"/covered_functions.txt"

    if not bitcode.endswith(".bc"):
        print("ERROR: KLEE compiled file should have a .bc extension")
    if not path.isfile(llvmopt):
        print("ERROR: llvmopt loader not found: %s" % llvmopt)
    if not path.isfile(libmackeopt):
        print("ERROR: Macke optllvm library not found: %s" % libmackeopt)
    if not outputtxt.endswith(".txt"):
        print("ERROR: Output textfile should have .txt extension: %s" %
              outputtxt)
        output_to_file = False
    elif not path.isdir(path.dirname(outputtxt)):
        print("ERROR: Output textfile path does not exist: %s" % outputtxt)
        output_to_file = False
    else:
        output_to_file = True

    if not path.isfile(kleecovered) and kleecovered != "None":
        print("ERROR: wrong KLEE coverage file: %s\nGenerating now..." %
              kleecovered)
        read_KLEE_coverage.main(bitcode, verbose=False, store=True)
    if not path.isfile(aflcovered) and aflcovered != "None":
        print("ERROR: wrong AFL coverage file: %s\nGenerating now..." %
              aflcovered)
        fuzz_with_afl.run_afl_cov(
            es.AFL_BINARY,
            path.dirname(es.AFL_BINARY) + "/" + es.AFL_RESULTS_FOLDER,
            es.GCOV_DIR)

    callgraph = json.loads(
        subprocess.check_output([
            llvmopt, "-load", libmackeopt, "-extractcallgraph", bitcode,
            "-disable-output"
        ]).decode("utf-8"))
    all_funcs, distancedict = calc_distance_to_main(callgraph)

    print("Total functions discovered in connected graph: %d" %
          (len(all_funcs)))
    if kleecovered == "None":
        kleecovered_f = None
    else:
        kleecovered_f = kleecovered
    if aflcovered == "None":
        aflcovered_f = None
    else:
        aflcovered_f = aflcovered

    covered = read_coverage(kleecovered_f, aflcovered_f)

    depthdict = {}
    covered_funcs_connected = []
    for d in distancedict.keys():
        depth = distancedict[d]
        if depth == -1:
            continue
        if depth not in depthdict.keys():
            depthdict[depth] = [[], []]
        if d in covered:
            covered_funcs_connected.append(d)
            if d not in depthdict[depth][0]:
                depthdict[depth][0].append(d)
        else:
            if d not in depthdict[depth][1]:
                depthdict[depth][1].append(d)

    print("Total functions covered in connected graph: %d" %
          (len(covered_funcs_connected)))
    print("Covered   |     Total")
    for d in depthdict.keys():
        print("%10d|%10d" % (len(
            depthdict[d][0]), len(depthdict[d][0]) + len(depthdict[d][1])))

    if output_to_file:
        outfile = open(outputtxt, "w+")
        for c in covered_funcs_connected:
            outfile.write("%s\n" % (c))
Example 42
# TODO:
# * Implement CONFIG_SDL to be able to compile without needing SDL at all.
# * Currently, it only supports text subtitles - bitmap subtitles are ignored.
#   Unless one uses a filter to overlay the subtitle.
# * We cannot yet visualize audio as video. Provide a filter chain link between
#   audio and video filters to accomplish this.

dep_bins = []
'''A list of paths to the binaries used by the library. It can be used during
packaging for including required binaries.

It is read only.
'''

_ffmpeg = join(sys.prefix, 'share', 'ffpyplayer', 'ffmpeg', 'bin')
if isdir(_ffmpeg):
    if hasattr(os, 'add_dll_directory'):
        os.add_dll_directory(_ffmpeg)
    else:
        os.environ["PATH"] += os.pathsep + _ffmpeg
    dep_bins.append(_ffmpeg)

_sdl = join(sys.prefix, 'share', 'ffpyplayer', 'sdl', 'bin')
if isdir(_sdl):
    if hasattr(os, 'add_dll_directory'):
        os.add_dll_directory(_sdl)
    else:
        os.environ["PATH"] += os.pathsep + _sdl
    dep_bins.append(_sdl)

if 'SDL_AUDIODRIVER' not in os.environ and platform.system() == 'Windows':
Example 43
def data_path(dataset='evoked', path=None, force_update=False,
              update_path=True, verbose=None):
    u"""Get path to local copy of the high frequency SEF dataset.

    Gets a local copy of the high frequency SEF MEG dataset [1]_.

    Parameters
    ----------
    dataset : 'evoked' | 'raw'
        Whether to get the main dataset (evoked, structural and the rest) or
        the separate dataset containing raw MEG data only.
    path : None | str
        Where to look for the HF-SEF data storing location.
        If None, the environment variable or config parameter
        ``MNE_DATASETS_HF_SEF_PATH`` is used. If it doesn't exist, the
        "~/mne_data" directory is used. If the HF-SEF dataset
        is not found under the given path, the data
        will be automatically downloaded to the specified folder.
    force_update : bool
        Force update of the dataset even if a local copy exists.
    update_path : bool | None
        If True, set the MNE_DATASETS_HF_SEF_PATH in mne-python
        config to the given path. If None, the user is prompted.
    %(verbose)s

    Returns
    -------
    path : str
        Local path to the directory where the HF-SEF data is stored.

    References
    ----------
    .. [1] Nurminen, J., Paananen, H., Mäkelä, J. (2017): High frequency
           somatosensory MEG dataset. https://doi.org/10.5281/zenodo.889234
    """
    key = 'MNE_DATASETS_HF_SEF_PATH'
    name = 'HF_SEF'
    path = _get_path(path, key, name)
    destdir = op.join(path, 'HF_SEF')

    urls = {'evoked':
            'https://zenodo.org/record/3523071/files/hf_sef_evoked.tar.gz',
            'raw':
            'https://zenodo.org/record/889296/files/hf_sef_raw.tar.gz'}
    hashes = {'evoked': '13d34cb5db584e00868677d8fb0aab2b',
              'raw': '33934351e558542bafa9b262ac071168'}
    _check_option('dataset', dataset, sorted(urls.keys()))
    url = urls[dataset]
    hash_ = hashes[dataset]
    fn = url.split('/')[-1]  # pick the filename from the url
    archive = op.join(destdir, fn)

    # check for existence of evoked and raw sets
    has = dict()
    subjdir = op.join(destdir, 'subjects')
    megdir_a = op.join(destdir, 'MEG', 'subject_a')
    has['evoked'] = op.isdir(destdir) and op.isdir(subjdir)
    has['raw'] = op.isdir(megdir_a) and any(['raw' in fn_ for fn_ in
                                             os.listdir(megdir_a)])

    if not has[dataset] or force_update:
        if not op.isdir(destdir):
            os.mkdir(destdir)
        _fetch_file(url, archive, hash_=hash_)

        with tarfile.open(archive) as tar:
            logger.info('Decompressing %s' % archive)
            for member in tar.getmembers():
                # strip the leading dirname 'hf_sef/' from the archive paths
                # this should be fixed when making next version of archives
                member.name = member.name[7:]
                try:
                    tar.extract(member, destdir)
                except IOError:
                    # check whether file exists but could not be overwritten
                    fn_full = op.join(destdir, member.name)
                    if op.isfile(fn_full):
                        os.remove(fn_full)
                        tar.extract(member, destdir)
                    else:  # some more sinister cause for IOError
                        raise

        os.remove(archive)

    _do_path_update(path, update_path, key, name)
    return destdir
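A hypothetical call of this function (the print statement is illustrative only):

# Hypothetical usage: fetch the evoked HF-SEF dataset into the default location.
hf_sef_dir = data_path(dataset='evoked')
print('HF-SEF data stored in %s' % hf_sef_dir)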
Esempio n. 44
0
def walk_dir(dirname,
             recursive,
             files,
             excluded_files,
             excluded_extensions,
             get_cluster_item_key,
             get_md5=True,
             whoami='master'):
    walk_files = {}

    try:
        entries = listdir(common.ossec_path + dirname)
    except OSError as e:
        raise WazuhException(3015, str(e))

    for entry in entries:
        if entry in excluded_files or any(
                entry.endswith(ext) for ext in excluded_extensions):
            continue

        try:
            full_path = path.join(dirname, entry)
            if entry in files or files == ["all"]:

                if not path.isdir(common.ossec_path + full_path):
                    file_mod_time = datetime.utcfromtimestamp(
                        stat(common.ossec_path + full_path).st_mtime)

                    if whoami == 'worker' and file_mod_time < (
                            datetime.utcnow() - timedelta(minutes=30)):
                        continue

                    entry_metadata = {
                        "mod_time": str(file_mod_time),
                        'cluster_item_key': get_cluster_item_key
                    }
                    if '.merged' in entry:
                        entry_metadata['merged'] = True
                        entry_metadata[
                            'merge_type'] = 'agent-info' if 'agent-info' in entry else 'agent-groups'
                        entry_metadata['merge_name'] = dirname + '/' + entry
                    else:
                        entry_metadata['merged'] = False

                    if get_md5:
                        entry_metadata['md5'] = md5(common.ossec_path +
                                                    full_path)

                    walk_files[full_path] = entry_metadata

            if recursive and path.isdir(common.ossec_path + full_path):
                walk_files.update(
                    walk_dir(full_path, recursive, files, excluded_files,
                             excluded_extensions, get_cluster_item_key,
                             get_md5, whoami))

        except Exception as e:
            logger.error("Could not get checksum of file {}: {}".format(
                entry, e))

    return walk_files
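A hypothetical invocation of walk_dir (the directory, exclusions and cluster item key are placeholders, not values taken from a real Wazuh configuration):

# Hypothetical usage: collect metadata and MD5 hashes for every file under
# etc/shared relative to the OSSEC install path, skipping temporary files.
shared_files = walk_dir('/etc/shared', recursive=True, files=["all"],
                        excluded_files=['ar.conf'], excluded_extensions=['.tmp'],
                        get_cluster_item_key='master_files', get_md5=True,
                        whoami='master')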
Esempio n. 45
0
def replace_line(line):
    new_line = ''
    for i in range(0, len(line)):
        if line[i] == '\t':
            new_line += '    '
        else:
            new_line += line[i]
    return new_line

def write_file(path_to_file, data):
    with open(path_to_file, 'w') as f:
        f.write('')
        f.write(data)


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('Usage: ./tabs2spaces.py [filename] [filename2] [...]')
        exit(1)
    for arg in sys.argv[1:]:  # skip the script name itself
        filename = arg
        if os.path.isdir(filename):  # assumes `import os` among the snippet's (unshown) imports
            continue
        data = read_file(filename)
        lines = data.split('\n')
        buffer = ''
        for line in lines:
            line = replace_line(line)
            buffer += line + '\n'
        write_file(filename, buffer)
Esempio n. 46
0
    def upload_dir(self, path):

        if self.API_KEY is None:
            logging.error('In order to upload files to unpac.me, you must have a valid API key configured.')
            return None

        if isfile(path):
            logging.error('The path specified appears to be a file and not a directory.')
            return None
        elif not isdir(path):
            logging.error('The directory specified does not exist.')
            return None

        files = [f for f in listdir(path) if isfile(join(path, f))]
        remaining_quota = self.get_remaining_quota()

        if len(files) > remaining_quota:
            logging.error(f'Insufficient quota: You are attempting to upload {len(files)} '
                          f'files but you only have {remaining_quota} private API requests remaining. '
                          f'Please remove {len(files) - remaining_quota} files from the directory '
                          f'before trying again.')
            return None

        results = list()

        for file in files:
            full_path_and_filename = join(path, file)

            with open(full_path_and_filename, "rb") as f:
                file_data = f.read()

            file_size = len(file_data)

            record = {'filename': file, 'filesize': file_size}

            auth_header = {'Authorization': f'Key {self.API_KEY}'}
            # Use a dedicated name for the multipart payload so the `files`
            # list of directory entries is not shadowed, and upload the
            # current file rather than the directory itself.
            upload_payload = {'file': (file, file_data)}

            response = None
            logging.info('Uploading %s' % full_path_and_filename)

            try:
                response = requests.post(f'{self.__API_TARGET}/private/upload', files=upload_payload, headers=auth_header)
                response.raise_for_status()
            except requests.exceptions.HTTPError as err:
                logging.error(f'File upload failed with the following error: {err}.')
                record['success'] = False
                record['msg'] = err
            except requests.exceptions.Timeout:
                logging.error('File upload timed out.')
                record['success'] = False
                record['msg'] = 'timed out'
            except requests.exceptions.RequestException as err:
                logging.error(f'File upload failed with the following error: {err}.')
                record['success'] = False
                record['msg'] = err

            if response is None or 'id' not in response.json():
                logging.error('The upload appears to have failed as an ID value was not returned from the server.')
                record['success'] = False
                record['msg'] = 'no id'
            else:
                submission_id = response.json()['id']
                logging.debug(f'Upload succeeded. The submission ID is {submission_id}.')
                record['success'] = True
                record['id'] = submission_id

            results.append(record)

            if len(results) < len(files):
                sleep(self.RATE_LIMIT)

        return results
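A hypothetical way to use this method, assuming it belongs to a client class (the class name, constructor and sample directory below are placeholders):

# Hypothetical usage: upload every file in a directory of samples.
client = UnpacMeClient(api_key='YOUR-API-KEY')  # assumed constructor
results = client.upload_dir('/tmp/samples')
if results:
    for record in results:
        print(record['filename'], 'ok' if record.get('success') else 'failed')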
Esempio n. 47
0
    def __init__(self, srcdir, confdir, outdir, doctreedir, buildername,
                 confoverrides=None, status=sys.stdout, warning=sys.stderr,
                 freshenv=False, warningiserror=False, tags=None, verbosity=0,
                 parallel=0):
        self.verbosity = verbosity
        self.next_listener_id = 0
        self._extensions = {}
        self._extension_metadata = {}
        self._listeners = {}
        self.domains = BUILTIN_DOMAINS.copy()
        self.buildername = buildername
        self.builderclasses = BUILTIN_BUILDERS.copy()
        self.builder = None
        self.env = None

        self.srcdir = srcdir
        self.confdir = confdir
        self.outdir = outdir
        self.doctreedir = doctreedir

        self.parallel = parallel

        if status is None:
            self._status = cStringIO()
            self.quiet = True
        else:
            self._status = status
            self.quiet = False

        if warning is None:
            self._warning = cStringIO()
        else:
            self._warning = warning
        self._warncount = 0
        self.warningiserror = warningiserror

        self._events = events.copy()
        self._translators = {}

        # keep last few messages for traceback
        self.messagelog = deque(maxlen=10)

        # say hello to the world
        self.info(bold('Running Sphinx v%s' % sphinx.__display_version__))

        # status code for command-line application
        self.statuscode = 0

        if not path.isdir(outdir):
            self.info('making output directory...')
            os.makedirs(outdir)

        # read config
        self.tags = Tags(tags)
        self.config = Config(confdir, CONFIG_FILENAME,
                             confoverrides or {}, self.tags)
        self.config.check_unicode(self.warn)
        # defer checking types until i18n has been initialized

        # set confdir to srcdir if -C given (!= no confdir); a few pieces
        # of code expect a confdir to be set
        if self.confdir is None:
            self.confdir = self.srcdir

        # extension loading support for alabaster theme
        # self.config.html_theme is not set from conf.py at here
        # for now, sphinx always load a 'alabaster' extension.
        if 'alabaster' not in self.config.extensions:
            self.config.extensions.append('alabaster')

        # load all user-given extension modules
        for extension in self.config.extensions:
            self.setup_extension(extension)
        # the config file itself can be an extension
        if self.config.setup:
            # py31 doesn't have 'callable' function for below check
            if hasattr(self.config.setup, '__call__'):
                self.config.setup(self)
            else:
                raise ConfigError(
                    "'setup' that is specified in the conf.py has not been " +
                    "callable. Please provide a callable `setup` function " +
                    "in order to behave as a sphinx extension conf.py itself."
                )

        # now that we know all config values, collect them from conf.py
        self.config.init_values(self.warn)

        # check the Sphinx version if requested
        if self.config.needs_sphinx and \
           self.config.needs_sphinx > sphinx.__display_version__[:3]:
            raise VersionRequirementError(
                'This project needs at least Sphinx v%s and therefore cannot '
                'be built with this version.' % self.config.needs_sphinx)

        # check extension versions if requested
        if self.config.needs_extensions:
            for extname, needs_ver in self.config.needs_extensions.items():
                if extname not in self._extensions:
                    self.warn('needs_extensions config value specifies a '
                              'version requirement for extension %s, but it is '
                              'not loaded' % extname)
                    continue
                has_ver = self._extension_metadata[extname]['version']
                if has_ver == 'unknown version' or needs_ver > has_ver:
                    raise VersionRequirementError(
                        'This project needs the extension %s at least in '
                        'version %s and therefore cannot be built with the '
                        'loaded version (%s).' % (extname, needs_ver, has_ver))

        # set up translation infrastructure
        self._init_i18n()
        # check all configuration values for permissible types
        self.config.check_types(self.warn)
        # set up the build environment
        self._init_env(freshenv)
        # set up the builder
        self._init_builder(self.buildername)
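For reference, a hedged sketch of driving this (older) Sphinx application API directly, with placeholder paths:

# Hypothetical usage of the application object defined above.
app = Sphinx(srcdir='docs', confdir='docs',
             outdir='docs/_build/html', doctreedir='docs/_build/doctrees',
             buildername='html')
app.build()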
Esempio n. 48
0
    def run(self):
        if not self.opts.actions:
            self.error("No action defined")

        dirs=[]
        if self.opts.openfoam and "WM_PROJECT_DIR" in environ:
            dirs.append(environ["WM_PROJECT_DIR"])
        dirs+=self.parser.getArgs()
        if self.opts.currentDir:
            dirs.append(path.curdir)

        fullDirs=[]
        for d in dirs:
            if path.isdir(d):
                fullDirs.append(path.abspath(d))

        info=dict(list(zip(fullDirs,[{} for i in range(len(fullDirs))])))

        for d in fullDirs:
            info[d]["writable"]=os.access(d,os.W_OK)

            info[d]["isFoam"]=(d==fullDirs[0] and self.opts.openfoam)

            info[d]["vcs"]=whichVCS(d)

            if path.exists(path.join(d,"Allwmake")):
                info[d]["make"]="Allwmake"
            elif path.exists(path.join(d,"Makefile")):
                info[d]["make"]="make"
            else:
                info[d]["make"]="wmake"

            if info[d]["vcs"]=="":
                info[d]["branch"]="unknown"
            else:
                vcs=getVCS(info[d]["vcs"],
                           d,
                           tolerant=True)
                if vcs:
                    try:
                        info[d]["branch"]=vcs.branchName()
                    except FatalErrorPyFoamException:
                        info[d]["branch"]="notImplemented"

                    try:
                        info[d]["revision"]=vcs.getRevision()
                    except FatalErrorPyFoamException:
                        info[d]["revision"]="notImplemented"
                else:
                    info[d]["branch"]="noVCS"
                    info[d]["revision"]="noVCS"

        for action in self.opts.actions:
            if action=="info":
                print_("Project directories:\n")
                for i,d in enumerate(fullDirs):
                    print_("%2d.  %s" % (i+1,d))
                    print_("    ",info[d])
                    print_()

                self.setData({'order' : fullDirs,
                              'info'  : info})
            elif action=="name":
                name=""
                if self.opts.openfoam:
                    name+="%s-%s_%s_%s_%s" % (environ["WM_PROJECT"],
                                              environ["WM_PROJECT_VERSION"],
                                              environ["WM_ARCH"],
                                              environ["WM_OPTIONS"],
                                              environ["WM_MPLIB"])
                else:
                    name+="%s_%s" % (uname()[0],
                                     uname()[-1])
                name += "_branch-%s" % info[fullDirs[-1]]["branch"]

                print_(name)
                self.setData({'name'   : name,
                              'info'   : info,
                              'order'  : fullDirs})
            elif action=="update":
                success=True
                for d in fullDirs:
                    if info[d]["writable"]:
                        print_("Attempting to update",d)
                        print_()
                        vcs=getVCS(info[d]["vcs"],
                                   d,
                                   tolerant=True)
                        if vcs:
                            try:
                                if not vcs.update(timeout=self.opts.timeout):
                                    success=False
                            except FatalErrorPyFoamException:
                                e = sys.exc_info()[1] # Needed because python 2.5 does not support 'as e'
                                print_("Problem:",e)
                                success=False
                        else:
                            print_("Not under version control ... skipping")
                            success=False
                    else:
                        print_(d,"not writable .... skipping update")

                    print_()
                if not success:
                    self.error("Problem during updating")
            elif action=="build":
                success=True
                oldDir=os.getcwd()

                for d in fullDirs:
                    if info[d]["writable"]:
                        print_("Attempting to build",d)
                        print_()
                        makeCommand={"make"    :["make"],
                                     "wmake"   :["wmake"],
                                     "Allwmake":["./Allwmake"]}[info[d]["make"]]

                        print_("Changing to",d,"and executing"," ".join(makeCommand))
                        print_()
                        os.chdir(d)
                        erg=subprocess.call(makeCommand)
                        if erg:
                            print_()
                            print_("Result of build command:",erg)
                            success=False
                    else:
                        print_(d,"not writable .... skipping build")

                    print_()

                os.chdir(oldDir)

                if not success:
                    self.error("Problem during building")

            else:
                self.error("Unimplemented action",action)
Esempio n. 49
0
def main(model_config, train_config, track_config):
    os.environ['CUDA_VISIBLE_DEVICES'] = auto_select_gpu()

    # Create training directory which will be used to save: configurations, model files, TensorBoard logs
    train_dir = train_config['train_dir']
    if not osp.isdir(train_dir):
        logging.info('Creating training directory: %s', train_dir)
        mkdir_p(train_dir)

    g = tf.Graph()
    with g.as_default():
        # Set fixed seed for reproducible experiments
        random.seed(train_config['seed'])
        np.random.seed(train_config['seed'])
        tf.set_random_seed(train_config['seed'])

        # Build the training and validation model
        model = siamese_model.SiameseModel(model_config,
                                           train_config,
                                           mode='train')
        model.build()
        model_va = siamese_model.SiameseModel(model_config,
                                              train_config,
                                              mode='validation')
        model_va.build(reuse=True)

        # Save configurations for future reference
        save_cfgs(train_dir, model_config, train_config, track_config)

        learning_rate = _configure_learning_rate(train_config,
                                                 model.global_step)
        optimizer = _configure_optimizer(train_config, learning_rate)
        tf.summary.scalar('learning_rate', learning_rate)

        # Set up the training ops
        opt_op = tf.contrib.layers.optimize_loss(
            loss=model.total_loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=optimizer,
            clip_gradients=train_config['clip_gradients'],
            learning_rate_decay_fn=None,
            summaries=['learning_rate'])

        with tf.control_dependencies([opt_op]):
            train_op = tf.no_op(name='train')

        saver = tf.train.Saver(
            tf.global_variables(),
            max_to_keep=train_config['max_checkpoints_to_keep'])

        summary_writer = tf.summary.FileWriter(train_dir, g)
        summary_op = tf.summary.merge_all()

        global_variables_init_op = tf.global_variables_initializer()
        local_variables_init_op = tf.local_variables_initializer()
        g.finalize()  # Finalize graph to avoid adding ops by mistake

        # Dynamically allocate GPU memory
        gpu_options = tf.GPUOptions(allow_growth=True)
        sess_config = tf.ConfigProto(gpu_options=gpu_options)

        sess = tf.Session(config=sess_config)
        model_path = tf.train.latest_checkpoint(train_config['train_dir'])

        if not model_path:
            sess.run(global_variables_init_op)
            sess.run(local_variables_init_op)
            start_step = 0

            if model_config['embed_config']['embedding_checkpoint_file']:
                model.init_fn(sess)
        else:
            logging.info('Restore from last checkpoint: {}'.format(model_path))
            sess.run(local_variables_init_op)
            saver.restore(sess, model_path)
            start_step = tf.train.global_step(sess, model.global_step.name) + 1

        # Training loop
        data_config = train_config['train_data_config']
        total_steps = int(data_config['epoch'] *
                          data_config['num_examples_per_epoch'] /
                          data_config['batch_size'])
        logging.info('Train for {} steps'.format(total_steps))
        for step in range(start_step, total_steps):
            start_time = time.time()
            _, loss, batch_loss = sess.run(
                [train_op, model.total_loss, model.batch_loss])
            duration = time.time() - start_time

            if step % 10 == 0:
                examples_per_sec = data_config['batch_size'] / float(duration)
                time_remain = data_config['batch_size'] * (
                    total_steps - step) / examples_per_sec
                m, s = divmod(time_remain, 60)
                h, m = divmod(m, 60)
                format_str = (
                    '%s: step %d, total loss = %.2f, batch loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch; %dh:%02dm:%02ds remains)')
                logging.info(format_str %
                             (datetime.now(), step, loss, batch_loss,
                              examples_per_sec, duration, h, m, s))

            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            if step % train_config['save_model_every_n_step'] == 0 or (
                    step + 1) == total_steps:
                checkpoint_path = osp.join(train_config['train_dir'],
                                           'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Esempio n. 50
0
    def setup(self):
        skills_dir = self.rt.paths.skills
        if isdir(skills_dir) and not isdir(join(skills_dir, '.git')):
            call(['mv', skills_dir, join(dirname(skills_dir), 'skills-old')])
        self.create_git_repo().try_pull()
Esempio n. 51
0
# parser.add_argument('--dataset', help='root folder of dataset', default='dta/HED-BSD')
parser.add_argument('--band_mode', help='whether to use band or normal gt', type=bool, default=True)
parser.add_argument('--save_mid_result', help='whether to save mid result', type=bool, default=False)
args = parser.parse_args()

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

""""""""""""""""""""""""""""""
"          路径               "
""""""""""""""""""""""""""""""
model_save_dir = abspath(dirname(__file__))
model_save_dir = join(model_save_dir, args.model_save_dir)

if not isdir(model_save_dir):
    os.makedirs(model_save_dir)

""""""""""""""""""""""""""""""
"    ↓↓↓↓需要修改的参数↓↓↓↓     "
""""""""""""""""""""""""""""""

# used with tensorboard

writer = SummaryWriter(
    '../runs/' + name)
email_header = 'Python'
output_name_file_name = name + '_checkpoint%d-two_stage-%f-f1%f-precision%f-acc%f-recall%f.pth'
""""""""""""""""""""""""""""""
"    ↑↑↑↑需要修改的参数↑↑↑↑     "
""""""""""""""""""""""""""""""
Esempio n. 52
0
=============================================================================
=============================================================================
[ERROR]
[ERROR] Failed to import the python module sklearn (scikit-learn)
[ERROR] Some analyses (kernelPCA plots, adjusted_mutual_information) will fail
[ERROR] Take a look at http://scikit-learn.org/stable/install.html
[ERROR]
=============================================================================
=============================================================================
"""
    #exit() ## not exiting since most stuff will probably still work...

## set up the external/ directory
print 'Making the ./external/ directory'
external_dir = 'external/'
if not isdir(external_dir):
    mkdir(external_dir)

chdir(external_dir)

## download blast
blastdir = './blast-2.2.16'

if not isdir(blastdir):
    if mac_osx:
        address = 'ftp://ftp.ncbi.nlm.nih.gov/blast/executables/legacy.NOTSUPPORTED/2.2.16/blast-2.2.16-universal-macosx.tar.gz'
    else:
        address = 'ftp://ftp.ncbi.nlm.nih.gov/blast/executables/legacy.NOTSUPPORTED/2.2.16/blast-2.2.16-x64-linux.tar.gz'

    tarfile = address.split('/')[-1]
    if not exists(tarfile):
Esempio n. 53
0
    def save_annotator(self, data=True):
        """ Save annotator to annotation directory. """
        if not isdir(self.annotator_path):
            mkdir(self.annotator_path)
        self.annotator.save(self.annotator_path, data=data)
Esempio n. 54
0
kinds of creative coding, interactive objects, spaces or physical experiences.

http://arduino.cc/en/Reference/HomePage
"""

# Extends: https://github.com/platformio/platform-espressif32/blob/develop/builder/main.py

from os.path import isdir, join

from SCons.Script import DefaultEnvironment

env = DefaultEnvironment()
platform = env.PioPlatform()

FRAMEWORK_DIR = platform.get_package_dir("framework-arduinoespressif32")
assert isdir(FRAMEWORK_DIR)

env.Prepend(
    CPPDEFINES=[
        ("ARDUINO", 10805),
        "ARDUINO_ARCH_ESP32",
        ("ARDUINO_BOARD", '\\"%s\\"' % env.BoardConfig().get("name").replace('"', ""))
    ],

    CFLAGS=["-Wno-old-style-declaration"],

    CCFLAGS=[
        "-Wno-error=deprecated-declarations",
        "-Wno-error=unused-function",
        "-Wno-unused-parameter",
        "-Wno-sign-compare",
Esempio n. 55
0
def detect_traffic_lights(PATH_TO_TEST_IMAGES_DIR, MODEL_NAME, Num_images, plot_flag=False):
    """
    Detect traffic lights and draw bounding boxes around the traffic lights
    :param PATH_TO_TEST_IMAGES_DIR: testing image directory
    :param MODEL_NAME: name of the model used in the task
    :return: commands: True: go, False: stop
    """

    #--------test images------
    TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'img_{}.jpg'.format(i)) for i in range(1, Num_images+1) ]


    commands = []

    # What model to download
    MODEL_FILE = MODEL_NAME + '.tar.gz'
    DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'

    # Path to frozen detection graph. This is the actual model that is used for the object detection.
    PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'

    # List of the strings that is used to add correct label for each box.
    PATH_TO_LABELS = 'mscoco_label_map.pbtxt'

    # number of classes for COCO dataset
    NUM_CLASSES = 90


    #--------Download model----------
    if not path.isdir(MODEL_NAME):
        opener = urllib.request.URLopener()
        opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
        tar_file = tarfile.open(MODEL_FILE)
        for file in tar_file.getmembers():
          file_name = os.path.basename(file.name)
          if 'frozen_inference_graph.pb' in file_name:
            tar_file.extract(file, os.getcwd())

    #--------Load a (frozen) Tensorflow model into memory
    detection_graph = tf.Graph()
    with detection_graph.as_default():
      od_graph_def = tf.GraphDef()
      with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')


    #----------Loading label map
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(label_map,
                                                                max_num_classes=NUM_CLASSES,
                                                                use_display_name=True)
    category_index = label_map_util.create_category_index(categories)


    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Definite input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represent how level of confidence for each of the objects.
            # Score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')

            for image_path in TEST_IMAGE_PATHS:
                image = Image.open(image_path)

                # the array based representation of the image will be used later in order to prepare the
                # result image with boxes and labels on it.
                image_np = load_image_into_numpy_array(image)
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                # Actual detection.
                (boxes, scores, classes, num) = sess.run(
                  [detection_boxes, detection_scores, detection_classes, num_detections],
                  feed_dict={image_tensor: image_np_expanded})


                red_flag = read_traffic_lights(image, np.squeeze(boxes), np.squeeze(scores), np.squeeze(classes).astype(np.int32))
                if red_flag:
                    plot_origin_image(image_np, boxes, classes, scores, category_index)
                    print('{}: stop'.format(image_path))  # red or yellow
                    commands.append(False)
                else:
                    plot_origin_image(image_np, boxes, classes, scores, category_index)
                    print('{}: go'.format(image_path))
                    commands.append(True)

                # Visualization of the results of a detection.
                if plot_flag:
                    plot_origin_image(image_np, boxes, classes, scores, category_index)

    return commands
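A hypothetical call (the image directory is a placeholder and the model name is assumed to be one of the TensorFlow detection model zoo archives):

# Hypothetical usage: classify five test images as go/stop and plot the boxes.
commands = detect_traffic_lights('test_images',
                                 'ssd_mobilenet_v1_coco_11_06_2017',
                                 Num_images=5, plot_flag=True)
print(commands)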
Esempio n. 56
0
def download_combined_database(out_dir, overwrite=False):
    """
    Download coordinates/annotations from brainspell and images/annotations
    from Neurovault.

    Currently, the largest barrier is the lack of links between experiments
    (tables) in brainspell/NeuroSynth and those in NeuroVault. The closest we
    have is overall papers, via DOIs.

    Additional problems:
    -   Does NeuroVault have standard error maps?
        -   If so, I doubt there's any way to associate a given SE map and beta
            map within a collection.
    -   How should space be handled?
        -   Should everything be resliced and transformed to the same space at
            this stage or later on?
        -   How can we link a target template (for images) to a target space
            (for coordinates)?
        -   Should we even allow custom targets? Maybe we just limit it to 2mm
            and 1mm MNI templates.

    Parameters
    ----------
    out_dir : :obj:`str`
        Folder in which to write out Dataset object and subfolders containing
        images.
    overwrite: :obj:`bool`, optional
        Whether to overwrite existing database, if one exists in `out_dir`.
        Defaults to False.
    """
    # Download collections metadata from Neurovault
    collections_file = join(out_dir, 'neurovault_collections.csv')
    if overwrite or not isfile(collections_file):
        colls_df = api.get_collections()
        colls_df.to_csv(collections_file, index=False, encoding='utf-8')
    else:
        colls_df = pd.read_csv(collections_file, encoding='utf-8')

    # Only include collections from published papers (or preprints)
    papers_file = join(out_dir, 'neurovault_papers.csv')
    if overwrite or not isfile(papers_file):
        paper_df = colls_df.dropna(subset=['DOI'])
        paper_df.to_csv(papers_file, index=False, encoding='utf-8')
    else:
        paper_df = pd.read_csv(papers_file, encoding='utf-8')

    # Get metadata for individual images from valid collections
    papers_metadata_file = join(out_dir, 'neurovault_papers_metadata.csv')
    if overwrite or not isfile(papers_metadata_file):
        valid_collections = sorted(paper_df['collection_id'].tolist())

        # Sleep between get_images calls to avoid spamming Neurovault
        image_dfs = []
        for chunk in to_chunks(valid_collections, 500):
            image_dfs.append(api.get_images(collection_pks=chunk))
            time.sleep(10)

        image_df = pd.concat(image_dfs)
        image_df.to_csv(papers_metadata_file, index=False, encoding='utf-8')
    else:
        image_df = pd.read_csv(papers_metadata_file, encoding='utf-8')

    # Reduce images database according to additional criteria
    # Only keep unthresholded, MNI, group level fMRI maps
    red_df = image_df.loc[image_df['modality'] == 'fMRI-BOLD']
    red_df = red_df.loc[red_df['image_type'] == 'statistic_map']
    red_df = red_df.loc[red_df['analysis_level'] == 'group']
    # `is` compares object identity and silently breaks these filters;
    # use element-wise comparisons instead.
    red_df = red_df.loc[red_df['is_thresholded'] == False]  # noqa: E712
    red_df = red_df.loc[red_df['not_mni'] == False]  # noqa: E712

    # Look for relevant metadata
    red_df = red_df.dropna(subset=['cognitive_paradigm_cogatlas'])

    ## MFX/FFX GLMs need contrast (beta) + standard error
    mffx_df = red_df.loc[red_df['map_type'] == 'univariate-beta map']

    ## RFX GLMs need contrast (beta)
    rfx_df = red_df.loc[red_df['map_type'] == 'univariate-beta map']

    ## Stouffer's, Stouffer's RFX, and Fisher's IBMAs can use Z maps.
    # T and F maps can be transformed into Z maps, but T maps need sample size.
    # Only keep test statistic maps
    acc_map_types = ['Z map', 'T map', 'F map']
    st_df = red_df.loc[red_df['map_type'].isin(acc_map_types)]
    keep_idx = st_df['map_type'].isin(['Z map', 'F map'])
    keep_idx2 = (st_df['map_type'] == 'T map') & ~pd.isnull(st_df['number_of_subjects'])
    keep_idx = keep_idx | keep_idx2
    st_df = st_df.loc[keep_idx]

    ## Weighted Stouffer's IBMAs need Z + sample size.
    st_df['id_str'] = st_df['image_id'].astype(str).str.zfill(6)

    if not isdir(out_dir):
        mkdir(out_dir)
        api.download_images(out_dir, red_df, target=None, resample=False)
    elif overwrite:
        # clear out out_dir
        raise Exception('Currently not prepared to overwrite database.')
        api.download_images(out_dir, red_df, target=None, resample=False)
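A hypothetical call with a placeholder output directory:

# Hypothetical usage: build the combined coordinate/image database locally.
download_combined_database('combined_db', overwrite=False)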
Esempio n. 57
0
def main():
    parser = parse_input()
    # Parse inputs
    args = parser.parse_args()
    # Test inputs
    # Logger
    level = args.level * 10
    if not 0 <= level <= 50:
        level = 50
    if args.log:
        basicConfig(filename=path.abspath(args.log.name),
                    level=level,
                    format='%(asctime)s %(levelname)s:%(message)s')
    else:
        basicConfig(level=level,
                    format='%(asctime)s %(levelname)s:%(message)s')
    p = path.abspath(args.dir)
    if path.isdir(p):
        g_path = p
    else:
        warning("Invalid folder:", p)
        parser.print_help()
        return
    # Set hashes
    users_set_hashes = []
    for x in args.hash.replace(", ",
                               "\n").replace(",",
                                             "\n").replace(" ",
                                                           "\n").split("\n"):
        if x in SUPPORTED_HASHES:
            users_set_hashes.append(x)
    if len(users_set_hashes) > 0:
        hashes_types = users_set_hashes
    else:
        warning("Hash not recognized", args.hash)
        return
    # Prepare output and communication
    manager = Manager()
    q_files = manager.Queue()
    q_output = manager.Queue()
    dir_info = manager.Queue()
    # Prepare process
    info("*" * 50)
    info("Starting with:")
    info("Output file:" + str(path.abspath(args.output_file.name)))
    info("Path: " + str(g_path))
    info("Used hashes:" + ", ".join(hashes_types))
    info("Prepare workers")
    number_of_cpu = cpu_count()
    if 0 < args.w < number_of_cpu:
        number_of_worker = args.w
    else:
        number_of_worker = number_of_cpu
    P_WORKERS = [
        HashFoundFiles(q_files, q_output, hashes_types)
        for _ in range(number_of_worker)
    ]
    info("Num of workers: " + str(len(P_WORKERS)))
    P_WRITER = WriterOut(path.abspath(args.output_file.name), q_output,
                         hashes_types)
    P_WALKER = Walker(g_path, q_output, q_files, dir_info)
    info("*" * 50)
    info("Start workers")
    # Start process
    try:
        P_WRITER.start()
        P_WRITER.join(0.1)
        P_WALKER.start()
        [x.start() for x in P_WORKERS]
        [x.join(0.1) for x in P_WORKERS]
    except BaseException as e3:
        print("Start Workers error:")
        print(e3)
        print(exc_info())
        print(print_exc())
    # Start walking
    try:
        while P_WALKER.is_alive():
            sleep(0.1)
    except KeyboardInterrupt:
        warning("Keyboard interrupt")
        warning("Start killing")
        P_WALKER.kill()
        [x.kill() for x in P_WORKERS]
        while P_WALKER.is_alive():
            sleep(0.1)
        while q_files.qsize() > 0:
            sleep(0.1)
    except Exception as e2:
        warning(str(e2))
        warning(print_stack())
        return
    info("Walker finished")
    P_WALKER.join()
    # Finish walking and send end signals
    sleep(1)
    info("Send end signal to workers")
    [q_files.put(False) for _ in P_WORKERS]
    # Wait for workers

    while True:
        if not any([x.is_alive() for x in P_WORKERS]):
            break
        sleep(1)

    [x.join() for x in P_WORKERS]
    info("Workers ended")
    # Finish rest
    q_output.put(False)
    info("Finish writings")
    P_WRITER.join()
    info("End")
Esempio n. 58
0
    if 'norm' in first_path:
        for img in os.listdir(first_path):
            img_path.append(osp.join(first_path, img))
            label.append('norm')
    else:
        for second_path in os.listdir(first_path):
            defect_label = second_path
            second_path = osp.join(first_path, second_path)
            if defect_label != 'defect11':
                for img in os.listdir(second_path):
                    img_path.append(osp.join(second_path, img))
                    label.append(defect_label)
            else:
                for third_path in os.listdir(second_path):
                    third_path = osp.join(second_path, third_path)
                    if osp.isdir(third_path):
                        for img in os.listdir(third_path):
                            if 'DS_Store' not in img:
                                img_path.append(osp.join(third_path, img))
                                label.append(defect_label)

label_file = pd.DataFrame({'img_path': img_path, 'label': label})
label_file['label'] = label_file['label'].map(label_warp)

label_file.to_csv('data/label.csv', index=False)

# test data
test_data_path = 'data/guangdong_round1_test_a_20180916'
all_test_img = os.listdir(test_data_path)
test_img_path = []
Esempio n. 59
0
def exec_process():
    if not isdir(consts.descargas):
        return {'ok': False, 'message': 'No existe la ruta con los archivos que se deben procesar'}, 500
    
    # List the downloaded compressed files
    archivos_comprimidos = read_file(consts.descargas, consts.extension_comprimidos, consts.nombre_comprimido_devoluciones)

    if len(archivos_comprimidos) > 0:
        for archivo_comprimido in archivos_comprimidos:
            # Extract the compressed file
            archivo_zip = ZipFile(archivo_comprimido)
            try:
                archivo_zip.extractall(consts.descargas)
                # print('Extrayendo el archivo ' + archivo_comprimido)
            except Exception as e:
                print('Error al extraer el archivo ' + archivo_comprimido)
                print('Detalle del error' + str(e))
                pass
            archivo_zip.close()
            # Once extracted, the compressed file can be deleted
            remove(archivo_comprimido)
            # print('Eliminando el archivo ' + archivo_comprimido)

    # List the files again, looking for the returns .txt files
    archivos_txt = read_file(consts.descargas, consts.extension_txt, consts.nombre_txt)

    if len(archivos_txt) > 0:
        for archivo_txt in archivos_txt:
            # print('Archivo: ' + archivo_txt)
            # Read the file
            archivo_texto = open(archivo_txt, 'r')
            contenido = archivo_texto.read()
            archivo_texto.seek(0)
            # Check whether the file has already been processed
            if search_file(archivo_txt.split(sep)[-1]):
                print('El archivo ' + archivo_txt.split(sep)[-1] + ' ya fue procesado')
            # Check whether the file is a Joselo empanadas returns file
            elif contenido.count(consts.proveedor) > 0 and contenido.count(consts.codigo_proveedor) > 0:
                # Read line by line to extract the values
                lineas = archivo_texto.readlines()
                numero_linea = 0
                linea_local = -99
                ciudad = ''
                local = ''
                fecha = ''
                numero_bandejas = 0
                for linea in lineas:
                    if linea.count(consts.etiqueta_local) == 1:
                        # The store is on the line following etiquetaLocal = 'Tda/Alm/CDI:'
                        linea_local = numero_linea + 1
                    if numero_linea == linea_local:
                        # This is the store line; the store name starts at the beginning of the line
                        local = linea[0:linea.index(consts.codigo_proveedor)].strip()
                    if linea.count(consts.pais) == 1:
                        # This line contains the city, e.g. PORTOVIEJO - ECUADOR
                        ciudad = linea[0:linea.index(consts.pais) - 3].strip()
                    if linea.count(consts.etiqueta_fecha_elaboracion) == 1:
                        # This line contains the date
                        fecha = linea[linea.index(consts.etiqueta_fecha_elaboracion) + len(consts.etiqueta_fecha_elaboracion):len(
                            linea) - 1].strip()
                    if linea.count(consts.etiqueta_total) == 1:
                        # This line contains the total value of the return
                        valor = float(linea[linea.index(consts.etiqueta_total) + len(consts.etiqueta_total):len(linea) - 1].strip())
                        # Check that the value is a multiple of one of the tray prices
                        for precio in consts.precios:
                            if round((valor % precio), consts.decimales_a_considerar) == 0 or consts.precios.count(round((valor % precio), consts.decimales_a_considerar)) > 0:
                                numero_bandejas = int(valor / precio)

                    numero_linea = numero_linea + 1
                
                if numero_bandejas == 0:
                    print('El valor del archivo: ' + archivo_txt + ' no es correcto')
                else:
                    # Insert the file's values and the name of the processed file into the database
                    insert_filename(archivo_txt.split(sep)[-1])
                    # Split the date into day, month and year
                    insert_devolucion(ciudad, local, int(fecha.split('/')[2]), mes(fecha.split('/')[1]), int(fecha.split('/')[0]), numero_bandejas)
                    print('Datos del archivo ' + archivo_txt.split(sep)[-1] + ':')
                    print(ciudad)
                    print(local)
                    print(fecha)
                    print(str(numero_bandejas) + ' bandejas\n')
            # Once the values have been extracted, the txt file can be deleted
            remove(archivo_txt)
            archivo_texto.close()
    return {'ok': True, 'message': 'Proceso finalizado correctamente', 'archivos_procesados': archivos_txt}
Esempio n. 60
0
File: utils.py Project: zclvip/node
 def IsSuite(path):
   return isdir(path) and exists(join(path, 'testcfg.py'))