def filter_text(self, text):
		text = super(GotoSelectionFileCommand, self).filter_text(text)
		# filter the beginning of the path up to the last ".."
		path, filename = os.path.split(text)
		path = path.rpartition("..")[2]
		path = path.rpartition(".")[2]
		return os.path.join(path, filename)
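A quick trace with a hypothetical input, showing what the two rpartition calls keep:

text = "a/../b/../c/file.py"
# os.path.split(text)            -> ("a/../b/../c", "file.py")
# "a/../b/../c".rpartition("..") -> ("a/../b/", "..", "/c"); slot [2] is "/c"
# "/c".rpartition(".")[2]        -> "/c" (unchanged, no "." left)
# os.path.join("/c", "file.py")  -> "/c/file.py"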
    def cancel_nodes(self,
                     nodePaths=None,
                     statuses=("unknwon", ),
                     recursive=False):
        """
        Cancel the nodes whose status matches one of the input 'statuses'.

        Inputs:
            nodePaths - A list of strings specifying the nodes to be canceled.
            statuses - An iterable of strings with the full names of the
                     statuses that are to be used as a match for finding nodes
                     to cancel.
            recursive - A boolean specifying if the cancel operation is to be
                        extended to each node's parent families.
        """

        if nodePaths is None:
            nodePaths = self.traverse_nodes(self.suiteNode.childNodes)[0]
        for nodePath in nodePaths:
            path = "/%s" % nodePath
            pathStatus = self.get_node_status(path)
            if pathStatus in statuses:
                self.cancel_node(path)
            if recursive:
                path = path.rpartition("/")[0]
                while path != "":
                    pathStatus = self.get_node_status(path)
                    if pathStatus in statuses:
                        self.cancel_node(path)
                    path = path.rpartition("/")[0]
Example #4
 def _import_symbol(self, path):
     module_name, _, symbol_name = path.rpartition(':')
     if not module_name:
         module_name, _, symbol_name = path.rpartition('.')
     if not module_name:
         module_name, symbol_name = path, ''
     mod = import_module(module_name)  # assumed to be importlib.import_module
     if symbol_name:
         try:
             return getattr(mod, symbol_name)
         except AttributeError:
             raise ImportError('cannot import name '+symbol_name)
     else:
         return mod
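A brief illustration of the lookup order (the colon form is tried first, then the last dot), using standard-library names:

# self._import_symbol('os.path:join')  -> os.path.join
# self._import_symbol('os.path.join')  -> os.path.join (falls back to '.')
# self._import_symbol('os')            -> the os module itself (no separator)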
Example #5
def publish_object(path,
                   version,
                   comment,
                   obj,
                   host='localhost',
                   port=server.DEFAULT_PORT):
    """
    Publish egg on server at `host`:`port` under `path` and `version` with
    `comment` given component `obj`.

    path: string
        Component path to be published.

    version: string
        Version to be published.

    comment: string
        Description of this version of this component.

    obj: Component
        Component to publish.

    host: string
        Host name of server to publish to.

    port: int
        Port number of server to publish to.
    """
    category, slash, name = path.rpartition('/')
    egg_info = obj.save_to_egg(name, version)
    eggfile = egg_info[0]
    try:
        publish_egg(path, version, comment, eggfile, host, port)
    finally:
        os.remove(eggfile)
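A hypothetical call, assuming `comp` is a Component instance and the egg server host is reachable; the names are placeholders:

publish_object('tools/SimpleComp', '1.0', 'initial release', comp,
               host='eggserver.example.com')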
    def revisions(self, repository, *args, **kwargs):
        path = "/".join(args)
        path.rstrip("/")
        if not path.startswith("/"):
            path = "/" + path

        path, _, script = path.rpartition('/')
        path = path + '/'

        scr = Script.visible(request,
                             repository,
                             path).filter(Script.name == script).first()
        if not scr:
            return O.error(msg="Script not found")

        revisions = sorted([r.serialize(
            skip=['id', 'script_id', 'draft', 'content', 'meta'],
            rel=[("created_at", "created_at", lambda d: d)])
            for r in scr.history
            if not r.draft], key=lambda r: r["created_at"], reverse=True)
        return O.history(
            script=scr.name,
            owner=scr.owner.username,
            revisions=revisions
        )
    def check_parallel_arrays(self, doc_list):
        # Check for parallel arrays: MongoDB cannot sort on multiple fields that are parallel arrays.
        # https://jira.mongodb.org/browse/SERVER-13122

        # I'm not sure this check is actually quite right, because it looks to me like the two arrays must also
        # be in the same object.
        for doc in doc_list:
            array_path = ''
            for sort_key, sort_dir in self.sort:
                path = str(sort_key)
                while path:
                    item = get_subitem(doc, path, sort_dir != 1)[1]
                    if isinstance(item, list):
                        if not path.startswith(
                                array_path) and not array_path.startswith(
                                    path):
                            raise MongoModelException(
                                'BadValue ' + sort_key +
                                ' cannot sort with keys that are parallel arrays'
                            )

                        array_path = max(path, array_path)
                        break

                    path = path.rpartition('.')[0]
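A minimal illustration of the restriction being checked, with hypothetical documents and sort keys:

# sorting on both 'tags.weight' and 'scores.value' would raise the error
# above, because 'tags' and 'scores' are parallel arrays in the same document
doc = {'tags': [{'weight': 1}], 'scores': [{'value': 2}]}
# sorting on 'tags.weight' alone, or on two paths where one is a prefix of
# the other, passes the check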
Example #9
 def parent(self, *names):
     path = self._url.path.rstrip("/")
     if not path: # Nothing left, so we must be at the root
         return None
     base, _, _ = path.rpartition("/")
     return URL(self._url._replace(path=base, params='', query='', fragment='').geturl())
Example #10
 def load_class(self, path):
     module, dot, clazz = path.rpartition('.')
     try:
         # 'context' is assumed to be supplied by the enclosing scope (the
         # globals/locals handed to __import__); level -1 is Python 2 only
         mod = __import__(module, context, context, [clazz], -1)
         constr = getattr(mod, clazz)
         return constr
     except Exception as e:
         raise Exception(e, "Cannot load class %s" % path)
Example #11
    def __init__(self, path):
        """Initialize class from the given handler
        subroutine dot-path"""

        self.path = path
        self.module, _, self.func = path.rpartition(".")
        self.modules = []
        self.handler = None
Example #12
def clean_download_path(path):
    """Turns an absolute path in the BOINC download dir into a relative path.

    This allows paths to be used as URL components, and doesn't expose
    unnecessary server configuration data.
    """
    _, _, relpath = path.rpartition('/download/')
    return relpath
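With a hypothetical BOINC download directory, the helper keeps only the part after '/download/':

# clean_download_path('/var/lib/boinc/projects/demo/download/ab/result_1.xml')
#   -> 'ab/result_1.xml'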
def ensureFolder(path):
    # requires `import os` and `from pathlib import Path`; `slash` is assumed
    # to be the platform path separator (e.g. os.sep)
    p = Path(path)
    p2 = Path(path.rpartition(slash)[0])
    if not p.exists():
        # create the parent folder first if it is missing, then the folder itself
        if not p2.exists():
            ensureFolder(str(p2.absolute()))
        os.mkdir(str(p.absolute()))
    def is_spike_archive_compatible(self, path):
        """Check if the archive at the given path, if present, is suitable
        for providing the simulation data necessary to perform the
        analysis specified by this data point. Simulation duration and
        number of stimulus patterns need to be greater in the archive
        than in the analysis settings, while the number of trials that
        can be extracted from the archive can depend (via time
        slicing) on the length of the original simulations compared to
        the length of the analysis duration and the transient time (ie
        sim_duration-ana_duration) we are asking for.

        """
        # note: rstrip('.hdf5') strips trailing characters from the set {'.', 'h', 'd', 'f', '5'}, not the literal suffix
        path_sdur = float(path.rstrip('.hdf5').partition('sdur')[2])
        path_n_trials = (float(path.rpartition('_t')[2].partition('_sdur')[0])
                         * (1 + max(0, (path_sdur - self.sim_duration)
                                    // (self.ana_duration + self.SIM_DECORRELATION_TIME))))
        path_spn = float(path.rpartition('_t')[0].rpartition('sp')[2])
        sdur_c = path_sdur >= self.sim_duration
        n_trials_c = path_n_trials >= self.n_trials
        spn_c = path_spn >= self.n_stim_patterns
        return all([sdur_c, n_trials_c, spn_c])
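A worked example with hypothetical settings, following the trial-count formula above:

# the archive stores trials of path_sdur = 5.0 while the analysis needs
# sim_duration = 2.0, ana_duration = 1.0 and SIM_DECORRELATION_TIME = 0.5,
# so each stored trial yields 1 + (5.0 - 2.0) // (1.0 + 0.5) = 3 usable trials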
Example #16
    def join(self, path, *paths):
        """Join paths with a slash."""
        self._validate_path(path)

        before, sep, last_path = path.rpartition(self.CHAIN_SEPARATOR)
        chain_prefix = before + sep
        protocol, path = fsspec.core.split_protocol(last_path)
        fs = fsspec.get_filesystem_class(protocol)
        if protocol:
            chain_prefix += protocol + self.SEPARATOR
        return chain_prefix + self._join(fs.sep, ((path, ) + paths))
Example #17
def _ensure_package_loaded(path, component):
    """Ensure that the given module is loaded as a submodule.

    Returns:
        str: The name that the module should be imported as.
    """

    logger = logging.getLogger(__name__)

    packages = component.find_products('support_package')
    if len(packages) == 0:
        return None
    elif len(packages) > 1:
        raise ExternalError(
            "Component had multiple products declared as 'support_package",
            products=packages)

    if len(path) > 2 and ':' in path[2:]:  # Don't flag windows C: type paths
        path, _, _ = path.rpartition(":")

    package_base = packages[0]
    relative_path = os.path.normpath(os.path.relpath(path, start=package_base))
    if relative_path.startswith('..'):
        raise ExternalError(
            "Component had python product output of support_package",
            package=package_base,
            product=path,
            relative_path=relative_path)

    if not relative_path.endswith('.py'):
        raise ExternalError("Python product did not end with .py", path=path)

    relative_path = relative_path[:-3]
    if os.pathsep in relative_path:
        raise ExternalError(
            "Python support wheels with multiple subpackages not yet supported",
            relative_path=relative_path)

    support_distro = component.support_distribution
    if support_distro not in sys.modules:
        logger.debug("Creating dynamic support wheel package: %s",
                     support_distro)
        spec = importlib.util.spec_from_file_location(
            os.path.basename(package_base), package_base)
        if spec is None:
            raise ExternalError("importlib cannot find module at this path",
                                path=package_base)
        module = importlib.util.module_from_spec(spec)
        sys.modules[support_distro] = module
        spec.loader.exec_module(module)

    return "{}.{}".format(support_distro, relative_path)
def writemakepath(path):
    if not os.path.exists(path):
        makepath = path.rpartition("/")
        # replaces a previously vulnerable os.system() call
        # if the path has no directory part, just create the file here
        if not makepath[0]:
            with open(path, 'w'):
                pass
        else:
            # otherwise create the folder first, then the file
            if not os.path.exists(makepath[0]):
                os.makedirs(makepath[0])
            with open(f'{makepath[0]}/{makepath[2]}', 'w'):
                pass
def local_link_to_github(link, docname, githublink):
    # get source document dir
    path = docname.rpartition('/')[0]
    # remove './'
    if link.startswith('./'):
        link = link[2:]
    # move up if necessary
    while link.startswith('../'):
        link = link.partition('/')[2]
        path = path.rpartition('/')[0]
    # combine with repo path
    if len(path):
        link = path.rstrip('/') + '/' + link.lstrip('/')
    # combine with repo link
    link = githublink.rstrip('/') + '/' + link.lstrip('/')
    return link
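A sample run with hypothetical arguments, tracing the '../' handling:

# local_link_to_github('../images/arch.png', 'docs/tutorials/intro',
#                      'https://github.com/org/repo/blob/master')
#   -> 'https://github.com/org/repo/blob/master/docs/images/arch.png'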
Example #20
def replace_absolute_symlinks(basedir, d):
    """
    Walk basedir looking for absolute symlinks and replacing them with relative ones.
    The absolute links are assumed to be relative to basedir
    (compared to make_relative_symlink above which tries to compute common ancestors
    using pattern matching instead)
    """
    for walkroot, dirs, files in os.walk(basedir):
        for file in files + dirs:
            path = os.path.join(walkroot, file)
            if not os.path.islink(path):
                continue
            link = os.readlink(path)
            if not os.path.isabs(link):
                continue
            walkdir = os.path.dirname(path.rpartition(basedir)[2])
            base = os.path.relpath(link, walkdir)
            bb.debug(2, "Replacing absolute path %s with relative path %s" % (link, base))
            os.remove(path)
            os.symlink(base, path)
 def guess(mcs, path):
     """
     :param str path:
     :return ExternalExecutor:
     """
     for env, config in mcs._config.items():
         if 'suffix' in config:
             if path.endswith(config['suffix']):
                 break
         if 'filename_pattern' in config:
             _, _, filename = path.rpartition('/')
             if re.match(config['filename_pattern'], filename) is not None:
                 break
     else:
         raise ValueError(
             natural_format(
                 'cannot identify the way to execute {}. Supported external environment{s}: {item}',
                 path,
                 item=mcs.supported_environments()))
     return mcs.get(env)
Example #23
  def __init__(self,source_dir=None, url=None, base_url=None, base_branch=None,
               source_branch=None, target_branch=None, dryrun=None,
               inherit=None, submodule=None):
    if inherit:
      self.root = source_dir or inherit.root
      self.base_url = base_url or inherit.base_url
      self.base_branch = base_branch or inherit.base_branch
      self.source_branch = source_branch or inherit.source_branch
      self.target_branch = target_branch or inherit.target_branch
      self.dryrun = (dryrun == True) if dryrun is not None else inherit.dryrun
      if submodule:
        self.root = os.path.join(self.root, submodule)
        for origin in self.GitCommand("remote", ["-v"]):
          name, url = origin.split()[0:2]
          if name == "origin":
            self.url = url
      else:
        self.url=url or inherit.url
    else:
      self.root = source_dir
      self.url=url
      self.base_url = base_url
      self.base_branch = base_branch
      self.source_branch = source_branch
      self.target_branch = target_branch
      self.dryrun = (dryrun == True)

    assert(self.root)

    self.parent = os.path.split(self.root)[0]
    self.checkout_dir = os.path.split(self.root)[1]

    self.relative_url_prefix = []
    if self.base_url:
      prefix = ""
      path = self.url
      base = self.base_url[0:-1] if self.base_url[-1] == "/" else self.base_url
      while path.startswith(base+"/"):
        path = path.rpartition("/")[0]
        prefix = prefix + "../"
        self.relative_url_prefix.append((path,prefix))
def check_folder(pathname):
    path = pathname
    segments = path.rpartition('/')
    path = segments[2]

    # I'd like to do this better
    # but we found the start of our version number
    # so we know it's the correct folder
    idx = path.find('boost_1_')

    # If it's -1, the version folder was not found in its intended spot
    # If it's greater than 0, the index is incorrect because we should
    # have a boost_1_49_0 type structure, not <some_chars>/boost_1_49_0
    if idx != 0:
        print('Please feed the program the /boost_#_##_#/ directory')
        print('Example: ~/Downloads/boost_1_49_0/')
        return False

    global version_folder 
    version_folder = path
    return True
    def set_path(self, path, tag, node_dict):
        """
        Insert values for a path.

        :param path:
        :param tag:
        :param node_dict:
        :return:
        """
        if len(node_dict) != 1:
            raise ValueError("The node_dict argument must contains exactly "
                             "one element! {0}".format(node_dict))
        logger.debug("Set input with path '{0}' and node dict '{1}'".format(
            path, node_dict))
        _path, _, keyword = path.rpartition('/')

        value = node_dict[tag]
        if isinstance(value, dict) and keyword != tag:
            try:
                value = value[keyword]
            except KeyError:
                if keyword == '_text':
                    value = node_dict[tag]
                else:
                    raise ValueError(
                        "Keyword '{0}' not found in node_dict \"{1}\"!".format(
                            keyword, node_dict))

        if value is None:
            logger.debug("Skip element '%s': None value!" % path)
            return

        # Set the target parameter if the path is in invariant_map dictionary
        if not isinstance(value, dict) and path in self.invariant_map:
            self.set_parameter(path, value)

        # Add argument to variant transformations associated with the path
        if path in self.variant_map:
            self.add_kwarg(path, tag, node_dict)
Example #27
def file_basename (path):
    """
    @param path Filepath as a string
    @return The basename of a file without folder location and extension
    """
    return path.rpartition('/')[2].partition('.')[0]
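Two hypothetical inputs; note that partition('.') cuts at the first dot, so multi-dot names lose everything after it:

# file_basename('/data/runs/archive.tar.gz') -> 'archive'
# file_basename('notes')                     -> 'notes'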
def remove_suffix(path):
    # 'suffices' is assumed to be a module-level sequence of known suffixes
    for suffix in reversed(suffices):
        if path.endswith(suffix):
            return path.rpartition(suffix)[0]
    return path
Example #29
def file_extension (path):
    """
    @param path Filepath as a string
    @return The extension of a file in lowercase
    """
    return path.rpartition(".")[2].lower()
Example #30
	def _filter_text_for_st(self, text):
		# filter the beginning of the path up to the last ".."
		path, filename = os.path.split(text)
		path = path.rpartition("..")[2]
		path = path.rpartition(".")[2]
		return os.path.join(path, filename)
Example #31
def changeNameExtensionsToDotKryptos(path):
    return path.rpartition('.')[0] + ".txt"
Example #32
def file_name (path):
    """
    @param path Filepath as a string
    @return The complete name of a file with the extension but without folder location
    """
    return path.rpartition("/")[2]
def get_frame(path):
	return int(path.rpartition('.')[0].rpartition('\\')[2])
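This assumes Windows-style separators and a purely numeric frame stem, e.g.:

# get_frame('C:\\renders\\shot01\\0042.png') -> 42
# a stem like 'frame_0042' would raise ValueError in int()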
Example #34
def _try_load_module(path, import_name=None):
    """Try to programmatically load a python module by path.

    Path should point to a python file (optionally without the .py) at the
    end.  If it ends in a :<name> then name must point to an object defined in
    the module, which is returned instead of the module itself.

    Args:
        path (str): The path of the module to load
        import_name (str): The explicit name that the module should be given.
            If not specified, this defaults to being the basename() of
            path.  However, if the module is inside of a support package,
            you should pass the correct name so that relative imports
            proceed correctly.

    Returns:
        str, object: The basename of the module loaded and the requested object.
    """

    logger = logging.getLogger(__name__)

    obj_name = None
    if len(path) > 2 and ':' in path[2:]:  # Don't flag windows C: type paths
        path, _, obj_name = path.rpartition(":")

    folder, basename = os.path.split(path)
    if folder == '':
        folder = './'

    if basename == '' or not os.path.exists(path):
        raise ArgumentError("Could not find python module to load extension",
                            path=path)

    basename, ext = os.path.splitext(basename)
    if ext not in (".py", ".pyc", ""):
        raise ArgumentError(
            "Attempted to load module is not a python package or module (.py or .pyc)",
            path=path)

    if import_name is None:
        import_name = basename
    else:
        logger.debug("Importing module as subpackage: %s", import_name)

    # Don't load modules twice
    if basename in sys.modules:
        mod = sys.modules[basename]
    else:
        spec = importlib.util.spec_from_file_location(basename, path)
        if spec is None:
            raise ArgumentError(
                "importlib failed to find module at this location", path=path)
        mod = importlib.util.module_from_spec(spec)
        sys.modules[basename] = mod
        spec.loader.exec_module(mod)

    if obj_name is not None:
        if obj_name not in mod.__dict__:
            raise ArgumentError(
                "Cannot find named object '%s' inside module '%s'" %
                (obj_name, basename),
                path=path)

        mod = mod.__dict__[obj_name]

    return basename, mod
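Hedged usage sketches with a hypothetical extension file, showing the ':' object suffix handled at the top of the function:

# name, mod = _try_load_module('plugins/my_ext.py')              # whole module
# name, obj = _try_load_module('plugins/my_ext.py:MyExtension')  # one object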
Example #35
def _path_parts(path):
    directory, _, filename = path.rpartition('/')
    basename, dot, extension = filename.partition('.')
    return directory, basename, dot + extension
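Two hypothetical inputs; the first dot in the filename starts the extension:

# _path_parts('assets/img/logo.min.svg') -> ('assets/img', 'logo', '.min.svg')
# _path_parts('README')                  -> ('', 'README', '')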
Example #36
	def run(self, information):
		path = information['filepath']

		filecodec = self.get_audio_codec(path)
		if filecodec is None:
			self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
			return None

		more_opts = []
		if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
			if self._preferredcodec == 'm4a' and filecodec == 'aac':
				# Lossless, but in another container
				acodec = 'copy'
				extension = self._preferredcodec
				more_opts = ['-absf', 'aac_adtstoasc']
			elif filecodec in ['aac', 'mp3', 'vorbis']:
				# Lossless if possible
				acodec = 'copy'
				extension = filecodec
				if filecodec == 'aac':
					more_opts = ['-f', 'adts']
				if filecodec == 'vorbis':
					extension = 'ogg'
			else:
				# MP3 otherwise.
				acodec = 'libmp3lame'
				extension = 'mp3'
				more_opts = []
				if self._preferredquality is not None:
					more_opts += ['-ab', self._preferredquality]
		else:
			# We convert the audio (lossy)
			acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
			extension = self._preferredcodec
			more_opts = []
			if self._preferredquality is not None:
				more_opts += ['-ab', self._preferredquality]
			if self._preferredcodec == 'aac':
				more_opts += ['-f', 'adts']
			if self._preferredcodec == 'm4a':
				more_opts += ['-absf', 'aac_adtstoasc']
			if self._preferredcodec == 'vorbis':
				extension = 'ogg'
			if self._preferredcodec == 'wav':
				extension = 'wav'
				more_opts += ['-f', 'wav']

		prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
		new_path = prefix + sep + extension
		self._downloader.to_screen(u'[ffmpeg] Destination: ' + new_path)
		try:
			self.run_ffmpeg(path, new_path, acodec, more_opts)
		except:
			etype,e,tb = sys.exc_info()
			if isinstance(e, AudioConversionError):
				self._downloader.to_stderr(u'ERROR: audio conversion failed: ' + e.message)
			else:
				self._downloader.to_stderr(u'ERROR: error running ffmpeg')
			return None

		# Try to update the modification time of the extracted audio file.
		if information.get('filetime') is not None:
			try:
				os.utime(_encodeFilename(new_path), (time.time(), information['filetime']))
			except:
				self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')

		if not self._keepvideo:
			try:
				os.remove(_encodeFilename(path))
			except (IOError, OSError):
				self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
				return None

		information['filepath'] = new_path
		return information
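A minimal sketch of the rpartition-based extension swap used near the end of the function above (the inline comment there explains why os.path.splitext is avoided), with a hypothetical filename:

path = u'My Song.webm'
prefix, sep, ext = path.rpartition(u'.')
new_path = prefix + sep + 'mp3'   # -> u'My Song.mp3'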
Example #37
def dir_name (path):
    """
    @param path Filepath as a string
    @return The name of the folder that directly contains the file (not the full path)
    """
    return path.rpartition("/")[0].rpartition("/")[2]
Example #38
 def basepath(self, path):
     dir, _, _ = path.rpartition('/')
     return self.urlprefix + dir + '/'
Example #39
def file_extension (path):
    """
    Return the extension of a file.
    @param path Filepath as a string
    """
    return path.rpartition(".")[2]