Example #1
    def _relpath(path, start=None):
        """Return a relative version of a path.

        Implementation by James Gardner in his BareNecessities
        package, under MIT licence.

        With a fix for Windows where posixpath.sep (and functions like
        join) use the Unix slash not the Windows slash.
        """
        import posixpath
        if start is None:
            start = posixpath.curdir
        else:
            start = start.replace(os.path.sep, posixpath.sep)
        if not path:
            raise ValueError("no path specified")
        else:
            path = path.replace(os.path.sep, posixpath.sep)
        start_list = posixpath.abspath(start).split(posixpath.sep)
        path_list = posixpath.abspath(path).split(posixpath.sep)
        # Work out how much of the filepath is shared by start and path.
        i = len(posixpath.commonprefix([start_list, path_list]))
        rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
        if not rel_list:
            return posixpath.curdir.replace(posixpath.sep, os.path.sep)
        return posixpath.join(*rel_list).replace(posixpath.sep, os.path.sep)
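A minimal sketch (with made-up paths, assuming the _relpath helper above is in scope) of what it does: arguments may arrive using the platform separator, the relative-path arithmetic runs through posixpath, and the result is translated back to os.path.sep.

import os
import posixpath

# Hypothetical call to the _relpath helper above; both paths use os.path.sep.
start = "/projects/app".replace(posixpath.sep, os.path.sep)
target = "/projects/app/static/css".replace(posixpath.sep, os.path.sep)
print(_relpath(target, start))   # 'static/css' on POSIX, 'static\\css' on Windows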
Example #2
def should_skip(filename, config, path=''):
    """Returns True if the file and/or folder should be skipped based on the passed in settings."""
    os_path = os.path.join(path, filename)

    normalized_path = os_path.replace('\\', '/')
    if normalized_path[1:2] == ':':
        normalized_path = normalized_path[2:]

    if path and config['safety_excludes']:
        check_exclude = '/' + filename.replace('\\', '/') + '/'
        if path and os.path.basename(path) in ('lib', ):
            check_exclude = '/' + os.path.basename(path) + check_exclude
        if safety_exclude_re.search(check_exclude):
            return True

    for skip_path in config['skip']:
        if posixpath.abspath(normalized_path) == posixpath.abspath(skip_path.replace('\\', '/')):
            return True

    position = os.path.split(filename)
    while position[1]:
        if position[1] in config['skip']:
            return True
        position = os.path.split(position[0])

    for glob in config['skip_glob']:
        if fnmatch.fnmatch(filename, glob) or fnmatch.fnmatch('/' + filename, glob):
            return True

    if not (os.path.isfile(os_path) or os.path.isdir(os_path) or os.path.islink(os_path)):
        return True

    return False
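A hypothetical usage sketch for the should_skip above; the config dict carries only the keys the function actually reads ('safety_excludes', 'skip', 'skip_glob'), which may differ from isort's real settings object.

config = {
    'safety_excludes': False,   # keep the safety-exclude regex out of the picture
    'skip': ['build', '.tox'],
    'skip_glob': ['*_pb2.py'],
}
print(should_skip('build', config, path='/repo'))          # True: 'build' is listed in 'skip'
print(should_skip('models_pb2.py', config, path='/repo'))  # True: matches a skip glob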
Example #3
    def test_abspath(self):
        self.assert_("foo" in posixpath.abspath("foo"))

        # Issue 3426: check that abspath returns unicode when the arg is unicode
        # and str when it's str, with both ASCII and non-ASCII cwds
        saved_cwd = os.getcwd()
        cwds = ['cwd']
        try:
            cwds.append(u'\xe7w\xf0'.encode(sys.getfilesystemencoding()
                                            or 'ascii'))
        except UnicodeEncodeError:
            pass # the cwd can't be encoded -- test with ascii cwd only
        for cwd in cwds:
            try:
                os.mkdir(cwd)
                os.chdir(cwd)
                for path in ('', 'foo', 'f\xf2\xf2', '/foo', 'C:\\'):
                    self.assertTrue(isinstance(posixpath.abspath(path), str))
                for upath in (u'', u'fuu', u'f\xf9\xf9', u'/fuu', u'U:\\'):
                    self.assertTrue(isinstance(posixpath.abspath(upath), unicode))
            finally:
                os.chdir(saved_cwd)
                os.rmdir(cwd)

        self.assertRaises(TypeError, posixpath.abspath)
Example #4
File: parser.py Project: aioupload/dexy
    def calculate_environment_for_directory(self, path):
        dir_path = posixpath.dirname(posixpath.abspath(path))
        env = {}

        for d, args in self.environment_for_directory:
            if posixpath.abspath(d) in dir_path:
                env.update(args)

        return env
Example #5
File: parser.py Project: aioupload/dexy
    def calculate_default_args_for_directory(self, path):
        dir_path = posixpath.dirname(posixpath.abspath(path))
        default_kwargs = {}

        for d, args in self.default_args_for_directory:
            if posixpath.abspath(d) in dir_path:
                default_kwargs.update(args)

        return default_kwargs
Example #6
def get_view_dir(args_view):
    caller_dir = posixpath.abspath('.')
    view_dir = posixpath.abspath(args_view)
    os.chdir(view_dir)
    view_dir = posixpath.abspath('')
    while not dir_has_rspec(view_dir):
        os.chdir('..')
        if view_dir == posixpath.abspath(''):
            logging.userErrorExit('ctx could not find an rspec in the supplied argument or any subdirectory')
        view_dir = posixpath.abspath('')
    return view_dir
Example #7
 def relpath(path, start=curdir):
     """Return a relative version of a path"""
     if not path:
         raise ValueError("no path specified")
     
     start_list = abspath(start).split(sep)
     path_list = abspath(path).split(sep)
     # Work out how much of the filepath is shared by start and path.
     i = len(commonprefix([start_list, path_list]))
     rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
     return curdir if not rel_list else join(*rel_list)
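For reference, a few concrete values one would expect from a backport like the one above; the stdlib posixpath.relpath gives the same answers for these made-up paths.

import posixpath

print(posixpath.relpath("/srv/app/static/css", "/srv/app"))  # 'static/css'
print(posixpath.relpath("/srv/app", "/srv/app/static/css"))  # '../..'
print(posixpath.relpath("/srv/app", "/srv/app"))             # '.'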
Example #8
    def posix_relpath(path, start):
        sep = posixpath.sep
        start_list = [x for x in posixpath.abspath(start).split(sep) if x]
        path_list = [x for x in posixpath.abspath(path).split(sep) if x]

        # Work out how much of the filepath is shared by start and path.
        i = len(posixpath.commonprefix([start_list, path_list]))

        rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
        if not rel_list:
            return posixpath.curdir
        return posixpath.join(*rel_list)
Example #9
 def relpath(path, start=posixpath.curdir):   # NOQA
     """Return a relative version of a path"""
     if not path:
         raise ValueError("no path specified")
     start_list = posixpath.abspath(start).split(posixpath.sep)
     path_list = posixpath.abspath(path).split(posixpath.sep)
     # Work out how much of the filepath is shared by start and path.
     i = len(posixpath.commonprefix([start_list, path_list]))
     rel_list = [posixpath.pardir] * (len(start_list) - i) + path_list[i:]
     if not rel_list:
         return posixpath.curdir
     return posixpath.join(*rel_list)
Example #10
File: util.py Project: floe/pubMunch
def relpath(path, start=curdir):
    """Return a relative version of a path, backport to python2.4 from 2.6"""
    """http://www.saltycrane.com/blog/2010/03/ospathrelpath-source-code-python-25/ """
    if not path:
        raise ValueError("no path specified")
    start_list = posixpath.abspath(start).split(sep)
    path_list = posixpath.abspath(path).split(sep)
    # Work out how much of the filepath is shared by start and path.
    i = len(posixpath.commonprefix([start_list, path_list]))
    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
Example #11
File: buck.py Project: azatoth/buck
def relpath(path, start=posixpath.curdir):
  """
  Return a relative filepath to path from the current directory or an optional start point.
  """
  if not path:
    raise ValueError("no path specified")
  start_list = posixpath.abspath(start).split(posixpath.sep)
  path_list = posixpath.abspath(path).split(posixpath.sep)
  # Work out how much of the filepath is shared by start and path.
  common = len(posixpath.commonprefix([start_list, path_list]))
  rel_list = [posixpath.pardir] * (len(start_list) - common) + path_list[common:]
  if not rel_list:
    return posixpath.curdir
  return posixpath.join(*rel_list)
Example #12
 def relpath_unix(self, path, start="."):
     # Relpath implementation directly ripped and modified from Python 2.6 source.
     sep="/"
     
     if not path:
         raise ValueError("no path specified")
     
     start_list = posixpath.abspath(start).split(sep)
     path_list = posixpath.abspath(path).split(sep)
     # Work out how much of the filepath is shared by start and path.
     i = len(self.commonprefix([start_list, path_list]))
     rel_list = [".."] * (len(start_list)-i) + path_list[i:]
     if not rel_list:
         return "."
     return posixpath.join(*rel_list)
Example #13
    def _apply_regex_rule(self, rule_name, rule, cat_name, cat_path, settings):
        accepted_flags = {
            'a': re.ASCII,
            'i': re.IGNORECASE,
            'l': re.LOCALE,
            'x': re.VERBOSE
        }
        flags = sum([accepted_flags[f] for f in rule.get('flags', [])])

        pattern = None
        try:
            pattern = re.compile(rule['pattern'], flags)
        except KeyError:
            raise InvalidRegexFilter(cat_name, rule_name)

        actions = []

        rename = rule.get('rename', None)
        for root, dirs, files in os.walk(self._path):
            if posixpath.abspath(root) == self._repo_path:
                continue

            for file_name in files:
                file_name = posixpath.relpath(posixpath.join(root, file_name), self._path)

                match = pattern.match(file_name)
                if match:
                    new_name = file_name
                    if rename:
                        new_name = rename.format(**match.groupdict())

                    new_name = posixpath.join(cat_path, new_name)
                    actions.append(('mv', posixpath.join(self._path, file_name), new_name))

        return actions
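A small sketch of the flag handling used above: re flag constants are integers, so summing the selected ones produces a combined value that re.compile accepts. The pattern and file name here are made up.

import re

accepted_flags = {'i': re.IGNORECASE, 'x': re.VERBOSE}
flags = sum(accepted_flags[f] for f in ['i'])

pattern = re.compile(r'^(?P<stem>.+)\.log$', flags)
match = pattern.match('Server.LOG')
print(match.groupdict())   # {'stem': 'Server'}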
Example #14
def update_config_file(dryrun=False):
    for site, site_config in config.sites.items():
        redis_processes = [(x, site_config.processes[x]) for x in site_config.processes if site_config.processes[x]["type"] == "redis"]
        template_path = site_config['redis']['template']
        print redis_processes
        for process_name, process in redis_processes:
            working_directoy = posixpath.normpath(posixpath.join(env.path, '..', 'data', 'redis', process_name))
            log_directory = posixpath.normpath(posixpath.join(site_config['deployment']['logdir'], 'log', 'redis'))
            run('mkdir -p ' + working_directoy)
            run('mkdir -p ' + log_directory)
            context_dict = site_config
            context_dict.update({
                'site': site,
                'working_directory': working_directoy,
                'log_directory': log_directory,
                'process_name': process_name,
                'socket': process['socket'],
            })
            path = posixpath.abspath(posixpath.join(site_config['deployment']['path'], '..', 'config', process_name + '.conf'))
            output = render_template(template_path, context_dict)
            if dryrun:
                print path + ":"
                print output
            else:
                put(StringIO.StringIO(output), path)
Example #15
        def test_realpath_resolve_before_normalizing(self):
            # Bug #990669: Symbolic links should be resolved before we
            # normalize the path. E.g.: if we have directories 'a', 'k' and 'y'
            # in the following hierarchy:
            # a/k/y
            #
            # and a symbolic link 'link-y' pointing to 'y' in directory 'a',
            # then realpath("link-y/..") should return 'k', not 'a'.
            try:
                old_path = abspath('.')
                os.mkdir(ABSTFN)
                os.mkdir(ABSTFN + "/k")
                os.mkdir(ABSTFN + "/k/y")
                os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y")

                # Absolute path.
                self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
                # Relative path.
                os.chdir(dirname(ABSTFN))
                self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."), ABSTFN + "/k")
            finally:
                os.chdir(old_path)
                test_support.unlink(ABSTFN + "/link-y")
                safe_rmdir(ABSTFN + "/k/y")
                safe_rmdir(ABSTFN + "/k")
                safe_rmdir(ABSTFN)
Example #16
    def test_relpath_bytes(self):
        (real_getcwdb, os.getcwdb) = (os.getcwdb, lambda: br"/home/user/bar")
        try:
            curdir = os.path.split(os.getcwdb())[-1]
            self.assertRaises(ValueError, posixpath.relpath, b"")
            self.assertEqual(posixpath.relpath(b"a"), b"a")
            self.assertEqual(posixpath.relpath(posixpath.abspath(b"a")), b"a")
            self.assertEqual(posixpath.relpath(b"a/b"), b"a/b")
            self.assertEqual(posixpath.relpath(b"../a/b"), b"../a/b")
            self.assertEqual(posixpath.relpath(b"a", b"../b"),
                             b"../"+curdir+b"/a")
            self.assertEqual(posixpath.relpath(b"a/b", b"../c"),
                             b"../"+curdir+b"/a/b")
            self.assertEqual(posixpath.relpath(b"a", b"b/c"), b"../../a")
            self.assertEqual(posixpath.relpath(b"a", b"a"), b".")
            self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x/y/z"), b'../../../foo/bar/bat')
            self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/foo/bar"), b'bat')
            self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/"), b'foo/bar/bat')
            self.assertEqual(posixpath.relpath(b"/", b"/foo/bar/bat"), b'../../..')
            self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x"), b'../foo/bar/bat')
            self.assertEqual(posixpath.relpath(b"/x", b"/foo/bar/bat"), b'../../../x')
            self.assertEqual(posixpath.relpath(b"/", b"/"), b'.')
            self.assertEqual(posixpath.relpath(b"/a", b"/a"), b'.')
            self.assertEqual(posixpath.relpath(b"/a/b", b"/a/b"), b'.')

            self.assertRaises(TypeError, posixpath.relpath, b"bytes", "str")
            self.assertRaises(TypeError, posixpath.relpath, "str", b"bytes")
        finally:
            os.getcwdb = real_getcwdb
Example #17
    def dumps(self, obj):
        """
        Dump the given object which may be a container of any objects
        supported by the reports added to the manager.
        """
        document = Document()

        if self.stylesheet:
            type = "text/xsl"
            href = "file://%s" % posixpath.abspath(self.stylesheet)
            style = document.createProcessingInstruction("xml-stylesheet",
                "type=\"%s\" href=\"%s\"" % (type, href))
            document.appendChild(style)

        node = document.createElement(self.name)
        document.appendChild(node)

        if self.version:
            node.setAttribute("version", self.version)

        try:
            self.call_dumps(obj, node)
        except KeyError as e:
            raise ValueError("Unsupported type: %s" % e)

        return document
Example #18
    def _load(self, fs, partname, ct_dict, parts_dict):
        """
        Load part identified as *partname* from filesystem *fs* and propagate
        the load to related parts.
        """
        # calculate working values
        baseURI = os.path.split(partname)[0]
        content_type = ct_dict[partname]

        # set persisted attributes
        self.__partname = partname
        self.blob = fs.getblob(partname)
        self.typespec = PartTypeSpec(content_type)

        # load relationships and propagate load to target parts
        self.__relationships = []  # discard any rels from prior load
        rel_elms = self.__get_rel_elms(fs)
        for rel_elm in rel_elms:
            rId = rel_elm.get('Id')
            reltype = rel_elm.get('Type')
            target_relpath = rel_elm.get('Target')
            target_partname = posixpath.abspath(posixpath.join(baseURI,
                                                               target_relpath))
            if target_partname in parts_dict:
                target_part = parts_dict[target_partname]
            else:
                target_part = Part()
                parts_dict[target_partname] = target_part
                target_part._load(fs, target_partname, ct_dict, parts_dict)

            # create relationship to target_part
            rel = Relationship(rId, self, reltype, target_part)
            self.__relationships.append(rel)

        return self
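A minimal sketch of the partname resolution step above: the relative Target from a relationship element is joined onto the source part's base URI and normalized with posixpath.abspath. The values are hypothetical.

import posixpath

baseURI = "/word"                        # directory of the part that owns the .rels
target_relpath = "../media/image1.png"   # Target attribute of a relationship element
target_partname = posixpath.abspath(posixpath.join(baseURI, target_relpath))
print(target_partname)                   # '/media/image1.png'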
Example #19
def _setup_path():
    """
    Assigns all the various paths that will be needed (to the env object)
    in deploying code, populating config templates, etc.
    """
    env.sup_template_path = posixpath.join(posixpath.abspath(settings.__file__),settings.SUPERVISOR_TEMPLATE_PATH)
    env.project_root = settings.PROJECT_ROOT
    env.www_root = posixpath.join(env.project_root,'www',env.environment)
    env.log_dir = posixpath.join(env.www_root,'log')
    env.code_root = posixpath.join(env.www_root,'code_root')
    env.project_media = posixpath.join(env.code_root, 'media')
    env.project_static = posixpath.join(env.project_root, 'static')
    env.virtualenv_name = getattr(settings, 'PYTHON_ENV_NAME', 'python_env') #not a required setting and should be sufficient with default name
    env.virtualenv_root = posixpath.join(env.www_root, env.virtualenv_name)
    env.services_root = posixpath.join(env.project_root, 'services')
    env.httpd_services_root = posixpath.join(env.services_root, 'apache')
    env.httpd_services_template_name = '%(project)s.conf' % env
    env.httpd_remote_services_template_path = posixpath.join(env.httpd_services_root,env.httpd_services_template_name)
    env.supervisor_conf_root = posixpath.join(env.services_root, 'supervisor')
    env.supervisor_conf_path = posixpath.join(env.supervisor_conf_root, 'supervisor.conf')
    env.supervisor_init_template_path = settings.SUPERVISOR_INIT_TEMPLATE
    if env.os == 'ubuntu':
        env.httpd_remote_conf_root = '/etc/apache2/sites-enabled'
        env.httpd_user_group = 'www-data'
    elif env.os == 'redhat':
        env.httpd_remote_conf_root = '/etc/httpd/conf.d'
        env.httpd_user_group = 'apache'
    else:
        utils.abort('In Web module. Remote operating system ("%(os)s") not recognized. Aborting.' % env)
Example #20
def update_config_file(dryrun=False):
    for site, site_config in config.sites.items():
        redis_processes = [
            (x, site_config.processes[x]) for x in site_config.processes if site_config.processes[x]["type"] == "redis"
        ]
        template_path = site_config["redis"]["template"]
        print redis_processes
        for process_name, process in redis_processes:
            working_directoy = posixpath.normpath(posixpath.join(env.path, "..", "data", "redis", process_name))
            log_directory = posixpath.normpath(posixpath.join(env.path, "..", "log", "redis"))
            run("mkdir -p " + working_directoy)
            run("mkdir -p " + log_directory)
            context_dict = site_config
            context_dict.update(
                {
                    "site": site,
                    "working_directory": working_directoy,
                    "log_directory": log_directory,
                    "process_name": process_name,
                    "socket": process["socket"],
                }
            )
            path = posixpath.abspath(
                posixpath.join(site_config["deployment"]["path"], "..", "config", process_name + ".conf")
            )
            output = render_template(template_path, context_dict)
            if dryrun:
                print path + ":"
                print output
            else:
                put(StringIO.StringIO(output), path)
Example #21
def main(args):
  if len(args) != 3:
    print "usage: action_process_keywords_table.py /path/to/Keywords.table /path/to/output/dir"
    sys.exit(-1)
  kw_path = args[1]
  output_dir_path = args[2]
  # it is assumed that the scripts live one higher than the current working dir
  script_path = posixpath.abspath(os.path.join(os.getcwd(), '..'))
    
  # first command:
  # ../create_hash_table /path/to/Keywords.table > /path/to/output/dir/Lexer.lut.h
  if not os.path.exists(output_dir_path):
    os.makedirs(output_dir_path)
  out_file = open(os.path.join(output_dir_path, 'Lexer.lut.h'), 'w')
  subproc_cmd = [os.path.join(script_path, 'create_hash_table'), kw_path]
  return_code = subprocess.call(subproc_cmd, stdout=out_file)
  out_file.close()
  assert(return_code == 0)

  # second command:
  # python ../KeywordLookupGenerator.py  > /path/to/output/dir/KeywordLookup.h
  out_file = open(os.path.join(output_dir_path, 'KeywordLookup.h'), 'w')
  subproc_cmd = ['python', '../KeywordLookupGenerator.py', kw_path]
  return_code = subprocess.call(subproc_cmd, stdout=out_file)
  out_file.close()
  assert(return_code == 0)
  
  return 0
Example #22
 def test_relpath(self) -> None:
     real_getcwd = os.getcwd
     # mypy: can't modify os directly
     setattr(os, 'getcwd', lambda: r"/home/user/bar")
     try:
         curdir = os.path.split(os.getcwd())[-1]
         self.assertRaises(ValueError, posixpath.relpath, "")
         self.assertEqual(posixpath.relpath("a"), "a")
         self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a")
         self.assertEqual(posixpath.relpath("a/b"), "a/b")
         self.assertEqual(posixpath.relpath("../a/b"), "../a/b")
         self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a")
         self.assertEqual(posixpath.relpath("a/b", "../c"),
                          "../"+curdir+"/a/b")
         self.assertEqual(posixpath.relpath("a", "b/c"), "../../a")
         self.assertEqual(posixpath.relpath("a", "a"), ".")
         self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x/y/z"), '../../../foo/bar/bat')
         self.assertEqual(posixpath.relpath("/foo/bar/bat", "/foo/bar"), 'bat')
         self.assertEqual(posixpath.relpath("/foo/bar/bat", "/"), 'foo/bar/bat')
         self.assertEqual(posixpath.relpath("/", "/foo/bar/bat"), '../../..')
         self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x"), '../foo/bar/bat')
         self.assertEqual(posixpath.relpath("/x", "/foo/bar/bat"), '../../../x')
         self.assertEqual(posixpath.relpath("/", "/"), '.')
         self.assertEqual(posixpath.relpath("/a", "/a"), '.')
         self.assertEqual(posixpath.relpath("/a/b", "/a/b"), '.')
     finally:
         setattr(os, 'getcwd', real_getcwd)
Example #23
    def test_relpath_bytes(self) -> None:
        real_getcwdb = os.getcwdb
        # mypy: can't modify os directly
        setattr(os, 'getcwdb', lambda: br"/home/user/bar")
        try:
            curdir = os.path.split(os.getcwdb())[-1]
            self.assertRaises(ValueError, posixpath.relpath, b"")
            self.assertEqual(posixpath.relpath(b"a"), b"a")
            self.assertEqual(posixpath.relpath(posixpath.abspath(b"a")), b"a")
            self.assertEqual(posixpath.relpath(b"a/b"), b"a/b")
            self.assertEqual(posixpath.relpath(b"../a/b"), b"../a/b")
            self.assertEqual(posixpath.relpath(b"a", b"../b"),
                             b"../"+curdir+b"/a")
            self.assertEqual(posixpath.relpath(b"a/b", b"../c"),
                             b"../"+curdir+b"/a/b")
            self.assertEqual(posixpath.relpath(b"a", b"b/c"), b"../../a")
            self.assertEqual(posixpath.relpath(b"a", b"a"), b".")
            self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x/y/z"), b'../../../foo/bar/bat')
            self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/foo/bar"), b'bat')
            self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/"), b'foo/bar/bat')
            self.assertEqual(posixpath.relpath(b"/", b"/foo/bar/bat"), b'../../..')
            self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x"), b'../foo/bar/bat')
            self.assertEqual(posixpath.relpath(b"/x", b"/foo/bar/bat"), b'../../../x')
            self.assertEqual(posixpath.relpath(b"/", b"/"), b'.')
            self.assertEqual(posixpath.relpath(b"/a", b"/a"), b'.')
            self.assertEqual(posixpath.relpath(b"/a/b", b"/a/b"), b'.')

            self.assertRaises(TypeError, posixpath.relpath, b"bytes", "str")
            self.assertRaises(TypeError, posixpath.relpath, "str", b"bytes")
        finally:
            setattr(os, 'getcwdb', real_getcwdb)
Example #24
    def generate_cover_letters(self, context):
        """
        Generate cover letters for all companies in the business YAML file.

        Parameters
        ----------
        context : ContextRenderer
            The renderer to use.

        """
        businesses = load_yaml(
            posixpath.join(config.YAML_DIR,
                           config.YAML_BUSINESSES + ".yaml"))

        if not businesses:
            return

        # Create cover letter directory
        os.makedirs(posixpath.join(config.OUTPUT_DIR, config.LETTERS_DIR),
                    exist_ok=True)

        self.data["pwd"] = posixpath.abspath(".").replace("\\", "/")

        for business in tqdm.tqdm(businesses, desc="Generating cover letters",
                                  unit="letter", leave=True):
            self.data["business"] = businesses[business]
            self.data["business"]["body"] = context.render_template(
                config.LETTER_FILE_NAME, self.data
            )
            self.process_resume(context, base=business)
Example #25
 def test_relpath(self):
     (real_getcwd, os.getcwd) = (os.getcwd, lambda: r"/home/user/bar")
     try:
         curdir = os.path.split(os.getcwd())[-1]
         self.assertRaises(ValueError, posixpath.relpath, "")
         self.assertEqual(posixpath.relpath("a"), "a")
         self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a")
         self.assertEqual(posixpath.relpath("a/b"), "a/b")
         self.assertEqual(posixpath.relpath("../a/b"), "../a/b")
         self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a")
         self.assertEqual(posixpath.relpath("a/b", "../c"),
                          "../"+curdir+"/a/b")
         self.assertEqual(posixpath.relpath("a", "b/c"), "../../a")
         self.assertEqual(posixpath.relpath("a", "a"), ".")
         self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x/y/z"), '../../../foo/bar/bat')
         self.assertEqual(posixpath.relpath("/foo/bar/bat", "/foo/bar"), 'bat')
         self.assertEqual(posixpath.relpath("/foo/bar/bat", "/"), 'foo/bar/bat')
         self.assertEqual(posixpath.relpath("/", "/foo/bar/bat"), '../../..')
         self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x"), '../foo/bar/bat')
         self.assertEqual(posixpath.relpath("/x", "/foo/bar/bat"), '../../../x')
         self.assertEqual(posixpath.relpath("/", "/"), '.')
         self.assertEqual(posixpath.relpath("/a", "/a"), '.')
         self.assertEqual(posixpath.relpath("/a/b", "/a/b"), '.')
     finally:
         os.getcwd = real_getcwd
Example #26
    def __init__(self, filename=None, h5path=None, slices=None, url=None):
        self.filename = filename
        if h5path:
            self.dataset = posixpath.abspath(h5path)
            self.group = posixpath.dirname(self.dataset)
        else:
            self.dataset = None
            self.group = None

        self.slice = copy.deepcopy(slices)
        self.last_index = None # where should I increment when next.
        if self.slice:
            for i, j  in enumerate(self.slice):
                if "__len__" in dir(j):
                    if ":" in j:
                        self.slice[i] = slice(None, None, 1)
                    else:
                        self.slice[i] = int(j)
                        self.last_index = i
                else:
                    self.slice[i] = int(j)
                    self.last_index = i

        if url is not None:
            self.parse(url)
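A small sketch of the dataset/group split above: paths inside an HDF5 file always use forward slashes, so posixpath is the natural module for normalizing them regardless of the host platform. The h5path value is made up.

import posixpath

h5path = "/entry/data/../detector/image"
dataset = posixpath.abspath(h5path)      # '/entry/detector/image'
group = posixpath.dirname(dataset)       # '/entry/detector'
print(dataset, group)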
Example #27
 def from_rel_ref(baseURI, relative_ref):
     """
     Return a |PackURI| instance containing the absolute pack URI formed by
     translating *relative_ref* onto *baseURI*.
     """
     joined_uri = posixpath.join(baseURI, relative_ref)
     abs_uri = posixpath.abspath(joined_uri)
     return PackURI(abs_uri)
Example #28
def should_skip(filename, config, path='/'):
    """Returns True if the file should be skipped based on the passed in settings."""
    for skip_path in config['skip']:
        if posixpath.abspath(posixpath.join(path, filename)) == posixpath.abspath(skip_path.replace('\\', '/')):
            return True

    position = os.path.split(filename)
    while position[1]:
        if position[1] in config['skip']:
            return True
        position = os.path.split(position[0])

    for glob in config['skip_glob']:
        if fnmatch.fnmatch(filename, glob):
            return True

    return False
Example #29
def normalize_and_sort_includes(include_paths):
    normalized_include_paths = []
    for include_path in include_paths:
        match = re.search(r'/gen/blink/(.*)$', posixpath.abspath(include_path))
        if match:
            include_path = match.group(1)
        normalized_include_paths.append(include_path)
    return sorted(normalized_include_paths)
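A hypothetical sketch of the rewrite above: an include path that lives under a .../gen/blink/ directory is replaced by its path relative to that directory; anything else is kept as-is.

import posixpath
import re

include_path = "out/Release/gen/blink/core/events/Event.h"
match = re.search(r'/gen/blink/(.*)$', posixpath.abspath(include_path))
print(match.group(1) if match else include_path)   # 'core/events/Event.h'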
Example #30
def relpath(path, start=os.getcwd()):
    """Return a relative version of a path"""
    sep = "/"
    if platform.system() == 'Windows':
      sep = "\\"

    if not path:
        raise ValueError("no path specified")
    start_list = posixpath.abspath(start).split(sep)
    path_list = posixpath.abspath(path).split(sep)
    # Work out how much of the filepath is shared by start and path.
    i = len(posixpath.commonprefix([start_list, path_list]))
    rel_list = [start] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return start
    r = sep.join(rel_list)
    return r
Example #31
    def test_abspath(self):
        self.assert_("foo" in posixpath.abspath("foo"))

        self.assertRaises(TypeError, posixpath.abspath)
Example #32
 def __get_path(self, path):
     return posixpath.abspath(
         posixpath.join(self.__home_directory, path or ''))
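A minimal sketch of what the helper above computes: a possibly relative, possibly '..'-laden path is anchored under a home directory and normalized. The home directory here is a made-up value.

import posixpath

home_directory = "/home/alice"
print(posixpath.abspath(posixpath.join(home_directory, "docs/../notes.txt")))  # '/home/alice/notes.txt'
print(posixpath.abspath(posixpath.join(home_directory, "")))                   # '/home/alice'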
Example #33
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.

"""

from __future__ import absolute_import, division, print_function, unicode_literals

import os
import sys
import posixpath

web_path = posixpath.split(os.path.abspath(__file__))[0]
web_uppath = posixpath.abspath(posixpath.join(web_path, '..'))

sys.path.append(web_path)
sys.path.append(web_uppath)

os.chdir(web_uppath)

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
Example #34
# -*- coding: utf-8 -*-

# Custom configuration
from posixpath import abspath, join, dirname, pardir
BASE_DIR = abspath(join(dirname(__file__), pardir))

# Scrapy settings for yaozhotc project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'yaozhotc'

SPIDER_MODULES = ['yaozhotc.spiders']
NEWSPIDER_MODULE = 'yaozhotc.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'yaozhotc (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
Example #35
    def deployment(self, c, site, env_dict):
        """
        Here we add the information from deployments.py and merge it into our site dictionaries.
        Can also be used to output warnings to the user, if he is using an old deployments.py
        format.
        """
        deployment_dict = {
            # Old settings
            'servername': env_dict.get("host_string"),
            'path': env_dict.get("path"),
            'backup_dir': env_dict.get("backup_dir"),
            'repo': env_dict.get("repo"),
            'branch': env_dict.get("branch"),
            'user': env_dict.get("user"),
            'buildout_cfg': env_dict.get("buildout_cfg"),
            'django_settings_module': env_dict.get("django_settings_module"),
            'generated_settings_path': posixpath.join(env_dict.get("path"), "_gen/settings.py"),

            # New settings
            'domains_redirect': env_dict.get('domains_redirect'),
            'url_redirect': env_dict.get('url_redirect'),

            'basic_auth': env_dict.get('basic_auth', False),
            'basic_auth_path': os.path.join(env_dict.get("path"), env_dict.get('basic_auth_path', None) or ""),

            'ssl': env_dict.get('ssl', False),
            'ssl_cert_path': os.path.join(env_dict.get("path"), env_dict.get('ssl_cert_path', None) or ""),
            'ssl_key_path': os.path.join(env_dict.get("path"), env_dict.get('ssl_key_path', None) or ""),
            'bind_ip': env_dict.get('bind_ip', '*'),
            'static_error_pages': env_dict.get('static_error_pages', []),
            'big_body_endpoints': env_dict.get('big_body_endpoints', []),
            'home': '/home/%s' % env_dict.get("user"),
        }
        deployment_dict['logdir'] = env_dict.get("logdir") or os.path.join(deployment_dict['home'], 'log')

        if not env_dict.get("databases"):
            deployment_dict["databases"] = {
                'default': {
                    'ENGINE': env_dict.get("db_engine", "django.db.backends.postgresql_psycopg2"),
                    'NAME': env_dict.get("db_name"),
                    'USER': env_dict.get("db_username"),
                    'PASSWORD': env_dict.get("db_password"),
                    'HOST': env_dict.get("db_host", ""),
                }
            }

        if type(env_dict.get("domains")) == list:
            domains = {
                "main": env_dict.get("domains"),
            }
            print(DOMAIN_DICT_DEPRECATION_WARNING)
        elif type(env_dict.get("domains")) == dict:
            domains = env_dict.get("domains")
        elif env_dict.get("domains") is None:
            domains = {
                "main": [],
            }
            print("Warning: No domains supplied in settings, ignoring.")
        else:
            raise Exception("Invalid domain format")
        deployment_dict.update({'domains': domains})

        ###############
        # Environment #
        ###############

        environment_dict = self.sites(c)[site].get("environment")
        for key, value in list(env_dict.get("environment", {}).items()):
            environment_dict[key] = value

        #################
        # Gunicorn dict #
        #################
        gunicorn_dict = self.sites(c)[site].get("gunicorn")
        gunicorn_dict["workers"] = env_dict.get("gunicorn", {}).get("workers", gunicorn_dict.get("workers"))
        gunicorn_dict["maxrequests"] = env_dict.get("gunicorn", {}).get("maxrequests", gunicorn_dict.get("maxrequests"))
        gunicorn_dict["timeout"] = env_dict.get("gunicorn", {}).get("timeout", gunicorn_dict.get("timeout"))
        gunicorn_dict["bind"] = env_dict.get("gunicorn", {}).get("bind", gunicorn_dict.get("bind"))

        ###############
        # Celery dict #
        ###############
        celery_dict = self.sites(c)[site].get("celery")

        celery_dict["concurrency"] = env_dict.get("celery", {}).get("concurrency", celery_dict.get("concurrency"))
        celery_dict["maxtasksperchild"] = env_dict.get("celery", {}).get("maxtasksperchild",
                                                                         celery_dict.get("maxtasksperchild"))

        ##############
        # nginx dict #
        ##############

        nginx_dict = self.sites(c)[site].get("nginx")
        nginx_dict["enabled"] = env_dict.get("nginx", {}).get("enabled", nginx_dict.get("enabled"))
        nginx_dict["location_settings"] = {
            "client_max_body_size": env_dict.get("nginx", {}).get("client_max_body_size",
                                                                  nginx_dict.get("client_max_body_size")),
        }
        nginx_dict["template"] = env_dict.get("nginx", {}).get("template", nginx_dict.get("template"))

        ##############
        # redis dict #
        ##############

        redis_dict = self.sites(c)[site].get("redis")
        redis_dict["template"] = env_dict.get("redis", {}).get("template", redis_dict.get("template"))

        ##################
        # memcached dict #
        ##################

        memcached_dict = self.sites(c)[site].get("memcached")
        memcached_dict["enabled"] = env_dict.get("memcached", {}).get("enabled", memcached_dict.get("enabled"))
        memcached_dict["size"] = env_dict.get("memcached", {}).get("size", memcached_dict.get("size"))

        ###################
        # supervisor dict #
        ###################

        supervisor_dict = self.sites(c)[site].get("supervisor")
        supervisor_dict["template"] = env_dict.get("supervisor", {}).get("template", supervisor_dict.get("template"))
        supervisor_dict["daemon_template"] = env_dict.get("supervisor", {}).get("daemon_template", supervisor_dict.get("daemon_template"))
        supervisor_dict["group_template"] = env_dict.get("supervisor", {}).get("group_template", supervisor_dict.get("group_template"))
        supervisor_dict["gunicorn_command_template"] = env_dict.get("supervisor", {}).get("gunicorn_command_template", supervisor_dict.get("gunicorn_command_template"))
        supervisor_dict["celeryd_command_template"] = env_dict.get("supervisor", {}).get("celeryd_command_template", supervisor_dict.get("celeryd_command_template"))
        supervisor_dict["celeryd_command_template"] = env_dict.get("supervisor", {}).get("celeryd_command_template", supervisor_dict.get("celeryd_command_template"))
        supervisor_dict["supervisorctl_command"] = env_dict.get("supervisor", {}).get("supervisorctl_command", supervisor_dict.get("supervisorctl_command"))
        supervisor_dict["supervisord_command"] = env_dict.get("supervisor", {}).get("supervisord_command", supervisor_dict.get("supervisord_command"))
        supervisor_dict["use_global_supervisord"] = env_dict.get("supervisor", {}).get("use_global_supervisord", supervisor_dict.get("use_global_supervisord"))
        if supervisor_dict["supervisorctl_command"] is None:
            if supervisor_dict["use_global_supervisord"]:
                supervisor_dict["supervisorctl_command"] = 'sudo supervisorctl'
            else:
                supervisor_dict["supervisorctl_command"] = 'supervisorctl --config={}../config/supervisord.conf'.format(
                    deployment_dict['path'])

        if supervisor_dict["supervisord_command"] is None and not supervisor_dict["use_global_supervisord"]:
            supervisor_dict["supervisord_command"] = 'supervisord -c {}../config/supervisord.conf'.format(
                deployment_dict['path'])

        #################
        # newrelic dict #
        #################

        newrelic_dict = self.sites(c)[site].get("newrelic")
        newrelic_dict["enabled"] = env_dict.get("newrelic", {}).get("enabled", newrelic_dict.get("enabled"))
        newrelic_dict["config_file"] = env_dict.get("newrelic", {}).get("config_file", newrelic_dict.get("config_file"))
        if not newrelic_dict["config_file"].startswith('/'):
            newrelic_dict["config_file"] = posixpath.abspath(posixpath.join(
                    deployment_dict["path"],
                    newrelic_dict["config_file"],
                ))
        self.sites(c)[site]["environment"]["NEW_RELIC_CONFIG_FILE"] = newrelic_dict["config_file"]
        newrelic_dict["environment_name"] = env_dict.get("newrelic", {}).get("environment_name",
                                                                             newrelic_dict.get("environment_name"))
        if newrelic_dict["environment_name"]:
            self.sites(c)[site]["environment"]["NEW_RELIC_ENVIRONMENT"] = newrelic_dict["environment_name"]

        newrelic_dict["license_key"] = env_dict.get("newrelic", {}).get("license_key", newrelic_dict.get("license_key"))
        if newrelic_dict["license_key"]:
            self.sites(c)[site]["environment"]["NEW_RELIC_LICENSE_KEY"] = newrelic_dict["license_key"]

        return {
            'deployment': deployment_dict,
            'environment': environment_dict,
            'gunicorn': gunicorn_dict,
            'celery': celery_dict,
            'nginx': nginx_dict,
            'redis': redis_dict,
            'memcached': memcached_dict,
            'supervisor': supervisor_dict,
            'newrelic': newrelic_dict,
        }
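A hypothetical sketch of the newrelic config_file resolution near the end of the method above: a relative path is anchored under the deployment path and normalized, while an absolute path is left alone.

import posixpath

deployment_path = "/home/deploy/site"
config_file = "conf/newrelic.ini"        # value as it might come from env_dict
if not config_file.startswith('/'):
    config_file = posixpath.abspath(posixpath.join(deployment_path, config_file))
print(config_file)                       # '/home/deploy/site/conf/newrelic.ini'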
Example #36
 def update_event(self, inp=-1):
     self.set_output_val(0, posixpath.abspath(self.input(0)))
Example #37
 def setUp(self):
     super().setUp()
     os.mkdir('bag')
     os.mkdir('apple')
     os.symlink(posixpath.abspath('apple'), 'bag/apple')
Example #38
 def abspath(path):
   return posixpath.abspath(path)
Example #39
import unittest
from test import support

import posixpath, os
from posixpath import realpath, abspath, join, dirname, basename, relpath

# An absolute path to a temporary filename for testing. We can't rely on TESTFN
# being an absolute path, so we need this.

ABSTFN = abspath(support.TESTFN)


def safe_rmdir(dirname):
    try:
        os.rmdir(dirname)
    except OSError:
        pass


class PosixPathTest(unittest.TestCase):
    def setUp(self):
        self.tearDown()

    def tearDown(self):
        for suffix in ["", "1", "2"]:
            support.unlink(support.TESTFN + suffix)
            safe_rmdir(support.TESTFN + suffix)

    def assertIs(self, a, b):
        self.assert_(a is b)
Example #40
from test import test_genericpath
from test.support import import_helper
from test.support import os_helper
from test.support.os_helper import FakePath
from unittest import mock

try:
    import posix
except ImportError:
    posix = None


# An absolute path to a temporary filename for testing. We can't rely on TESTFN
# being an absolute path, so we need this.

ABSTFN = abspath(os_helper.TESTFN)

def skip_if_ABSTFN_contains_backslash(test):
    """
    On Windows, posixpath.abspath still returns paths with backslashes
    instead of posix forward slashes. If this is the case, several tests
    fail, so skip them.
    """
    found_backslash = '\\' in ABSTFN
    msg = "ABSTFN is not a posix path - tests fail"
    return [test, unittest.skip(msg)(test)][found_backslash]

def safe_rmdir(dirname):
    try:
        os.rmdir(dirname)
    except OSError:
        pass
Example #41
def user_url_root(data_directory, username):
    user_directory = safe_url_join(data_directory, username)
    user_directory = posixpath.abspath(user_directory)
    if posixpath.basename(user_directory) != username:
        raise IOError('security error')
    return user_directory
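A small sketch of the traversal check above: after joining and normalizing, the last path component must still equal the username, otherwise '..' segments escaped the data directory. safe_url_join is assumed here to behave like posixpath.join.

import posixpath

data_directory = "/srv/data"
for username in ("alice", "../etc"):
    user_directory = posixpath.abspath(posixpath.join(data_directory, username))
    print(username, posixpath.basename(user_directory) == username)
# alice True
# ../etc False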
Example #42
import unittest
from test import test_support

import posixpath, os
from posixpath import realpath, abspath, join, dirname, basename, relpath

# An absolute path to a temporary filename for testing. We can't rely on TESTFN
# being an absolute path, so we need this.

ABSTFN = abspath(test_support.TESTFN)


def safe_rmdir(dirname):
    try:
        os.rmdir(dirname)
    except OSError:
        pass


class PosixPathTest(unittest.TestCase):
    def setUp(self):
        self.tearDown()

    def tearDown(self):
        for suffix in ["", "1", "2"]:
            test_support.unlink(test_support.TESTFN + suffix)
            safe_rmdir(test_support.TESTFN + suffix)

    def assertIs(self, a, b):
        self.assert_(a is b)
Example #43
 def abspath(p):
     return Path._forward_slash(posixpath.abspath(p))
Example #44
class RemoteDirectory(DirectoryBase):
    """
	Remote Directory object that defines private methods for rendering remote
	data using Paramiko's SFTP functionality.
	"""
    location = 'remote'
    root_directory = posixpath.abspath(posixpath.sep)
    transfer_direction = 'download'
    treeview_name = 'treeview_remote'
    working_directory_combobox_name = 'SFTPClient.notebook.page_stfp.comboboxtext_remote_working_directory'

    def __init__(self, application, config, ssh):
        self.ssh = ssh
        self.path_mod = posixpath
        wd_history = config['directories'].get('remote', {})
        wd_history = wd_history.get(
            application.config['server'].split(':', 1)[0], [])
        self._thread_local_ftp = {}
        super(RemoteDirectory, self).__init__(application, config, wd_history)

        self.default_directory = application.config['server_config'][
            'server.web_root']
        try:
            self.change_cwd(self.default_directory)
        except (IOError, OSError):
            logger.info("failed to set remote directory to the web root: " +
                        application.config['server_config']['server.web_root'],
                        exc_info=True)
            self.default_directory = self.root_directory
            self.change_cwd(self.default_directory)

    def _chdir(self, path):
        for obj_lock in self._thread_local_ftp.values():
            with obj_lock.lock:
                obj_lock.object.chdir(path)
        return

    # todo: should this be rename_path?
    def _rename_file(self, _iter, path):
        with self.ftp_handle() as ftp:
            ftp.rename(self._tv_model[_iter][2], path)  # pylint: disable=unsubscriptable-object

    def _yield_dir_list(self, path):
        with self.ftp_handle() as ftp:
            for name in ftp.listdir(path):
                yield name

    def change_cwd(self, new_dir):
        new_dir = super(RemoteDirectory, self).change_cwd(new_dir)
        if new_dir is not None:
            logger.debug('set the remote working directory to: ' + new_dir)
        return new_dir

    def make_dir(self, path):
        with self.ftp_handle() as ftp:
            ftp.mkdir(path)

    def path_is_hidden(self, path):
        if self.path_mod.basename(path).startswith('.'):
            return True
        return False

    @sftp_utilities.handle_permission_denied
    def delete(self, treeiter):
        """
		Delete the selected file or directory.

		:param treeiter: The TreeIter that points to the selected file.
		"""
        name = self._tv_model[treeiter][2]  # pylint: disable=unsubscriptable-object
        if self.get_is_folder(name):
            if not self.remove_by_folder_name(name):
                return
        elif not self.remove_by_file_name(name):
            return
        self._tv_model.remove(treeiter)

    def ftp_acquire(self):
        """
		Get a thread-specific ftp handle. This handle must not be transferred to
		another thread and it must be closed with a follow up call to
		:py:meth:`.ftp_release` when it is no longer needed.

		:return: A handle to an FTP session.
		"""
        current_tid = threading.current_thread().ident
        if current_tid not in self._thread_local_ftp:
            logger.info("opening a new sftp session for tid 0x{0:x}".format(
                current_tid))
            ftp = self.ssh.open_sftp()
            ftp.chdir(self.cwd)
            self._thread_local_ftp[current_tid] = ObjectLock(
                ftp, threading.RLock())
        else:
            logger.debug(
                "leasing an existing sftp session to tid 0x{0:x}".format(
                    current_tid))
        obj_lock = self._thread_local_ftp[current_tid]
        obj_lock.lock.acquire()
        return obj_lock.object

    @contextlib.contextmanager
    def ftp_handle(self):
        ftp = self.ftp_acquire()
        try:
            yield ftp
        finally:
            self.ftp_release()

    def ftp_release(self):
        """
		Return a thread-specific ftp handle previously acquired with
		:py:meth:`.ftp_acquire`.
		"""
        current_tid = threading.current_thread().ident
        if current_tid not in self._thread_local_ftp:
            raise RuntimeError(
                'ftp_release() called for thread before ftp_acquire')
        self._thread_local_ftp[current_tid].lock.release()
        logger.debug("leased sftp session released from tid 0x{0:x}".format(
            current_tid))
        return

    def path_mode(self, path):
        try:
            with self.ftp_handle() as ftp:
                return stat.S_IFMT(ftp.stat(path).st_mode)
        except IOError as error:
            if error.errno in (errno.ENOENT, errno.EACCES):
                return 0
            raise

    def shutdown(self):
        active_tids = tuple(self._thread_local_ftp.keys())
        logger.info("closing {0} active sftp sessions".format(
            len(active_tids)))
        for idx, tid in enumerate(active_tids, 1):
            obj_lock = self._thread_local_ftp[tid]
            logger.debug(
                "closing sftp session {0} of {1} for tid 0x{2:x}".format(
                    idx, len(active_tids), tid))
            with obj_lock.lock:
                obj_lock.object.close()
                del self._thread_local_ftp[tid]
        logger.debug('all open sftp sessions have been closed')

    def stat(self, path):
        with self.ftp_handle() as ftp:
            return ftp.stat(path)

    @sftp_utilities.handle_permission_denied
    def remove_by_folder_name(self, name):
        """
		Removes a folder given its absolute path.

		:param name: The path of the folder to be removed.
		"""
        # with paramiko, you cannot remove populated dir, so recursive method utilized
        for path in self._yield_dir_list(name):
            new_path = self.path_mod.join(name, path)
            if self.get_is_folder(new_path):
                self.remove_by_folder_name(new_path)
            else:
                self.remove_by_file_name(new_path)
        with self.ftp_handle() as ftp:
            ftp.rmdir(name)

    @sftp_utilities.handle_permission_denied
    def remove_by_file_name(self, name):
        """
		Removes a file given its absolute path.

		:param name: The path of the file to be removed.
		"""
        with self.ftp_handle() as ftp:
            ftp.remove(name)

    def read_file(self, file_path):
        """
		Reads the contents of a file and returns as bytes

		:param str file_path: The path to the file to open and read.
		:return: The contents of the file
		:rtype: bytes
		"""
        with self.ftp_handle() as ftp:
            with ftp.file(file_path, 'r') as file_:
                file_contents = file_.read()
        return file_contents

    def write_file(self, file_path, file_contents):
        """
		Write data to a file.

		:param str file_path: The absolute path to target file.
		:param bytes file_contents: The data to place in file.
		"""
        with self.ftp_handle() as ftp:
            file_ = ftp.file(file_path, 'wb')
            file_.write(file_contents)
            file_.close()
        logger.info("wrote {} bytes to {}".format(len(file_contents),
                                                  file_path))

    def walk(self, path):
        """
		Walk through a given directory and return all subdirectories and
		subfiles in a format parsed for transfer. This traverses the path in a
		top-down pattern.

		:param str path: The directory to be traversed through.
		:return: A list of :py:class:`DirectoryContents` instances representing the contents.
		:rtype: list
		"""
        contents = []
        path = self.get_abspath(path)
        subdirs = []
        files = []
        try:
            entries = list(self._yield_dir_list(path))
        except (IOError, OSError):
            return contents
        for entry in entries:
            if self.get_is_folder(self.path_mod.join(path, entry)):
                subdirs.append(entry)
            else:
                files.append(entry)
        contents.append(DirectoryContents(path, subdirs, files))
        for folder in subdirs:
            contents.extend(self.walk(self.path_mod.join(path, folder)))
        return contents
Example #45
def save_model(
    spark_model,
    path,
    mlflow_model=None,
    conda_env=None,
    dfs_tmpdir=None,
    sample_input=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
):
    """
    Save a Spark MLlib Model to a local path.

    By default, this function saves models using the Spark MLlib persistence mechanism.
    Additionally, if a sample input is specified using the ``sample_input`` parameter, the model
    is also serialized in MLeap format and the MLeap flavor is added.

    :param spark_model: Spark model to be saved - MLflow can only save descendants of
                        pyspark.ml.Model which implement MLReadable and MLWritable.
    :param path: Local path where the model is to be saved.
    :param mlflow_model: MLflow model config this flavor is being added to.
    :param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
                      this model should be run in. At minimum, it should specify the dependencies
                      contained in :func:`get_default_conda_env()`. If `None`, the default
                      :func:`get_default_conda_env()` environment is added to the model.
                      The following is an *example* dictionary representation of a Conda
                      environment::

                        {
                            'name': 'mlflow-env',
                            'channels': ['defaults'],
                            'dependencies': [
                                'python=3.7.0',
                                'pyspark=2.3.0'
                            ]
                        }
    :param dfs_tmpdir: Temporary directory path on Distributed (Hadoop) File System (DFS) or local
                       filesystem if running in local mode. The model is written to this
                       destination and then copied to the requested local path. This is necessary
                       as Spark ML models read from and write to DFS if running on a cluster. All
                       temporary files created on the DFS are removed if this operation
                       completes successfully. Defaults to ``/tmp/mlflow``.
    :param sample_input: A sample input that is used to add the MLeap flavor to the model.
                         This must be a PySpark DataFrame that the model can evaluate. If
                         ``sample_input`` is ``None``, the MLeap flavor is not added.

    :param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
                      from datasets with valid model input (e.g. the training dataset with target
                      column omitted) and valid model output (e.g. model predictions generated on
                      the training dataset), for example:

                      .. code-block:: python

                        from mlflow.models.signature import infer_signature
                        train = df.drop_column("target_label")
                        predictions = ... # compute model predictions
                        signature = infer_signature(train, predictions)
    :param input_example: (Experimental) Input example provides one or several instances of valid
                          model input. The example can be used as a hint of what data to feed the
                          model. The given example will be converted to a Pandas DataFrame and then
                          serialized to json using the Pandas split-oriented format. Bytes are
                          base64-encoded.



    .. code-block:: python
        :caption: Example

        from mlflow import spark
        from pyspark.ml.pipeline.PipelineModel

        # your pyspark.ml.pipeline.PipelineModel type
        model = ...
        mlflow.spark.save_model(model, "spark-model")
    """
    _validate_model(spark_model)
    from pyspark.ml import PipelineModel

    if not isinstance(spark_model, PipelineModel):
        spark_model = PipelineModel([spark_model])
    if mlflow_model is None:
        mlflow_model = Model()
    # Spark ML stores the model on DFS if running on a cluster
    # Save it to a DFS temp dir first and copy it to local path
    if dfs_tmpdir is None:
        dfs_tmpdir = DFS_TMP
    tmp_path = _tmp_path(dfs_tmpdir)
    spark_model.save(tmp_path)
    sparkml_data_path = os.path.abspath(os.path.join(path, _SPARK_MODEL_PATH_SUB))
    # We're copying the Spark model from DBFS to the local filesystem if (a) the temporary DFS URI
    # we saved the Spark model to is a DBFS URI ("dbfs:/my-directory"), or (b) if we're running
    # on a Databricks cluster and the URI is schemeless (e.g. looks like a filesystem absolute path
    # like "/my-directory")
    copying_from_dbfs = is_valid_dbfs_uri(tmp_path) or (
        databricks_utils.is_in_cluster() and posixpath.abspath(tmp_path) == tmp_path
    )
    if copying_from_dbfs:
        tmp_path_fuse = dbfs_hdfs_uri_to_fuse_path(tmp_path)
        shutil.move(src=tmp_path_fuse, dst=sparkml_data_path)
    else:
        _HadoopFileSystem.copy_to_local_file(tmp_path, sparkml_data_path, remove_src=True)
    _save_model_metadata(
        dst_dir=path,
        spark_model=spark_model,
        mlflow_model=mlflow_model,
        sample_input=sample_input,
        conda_env=conda_env,
        signature=signature,
        input_example=input_example,
    )
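# Illustrative usage (added; not part of the original snippet). A minimal
# sketch, assuming a fitted pyspark.ml PipelineModel and a pandas DataFrame
# of valid model input; the names `model`, `train_df`, and `predictions` are
# hypothetical placeholders.
#
#   import mlflow.spark
#   from mlflow.models.signature import infer_signature
#
#   model = ...        # your fitted pyspark.ml.PipelineModel
#   train_df = ...     # pandas DataFrame of valid model input
#   predictions = ...  # model output computed for train_df
#
#   signature = infer_signature(train_df, predictions)
#   mlflow.spark.save_model(
#       model,
#       path="spark-model",
#       signature=signature,
#       input_example=train_df.head(),
#   )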
Example #46
0
import pandas as pd
from yaml import safe_load as load
from os import environ
from posixpath import join, dirname, abspath
from alpaca_trade_api import REST as alpaca

# Constant: the S&P-500 (^GSPC) symbols
# 505 tickers, comprising the 500 largest US-traded companies
SPX = load(open(join(dirname(abspath(__file__)), 'spx.yml'), 'r'))

# Constant: the NASDAQ-100 (^NDX) symbols
# 103 tickers, comprising the 100 largest NASDAQ-traded companies
NDX = load(open(join(dirname(abspath(__file__)), 'ndx.yml'), 'r'))

# Constant: the Dow-Jones (^DJI) symbols
DJI = load(open(join(dirname(abspath(__file__)), 'dji.yml'), 'r'))

# Only for debugging: Don't run this program directly
if __name__ == "__main__":
    print(__file__)
    print(SPX, "\n")
    print(NDX, "\n")
    print(DJI, "\n")
Example #47
0
    def _normalize_path(self, path):
        if path[0] != '/':
            return "/" + path
        return posixpath.abspath(path)
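    # Added note: for paths that already start with '/', posixpath.abspath
    # also normalizes '.' and '..' segments (e.g. '/a/b/../c' -> '/a/c');
    # relative paths are only prefixed with '/' and left unnormalized.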
Example #48
0
    def test_find_library(self):
        self.assertEqual(library.find_library('A/foo/bar'),
                         posixpath.abspath('A'))
Example #49
0
def write_action(asset, webgl_mode):
    filename = posixpath.splitext(posixpath.basename(asset['path']))[0]
    filename = filename.replace('.', '_')
    filename = filename.replace('-', '_')
    filename = filename.lower()
    name = "convert_" + filename
    if webgl_mode:
        name = name + "_webgl"
    output = asset['path'].replace('convert_', '')
    output_base = posixpath.splitext(output)[0]
    output_tgz = output_base + ".o3dtgz"
    output_json = output_base + "/scene.json"

    output = output_tgz
    if webgl_mode:
        output = output_json
    output_dir = posixpath.dirname(output)

    output_file.write("        {\n")
    output_file.write("          'action_name': '%s',\n" % name)
    output_file.write("          'inputs': [\n")
    output_file.write("            '<(PRODUCT_DIR)/o3dConverter',\n")
    # TODO(kbr): there are a couple of problems with gyp's MSVC
    # generator which require this workaround right now. First, if two
    # actions have the same inputs then gyp will arbitrarily hang the
    # custom build rule off the second input, and the collision will
    # currently cause one or the other rule to be dropped. Second, if we
    # just insert a fake second input, without removing the .zip, then
    # for some reason the .zip ends up in an "_excluded_files" folder in
    # the project and the non-WebGL version of the asset won't be built.
    # Here we use a different input for the WebGL version of the assets.
    if webgl_mode:
        output_file.write(
            "            '../o3d_assets/samples/%s.fake_webgl_input.txt',\n" %
            asset['path'])
    else:
        output_file.write("            '../o3d_assets/samples/%s',\n" %
                          asset['path'])
    output_file.write("          ],\n")
    output_file.write("          'outputs': [\n")
    if sys.platform[:5] == 'linux':
        # TODO(gspencer): This is a HACK!  We shouldn't need to put the
        # absolute path here, but currently on Linux (scons), it is unable
        # to copy generated items out of the source tree (because the
        # repository mojo fails to find it and puts in the wrong path).
        output_file.write("            '%s',\n" % posixpath.abspath(output))
    else:
        output_file.write("            '../samples/%s',\n" % output)
    output_file.write("          ],\n")
    output_file.write("          'action': [\n")
    output_file.write("            '<(PRODUCT_DIR)/o3dConverter',\n")
    output_file.write("            '--no-condition',\n")
    output_file.write("            '--up-axis=%s',\n" % asset['up'])
    if pretty_print:
        output_file.write("            '--pretty-print',\n")
    if webgl_mode:
        output_file.write("            '--no-binary',\n")
        output_file.write("            '--no-archive',\n")
        output_file.write("            '--convert-dds-to-png',\n")
        output_file.write("            '--convert-cg-to-glsl',\n")
    output_file.write("            '../o3d_assets/samples/%s',\n" %
                      asset['path'])
    if webgl_mode:
        output_file.write("            '../samples/%s',\n" % output_tgz)
    else:
        output_file.write("            '<(_outputs)',\n")
    output_file.write("          ],\n")
    output_file.write("        },\n")
Example #50
0
    def __iter__(self):  # noqa: C901
        storage = queryUtility(IRedirectionStorage)
        for item in self.previous:
            if storage is None:
                self.logger.warn(
                    'Could not find plone.app.redirector storage utility')
                yield item
                continue

            keys = item.keys()

            # Update paths first

            # If a redirect already exists for _path, it should be
            # updated first so that any new redirects for _old_paths
            # point to the correct _path and it's unlikely that any
            # keys in this item will contain any _old_paths for the
            # same item

            for key in keys:
                if not self.updatepathkeys(key)[1]:
                    continue

                if not self.condition(item, key=key):
                    continue

                multiple = True
                paths = old_paths = item[key]
                if not isinstance(paths, (tuple, list)):
                    multiple = False
                    paths = [paths]

                for idx, obj in enumerate(paths):
                    if obj is None:
                        continue  # no object at this location

                    path = obj
                    is_element = isinstance(obj, (Element, ElementBase))
                    if is_element:
                        for attrib in self.elementattribs:
                            if attrib in obj.attrib:
                                path = obj.attrib[attrib]
                                break
                        else:
                            # No attribute in this element
                            continue

                    leading = path[:-len(path.lstrip('/'))]
                    url = urlparse.urlsplit(path)
                    if self._is_external(url):
                        continue

                    abspath = posixpath.abspath(
                        posixpath.join(posixpath.sep,
                                       str(url.path).lstrip('/'))).lstrip('/')
                    new_path = old_path = self.context_path
                    for elem in pathsplit(abspath):
                        old_path = posixpath.join(old_path, elem)
                        new_path = posixpath.join(new_path, elem)
                        new_path = storage.get(old_path, new_path)
                    if not urlparse.urlsplit(new_path).netloc:
                        new_path = leading + new_path[len(self.context_path):]
                    new_path = urlparse.urlunsplit(url[:2] + (new_path, ) +
                                                   url[3:])

                    if new_path != path.lstrip('/'):
                        if is_element:
                            obj.attrib[attrib] = new_path
                            new_path = obj
                        paths[idx] = new_path

                if not multiple:
                    paths = paths[0]
                if item[key] != paths:
                    self.logger.debug('Updating %r path(s): %r => %r', key,
                                      old_paths, paths)
                    item[key] = paths

            # Collect old paths
            old_paths = set()
            oldpathskey = self.oldpathskey(*keys)[0]
            if oldpathskey and oldpathskey in item:
                paths = item[oldpathskey]
                if isinstance(paths, (str, unicode)):
                    paths = [paths]
                old_paths.update(paths)

            pathkey = self.pathkey(*keys)[0]
            if pathkey:
                path = item[pathkey]
                url = urlparse.urlsplit(path)
                new_path = (self._is_external(url) and path or posixpath.join(
                    self.context_path,
                    str(path).lstrip('/'))).rstrip('/')
                # Add any new redirects
                for old_path in old_paths:
                    old_path = posixpath.join(
                        self.context_path,
                        str(old_path).lstrip('/')).rstrip('/')
                    if (old_path and old_path != new_path and
                            # Avoid recursive redirects
                            not new_path.startswith(old_path + '/')
                            and not storage.has_path(old_path)
                            and traverse(self.context, old_path) is None):
                        self.logger.debug('Adding %r redirect: %r => %r',
                                          pathkey, old_path, new_path)
                        storage.add(old_path, new_path)

            yield item
Example #51
0
File: url.py  Project: wrwilliams/spack
def join(base_url, path, *extra, **kwargs):
    """Joins a base URL with one or more local URL path components

    If resolve_href is True, treat the base URL as though it were the locator
    of a web page, and the remaining URL path components as though they formed
    a relative URL to be resolved against it (i.e.: as in posixpath.join(...)).
    The result is an absolute URL to the resource to which a user's browser
    would navigate if they clicked on a link with an "href" attribute equal to
    the relative URL.

    If resolve_href is False (default), then the URL path components are joined
    as in posixpath.join().

    Note: file:// URL path components are not canonicalized as part of this
    operation.  To canonicalize, pass the joined url to format().

    Examples:
      base_url = 's3://bucket/index.html'
      body = fetch_body(prefix)
      link = get_href(body) # link == '../other-bucket/document.txt'

      # wrong - link is a local URL that needs to be resolved against base_url
      spack.util.url.join(base_url, link)
      's3://bucket/other_bucket/document.txt'

      # correct - resolve local URL against base_url
      spack.util.url.join(base_url, link, resolve_href=True)
      's3://other_bucket/document.txt'

      prefix = 'https://mirror.spack.io/build_cache'

      # wrong - prefix is just a URL prefix
      spack.util.url.join(prefix, 'my-package', resolve_href=True)
      'https://mirror.spack.io/my-package'

      # correct - simply append additional URL path components
      spack.util.url.join(prefix, 'my-package', resolve_href=False) # default
      'https://mirror.spack.io/build_cache/my-package'

      # For canonicalizing file:// URLs, take care to explicitly differentiate
      # between absolute and relative join components.

      # '$spack' is not an absolute path component
      join_result = spack.util.url.join('/a/b/c', '$spack') ; join_result
      'file:///a/b/c/$spack'
      spack.util.url.format(join_result)
      'file:///a/b/c/opt/spack'

      # '/$spack' *is* an absolute path component
      join_result = spack.util.url.join('/a/b/c', '/$spack') ; join_result
      'file:///$spack'
      spack.util.url.format(join_result)
      'file:///opt/spack'
    """
    paths = [(x) if isinstance(x, string_types) else x.geturl()
             for x in itertools.chain((base_url, path), extra)]

    paths = [convert_to_posix_path(x) for x in paths]
    n = len(paths)
    last_abs_component = None
    scheme = ''
    for i in range(n - 1, -1, -1):
        obj = urllib_parse.urlparse(paths[i], scheme='', allow_fragments=False)

        scheme = obj.scheme

        # in either case the component is absolute
        if scheme or obj.path.startswith('/'):
            if not scheme:
                # Without a scheme, we have to go back looking for the
                # next-last component that specifies a scheme.
                for j in range(i - 1, -1, -1):
                    obj = urllib_parse.urlparse(paths[j],
                                                scheme='',
                                                allow_fragments=False)

                    if obj.scheme:
                        paths[i] = '{SM}://{NL}{PATH}'.format(
                            SM=obj.scheme,
                            NL=((obj.netloc +
                                 '/') if obj.scheme != 's3' else ''),
                            PATH=paths[i][1:])
                        break

            last_abs_component = i
            break

    if last_abs_component is not None:
        paths = paths[last_abs_component:]
        if len(paths) == 1:
            result = urllib_parse.urlparse(paths[0],
                                           scheme='file',
                                           allow_fragments=False)

            # another subtlety: If the last argument to join() is an absolute
            # file:// URL component with a relative path, the relative path
            # needs to be resolved.
            if result.scheme == 'file' and result.netloc:
                result = urllib_parse.ParseResult(
                    scheme=result.scheme,
                    netloc='',
                    path=posixpath.abspath(result.netloc + result.path),
                    params=result.params,
                    query=result.query,
                    fragment=None)

            return result.geturl()

    return _join(*paths, **kwargs)
Example #52
0
    def launchpad_report(self, launchpad_report):
        self._interface.report_url = "file://%s" % posixpath.abspath(
            launchpad_report)
Example #53
0
def resturl(request, lid):
    restpath = posixpath.abspath(posixpath.join(request.path, '../rest'))
    return '%s?league=%s' % (restpath, lid)
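# Added note: posixpath.abspath collapses the '../rest' segment, so a
# (hypothetical) request.path of '/league/42/view' yields
# '/league/42/rest?league=42'.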
Example #54
0
            copies[item_dir] += [item]
    else:
        copies[item_dir] = [item]

for (dir, paths) in copies.items():
    output_file.write("        {\n")
    output_file.write("          'destination': " \
                      "'<(PRODUCT_DIR)/samples/%s',\n" % dir)
    output_file.write("          'files': [\n")
    for path in paths:
        if sys.platform[:5] == 'linux':
            # TODO(gspencer): This is a HACK!  We shouldn't need to put the
            # absolute path here, but currently on Linux (scons), it is unable
            # to copy generated items out of the source tree (because the
            # repository mojo fails to find it and puts in the wrong path).
            output_file.write("            '%s',\n" % posixpath.abspath(path))
        else:
            output_file.write("            '../samples/%s',\n" % path)
    output_file.write("          ],\n")
    output_file.write("        },\n")

output_file.write("      ],\n")
output_file.write("    },\n")
output_file.write("""    {
      'target_name': 'build_samples',
      'type': 'none',
      'dependencies': [
        'copy_samples',
      ],
    },\n""")
output_file.write("  ],\n")
Example #55
0
    def __in_home_directory(self, path):
        return posixpath.abspath(path).startswith(self.__home_directory)
Example #56
0
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.

"""

from __future__ import absolute_import, division, print_function, unicode_literals

import os
import sys
import posixpath

archive_path = posixpath.split(os.path.abspath(__file__))[0]
archive_uppath = posixpath.abspath(posixpath.join(archive_path, '..'))

sys.path.append(archive_path)
sys.path.append(archive_uppath)

os.chdir(archive_uppath)

os.environ["DJANGO_SETTINGS_MODULE"] = "archive.settings"

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
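# A minimal sketch (added; not in the original file) of the middleware hook
# mentioned in the docstring above: wrap `application` in a plain WSGI
# callable. The header name and class are hypothetical examples.
#
#   class HeaderMiddleware(object):
#       def __init__(self, wrapped):
#           self.wrapped = wrapped
#
#       def __call__(self, environ, start_response):
#           def _start_response(status, headers, exc_info=None):
#               # Add a response header before delegating to Django.
#               headers.append(("X-Served-By", "archive-wsgi"))
#               return start_response(status, headers, exc_info)
#           return self.wrapped(environ, _start_response)
#
#   application = HeaderMiddleware(application)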
Example #57
0
    def resolve(self, strict: bool = False) -> 'GPath':
        """Returns the absolute path."""
        return GPath(posixpath.abspath(self._path_str))
Example #58
0
    def resolve(self: _P, strict: bool = False) -> _P:
        """Returns the absolute path."""
        return self._new(posixpath.abspath(self._path_str))
Example #59
0
import secrets
import posixpath
from flask import Flask
import os
import matplotlib
matplotlib.use('Agg')

secret_path = "/var/subway/.secret"

app = Flask(__name__)
if not os.path.exists(secret_path):
    open(secret_path, "wb").write(secrets.token_bytes(16))
app.config["SECRET_KEY"] = open(secret_path, "rb").read()
app.config["N_TASKS"] = 5
app.config["SVG_DIR"] = posixpath.abspath(
    posixpath.join(posixpath.curdir, "tasks"))
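# Added note: posixpath.curdir is '.', so SVG_DIR resolves against the working
# directory at import time, not against this file's location.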

if app.config["DEBUG"]:
    app.config['TEMPLATES_AUTO_RELOAD'] = True

import subway.views