Example #1

    def test_commonprefix(self):
        self.assertEqual(
            posixpath.commonprefix([]),
            ""
        )
        self.assertEqual(
            posixpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"]),
            "/home/swen"
        )
        self.assertEqual(
            posixpath.commonprefix(["/home/swen/spam", "/home/swen/eggs"]),
            "/home/swen/"
        )
        self.assertEqual(
            posixpath.commonprefix(["/home/swen/spam", "/home/swen/spam"]),
            "/home/swen/spam"
        )

        testlist = ['', 'abc', 'Xbcd', 'Xb', 'XY', 'abcd', 'aXc', 'abd', 'ab', 'aX', 'abcX']
        for s1 in testlist:
            for s2 in testlist:
                p = posixpath.commonprefix([s1, s2])
                self.assertTrue(s1.startswith(p))
                self.assertTrue(s2.startswith(p))
                if s1 != s2:
                    n = len(p)
                    self.assertNotEqual(s1[n:n+1], s2[n:n+1])
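As the first assertion pair shows, posixpath.commonprefix compares strings
character by character, so it can return a prefix that ends mid-component:
"/home/swen" is not a directory shared by both inputs. A minimal sketch of the
usual fix, trimming back to the last separator, as several examples below do:

import posixpath

paths = ["/home/swenson/spam", "/home/swen/spam"]
raw = posixpath.commonprefix(paths)     # '/home/swen' -- ends mid-component
trimmed = raw.rpartition("/")[0] + "/"  # '/home/' -- an actual shared directory
print(raw, trimmed)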
Example #2
  def Canonicalize(self, path):
    '''Returns the canonical path for |path|.
    '''
    canonical_paths, simplified_paths_map = self._LoadCache().Get()

    # Path may already be the canonical path.
    if path in canonical_paths:
      return path

    # Path not found. Our single heuristic: find |base| in the directory
    # structure with the longest common prefix of |path|.
    _, base = SplitParent(path)
    potential_paths = simplified_paths_map.get(_SimplifyFileName(base))
    if not potential_paths:
      # There is no file with anything close to that name.
      return path

    # The most likely canonical file is the one with the longest common prefix
    # with |path|. This is slightly weaker than it could be; |path| is
    # compared, not the simplified form of |path|, which may matter.
    max_prefix = potential_paths[0]
    max_prefix_length = len(posixpath.commonprefix((max_prefix, path)))
    for path_for_file in potential_paths[1:]:
      prefix_length = len(posixpath.commonprefix((path_for_file, path)))
      if prefix_length > max_prefix_length:
        max_prefix, max_prefix_length = path_for_file, prefix_length

    return max_prefix
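The selection loop in isolation, with hypothetical candidate paths: the
candidate sharing the longest character prefix with the query wins, and since
only a strictly longer prefix replaces the current best, ties keep the first
candidate, just as max() does:

import posixpath

path = 'extensions/browser/app_window.h'
potential_paths = ['extensions/renderer/app_window.h',
                   'extensions/browser/app_window.h']
best = max(potential_paths,
           key=lambda p: len(posixpath.commonprefix((p, path))))
print(best)  # 'extensions/browser/app_window.h'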
Example #3
  def fill_in_extra_args(self, commit):
    # Set any empty members to the string "<null>"
    v = vars(commit)
    for k in v.keys():
      if not v[k]:
        v[k] = '<null>'

    self._generate_dirs_changed(commit)
    # Add entries to the commit object that are useful for
    # formatting.
    commit.log_firstline = commit.log.split("\n", 1)[0]
    commit.log_firstparagraph = re.split(r"\r?\n\r?\n", commit.log, 1)[0]
    commit.log_firstparagraph = re.sub(r"\r?\n", " ", commit.log_firstparagraph)
    if commit.dirs_changed:
      commit.dirs_root = posixpath.commonprefix(commit.dirs_changed)
      if commit.dirs_root == '':
        commit.dirs_root = '/'
      commit.dirs_count = len(commit.dirs_changed)
      if commit.dirs_count > 1:
        commit.dirs_count_s = " (%d dirs)" %(commit.dirs_count)
      else:
        commit.dirs_count_s = ""

      commit.subdirs_count = commit.dirs_count
      if commit.dirs_root in commit.dirs_changed:
        commit.subdirs_count -= 1
      if commit.subdirs_count >= 1:
        commit.subdirs_count_s = " + %d subdirs" % (commit.subdirs_count)
      else:
        commit.subdirs_count_s = ""
Example #4
    def _relpath(path, start=None):
        """Return a relative version of a path.

        Implementation by James Gardner in his BareNecessities
        package, under MIT licence.

        With a fix for Windows where posixpath.sep (and functions like
        join) use the Unix slash not the Windows slash.
        """
        import posixpath
        if start is None:
            start = posixpath.curdir
        else:
            start = start.replace(os.path.sep, posixpath.sep)
        if not path:
            raise ValueError("no path specified")
        else:
            path = path.replace(os.path.sep, posixpath.sep)
        start_list = posixpath.abspath(start).split(posixpath.sep)
        path_list = posixpath.abspath(path).split(posixpath.sep)
        # Work out how much of the filepath is shared by start and path.
        i = len(posixpath.commonprefix([start_list, path_list]))
        rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
        if not rel_list:
            return posixpath.curdir.replace(posixpath.sep, os.path.sep)
        return posixpath.join(*rel_list).replace(posixpath.sep, os.path.sep)
Example #5
def commonPaths(paths):
    """ Returns the common component and the stripped paths

        It expects that directories always end with a trailing slash and
        paths never begin with a slash (except root).

        :param paths: The list of paths (``[str, str, ...]``)
        :type paths: ``list``

        :return: The common component (always a directory) and the stripped
                 paths (``(str, [str, str, ...])``)
        :rtype: ``tuple``
    """
    import posixpath

    common = ''
    if len(paths) > 1 and "/" not in paths:
        common = posixpath.commonprefix(paths)
        if common[-1:] != "/":
            common = common[:common.rfind("/") + 1]

        idx = len(common)
        if idx > 0:
            paths = [path[idx:] or "./" for path in paths]
            common = common[:-1] # chop the trailing slash

    return (common, paths)
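A quick check of the contract, assuming the commonPaths definition above is in
scope and following the docstring's conventions (directories carry a trailing
slash, paths have no leading slash):

common, stripped = commonPaths(["usr/local/bin/", "usr/local/lib/python/"])
print(common)    # 'usr/local'
print(stripped)  # ['bin/', 'lib/python/']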
Example #6
  def test_set_lifecycle_wildcard(self):
    bucket1_uri = self.CreateBucket()
    bucket2_uri = self.CreateBucket()
    # This just double checks that the common prefix of the two buckets is what
    # we think it should be (based on implementation detail of CreateBucket).
    # We want to be careful when setting a wildcard on buckets to make sure we
    # don't step outside the test buckets to affect other buckets.
    common_prefix = posixpath.commonprefix([suri(bucket1_uri),
                                            suri(bucket2_uri)])
    self.assertTrue(common_prefix.startswith(
        'gs://gsutil-test-test_set_lifecycle_wildcard'))
    wildcard = '%s*' % common_prefix

    fpath = self.CreateTempFile(contents=self.valid_doc)
    stderr = self.RunGsUtil(['lifecycle', 'set', fpath, wildcard],
                            return_stderr=True)
    self.assertIn('Setting lifecycle configuration on %s/...' %
                  suri(bucket1_uri), stderr)
    self.assertIn('Setting lifecycle configuration on %s/...' %
                  suri(bucket2_uri), stderr)
    self.assertEqual(stderr.count('Setting lifecycle configuration'), 2)

    stdout = self.RunGsUtil(['lifecycle', 'get', suri(bucket1_uri)],
                            return_stdout=True)
    self.assertEqual(stdout, self.valid_doc)
    stdout = self.RunGsUtil(['lifecycle', 'get', suri(bucket2_uri)],
                            return_stdout=True)
    self.assertEqual(stdout, self.valid_doc)
Example #7
    def consolidateFiles(self, xmlFiles):
        """Given a <files> element, find the directory common to all files
           and return a 2-tuple with that directory followed by
           a list of files within that directory.
           """
        files = []
        if xmlFiles:
            for fileTag in XML.getChildElements(xmlFiles):
                if fileTag.nodeName == 'file':
                    files.append(XML.shallowText(fileTag))

        # If we only have one file, return it as the prefix.
        # This prevents the below regex from deleting the filename
        # itself, assuming it was a partial filename.
        if len(files) == 1:
            return files[0], []

        # Start with the prefix found by commonprefix,
        # then actually make it end with a directory rather than
        # possibly ending with part of a filename.
        prefix = re.sub("[^/]*$", "", posixpath.commonprefix(files))

        endings = []
        for file in files:
            ending = file[len(prefix):].strip()
            if ending == '':
                ending = '.'
            endings.append(ending)
        return prefix, endings
Example #8
    def test_bucket_list_wildcard(self):
        """Tests listing multiple buckets with a wildcard."""
        random_prefix = self.MakeRandomTestString()
        bucket1_name = self.MakeTempName("bucket", prefix=random_prefix)
        bucket2_name = self.MakeTempName("bucket", prefix=random_prefix)
        bucket1_uri = self.CreateBucket(bucket_name=bucket1_name)
        bucket2_uri = self.CreateBucket(bucket_name=bucket2_name)
        # This just double checks that the common prefix of the two buckets is what
        # we think it should be (based on implementation detail of CreateBucket).
        # We want to be careful when setting a wildcard on buckets to make sure we
        # don't step outside the test buckets to affect other buckets.
        common_prefix = posixpath.commonprefix([suri(bucket1_uri), suri(bucket2_uri)])
        self.assertTrue(
            common_prefix.startswith(
                "%s://%sgsutil-test-test_bucket_list_wildcard-bucket-" % (self.default_provider, random_prefix)
            )
        )
        wildcard = "%s*" % common_prefix

        # Use @Retry as hedge against bucket listing eventual consistency.
        @Retry(AssertionError, tries=3, timeout_secs=1)
        def _Check1():
            stdout = self.RunGsUtil(["ls", "-b", wildcard], return_stdout=True)
            expected = set([suri(bucket1_uri) + "/", suri(bucket2_uri) + "/"])
            actual = set(stdout.split())
            self.assertEqual(expected, actual)

        _Check1()
Example #9
    def commit(self, url, commit):
        if commit.type != 'svn' or commit.format != 1:
            logging.info("SKIP unknown commit format (%s.%d)",
                         commit.type, commit.format)
            return
        logging.info("COMMIT r%d (%d paths) from %s"
                     % (commit.id, len(commit.changed), url))

        # list() so the result survives len() and iteration on Python 3,
        # where map() returns a one-shot iterator.
        paths = list(map(self._normalize_path, commit.changed))
        if paths:
            pre = posixpath.commonprefix(paths)
            if pre == "/websites/":
                # special case for svnmucc "dynamic content" buildbot commits
                # just take the first production path to avoid updating all cms working copies
                for p in paths:
                    m = PRODUCTION_RE_FILTER.match(p)
                    if m:
                        pre = m.group(0)
                        break

            #print "Common Prefix: %s" % (pre)
            wcs = [wc for wc in self.watch if wc.update_applies(commit.repository, pre)]
            logging.info("Updating %d WC for r%d" % (len(wcs), commit.id))
            for wc in wcs:
                self.worker.add_work(OP_UPDATE, wc)
Example #10

    def GetCommonPrefix(args):
      """Returns the common prefix between two paths (no partial paths).

      e.g.: /tmp/bar, /tmp/baz will return /tmp/ (and not /tmp/ba as the dumb
      posixpath.commonprefix implementation would do)
      """
      parts = posixpath.commonprefix(args).rpartition(posixpath.sep)[0]
      return parts + posixpath.sep if parts else ''
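The docstring's own example, checked with the function restated at module
level; note that the inner commonprefix result already contains the separator,
so rpartition only strips the partial tail ('ba'):

import posixpath

def GetCommonPrefix(args):
    parts = posixpath.commonprefix(args).rpartition(posixpath.sep)[0]
    return parts + posixpath.sep if parts else ''

print(GetCommonPrefix(['/tmp/bar', '/tmp/baz']))  # '/tmp/'
print(GetCommonPrefix(['foo', 'bar']))            # ''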
Example #11
 def test_commonprefix(self):
     self.assertEqual(
         posixpath.commonprefix([]),
         ""
     )
     self.assertEqual(
         posixpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"]),
         "/home/swen"
     )
     self.assertEqual(
         posixpath.commonprefix(["/home/swen/spam", "/home/swen/eggs"]),
         "/home/swen/"
     )
     self.assertEqual(
         posixpath.commonprefix(["/home/swen/spam", "/home/swen/spam"]),
         "/home/swen/spam"
     )
Example #12
 def is_suburi(self, base, test):
     if base == test:
         return True
     if base[0] != test[0]:
         return False
     common = posixpath.commonprefix((base[1], test[1]))
     if len(common) == len(base[1]):
         return True
     return False
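This helper matches urllib2's HTTPPasswordMgr.is_suburi, where base and test
are (authority, path) pairs in reduced form; a standalone restatement with a
hypothetical check:

import posixpath

def is_suburi(base, test):
    if base == test:
        return True
    if base[0] != test[0]:
        return False
    common = posixpath.commonprefix((base[1], test[1]))
    return len(common) == len(base[1])

print(is_suburi(("http://example.com", "/docs/"),
                ("http://example.com", "/docs/ch1")))   # True
print(is_suburi(("http://example.com", "/docs/"),
                ("http://example.com", "/download/")))  # False

Because the comparison is character-wise, a base path without a trailing slash
also matches siblings that merely share its spelling (a base of "/docs" matches
"/docs-evil") -- a quirk worth noting when the base is not slash-terminated.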
Example #13
 def relpath(path, start=curdir):
     """Return a relative version of a path"""
     if not path:
         raise ValueError("no path specified")
     
     start_list = abspath(start).split(sep)
     path_list = abspath(path).split(sep)
     # Work out how much of the filepath is shared by start and path.
     i = len(commonprefix([start_list, path_list]))
     rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
     return curdir if not rel_list else join(*rel_list)
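Note that these relpath recipes pass lists to commonprefix: it accepts any
sequence of sequences, and on lists of path components the comparison is
element-wise, which sidesteps the mid-component problem entirely. The backports
should agree with the stdlib's posixpath.relpath (available since Python 2.6):

import posixpath

start_list = posixpath.abspath("/a/d").split(posixpath.sep)   # ['', 'a', 'd']
path_list = posixpath.abspath("/a/b/c").split(posixpath.sep)  # ['', 'a', 'b', 'c']
print(posixpath.commonprefix([start_list, path_list]))  # ['', 'a']
print(posixpath.relpath("/a/b/c", "/a/d"))              # '../b/c'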
Example #14

  def test_set_lifecycle_wildcard(self):
    """Tests setting lifecycle with a wildcarded bucket URI."""
    if self.test_api == ApiSelector.XML:
      # This test lists buckets with wildcards, but it is possible that another
      # test being run in parallel (in the same project) deletes a bucket after
      # it is listed in this test. This causes the subsequent XML metadata get
      # for the lifecycle configuration to fail on that just-deleted bucket,
      # even though that bucket is not used directly in this test.
      return unittest.skip('XML wildcard behavior can cause test to flake '
                           'if a bucket in the same project is deleted '
                           'during execution.')

    random_prefix = self.MakeRandomTestString()
    bucket1_name = self.MakeTempName('bucket', prefix=random_prefix)
    bucket2_name = self.MakeTempName('bucket', prefix=random_prefix)
    bucket1_uri = self.CreateBucket(bucket_name=bucket1_name)
    bucket2_uri = self.CreateBucket(bucket_name=bucket2_name)
    # This just double checks that the common prefix of the two buckets is what
    # we think it should be (based on implementation detail of CreateBucket).
    # We want to be careful when setting a wildcard on buckets to make sure we
    # don't step outside the test buckets to affect other buckets.
    common_prefix = posixpath.commonprefix(
        [suri(bucket1_uri), suri(bucket2_uri)])
    self.assertTrue(
        common_prefix.startswith(
            'gs://%sgsutil-test-test-set-lifecycle-wildcard-' % random_prefix))
    wildcard = '%s*' % common_prefix

    fpath = self.CreateTempFile(contents=self.lifecycle_doc.encode('ascii'))

    # Use @Retry as hedge against bucket listing eventual consistency.
    expected = set([
        'Setting lifecycle configuration on %s/...' % suri(bucket1_uri),
        'Setting lifecycle configuration on %s/...' % suri(bucket2_uri)
    ])
    actual = set()

    @Retry(AssertionError, tries=3, timeout_secs=1)
    def _Check1():
      stderr = self.RunGsUtil(['lifecycle', 'set', fpath, wildcard],
                              return_stderr=True)
      actual.update(stderr.splitlines())
      self.assertEqual(expected, actual)
      self.assertEqual(stderr.count('Setting lifecycle configuration'), 2)

    _Check1()

    stdout = self.RunGsUtil(
        ['lifecycle', 'get', suri(bucket1_uri)], return_stdout=True)
    self.assertEqual(json.loads(stdout), self.lifecycle_json_obj)
    stdout = self.RunGsUtil(
        ['lifecycle', 'get', suri(bucket2_uri)], return_stdout=True)
    self.assertEqual(json.loads(stdout), self.lifecycle_json_obj)
Example #15
  def test_set_wildcard_non_null_cors(self):
    """Tests setting CORS on a wildcarded bucket URI."""
    random_prefix = self.MakeRandomTestString()
    bucket1_name = self.MakeTempName('bucket', prefix=random_prefix)
    bucket2_name = self.MakeTempName('bucket', prefix=random_prefix)
    bucket1_uri = self.CreateBucket(bucket_name=bucket1_name)
    bucket2_uri = self.CreateBucket(bucket_name=bucket2_name)
    # This just double checks that the common prefix of the two buckets is what
    # we think it should be (based on implementation detail of CreateBucket).
    # We want to be careful when setting a wildcard on buckets to make sure we
    # don't step outside the test buckets to affect other buckets.
    common_prefix = posixpath.commonprefix(
        [suri(bucket1_uri), suri(bucket2_uri)])
    self.assertTrue(
        common_prefix.startswith(
            'gs://%sgsutil-test-test-set-wildcard-non-null-cors-' %
            random_prefix))
    wildcard = '%s*' % common_prefix

    fpath = self.CreateTempFile(contents=self.cors_doc.encode(UTF8))

    # Use @Retry as hedge against bucket listing eventual consistency.
    expected = set([
        'Setting CORS on %s/...' % suri(bucket1_uri),
        'Setting CORS on %s/...' % suri(bucket2_uri)
    ])
    actual = set()

    @Retry(AssertionError, tries=3, timeout_secs=1)
    def _Check1():
      """Ensures expect set lines are present in command output."""
      stderr = self.RunGsUtil(self._set_cmd_prefix + [fpath, wildcard],
                              return_stderr=True)
      outlines = stderr.splitlines()
      for line in outlines:
        # Ignore the deprecation warnings from running the old cors command.
        if ('You are using a deprecated alias' in line or
            'gsutil help cors' in line or
            'Please use "cors" with the appropriate sub-command' in line):
          continue
        actual.add(line)
      for line in expected:
        self.assertIn(line, actual)
      self.assertEqual(stderr.count('Setting CORS'), 2)

    _Check1()

    stdout = self.RunGsUtil(self._get_cmd_prefix + [suri(bucket1_uri)],
                            return_stdout=True)
    self.assertEqual(json.loads(stdout), self.cors_json_obj)
    stdout = self.RunGsUtil(self._get_cmd_prefix + [suri(bucket2_uri)],
                            return_stdout=True)
    self.assertEqual(json.loads(stdout), self.cors_json_obj)
Example #16
    def posix_relpath(path, start):
        sep = posixpath.sep
        start_list = [x for x in posixpath.abspath(start).split(sep) if x]
        path_list = [x for x in posixpath.abspath(path).split(sep) if x]

        # Work out how much of the filepath is shared by start and path.
        i = len(posixpath.commonprefix([start_list, path_list]))

        rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
        if not rel_list:
            return posixpath.curdir
        return posixpath.join(*rel_list)
Example #17
 def relpath(path, start=posixpath.curdir):   # NOQA
     """Return a relative version of a path"""
     if not path:
         raise ValueError("no path specified")
     start_list = posixpath.abspath(start).split(posixpath.sep)
     path_list = posixpath.abspath(path).split(posixpath.sep)
     # Work out how much of the filepath is shared by start and path.
     i = len(posixpath.commonprefix([start_list, path_list]))
     rel_list = [posixpath.pardir] * (len(start_list) - i) + path_list[i:]
     if not rel_list:
         return posixpath.curdir
     return posixpath.join(*rel_list)
Example #18
    def is_suburi(self, base, test):
        """Check if test is below base in a URI tree

        Both args must be URIs in reduced form.
        """
        if base == test:
            return True
        if base[0] != test[0]:
            return False
        common = posixpath.commonprefix((base[1], test[1]))
        if len(common) == len(base[1]):
            return True
        return False
Example #19
def relpath(path, start=curdir):
    """Return a relative version of a path, backport to python2.4 from 2.6"""
    """http://www.saltycrane.com/blog/2010/03/ospathrelpath-source-code-python-25/ """
    if not path:
        raise ValueError("no path specified")
    start_list = posixpath.abspath(start).split(sep)
    path_list = posixpath.abspath(path).split(sep)
    # Work out how much of the filepath is shared by start and path.
    i = len(posixpath.commonprefix([start_list, path_list]))
    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
Example #20
File: buck.py Project: azatoth/buck
def relpath(path, start=posixpath.curdir):
  """
  Return a relative filepath to path from the current directory or an optional start point.
  """
  if not path:
    raise ValueError("no path specified")
  start_list = posixpath.abspath(start).split(posixpath.sep)
  path_list = posixpath.abspath(path).split(posixpath.sep)
  # Work out how much of the filepath is shared by start and path.
  common = len(posixpath.commonprefix([start_list, path_list]))
  rel_list = [posixpath.pardir] * (len(start_list) - common) + path_list[common:]
  if not rel_list:
    return posixpath.curdir
  return posixpath.join(*rel_list)
Example #21
    def _get_relative_path(a, b):
        """
        returns a relative path for navigation from dir *a* to dir *b*

        if the common parent of both is "/", return an absolute path
        """
        a += "/"
        b += "/"
        parent = posixpath.dirname(posixpath.commonprefix([a,b]))
        if parent == "/": return b[:-1]

        a = posixpath.relpath(a, parent)
        b = posixpath.relpath(b, parent)
        if a == ".": return b

        return posixpath.normpath("../" * (a.count("/")+1) + b)
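A standalone restatement with hypothetical inputs, exercising both branches
(the helper assumes absolute directory paths without trailing slashes, since it
appends the slashes itself):

import posixpath

def _get_relative_path(a, b):
    a += "/"
    b += "/"
    parent = posixpath.dirname(posixpath.commonprefix([a, b]))
    if parent == "/":
        return b[:-1]
    a = posixpath.relpath(a, parent)
    b = posixpath.relpath(b, parent)
    if a == ".":
        return b
    return posixpath.normpath("../" * (a.count("/") + 1) + b)

print(_get_relative_path("/srv/www/a", "/srv/www/b"))  # '../b'
print(_get_relative_path("/a", "/b"))                  # '/b' (absolute fallback)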
Example #22
def appname(p):
    """Given an etcdir or etcfile, returns the inferred the appname."""
    if pp.isfile(p):
        p = pp.dirname(p)

    # Strip away 'etc'
    assert pp.basename(p) == 'etc', p
    p = pp.dirname(p)

    prefix = pp.commonprefix([env.directory, p])
    p = p[len(prefix):]

    if p[0] == '/':
        p = p[1:]

    return p
Example #23
def relpath(path, start=os.getcwd()):
    """Return a relative version of a path"""
    sep = "/"
    if platform.system() == 'Windows':
      sep = "\\"

    if not path:
        raise ValueError("no path specified")
    start_list = posixpath.abspath(start).split(sep)
    path_list = posixpath.abspath(path).split(sep)
    # Work out how much of the filepath is shared by start and path.
    i = len(posixpath.commonprefix([start_list, path_list]))
    # Climb out of the unshared part of start, then descend into path.
    rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return os.curdir
    r = sep.join(rel_list)
    return r
Example #24

def file_list(com):
    filelist = com["modified"] + com["added"] + com["removed"]
    filelist = sorted(set(filelist))
    if len(filelist) == 1:
        return filelist[0]
    prefix = posixpath.commonprefix(filelist).rpartition("/")[0] + "/"
    suffixes = [entry[len(prefix):] for entry in filelist]
    sufstring = " ".join(suffixes)
    if len(sufstring) < 80:
        if prefix == "/":
            return " ".join(filelist)
        else:
            return "{pre} ({suf})".format(pre=prefix, suf=sufstring)
    dirs = len({suffix.rpartition("/")[0] for suffix in suffixes})
    if dirs == 1:
        return "{pre} ({nfiles} files)".format(pre=prefix, nfiles=len(suffixes))
    else:
        return "{pre} ({nfiles} files in {ndirs} dirs)".format(pre=prefix, nfiles=len(suffixes), ndirs=dirs)
Example #25
    def test_set_lifecycle_wildcard(self):
        random_prefix = self.MakeRandomTestString()
        bucket1_name = self.MakeTempName('bucket', prefix=random_prefix)
        bucket2_name = self.MakeTempName('bucket', prefix=random_prefix)
        bucket1_uri = self.CreateBucket(bucket_name=bucket1_name)
        bucket2_uri = self.CreateBucket(bucket_name=bucket2_name)
        # This just double checks that the common prefix of the two buckets is what
        # we think it should be (based on implementation detail of CreateBucket).
        # We want to be careful when setting a wildcard on buckets to make sure we
        # don't step outside the test buckets to affect other buckets.
        common_prefix = posixpath.commonprefix(
            [suri(bucket1_uri), suri(bucket2_uri)])
        self.assertTrue(
            common_prefix.startswith(
                'gs://%sgsutil-test-test_set_lifecycle_wildcard-' %
                random_prefix))
        wildcard = '%s*' % common_prefix

        fpath = self.CreateTempFile(contents=self.valid_doc)

        # Use @Retry as hedge against bucket listing eventual consistency.
        expected = set([
            'Setting lifecycle configuration on %s/...' % suri(bucket1_uri),
            'Setting lifecycle configuration on %s/...' % suri(bucket2_uri)
        ])
        actual = set()

        @Retry(AssertionError, tries=3, timeout_secs=1)
        def _Check1():
            stderr = self.RunGsUtil(['lifecycle', 'set', fpath, wildcard],
                                    return_stderr=True)
            actual.update(stderr.splitlines())
            self.assertEqual(expected, actual)
            self.assertEqual(stderr.count('Setting lifecycle configuration'),
                             2)

        _Check1()

        stdout = self.RunGsUtil(
            ['lifecycle', 'get', suri(bucket1_uri)], return_stdout=True)
        self.assertEqual(stdout, self.valid_doc)
        stdout = self.RunGsUtil(
            ['lifecycle', 'get', suri(bucket2_uri)], return_stdout=True)
        self.assertEqual(stdout, self.valid_doc)
Example #26

 def _consolidate_files(self):
     files = []
     filenode = self.dig('message', 'body', 'commit', 'files')
     if filenode is not None:
         for child in filenode.childNodes:
             if child.nodeName == 'file':
                 files.append(self._shallowtext(child))
     # Optimization: if we only have one file, don't waste CPU on any of the other
     # stuff we do to pretend to be CIA.
     if len(files) == 1:
         return files[0], []
     prefix = re.sub("[^/]*$", "", posixpath.commonprefix(files))
     endings = []
     for file in files:
         ending = file[len(prefix):].strip()
         if ending == '':
             ending = '.'
         endings.append(ending)
     return prefix, endings
Example #27
    def _relpath(path, start=None):
        """Return a relative version of a path.

        Implementation by James Gardner in his BareNecessities
        package, under MIT licence.
        """
        import posixpath
        if start is None:
            start = posixpath.curdir
        if not path:
            raise ValueError("no path specified")
        start_list = posixpath.abspath(start).split(posixpath.sep)
        path_list = posixpath.abspath(path).split(posixpath.sep)
        # Work out how much of the filepath is shared by start and path.
        i = len(posixpath.commonprefix([start_list, path_list]))
        rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
        if not rel_list:
            return posixpath.curdir
        return posixpath.join(*rel_list)
Example #28
    def build_tweet(self, commit):
        maxlen = 144
        left = maxlen
        paths = list(map(self._normalize_path, commit.changed))
        if not paths:
            return None
        path = posixpath.commonprefix(paths)
        if path[0:1] == '/' and len(path) > 1:
            path = path[1:]

        #TODO: allow URL to be configurable.
        link = " - http://svn.apache.org/r%d" % (commit.id)
        left -= len(link)
        msg = "r%d in %s by %s: "  % (commit.id, path, commit.committer)
        left -= len(msg)
        if left > 3:
            msg += commit.log[0:left]
        msg += link
        return msg
Example #29
 def _consolidate_files(self):
     files = []
     filenode = self.dig('message', 'body', 'commit', 'files')
     if filenode is not None:
         for child in filenode.childNodes:
             if child.nodeName == 'file':
                 files.append(self._shallowtext(child))
     # Optimization: if we only have one file, don't waste CPU on any of the other
     # stuff we do to pretend to be CIA.
     if len(files) == 1:
         return files[0], []
     prefix = re.sub("[^/]*$", "", posixpath.commonprefix(files))
     endings = []
     for file in files:
         ending = file[len(prefix):].strip()
         if ending == '':
             ending = '.'
         endings.append(ending)
     return prefix, endings
Example #30
    def build_tweet(self, commit):
        maxlen = 144
        left = maxlen
        paths = list(map(self._normalize_path, commit.changed))
        if not paths:
            return None
        path = posixpath.commonprefix(paths)
        if path[0:1] == '/' and len(path) > 1:
            path = path[1:]

        #TODO: allow URL to be configurable.
        link = " - http://svn.apache.org/r%d" % (commit.id)
        left -= len(link)
        msg = "r%d in %s by %s: " % (commit.id, path, commit.committer)
        left -= len(msg)
        if left > 3:
            msg += commit.log[0:left]
        msg += link
        return msg
Example #31
    def test_commonprefix(self):
        self.assertEqual(
            posixpath.commonprefix([]),
            ""
        )
        self.assertEqual(
            posixpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"]),
            "/home/swen"
        )
        self.assertEqual(
            posixpath.commonprefix(["/home/swen/spam", "/home/swen/eggs"]),
            "/home/swen/"
        )
        self.assertEqual(
            posixpath.commonprefix(["/home/swen/spam", "/home/swen/spam"]),
            "/home/swen/spam"
        )

        self.assertEqual(
            posixpath.commonprefix([b"/home/swenson/spam", b"/home/swen/spam"]),
            b"/home/swen"
        )
        self.assertEqual(
            posixpath.commonprefix([b"/home/swen/spam", b"/home/swen/eggs"]),
            b"/home/swen/"
        )
        self.assertEqual(
            posixpath.commonprefix([b"/home/swen/spam", b"/home/swen/spam"]),
            b"/home/swen/spam"
        )

        testlist = ['', 'abc', 'Xbcd', 'Xb', 'XY', 'abcd', 'aXc', 'abd', 'ab', 'aX', 'abcX']
        for s1 in testlist:
            for s2 in testlist:
                p = posixpath.commonprefix([s1, s2])
                self.assertTrue(s1.startswith(p))
                self.assertTrue(s2.startswith(p))
                if s1 != s2:
                    n = len(p)
                    self.assertNotEqual(s1[n:n+1], s2[n:n+1])
Example #32
    def test_set_wildcard_non_null_cors(self):
        bucket1_uri = self.CreateBucket()
        bucket2_uri = self.CreateBucket()
        # This just double checks that the common prefix of the two buckets is what
        # we think it should be (based on implementation detail of CreateBucket).
        # We want to be careful when setting a wildcard on buckets to make sure we
        # don't step outside the test buckets to affect other buckets.
        common_prefix = posixpath.commonprefix([suri(bucket1_uri), suri(bucket2_uri)])
        self.assertTrue(common_prefix.startswith("gs://gsutil-test-test_set_wildcard_non_null_cors-bucket-"))
        wildcard = "%s*" % common_prefix

        fpath = self.CreateTempFile(contents=self.cors_doc)
        stderr = self.RunGsUtil(["setcors", fpath, wildcard], return_stderr=True)
        self.assertIn("Setting CORS on %s/..." % suri(bucket1_uri), stderr)
        self.assertIn("Setting CORS on %s/..." % suri(bucket2_uri), stderr)
        self.assertEqual(stderr.count("Setting CORS"), 2)

        stdout = self.RunGsUtil(["getcors", suri(bucket1_uri)], return_stdout=True)
        self.assertEqual(stdout, self.cors_doc)
        stdout = self.RunGsUtil(["getcors", suri(bucket2_uri)], return_stdout=True)
        self.assertEqual(stdout, self.cors_doc)
Example #33
def prepare_changeset_values(env, chgset):
    """Converts a changeset object into a dict."""
    outer_path = None
    files = 0
    for path, kind, change, base_path, base_rev in chgset.get_changes():
        directory = posixpath.dirname(path)
        if outer_path is None:
            outer_path = directory
        else:
            outer_path = posixpath.commonprefix((outer_path, directory))
        files += 1
    if not outer_path.startswith('/'):
        outer_path = '/' + outer_path
    return add_environment_info(env, {
        'file_count':   files,
        'path':         outer_path,
        'rev':          chgset.rev,
        'author':       chgset.author,
        'message':      chgset.message,
        'url':          env.abs_href.changeset(chgset.rev)
    })
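The loop folds commonprefix pairwise over the directory names of the changed
paths; with hypothetical inputs the fold reduces like this, and agrees with
calling commonprefix on the whole list at once:

import posixpath
from functools import reduce

dirs = ['trunk/docs/api', 'trunk/docs/guide', 'trunk/src']
print(reduce(lambda acc, d: posixpath.commonprefix((acc, d)), dirs))  # 'trunk/'
print(posixpath.commonprefix(dirs))                                   # 'trunk/'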
Example #34
  def test_bucket_list_wildcard(self):
    bucket1_uri = self.CreateBucket()
    bucket2_uri = self.CreateBucket()
    # This just double checks that the common prefix of the two buckets is what
    # we think it should be (based on implementation detail of CreateBucket).
    # We want to be careful when setting a wildcard on buckets to make sure we
    # don't step outside the test buckets to affect other buckets.
    common_prefix = posixpath.commonprefix([suri(bucket1_uri),
                                            suri(bucket2_uri)])
    self.assertTrue(common_prefix.startswith(
        'gs://gsutil-test-test_bucket_list_wildcard-bucket-'))
    wildcard = '%s*' % common_prefix

    # Use @Retry as hedge against bucket listing eventual consistency.
    @Retry(AssertionError, tries=3, delay=1, backoff=1)
    def _Check1():
      stdout = self.RunGsUtil(['ls', '-b', wildcard], return_stdout=True)
      expected = set([suri(bucket1_uri) + '/', suri(bucket2_uri) + '/'])
      actual = set(stdout.split())
      self.assertEqual(expected, actual)
    _Check1()
Example #35
def prepare_changeset_values(env, chgset):
    """Converts a changeset object into a dict."""
    outer_path = None
    files = 0
    for path, kind, change, base_path, base_rev in chgset.get_changes():
        directory = posixpath.dirname(path)
        if outer_path is None:
            outer_path = directory
        else:
            outer_path = posixpath.commonprefix((outer_path, directory))
        files += 1
    if not outer_path.startswith('/'):
        outer_path = '/' + outer_path
    return add_environment_info(
        env, {
            'file_count': files,
            'path': outer_path,
            'rev': chgset.rev,
            'author': chgset.author,
            'message': chgset.message,
            'url': env.abs_href.changeset(chgset.rev)
        })
Example #36
  def test_set_wildcard_non_null_cors(self):
    bucket1_uri = self.CreateBucket()
    bucket2_uri = self.CreateBucket()
    # This just double checks that the common prefix of the two buckets is what
    # we think it should be (based on implementation detail of CreateBucket).
    # We want to be careful when setting a wildcard on buckets to make sure we
    # don't step outside the test buckets to affect other buckets.
    common_prefix = posixpath.commonprefix([suri(bucket1_uri),
                                            suri(bucket2_uri)])
    self.assertTrue(common_prefix.startswith(
        'gs://gsutil-test-test_set_wildcard_non_null_cors-bucket-'))
    wildcard = '%s*' % common_prefix

    fpath = self.CreateTempFile(contents=self.cors_doc)
    stderr = self.RunGsUtil(['setcors', fpath, wildcard], return_stderr=True)
    self.assertIn('Setting CORS on %s/...' % suri(bucket1_uri), stderr)
    self.assertIn('Setting CORS on %s/...' % suri(bucket2_uri), stderr)
    self.assertEqual(stderr.count('Setting CORS'), 2)

    stdout = self.RunGsUtil(['getcors', suri(bucket1_uri)], return_stdout=True)
    self.assertEqual(stdout, self.cors_doc)
    stdout = self.RunGsUtil(['getcors', suri(bucket2_uri)], return_stdout=True)
    self.assertEqual(stdout, self.cors_doc)
Example #37
def relpath(path, start=curdir):
    """Return a relative version of a path"""

    if not path:
        raise ValueError("no path specified")

    if type(start) is unicode:
        start_list = unicode_abspath(start).split(sep)
    else:
        start_list = abspath(start).split(sep)

    if type(path) is unicode:
        path_list = unicode_abspath(path).split(sep)
    else:
        path_list = abspath(path).split(sep)

    # Work out how much of the filepath is shared by start and path.
    i = len(commonprefix([start_list, path_list]))

    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
Example #38
  def test_bucket_list_wildcard(self):
    random_prefix = self.MakeRandomTestString()
    bucket1_name = self.MakeTempName('bucket', prefix=random_prefix)
    bucket2_name = self.MakeTempName('bucket', prefix=random_prefix)
    bucket1_uri = self.CreateBucket(bucket_name=bucket1_name)
    bucket2_uri = self.CreateBucket(bucket_name=bucket2_name)
    # This just double checks that the common prefix of the two buckets is what
    # we think it should be (based on implementation detail of CreateBucket).
    # We want to be careful when setting a wildcard on buckets to make sure we
    # don't step outside the test buckets to affect other buckets.
    common_prefix = posixpath.commonprefix([suri(bucket1_uri),
                                            suri(bucket2_uri)])
    self.assertTrue(common_prefix.startswith(
        'gs://%sgsutil-test-test_bucket_list_wildcard-bucket-' % random_prefix))
    wildcard = '%s*' % common_prefix

    # Use @Retry as hedge against bucket listing eventual consistency.
    @Retry(AssertionError, tries=3, timeout_secs=1)
    def _Check1():
      stdout = self.RunGsUtil(['ls', '-b', wildcard], return_stdout=True)
      expected = set([suri(bucket1_uri) + '/', suri(bucket2_uri) + '/'])
      actual = set(stdout.split())
      self.assertEqual(expected, actual)
    _Check1()
Example #39
    def first_common_ancestor(self, include_self=False, strict=False):
        """
        Find the first ancestor that all pages in this queryset have in common.
        For example, consider a page hierarchy like::

            - Home/
                - Foo Event Index/
                    - Foo Event Page 1/
                    - Foo Event Page 2/
                - Bar Event Index/
                    - Bar Event Page 1/
                    - Bar Event Page 2/

        The common ancestors for some queries would be:

        .. code-block:: python

            >>> Page.objects\\
            ...     .type(EventPage)\\
            ...     .first_common_ancestor()
            <Page: Home>
            >>> Page.objects\\
            ...     .type(EventPage)\\
            ...     .filter(title__contains='Foo')\\
            ...     .first_common_ancestor()
            <Page: Foo Event Index>

        This method tries to be efficient, but if you have millions of pages
        scattered across your page tree, it will be slow.

        If `include_self` is True, the ancestor can be one of the pages in the
        queryset:

        .. code-block:: python

            >>> Page.objects\\
            ...     .filter(title__contains='Foo')\\
            ...     .first_common_ancestor()
            <Page: Foo Event Index>
            >>> Page.objects\\
            ...     .filter(title__exact='Bar Event Index')\\
            ...     .first_common_ancestor()
            <Page: Bar Event Index>

        A few invalid cases exist: when the queryset is empty, when the root
        Page is in the queryset and ``include_self`` is False, and when there
        are multiple page trees with no common root (a case Wagtail does not
        support). If ``strict`` is False (the default), then the first root
        node is returned in these cases. If ``strict`` is True, an
        ``ObjectDoesNotExist`` is raised.
        """
        # An empty queryset has no ancestors. This is a problem
        if not self.exists():
            if strict:
                raise self.model.DoesNotExist('Can not find ancestor of empty queryset')
            return self.model.get_first_root_node()

        if include_self:
            # Get all the paths of the matched pages.
            paths = self.order_by().values_list('path', flat=True)
        else:
            # Find all the distinct parent paths of all matched pages.
            # The empty `.order_by()` ensures that `Page.path` is not also
            # selected to order the results, which makes `.distinct()` work.
            paths = self.order_by()\
                .annotate(parent_path=Substr(
                    'path', 1, Length('path') - self.model.steplen,
                    output_field=CharField(max_length=255)))\
                .values_list('parent_path', flat=True)\
                .distinct()

        # This method works on anything, not just file system paths.
        common_parent_path = posixpath.commonprefix(paths)

        # That may have returned a path like (0001, 0002, 000), which is
        # missing some chars off the end. Fix this by trimming the path to a
        # multiple of `Page.steplen`
        extra_chars = len(common_parent_path) % self.model.steplen
        if extra_chars != 0:
            common_parent_path = common_parent_path[:-extra_chars]

        if common_parent_path == '':
            # This should only happen when there are multiple trees,
            # a situation that Wagtail does not support;
            # or when the root node itself is part of the queryset.
            if strict:
                raise self.model.DoesNotExist('No common ancestor found!')

            # Assuming the situation is the latter, just return the root node.
            # The root node is not its own ancestor, so this is technically
            # incorrect. If you want very correct operation, use `strict=True`
            # and receive an error.
            return self.model.get_first_root_node()

        # Assuming the database is in a consistent state, this page should
        # *always* exist. If your database is not in a consistent state, you've
        # got bigger problems.
        return self.model.objects.get(path=common_parent_path)
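The steplen trimming is the subtle step: treebeard materialized paths are
fixed-width segments (4 characters per depth level by default in Wagtail), so a
character-wise common prefix can end mid-segment and must be cut back to a
whole multiple of steplen. A small illustration with hypothetical paths:

import posixpath

steplen = 4  # Wagtail's default segment width
paths = ['00010002', '00010003']  # two siblings under node '0001'
common = posixpath.commonprefix(paths)       # '0001000' -- ends mid-segment
extra = len(common) % steplen                # 3 stray characters
print(common[:-extra] if extra else common)  # '0001' -- the parent's path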
Example #41
def DoIt(filenames, pattern, with_o2):
    fn_measure = basename(commonprefix(filenames))
    fn_measure = myutils.strip_from_end(fn_measure, '.h5')
    fn_measure = myutils.strip_from_end(fn_measure, '-type')

    def cachelocation(g):
        path = posixpath.join(
            'FileCS_' + myutils.checksum(basename(g.file.filename)),
            g.name.strip(posixpath.sep))
        return (f_measure, path)

    if with_o2:
        fn_measure = myutils.strip_from_end(fn_measure, '_detailedpo2')

    files = [h5files.open(fn, 'a') for fn in filenames]
    f_measure = h5files.open('plotVessels_chache.h5', 'a', search=False)
    groups = list(
        itertools.chain.from_iterable(
            myutils.walkh5(f, pattern, return_h5objects=True) for f in files))
    if len(groups) <= 0:
        print('no matching groups in hdf file(s)')
        sys.exit(0)

    if with_o2:
        name = posixpath.commonprefix([g.name for g in groups])
        name = myutils.strip_from_start(name, '/po2/vessels').replace('/', '-')
        fn_measure += name

    with mpl_utils.PdfWriter(fn_measure + '.pdf') as pdfpages:
        rc = matplotlib.rc
        rc('font', size=8.)
        rc('axes', titlesize=10., labelsize=8.)

        if with_o2:
            import detailedo2Analysis as o2analysis
            import detailedo2Analysis.plotsForPaper
            import detailedo2
            dataman = myutils.DataManager(20, [
                o2analysis.DataDetailedPO2(),
                analyzeGeneral.DataTumorTissueSingle(),
                analyzeGeneral.DataDistanceFromCenter(),
                analyzeGeneral.DataBasicVessel(),
                analyzeGeneral.DataVesselSamples(),
                analyzeBloodFlow.DataTumorBloodFlow(),
                analyzeGeneral.DataVesselRadial(),
                analyzeGeneral.DataVesselGlobal()
            ])

            vesselgroups = list(
                detailedo2.OpenVesselAndTumorGroups(g)[0] for g in groups)
            #original_vesselgroups = list(h5files.openLink(g, 'SOURCE') for g in vesselgroups)
            if 1:
                PrintGlobalDataWithOxygen(pdfpages, groups, vesselgroups,
                                          f_measure, dataman)
                '''FormatParameters would list the network creation parameters,
                but that does not work if we have an o2 file.'''
                #text = FormatParameters(original_vesselgroups[0].file)
                text = [' ']
                text += detailedo2Analysis.plotsForPaper.FormatParameters(
                    groups[0])
                fig, _ = mpl_utils.MakeTextPage(text,
                                                figsize=(mpl_utils.a4size[0],
                                                         mpl_utils.a4size[0]))
                pdfpages.savefig(fig, postfix='_vesselsparams')
            if 1:
                res = getMultiScatter(300. * len(filenames), vesselgroups)
                plotMultiScatterBeauty(res, pdfpages)

        else:
            dataman = myutils.DataManager(20, [
                analyzeGeneral.DataTumorTissueSingle(),
                analyzeGeneral.DataVesselRadial(),
                analyzeGeneral.DataDistanceFromCenter(),
                analyzeBloodFlow.DataTumorBloodFlow(),
                analyzeGeneral.DataBasicVessel(),
                analyzeGeneral.DataVesselSamples(),
                analyzeGeneral.DataVesselGlobal()
            ])
            #dataman = myutils.DataManager(20, [ analyzeGeneral.DataBasicVessel(), analyzeGeneral.DataVesselSamples(), analyzeGeneral.DataVesselGlobal()])
            vesselgroups = groups

            if 0:
                res = getMultiScatter(300. * len(filenames), vesselgroups)
                plotMultiScatterBeauty(res, pdfpages)
            if 0:
                PlotRadiusHistogram2(dataman, vesselgroups, pdfpages)

            if 0 and all(map(lambda g: 'data' in g.parent, vesselgroups)):
                data = VesselData()
                for g in vesselgroups:
                    data.add(g.parent['data'])
                plot_topological_stats_avg(data, pdfpages)
            if 0:  #reproduce swine
                plot_geometric_stuff_on_RC(dataman, f_measure, filenames,
                                           options, pdfpages)
            if 1:
                PrintGlobalData(pdfpages, vesselgroups, f_measure, dataman)
Example #42
    def download(self,
                 dataset_name,
                 revision,
                 target_directory,
                 create_target=False,
                 display_progress=False):
        '''
        download a dataset's contents into the top-level of target_directory.

        when create_target is specified, target_directory and parents
        will be created, otherwise, an error is signaled.
        '''

        if create_target:
            os.makedirs(target_directory, exist_ok=True)

        if not os.path.exists(target_directory):

            msg = 'target_directory {} does not exist'.format(target_directory)
            log.warning(msg)
            raise FileNotFoundError(msg)

        pfx = ufs.join(dataset_name, revision)

        # main download loop -
        #
        # iterate over objects,
        # convert full source path to source subpath,
        # construct local path and create local subdirectory in the target
        # then fetch the object into the local path.
        #
        # local paths are dealt with using OS path for native support,
        # paths in the s3 space use posixpath since these are '/' delimited

        nfound = 0

        obj_iter = self.client.list_objects(self.bucket,
                                            recursive=True,
                                            prefix=pfx)

        for obj in obj_iter:

            assert not obj.is_dir  # assuming dir not in recursive=True list

            spath = obj.object_name  # ds/rev/<...?>/thing

            ssubp = spath.replace(  # <...?>/thing
                ufs.commonprefix((pfx, spath)), '').lstrip('/')

            # target_directory/<...?>/thing
            lpath = os.path.join(target_directory, *ssubp.split(ufs.sep))
            lsubd, _ = os.path.split(lpath)

            # ensure we are not creating outside of target_directory
            assert (os.path.commonprefix(
                (target_directory, lpath)) == target_directory)

            xfer_msg = 'transferring {} to {}'.format(spath, lpath)

            log.debug(xfer_msg)

            if display_progress:
                print(xfer_msg)

            os.makedirs(lsubd, exist_ok=True)

            self.client.fget_object(self.bucket, spath, lpath)

            nfound += 1

        if not nfound:

            msg = 'dataset {} revision {} not found'.format(
                dataset_name, revision)

            log.debug(msg)

            raise FileNotFoundError(msg)
Example #43
def _CommonNormalizedPrefix(first_file, second_file):
    return posixpath.commonprefix(
        (_Normalize(first_file), _Normalize(second_file)))
Example #44
def commonprefix(paths):
    return posixpath.commonprefix([normsep(path) for path in paths])
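normsep here is a project helper that is not shown; a hypothetical stand-in
that rewrites backslashes to forward slashes is enough to make the wrapper
runnable:

import posixpath

def normsep(path):
    # Hypothetical stand-in: normalize Windows-style separators to '/'.
    return path.replace('\\', '/')

def commonprefix(paths):
    return posixpath.commonprefix([normsep(path) for path in paths])

print(commonprefix(['src\\a\\b.c', 'src/a/d.c']))  # 'src/a/'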
Example #46
def CalculateDeviceMd5Sums(paths, device):
  """Calculates the MD5 sum value for all items in |paths|.

  Directories are traversed recursively and the MD5 sum of each file found is
  reported in the result.

  Args:
    paths: A list of device paths to md5sum.
  Returns:
    A dict mapping file paths to their respective md5sum checksums.
  """
  if not paths:
    return {}

  if isinstance(paths, basestring):
    paths = [paths]
  # Allow generators
  paths = list(paths)

  md5sum_dist_path = devil_env.config.FetchPath('md5sum_device', device=device)

  if os.path.isdir(md5sum_dist_path):
    md5sum_dist_bin_path = os.path.join(md5sum_dist_path, 'md5sum_bin')
  else:
    md5sum_dist_bin_path = md5sum_dist_path

  if not os.path.exists(md5sum_dist_path):
    raise IOError('File not built: %s' % md5sum_dist_path)
  md5sum_file_size = os.path.getsize(md5sum_dist_bin_path)

  # For better performance, make the script as small as possible to try and
  # avoid needing to write to an intermediary file (which RunShellCommand will
  # do if necessary).
  md5sum_script = 'a=%s;' % MD5SUM_DEVICE_BIN_PATH
  # Check if the binary is missing or has changed (using its file size as an
  # indicator), and trigger a (re-)push via the exit code.
  md5sum_script += '! [[ $(ls -l $a) = *%d* ]]&&exit 2;' % md5sum_file_size
  # Make sure it can find libbase.so
  md5sum_script += 'export LD_LIBRARY_PATH=%s;' % MD5SUM_DEVICE_LIB_PATH
  if len(paths) > 1:
    prefix = posixpath.commonprefix(paths)
    if len(prefix) > 4:
      md5sum_script += 'p="%s";' % prefix
      paths = ['$p"%s"' % p[len(prefix):] for p in paths]

  md5sum_script += ';'.join('$a %s' % p for p in paths)
  # Don't fail the script if the last md5sum fails (due to file not found)
  # Note: ":" is equivalent to "true".
  md5sum_script += ';:'
  try:
    out = device.RunShellCommand(
        md5sum_script, shell=True, check_return=True, large_output=True)
  except device_errors.AdbShellCommandFailedError as e:
    # Push the binary only if it is found to not exist
    # (faster than checking up-front).
    if e.status == 2:
      # If files were previously pushed as root (adbd running as root), trying
      # to re-push as non-root causes the push command to report success, but
      # actually fail. So, wipe the directory first.
      device.RunShellCommand(['rm', '-rf', MD5SUM_DEVICE_LIB_PATH],
                             as_root=True, check_return=True)
      if os.path.isdir(md5sum_dist_path):
        device.adb.Push(md5sum_dist_path, MD5SUM_DEVICE_LIB_PATH)
      else:
        mkdir_cmd = 'a=%s;[[ -e $a ]] || mkdir $a' % MD5SUM_DEVICE_LIB_PATH
        device.RunShellCommand(mkdir_cmd, shell=True, check_return=True)
        device.adb.Push(md5sum_dist_bin_path, MD5SUM_DEVICE_BIN_PATH)

      out = device.RunShellCommand(
          md5sum_script, shell=True, check_return=True, large_output=True)
    else:
      raise

  return _ParseMd5SumOutput(out)
Example #47
 def update_event(self, inp=-1):
     self.set_output_val(0, posixpath.commonprefix(self.input(0)))