Example #1
def zopen(path, mode="r", passwd=PASSWORD):
    # Slice off the exact ".zip" suffix; rstrip(".zip") would strip any of the
    # characters '.', 'z', 'i', 'p' and mangle names like "quiz.zip"
    original_path = path[:-len(".zip")] if path.endswith(".zip") else path
    original_path = original_path.strip(os.path.sep)  # relative
    try:
        return zipfile.ZipFile(path).open(original_path, "r", passwd)
    except KeyError:
        original_path = os.path.basename(original_path)
        return zipfile.ZipFile(path).open(original_path, "r", passwd)
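A note on the recurring pitfall in this collection: str.rstrip(chars) treats its argument as a set of characters to strip, not as a suffix. A minimal demonstration, with an exact-suffix helper for Python versions that predate str.removesuffix (3.9+):

# rstrip strips any trailing characters from the set {'.', 'z', 'i', 'p'}
print("quiz.zip".rstrip(".zip"))  # -> 'qu', not 'quiz'

def strip_suffix(s, suffix):
    # Remove an exact trailing substring, unlike str.rstrip
    return s[:-len(suffix)] if suffix and s.endswith(suffix) else s

print(strip_suffix("quiz.zip", ".zip"))  # -> 'quiz'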
Example #2
def decompress_file_in_place(path, remove=False):

    """
    This function ...
    :param path:
    :param remove:
    :return:
    """

    from ..basics.log import log

    # Inform the user
    log.info("Decompressing '" + path + "' ...")

    # Check extension
    if path.endswith(".bz2"):
        new_path = path.rstrip(".bz2")
        decompress_bz2(path, new_path)
    elif path.endswith(".gz"):
        new_path = path.rstrip(".gz")
        if new_path.endswith(".tar"): new_path = new_path.split(".tar")[0]
        decompress_gz(path, new_path)
    elif path.endswith(".zip"):
        new_path = path.rstrip(".zip")
        decompress_zip(path, new_path)
    else: raise ValueError("Unrecognized archive type (must be bz2, gz [or tar.gz] or zip)")

    # Remove the original file if requested
    if remove: fs.remove_file(path)

    # Return the new path
    return new_path
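The decompress_bz2, decompress_gz and decompress_zip helpers are defined elsewhere in this package; a minimal sketch of what they might look like, assuming each one writes the decompressed data to new_path:

import bz2
import gzip
import shutil
import zipfile

def decompress_bz2(path, new_path):
    # Stream-decompress a .bz2 file into new_path
    with bz2.open(path, "rb") as fin, open(new_path, "wb") as fout:
        shutil.copyfileobj(fin, fout)

def decompress_gz(path, new_path):
    # Stream-decompress a .gz file into new_path
    with gzip.open(path, "rb") as fin, open(new_path, "wb") as fout:
        shutil.copyfileobj(fin, fout)

def decompress_zip(path, new_path):
    # Extract a zip archive into the directory new_path
    with zipfile.ZipFile(path) as archive:
        archive.extractall(new_path)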
Example #3
    def revisions(self, repository, *args, **kwargs):
        path = "/".join(args)
        path.rstrip("/")
        if not path.startswith("/"):
            path = "/" + path

        path, _, script = path.rpartition('/')
        path = path + '/'

        scr = Script.visible(request,
                             repository,
                             path).filter(Script.name == script).first()
        if not scr:
            return O.error(msg="Script not found")

        revisions = sorted([r.serialize(
            skip=['id', 'script_id', 'draft', 'content', 'meta'],
            rel=[("created_at", "created_at", lambda d: d)])
            for r in scr.history
            if not r.draft], key=lambda r: r["created_at"], reverse=True)
        return O.history(
            script=scr.name,
            owner=scr.owner.username,
            revisions=revisions
        )
Example #4
 def _getPath(self, file, trim = False):
     if (self._debug):
         path = self._lind % file
     else:
         path = self._lin % file
     #useful during debugging on Windows platform...
     if (trim):
         path = path.rstrip(os.sep)  # reassign: str.rstrip returns a new string
     return path
Example #5
def preparePhoto(path):
  image = Image.open(path)
  dist_path = "photos/" + '/'.join(path.rstrip().split('/')[1:])
  dist_directory = '/'.join(dist_path.rstrip().split('/')[:2])
  eye_left_pos, eye_right_pos = getEyesPosition(path)

  if not os.path.exists(dist_directory):
    os.makedirs(dist_directory)
  CropFace(image, eye_left = eye_left_pos, eye_right = eye_right_pos, offset_pct=(0.3, 0.3), dest_sz=(200, 200)).save(dist_path)
  print ("%s SAVED!\tLeft Eye: %s\t Right Eye: %s" % ('/'.join(path.rstrip().split('/')[1:]), eye_left_pos, eye_right_pos))
Example #6
    def draw_delete_button_cb(self, col, cell, model, it, tv):
        path =  model.get_value(it, 0)
        # Trailing slashes are uncommon in bblayers.conf but confuse os.path.basename
        path = path.rstrip('/')  # reassign: str.rstrip returns a new string
        name = os.path.basename(path)
        if name == "meta" or name == "meta-hob":
            cell.set_sensitive(False)
            cell.set_property('pixbuf', None)
            cell.set_property('mode', gtk.CELL_RENDERER_MODE_INERT)
        else:
            cell.set_property('pixbuf', self.rem_icon)
            cell.set_sensitive(True)
            cell.set_property('mode', gtk.CELL_RENDERER_MODE_ACTIVATABLE)

        return True
Example #7
    def doctor(link_dir, path):
        # Don't mess with paths that just refer to another link:
        if path.rstrip().endswith('_'): return path  # endswith avoids IndexError on blank paths

        path = path.lstrip()

        # Don't mess with paths that point somewhere in the outside universe:
        if path.startswith('http://'): return ' ' + path

        # Prepend link_dir to path
        if link_dir.startswith('./'): path = link_dir[2:] + '/' + path
        elif link_dir != '.': path = link_dir + '/' + path

        # Prepare dir (start_dir, minus initial './')
        if start_dir == '.': dir = ''
        elif start_dir.startswith('./'): dir = start_dir[2:]
        else: dir = start_dir

        rest=' '
        last_dir = None
        while dir and dir != last_dir:
            if path.startswith(dir + '/'):
                ans = rest + path[len(dir) + 1:]
                #print "doctor(%s) abbr:" % (path.rstrip(),), ans
                return ans
            rest += '../'
            last_dir = dir
            dir, ignore = os.path.split(dir)
        ans = rest + path
        #print "doctor(%s) abs:" % (path.rstrip(),), ans
        return ans
Example #8
def parse_got_revision(gclient_output, got_revision_mapping):
  """Translate git gclient revision mapping to build properties."""
  properties = {}
  solutions_output = {
      # Make sure path always ends with a single slash.
      '%s/' % path.rstrip('/') : solution_output for path, solution_output
      in gclient_output['solutions'].iteritems()
  }
  for dir_name, property_name in got_revision_mapping.iteritems():
    # Make sure dir_name always ends with a single slash.
    dir_name = '%s/' % dir_name.rstrip('/')
    if dir_name not in solutions_output:
      continue
    solution_output = solutions_output[dir_name]
    if solution_output.get('scm') is None:
      # This is an ignored DEPS, so the output got_revision should be 'None'.
      git_revision = revision = commit_position = None
    else:
      # Since we are using .DEPS.git, everything had better be git.
      assert solution_output.get('scm') == 'git'
      git_revision = git('rev-parse', 'HEAD', cwd=dir_name).strip()
      revision = git_revision
      commit_position = get_commit_position(dir_name)

    properties[property_name] = revision
    if revision != git_revision:
      properties['%s_git' % property_name] = git_revision
    if commit_position:
      properties['%s_cp' % property_name] = commit_position

  return properties
Example #9
 def newest_in(self, path, depth=0):
   # params.get(..., True) collapses the original '"key" in params and not params[key]' checks
   if not self.params.get("with_hidden", True) and not self.params.get("with_dirs", True) and not self.params.get("with_files", True):
     raise Exception("Configuration Error, all entries disabled in configuration of %s" % self.backup.name)
   newest_path = None
   newest_time = None
   for e in os.listdir(path):
     # Build the full path up front; the bare name would be resolved against the CWD
     entry_path = "%s/%s" % (path.rstrip("/"), e)
     if not self.params.get("with_hidden", True) and fnmatch.fnmatch(e, ".*"):
       continue
     if not self.params.get("with_dirs", True) and os.path.isdir(entry_path):
       continue
     if not self.params.get("with_files", True) and os.path.isfile(entry_path):
       continue
     try:
       entry_time = os.path.getmtime(entry_path)
       if newest_time is None or entry_time > newest_time:
         newest_time = entry_time
         newest_path = entry_path
       if os.path.isdir(entry_path) and depth > 0:
         # Recurse with the full path and a decremented depth budget
         in_dir_path, in_dir_time = self.newest_in(entry_path, depth - 1)
         if in_dir_time is not None and in_dir_time > newest_time:
           newest_time = in_dir_time
           newest_path = in_dir_path
     except OSError:
       pass
   return (newest_path, newest_time)
Example #10
    def getRequestStatus(self, requestName):
        """ check status for request :requestName:
    
    :param self: self reference
    :param str requestName: 
    """
        res = self.__locateRequest(requestName, assigned=True)
        if not res["OK"]:
            return res
        subRequestPaths = res["Value"]
        if not subRequestPaths:
            return S_ERROR("getRequestStatus: request '%s' not found" % requestName)
        ## figure out subrequests status: the status is the next-to-last path component
        ## (the original rstrip(requestName) stripped characters, not the name itself)
        result = list(set(path.split("/")[-2] for path in subRequestPaths))
        subRequestStatus = "Unknown"
        if "Empty" in result:
            subRequestStatus = "Empty"
        elif "Waiting" in result:
            subRequestStatus = "Waiting"
        elif "Assigned" in result:
            subRequestStatus = "Assigned"
        elif "Failed" in result:
            subRequestStatus = "Failed"
        elif "Done" in result:
            subRequestStatus = "Done"
        ## ...and same for request status
        if subRequestStatus in ("Waiting", "Assigned", "Unknown"):
            requestStatus = "Waiting"
        elif subRequestStatus in ("Empty", "Failed", "Done"):
            requestStatus = "Done"

        return S_OK({"RequestStatus": requestStatus, "SubRequestStatus": subRequestStatus})
Example #11
    def upload(self, f=None, path=None):
        # f.seek(0, 0)
        h = {}
        for key in cherrypy.request.headers:
            h[key.lower()] = cherrypy.request.headers[key]

        formFields = FileFieldStorage(fp=cherrypy.request.rfile, headers=h, environ={'REQUEST_METHOD':'POST'}, keep_blank_values=True)
        if 'f' in formFields and 'path' in formFields:
            f = formFields['f']
            path = formFields.getvalue('path')
            search = re.search(r'------WebKitFormBoundary.{16}--\Z', path)
            if search is not None:
                path = path.replace(search.group(), '')
            path = path.rstrip()
            if self.pathInSync(path) and self.pathExists(path):
                if hasattr(f.file, 'name'):
                    move(f.file.name, path)
                    os.chmod(path, 0776)
                    # os.remove(f.file.name)
                    return 'moved.'
            else:
                raise cherrypy.HTTPError(400, message="path is not valid")
        elif 'f' not in formFields:
            raise cherrypy.HTTPError(400, message="f was not found in your request")

        elif 'path' not in formFields:
            raise cherrypy.HTTPError(400, message="path was not found in your request")

        else:
            raise cherrypy.HTTPError(400, message="unknown error with upload")
Example #12
 def serve_file(self, request):
     ''' Load files from static directory '''
     path = os.path.join(self.static, request.path.lstrip('/')).replace(os.sep, '/')
     if path.startswith(self.static): # extra check due to ".." components
         if utils.get_resource_isdir(path):
             path = path.rstrip("/") + "/index.html"
         if utils.get_resource_exists(path):
             ext = path.rsplit(".", 1)[-1]
             if ext in self._forced_mimes:
                 mime = self._forced_mimes[ext]
             else:
                 mime, encoding = mimetypes.guess_type(path)
                 if mime is None:
                     mime = "application/octet-stream"
                 if encoding:
                     mime += "; " + encoding
             request.response_headers['Content-Type'] = mime
             fp = utils.get_resource_stream(path)
             chunk = fp.read(self.buffsize)
             if not config.DEBUG:
                 request.cache_for(3600) # an hour
             while chunk:
                 yield chunk
                 chunk = fp.read(self.buffsize)
         else:
             request.error(404)
     else:
         request.error(404)
Example #13
    def _set_fileview_root(self, path, *, tabbed=False):
        """Set the root path for the file display."""
        separators = os.sep
        if os.altsep is not None:
            separators += os.altsep

        dirname = os.path.dirname(path)

        try:
            if not path:
                pass
            elif path in separators and os.path.isdir(path):
                # Input "/" -> don't strip anything
                pass
            elif path[-1] in separators and os.path.isdir(path):
                # Input like /foo/bar/ -> show /foo/bar/ contents
                path = path.rstrip(separators)
            elif os.path.isdir(dirname) and not tabbed:
                # Input like /foo/ba -> show /foo contents
                path = dirname
            else:
                return
        except OSError:
            log.prompt.exception("Failed to get directory information")
            return

        root = self._file_model.setRootPath(path)
        self._file_view.setRootIndex(root)
Example #14
    def _fetch_directory_from_cache(self, path):
        self.directory_cache.expire()

        if path not in self.directory_cache:
            s3_path = u'{0}/'.format(path.rstrip('/')).lstrip('/')
            child_keys = self.bucket.list(prefix=s3_path, delimiter='/')

            # Add the child keys to the directory cache
            children = [ key_basename(k) for k in child_keys ]
            self.directory_cache.add(path, children)

            # Add the child keys to the attribute cache
            for key in child_keys:
                is_directory = isinstance(key, boto.s3.prefix.Prefix)
                child_path = u'/{0}'.format(key.name).rstrip('/')

                if is_directory:
                    mode = self.DIRECTORY_MODE
                    self.attribute_cache.add(child_path, mode)
                else:
                    mode = self.FILE_MODE
                    self.attribute_cache.add(child_path, mode, key)

        children = self.directory_cache.get(path)
        return children
Example #15
    def translate_path(self, path):
        """
        Translate a /-separated PATH to the local filename syntax.

        This method is inelegantly 'borrowed' from SimpleHTTPServer.py to change
        the original so that it has the `path = self.server.static_path' line.
        """
        # abandon query parameters
        path = path.split('?',1)[0]
        path = path.split('#',1)[0]
        path = path[len(self.url_path):]
        # Don't forget explicit trailing slash when normalizing. Issue17324
        trailing_slash = path.rstrip().endswith('/')
        path = posixpath.normpath(urllib.parse.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        # serve content from static_path, instead of os.getcwd()
        path = self.local_path
        for word in words:
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir): continue
            path = os.path.join(path, word)
        if trailing_slash:
            path += '/'
        return path
Example #16
            def encodeUrl(orig):
                origUrl = urlparse.urlparse(orig)
                newPath = origUrl.path
                newQuery = urlparse.parse_qs(origUrl.query)

                # relative?
                if not origUrl.netloc:
                    newPath = urlparse.urljoin(path.rstrip("/") + "/", newPath.lstrip("/"))
                elif not orig.lower().startswith(portal_url.lower()):
                    # Not an internal URL - ignore
                    return orig

                newQuery['path'] = newPath
                newQuery['theme'] = theme
                if links:
                    newQuery['links'] = links
                if forms:
                    newQuery['forms'] = forms
                if title:
                    if isinstance(title, unicode):
                        newQuery['title'] = title.encode('utf-8', 'replace')
                    else:
                        newQuery['title'] = title

                return self.request.getURL() + '?' + urllib.urlencode(newQuery)
Example #17
    def _insert_path(self, index, *, clicked=True):
        """Handle an element selection.

        Args:
            index: The QModelIndex of the selected element.
            clicked: Whether the element was clicked.
        """
        if index == QModelIndex():
            path = os.path.join(self._file_model.rootPath(), self._to_complete)
        else:
            path = os.path.normpath(self._file_model.filePath(index))

        if clicked:
            path += os.sep
        else:
            # On Windows, when we have C:\foo and tab over .., we get C:\
            path = path.rstrip(os.sep)

        log.prompt.debug('Inserting path {}'.format(path))
        self._lineedit.setText(path)
        self._lineedit.setFocus()
        self._set_fileview_root(path, tabbed=True)
        if clicked:
            # Avoid having a ..-subtree highlighted
            self._file_view.setCurrentIndex(QModelIndex())
Example #18
    def __init__(self, data):
        super().__init__(data)
        try:
            parsed = json.loads(data)
        except ValueError:
            raise testprocess.InvalidLine(data)

        assert isinstance(parsed, dict)
        assert set(parsed.keys()) == {'path', 'verb', 'status'}

        self.verb = parsed['verb']

        path = parsed['path']
        self.path = '/' if path == '/' else path.rstrip('/')

        self.status = parsed['status']

        missing_paths = ['/favicon.ico', '/does-not-exist']

        if self.path in missing_paths:
            assert self.status == http.client.NOT_FOUND  # 404
        else:
            expected_http_statuses = [
                http.client.OK,  # 200
                http.client.FOUND,  # 302
                http.client.UNAUTHORIZED  # 401
            ]
            assert self.status in expected_http_statuses
Example #19
File: job.py Project: Yelp/mrjob
    def _upload_attr(self, attr_name):
        """Helper for :py:meth:`archives`, :py:meth:`dirs`, and
        :py:meth:`files`"""
        attr_value = getattr(self, attr_name)

        # catch path instead of a list of paths
        if isinstance(attr_value, string_types):
            raise TypeError('%s must be a list or other sequence.' % attr_name)

        script_dir = os.path.dirname(self.mr_job_script())
        paths = []

        for path in attr_value:
            expanded_path = expand_path(path)

            if os.path.isabs(expanded_path):
                paths.append(path)
            else:
                # relative subdirs are confusing; people will expect them
                # to appear in a subdir, not the same directory as the script,
                # but Hadoop doesn't work that way
                if os.sep in path.rstrip(os.sep) and '#' not in path:
                    log.warning(
                        '%s: %s will appear in same directory as job script,'
                        ' not a subdirectory' % (attr_name, path))

                paths.append(os.path.join(script_dir, path))

        return paths
Example #20
def get_wav_length() -> None:
    '''Outputs length of wav file on the console and
       in a text file if the user desires'''
    
    try:
        path = get_wav_path()
        file_with_ext = os.path.basename(path)
        filename = os.path.splitext(file_with_ext)[0]  # rstrip('.wav') would strip characters, not the extension
        
        with wave.open(path,'r') as file:
            frame = file.getnframes()
            rate = file.getframerate()
            duration = frame / float(rate)

            print("\n{} was {} seconds long.\n".format(file_with_ext,
                                                   duration))

            if outputToFile():
                # os.path.dirname replaces the rstrip(file_with_ext) misuse, which stripped characters, not the filename
                output_path = os.path.join(os.path.dirname(path), filename + '-length.txt')
                output_length(output_path, duration)
                print("Successfully written duration in file {} in path {}!\n\nClosing {}...".format(filename + '-length.txt',
                                                                                                     os.path.dirname(output_path),
                                                                                                     filename + '-length.txt'))
                

            print("Closing {}...".format(file_with_ext))
                
        
    except PathDoesNotExistError:
        print("Path was not valid! File not found.")
        
    except NotWavFileError:
        print("Path was not a wav file! Exiting...")

    print("Exiting wavlength.py script...")
Example #21
def parse_content_disposition(reply):
    """Parse a content_disposition header.

    Args:
        reply: The QNetworkReply to get a filename for.

    Return:
        A (is_inline, filename) tuple.
    """
    is_inline = True
    filename = None
    content_disposition_header = 'Content-Disposition'.encode('iso-8859-1')
    # First check if the Content-Disposition header has a filename
    # attribute.
    if reply.hasRawHeader(content_disposition_header):
        # We use the unsafe variant of the filename as we sanitize it via
        # os.path.basename later.
        try:
            value = bytes(reply.rawHeader(content_disposition_header))
            log.rfc6266.debug("Parsing Content-Disposition: {}".format(value))
            content_disposition = rfc6266.parse_headers(value)
            filename = content_disposition.filename()
        except (SyntaxError, UnicodeDecodeError, rfc6266.Error):
            log.rfc6266.exception("Error while parsing filename")
        else:
            is_inline = content_disposition.is_inline()
    # Then try to get filename from url
    if not filename:
        path = reply.url().path()
        if path is not None:
            filename = path.rstrip('/')
    # If that fails as well, use a fallback
    if not filename:
        filename = 'qutebrowser-download'
    return is_inline, os.path.basename(filename)
Example #22
def _getBasePath(path):
    if os.path.exists(path):
        return path

    # Iterate up in the tree structure until we get an
    # existing path
    return _getBasePath(os.path.dirname(path.rstrip("/")))
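A usage sketch with hypothetical paths: assuming /tmp exists but /tmp/new does not, the recursion walks up until an existing ancestor is found. The rstrip("/") guard keeps os.path.dirname making progress on paths with trailing slashes.

print(_getBasePath("/tmp/new/dir"))   # -> '/tmp'
print(_getBasePath("/tmp/new/dir/"))  # -> '/tmp' as well, thanks to the rstrip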
Example #23
    def get_dirname(self):
        "Calculate the directory name resulting from a successful checkout."
        p = urlparse.urlparse(self.repository)
        path = p[2]  # urlparse -> path

        dirname = path.rstrip("/").split("/")[-1]
        log_info("hg checkout dirname guessed as: %s" % (dirname,))
        return dirname
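For instance, with a hypothetical repository URL ending in a trailing slash, the rstrip keeps the final split from producing an empty last component:

import urlparse  # Python 2, as in the snippet above

p = urlparse.urlparse("https://hg.example.com/repos/myproject/")
print(p[2].rstrip("/").split("/")[-1])  # -> 'myproject'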
Example #24
 def __init__(self, path, pattern):
     if path.startswith('/pnfs/desy.de/') and not os.path.exists(path):
         self.mode = 'dcap'
         self.path = path.rstrip('/') + '/'
     else:
         self.mode = 'local'
         self.path = os.path.realpath(path)
     self.pattern = pattern
Example #25
 def _vcs_listdir(self, path, revision):
     output = self._u_invoke_client('manifest', '--rev', revision)
     files = output.splitlines()
     path = path.rstrip(os.path.sep) + os.path.sep
     descendent_files = [self._u_rel_path(f, path) for f in files
                         if f.startswith(path)]
     return sorted(set(
             f.split(os.path.sep, 1)[0] for f in descendent_files))
Example #26
def _is_version(path):
    """Check if the path is a version."""
    name = os.path.basename(path.rstrip("/"))
    try:
        datetime.strptime(name, VERSION_DIR_FORMAT)
        return True
    except ValueError:
        # Not a timestamp-formatted name, so not a version directory
        return False
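VERSION_DIR_FORMAT is defined elsewhere in that project; assuming a timestamp layout such as "%Y%m%d-%H%M%S" purely for illustration, the check behaves like this:

VERSION_DIR_FORMAT = "%Y%m%d-%H%M%S"  # assumed value, for illustration only

print(_is_version("/data/versions/20240131-120000/"))  # -> True
print(_is_version("/data/versions/latest/"))           # -> False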
Example #27
	def wrapper(callback):
		if path is None or not path.rstrip('/'):
			return route(path, method, **kargs)(callback)
		elif path.endswith('/'):
			return route(path[:-1], method, **kargs)(
					route(path, method, **kargs)(callback))
		else:
			return route(path, method, **kargs)(
					route(path+'/', method, **kargs)(callback))
Example #28
    def _basename(self, path):
        '''
        A basename() variant which first strips the trailing slash,
        if present.

        Thus we always get the last component of the path, even for
        directories.
        '''
        return os.path.basename(path.rstrip(os.path.sep))
Example #29
def coconuts_crumbs(path):
    # breadcrumbs
    crumbs = [ coconuts.models.Folder('') ]
    crumb_path = ''
    for bit in path.rstrip("/").split("/"):
        if bit:
            crumb_path = os.path.join(crumb_path, bit)
            crumbs.append(coconuts.models.Folder(crumb_path))
    return { 'crumbs': crumbs }
Example #30
import glob
import os.path
import pathlib
import subprocess

import pytest

root_dir = os.path.relpath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
examples = sorted(glob.glob(os.path.join(root_dir, 'examples/*/')))
assert examples  # if empty, it is just the detection failed
examples = [path for path in examples if not glob.glob((os.path.join(path, 'test*.py')))]


@pytest.fixture(params=examples, ids=[os.path.basename(path.rstrip('/')) for path in examples])
def exampledir(request):
    return pathlib.Path(request.param)


@pytest.fixture(scope='session')
def peering_yaml():
    crd_api = os.environ.get('CRDAPI', 'v1')
    crd_file = 'peering.yaml' if crd_api == 'v1' else f'peering-{crd_api}.yaml'
    return f'{crd_file}'


@pytest.fixture(scope='session')
def crd_yaml():
    crd_api = os.environ.get('CRDAPI', 'v1')
    crd_file = 'crd.yaml' if crd_api == 'v1' else f'crd-{crd_api}.yaml'
    return f'examples/{crd_file}'
Example #31
def macosx_engine_setup(config, test, taskdesc):
    mozharness = test['mozharness']

    installer_url = ARTIFACT_URL.format('<build>', mozharness['build-artifact-name'])
    test_packages_url = ARTIFACT_URL.format('<build>',
                                            'public/build/target.test_packages.json')
    mozharness_url = ARTIFACT_URL.format('<build>',
                                         'public/build/mozharness.zip')

    # for now we have only 10.10 machines
    taskdesc['worker-type'] = 'tc-worker-provisioner/gecko-t-osx-10-10'

    worker = taskdesc['worker'] = {}
    worker['implementation'] = test['worker-implementation']

    worker['artifacts'] = [{
        'name': prefix.rstrip('/'),
        'path': path.rstrip('/'),
        'type': 'directory',
    } for (prefix, path) in ARTIFACTS]

    worker['env'] = {
        'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
        'GECKO_HEAD_REV': config.params['head_rev'],
        'MOZHARNESS_CONFIG': ' '.join(mozharness['config']),
        'MOZHARNESS_SCRIPT': mozharness['script'],
        'MOZHARNESS_URL': {'task-reference': mozharness_url},
        'MOZILLA_BUILD_URL': {'task-reference': installer_url},
    }

    # assemble the command line

    worker['link'] = '{}/raw-file/{}/taskcluster/scripts/tester/test-macosx.sh'.format(
        config.params['head_repository'], config.params['head_rev']
    )

    command = worker['command'] = ["./test-macosx.sh"]
    if mozharness.get('no-read-buildbot-config'):
        command.append("--no-read-buildbot-config")
    command.extend([
        {"task-reference": "--installer-url=" + installer_url},
        {"task-reference": "--test-packages-url=" + test_packages_url},
    ])
    if mozharness.get('include-blob-upload-branch'):
        command.append('--blob-upload-branch=' + config.params['project'])
    command.extend(mozharness.get('extra-options', []))

    # TODO: remove the need for run['chunked']
    if mozharness.get('chunked') or test['chunks'] > 1:
        # Implement mozharness['chunking-args'], modifying command in place
        if mozharness['chunking-args'] == 'this-chunk':
            command.append('--total-chunk={}'.format(test['chunks']))
            command.append('--this-chunk={}'.format(test['this-chunk']))
        elif mozharness['chunking-args'] == 'test-suite-suffix':
            suffix = mozharness['chunk-suffix'].replace('<CHUNK>', str(test['this-chunk']))
            for i, c in enumerate(command):
                if isinstance(c, basestring) and c.startswith('--test-suite'):
                    command[i] += suffix

    if 'download-symbols' in mozharness:
        download_symbols = mozharness['download-symbols']
        download_symbols = {True: 'true', False: 'false'}.get(download_symbols, download_symbols)
        command.append('--download-symbols=' + download_symbols)
Example #32
 def __init__(self, path):
     self.path = path.rstrip('/')
     if not os.path.exists(path):
         os.makedirs(path)
Example #33
 def __cleanPath(self, path):
     return path.rstrip('/')
Example #34
import glob
import os.path
import pathlib
import subprocess

import pytest

root_dir = os.path.relpath(
    os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
examples = sorted(glob.glob(os.path.join(root_dir, 'examples/*/')))
assert examples  # if empty, it is just the detection failed
examples = [
    path for path in examples
    if not glob.glob((os.path.join(path, 'test*.py')))
]


@pytest.fixture(params=examples,
                ids=[os.path.basename(path.rstrip('/')) for path in examples])
def exampledir(request):
    return pathlib.Path(request.param)


@pytest.fixture(scope='session')
def peering_yaml():
    crd_api = os.environ.get('CRDAPI') or 'v1'
    crd_file = 'peering.yaml' if crd_api == 'v1' else f'peering-{crd_api}.yaml'
    return f'{crd_file}'


@pytest.fixture(scope='session')
def crd_yaml():
    crd_api = os.environ.get('CRDAPI') or 'v1'
    crd_file = 'crd.yaml' if crd_api == 'v1' else f'crd-{crd_api}.yaml'
    return f'examples/{crd_file}'
Example #35
def update_vhosts(vhosts):
    for vhost in vhosts:
        host = vhost.host
        port = vhost.port
        ip_addr = vhost.ip_addr
        domains = vhost.domains
        flags = vhost.flags

        location_tmpl = """
          location    %(path)s {
            proxy_pass  http://upstream_%(upstream)s%(upstream_path)s;
            proxy_http_version 1.1;
            %(redirect_rule)s
            proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
            proxy_set_header        Host            %(host)s;
            %(set_script_name)s
            proxy_set_header        X-Real-IP       $remote_addr;
            proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Port $server_port;
            %(misc)s
          }
    """
        location_tmpl_params = {
            'redirect_rule':
            'proxy_redirect   off;' if flags.get('disableRedirect') else ''
        }

        def render_location(location_dict):
            location_dict['host'] = location_dict.get('host', '$host')
            location_dict['set_script_name'] = location_dict.get(
                'set_script_name', '')
            location_dict['misc'] = location_dict.get('misc', '')
            location_dict['upstream_path'] = location_dict.get(
                'upstream_path', '')
            params = dict(location_dict.items() + location_tmpl_params.items())
            # print params
            return location_tmpl % params

        location_parameters = {
            'upstream': domains[0],
            'path': '/',
            'host': flags.get('forceHost', '$host'),
            'upstream_path': flags.get('upstream_path', '')
        }

        if 'htpasswd_file' in flags:
            location_parameters[
                'misc'] = 'auth_basic "Restricted"; auth_basic_user_file %s;' % (
                    flags['htpasswd_file'])

        if 'location_extra' in flags:
            location_parameters['misc'] = location_parameters[
                'misc'] if 'misc' in location_parameters else ''
            location_parameters['misc'] += flags['location_extra']

        location = render_location(location_parameters)

        location_ssl = location

        upstreams = [{
            'local_port': port,
            'local_address': ip_addr,
            'name': domains[0]
        }]

        if flags.get('sslToPort'):
            upstream_name = "%s_ssl " % domains[0]
            location_ssl = render_location({
                'upstream':
                upstream_name,
                'path':
                '/',
                'host':
                flags.get('forceHost', '$host')
            })
            upstreams.append({
                'local_port': flags.get('sslToPort'),
                'local_address': ip_addr,
                'name': upstream_name
            })

        if flags.get('httpsToHttpPaths'):
            for path in flags.get('httpsToHttpPaths').split(','):
                location_ssl += "\n" + render_location(
                    {
                        'upstream': domains[0],
                        'path': '/%s' % path,
                        'host': flags.get('forceHost', '$host')
                    })

        other_locations = [{
            'upstream': domains[0],
            'path': '@failover',
            'host': flags.get('forceHost', '$host')
        }]
        other_locations_https = []

        path_idx = 0
        for path, path_config in vhost.paths.items():
            upstream_name = "%s_%s " % (domains[0], path_idx)
            upstreams.append({
                'local_port':
                path_config['port'],
                'local_address':
                vm_map[path_config['host']]['local_address'],
                'name':
                upstream_name
            })

            if path_config['secure']:
                other_locations_https.append({
                    'upstream':
                    upstream_name,
                    'path':
                    '/%s' % path,
                    'misc':
                    '''
''',
                    'set_script_name':
                    ('proxy_set_header        SCRIPT_NAME     /%s;' %
                     path.rstrip('/'))
                    if path_config.get('setScriptName') else '',
                    'host':
                    flags.get('forceHost', '$host')
                })
            else:
                other_locations.append({
                    'upstream':
                    upstream_name,
                    'path':
                    '/%s' % path,
                    'misc':
                    '''
	    error_page 500 = @failover;
	    proxy_intercept_errors on;
''',
                    'set_script_name':
                    ('proxy_set_header        SCRIPT_NAME     /%s;' %
                     path.rstrip('/'))
                    if path_config.get('setScriptName') else '',
                    'host':
                    flags.get('forceHost', '$host')
                })

            path_idx += 1

        upstream_tmpl = 'upstream upstream_%(name)s { server %(local_address)s:%(local_port)s; }'

        rewrites = ''

        extra_directives = ''
        if flags.get('block_robots'):
            extra_directives += '''
            location = /robots.txt {
                alias /var/www/robots_deny.txt;
            }
            '''

        if flags.get('allow_robots'):
            extra_directives += '''
            location = /robots.txt {
                alias /var/www/robots_allow.txt;
            }
            '''

        if 'server_config_extra' in flags:
            extra_directives += flags['server_config_extra']

        if flags.get('aliases'):
            aliases = flags.get('aliases').split("\n")
            for alias in aliases:
                extra_directives += '''
            location /%s {
                alias %s;
            }
            ''' % tuple(alias.strip().split('->'))

        if vhost.rewrites:
            rewrites += vhost.rewrites

        location_http = location if flags.get(
            'allow_http') else 'return 301 https://$host$request_uri;'

        if flags.get('httpPaths'):
            for path in flags.get('httpPaths').split(','):
                location_http = "\n" + render_location(
                    {
                        'upstream': domains[0],
                        'path': '/%s' % path,
                        'host': flags.get('forceHost', '$host')
                    }
                ) + "\n" + '''                                                                                                                              location  / { return 301 https://$host$request_uri; }    
            '''

        format_args = {
            'upstreams':
            "\n".join([upstream_tmpl % up for up in upstreams]),
            'public_port':
            port,
            'other_locations':
            "\n".join([
                render_location(location_dict)
                for location_dict in other_locations
            ]),
            'other_locations_https':
            "\n".join([
                render_location(location_dict)
                for location_dict in other_locations_https
            ]),
            'extra_directives':
            extra_directives,
            'domain':
            domains[0],
            'server_names':
            ' '.join(domains)
            if not flags.get('rewriteDomains') else domains[0],
            'location':
            location_ssl,
            'rewrites':
            rewrites,
            'upload_limit':
            flags.get('uploadLimit', '20M'),
            'location_http':
            location_http,
            'cert_dir':
            CERT_DIR
        }

        config = """
        %(upstreams)s
        server {
          listen      80;
          server_name %(server_names)s;
          client_max_body_size %(upload_limit)s;

          %(rewrites)s

          %(location_http)s

          %(other_locations)s

          %(extra_directives)s
        }
        
    """ % format_args

        if not flags.get('noSsl'):
            config += """
        server {
          listen      443 ssl;
          server_name %(server_names)s;
          client_max_body_size %(upload_limit)s;
        
          ssl on;
          ssl_certificate     %(cert_dir)s/%(domain)s.cer;
          ssl_certificate_key %(cert_dir)s/%(domain)s.key;
          ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:AES128-GCM-SHA256:RC4:HIGH:!MD5:!aNULL:!EDH:!CAMELLIA;
          ssl_protocols TLSv1.2 TLSv1.1 TLSv1;
          ssl_prefer_server_ciphers on;
    
          %(location)s

          %(other_locations_https)s

          %(extra_directives)s
        }
    """ % format_args

        if flags.get('rewriteDomains'):
            for domain in domains[1:]:
                config += """
server {
        listen 80;
        server_name %(domain1)s;
        return 301 http://%(domain2)s$request_uri;
}
""" % {
                    'domain1': domain,
                    'domain2': domains[0]
                }

        with open('%s/%s' % (nginx_sites_available, domains[0]), 'w') as f:
            f.write(config)
Example #36
def _basename(path):
    return os.path.basename(path.rstrip(os.path.sep))
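The rstrip matters because os.path.basename returns an empty string for paths that end with a separator; this variant returns the last real component either way:

import os.path

print(os.path.basename("/var/log/"))  # -> ''
print(_basename("/var/log/"))         # -> 'log'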
Example #37
    def process_request(self, req):
        parent_id = None
        parent_realm = req.args.get('realm')
        path = req.args.get('path')
        filename = None

        if not parent_realm or not path:
            raise HTTPBadRequest(_('Bad request'))
        if parent_realm == 'attachment':
            raise TracError(
                tag_("%(realm)s is not a valid parent realm",
                     realm=tag.code(parent_realm)))

        parent_realm = Resource(parent_realm)
        action = req.args.get('action', 'view')
        if action == 'new':
            parent_id = path.rstrip('/')
        else:
            last_slash = path.rfind('/')
            if last_slash == -1:
                parent_id, filename = path, ''
            else:
                parent_id, filename = path[:last_slash], path[last_slash + 1:]

        parent = parent_realm(id=parent_id)
        if not resource_exists(self.env, parent):
            raise ResourceNotFound(
                _("Parent resource %(parent)s doesn't exist",
                  parent=get_resource_name(self.env, parent)))

        # Link the attachment page to parent resource
        parent_name = get_resource_name(self.env, parent)
        parent_url = get_resource_url(self.env, parent, req.href)
        add_link(req, 'up', parent_url, parent_name)
        add_ctxtnav(req, _('Back to %(parent)s', parent=parent_name),
                    parent_url)

        if not filename:  # there's a trailing '/'
            if req.args.get('format') == 'zip':
                self._download_as_zip(req, parent)
            elif action != 'new':
                return self._render_list(req, parent)

        attachment = Attachment(self.env, parent.child(self.realm, filename))

        if req.method == 'POST':
            if action == 'new':
                data = self._do_save(req, attachment)
            elif action == 'delete':
                self._do_delete(req, attachment)
            else:
                raise HTTPBadRequest(_("Invalid request arguments."))
        elif action == 'delete':
            data = self._render_confirm_delete(req, attachment)
        elif action == 'new':
            data = self._render_form(req, attachment)
        else:
            data = self._render_view(req, attachment)

        add_stylesheet(req, 'common/css/code.css')
        return 'attachment.html', data
Example #38
    def generate_collect_sp(self):
        """This function generates shell scripts to be used for collecting data from clients, router and sink. The
        collection can be conducted via IPv4 or IPv6, but IPv6 (6LoWPAN) is not suitable to transmit large files.
        This function will output two shell scripts: collect_ip6.sh and collect_ip4.sh The IP addresses (v4/v6) are
        taking from the sink server configuration file.
        """
        cwd = os.getcwd()
        head = """\
#!/bin/bash
#
#
# Collect data from clients, router and sink server.
#
#

        """
        create_dirs = """\

echo "Creating log directories."
mkdir -p logs/sink logs/router logs/clients

        """
        collect_sink = """\

echo "Collecting data from sink server."
cp -r *.log logs/sink/
cp -r exp logs/sink/

        """
        collect_router = Template("""\

echo "Collecting data from border router for interface $inf."
sshpass -p $password scp -o "StrictHostKeyChecking no" -r $user@\[$ip\]:$remote/$inf/$inf.log $local
sshpass -p $password scp -o "StrictHostKeyChecking no" -r $user@\[$ip\]:$remote/$inf/router.info $local

        """)
        collect_clients = Template("""\

echo "Collecting data from client on IP: $ip."
mkdir -p $local
sshpass -p $password scp -o "StrictHostKeyChecking no" -r $user@\[$ip\]:$remote $local

        """)
        # Create scripts
        os.chdir('scripts')
        # Create script section for collecting data from sink server and border router.
        script = head
        script += create_dirs
        script += collect_sink
        script = script + collect_router.substitute(
            inf='eth0',
            password=self.sink_config_items['PASSWORD'],
            user=self.sink_config_items['USER'],
            ip=self.sink_config_items['ROUTER_IP6'],
            remote=self.sink_config_items['ROUTER_WKD'].rstrip('/'),
            local='logs/router/')
        script = script + collect_router.substitute(
            inf='wpan0',
            password=self.sink_config_items['PASSWORD'],
            user=self.sink_config_items['USER'],
            ip=self.sink_config_items['ROUTER_IP6'],
            remote=self.sink_config_items['ROUTER_WKD'].rstrip('/'),
            local='logs/router/')
        script = script + collect_router.substitute(
            inf='lowpan0',
            password=self.sink_config_items['PASSWORD'],
            user=self.sink_config_items['USER'],
            ip=self.sink_config_items['ROUTER_IP6'],
            remote=self.sink_config_items['ROUTER_WKD'].rstrip('/'),
            local='logs/router/')
        script_share = script
        # Get remote client directory which contains log files.
        path = self.sink_config_items['CLIENT_SCRIPT']
        remote_dir = os.path.dirname(path.rstrip('/'))
        # Generate script for ipv4 addresses
        client_ip4 = self.sink_config_items['CLIENT_IP4'].split(',')
        if len(client_ip4) > 0:  # there are ipv4 addresses provided
            index = 1
            for client_ip in client_ip4:
                script = script + collect_clients.substitute(
                    ip=client_ip,
                    local='logs/clients/' + str(index),
                    remote=remote_dir + '/*.log',
                    password=self.sink_config_items['PASSWORD'],
                    user=self.sink_config_items['USER'])
                index += 1
            self.utl.write_file(script, 'collect_ip4.sh', 'w')
            self.utl.call('sudo chmod +x collect_ip4.sh', shell=True)
        # Generate script for ipv6 addresses
        client_ip6 = self.sink_config_items['CLIENT_IP6'].split(',')
        if len(client_ip6):  # there are ipv6 addresses provided
            index = 1
            script = script_share
            for client_ip in client_ip6:
                script = script + collect_clients.substitute(
                    ip=client_ip,
                    local='logs/clients/' + str(index),
                    remote=remote_dir + '/*.log',
                    password=self.sink_config_items['PASSWORD'],
                    user=self.sink_config_items['USER'])
                index += 1  # missing increment: without it every IPv6 client overwrites logs/clients/1
            self.utl.write_file(script, 'collect_ip6.sh', 'w')
            self.utl.call('sudo chmod +x collect_ip6.sh', shell=True)
        os.chdir('..')
Example #39
def main():
    """Main function for scPBAL janitor."""
    # Parse runtime args
    args = parse_runtime_arguments()

    # Set up the logger
    logging.basicConfig(format='%(levelname)s:%(message)s',
                        level=LOGLEVEL_DICT[args.loglevel])

    # Parse YAML config file in the same directory as this script
    logging.debug("loading YAML config file %s", CONFIG_PATH)

    script_directory_path = os.path.dirname(os.path.abspath(__file__))
    config_file_path = os.path.join(script_directory_path, CONFIG_PATH)

    with open(config_file_path, 'r') as yamlfile:
        config = yaml.safe_load(yamlfile)

    # Get a list of directories to process
    logging.debug("collecting directories to process")

    directory_paths = args.directories

    for directories_file in args.directories_files:
        logging.debug("collecting directories from file %s", directories_file)
        directory_paths += read_lines_from_file(directories_file)

    # Process each collected directory (directory_paths aliases args.directories,
    # so iterate the explicit name rather than relying on that aliasing)
    for path in directory_paths:
        logging.debug("processing %s", path)

        # Validate that the path is in fact a directory
        logging.debug("validating that %s is a directory", path)

        if not os.path.isdir(path):
            # Not a directory. Move on to next directory.
            logging.error("%s is not a directory!", path)
            continue

        # Process the directory name
        old_directory_name = os.path.basename(path.rstrip('/'))

        logging.debug("parsing directory name %s", old_directory_name)
        name_features = parse_directory_name(old_directory_name)

        if not name_features['id']:
            logging.error("failed to parse ID from directory %s",
                          old_directory_name)
            continue

        # Form the new path of the directory, which is, provided the
        # features exist, {id}_{date}_{extra}
        new_directory_name = name_features['id']

        if name_features['date']:
            new_directory_name += '_' + name_features['date']

        if name_features['extra']:
            new_directory_name += '_' + name_features['extra']

        try:
            new_directory_path = os.path.join(
                config['home_scpbal_directory'],
                new_directory_name,
            )
        except KeyError:
            logging.critical("'home_scpbal_directory' not defined in config")
            logging.critical("aborting now")
            sys.exit(1)

        # Import the directory
        if args.move:
            logging.debug("moving %s to %s", path, new_directory_path)
        else:
            logging.debug("copying %s to %s", path, new_directory_path)

        if os.path.exists(new_directory_path):
            # The path we want to copy to already exists!
            logging.error("%s already exists", new_directory_path)
            continue

        if not args.dry_run:
            import_directory(path, new_directory_path, args.move)
Example #40
print('\nSTEP 6: Remove unwanted source cm_caf.xmls')
# Remove all cm_caf.xml files, which you can find in the list 'cm_caf'
for cm_caf_file in cm_caf:
    print('Removing ' + cm_caf_file)
    os.remove(cm_caf_file)

print('\nSTEP 7: Commit to Gerrit')
xml = minidom.parse('android/default.xml')
xml_extra = minidom.parse('extra_packages.xml')
items = xml.getElementsByTagName('project')
items += xml_extra.getElementsByTagName('project')
all_projects = []

for path in iter(proc.stdout.readline, ''):
    # Remove the \n at the end of each line
    path = path.rstrip()
    # Get project root dir from Crowdin's output
    m = re.search(
        '/(.*Superuser)/Superuser.*|/(.*LatinIME).*|/(frameworks/base).*|/(.*CMFileManager).*|/(device/.*/.*)/.*/res/values.*|/(hardware/.*/.*)/.*/res/values.*|/(.*)/res/values.*',
        path)
    for good_path in m.groups():
        # When a project has multiple translatable files, Crowdin will give duplicates.
        # We don't want that (useless empty commits), so we save each project in all_projects
        # and check if it's already in there.
        if good_path is not None and good_path not in all_projects:
            all_projects.append(good_path)
            for project_item in items:
                # We need to have the Github repository for the git push url.
                # Obtain them from android/default.xml or extra_packages.xml.
                if project_item.attributes['path'].value == good_path:
                    if project_item.hasAttribute('revision'):
Example #41
def url_join(path, *args):
    return "/".join([path.rstrip("/")] + list(args))
Example #42
def mktorrent(path,
              outfile,
              tracker=None,
              piecesize=2**18,
              private=True,
              magnet=False,
              source=None):
    """Main function, writes metainfo file, fixed piece size for now"""

    # Common dict items
    torrent = {}
    torrent['info'] = {}
    torrent['info']['piece length'] = piecesize
    torrent['info']['name'] = os.path.basename(path.rstrip(os.sep))

    if tracker:
        torrent['announce'] = tracker[0]

        if len(tracker) > 1:
            torrent['announce-list'] = [tracker]

        if private:
            torrent['info']['private'] = True

        if source is not None:
            torrent['info']['source'] = source

    # Single file case
    if os.path.isfile(path):

        torrent['info']['length'] = os.path.getsize(path)
        torrent['info']['pieces'] = makePieces([path], piecesize)

    # Multiple file case
    elif os.path.isdir(path):

        torrent['info']['files'] = []

        filelist = []
        for root, _, filenames in os.walk(path):
            for filename in filenames:
                filepath = os.path.join(root, filename)
                filelist.append(filepath)

                fileinfo = {
                    'length': os.path.getsize(filepath),
                    'path': filepath.split(os.sep)[1:]
                }  # drop the root directory component from the stored path

                torrent['info']['files'].append(fileinfo)

        torrent['info']['pieces'] = makePieces(filelist, piecesize)

    # Write metainfo file
    with open(outfile, 'wb') as outpt:
        outpt.write(bencode.Bencode(torrent))

    # Print minimal magnet link if requested
    if magnet:
        link = 'magnet:?xt=urn:btih:'
        infohash = hashlib.sha1(bencode.Bencode(torrent['info'])).hexdigest()
        print(link + infohash)

    return 0
Example #43
def add_url_path_component(path, component):
    return '%s/%s' % (path.rstrip('/'), component.lstrip('/'))
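Unlike url_join in Example #41, this helper also strips the component's leading slash, so no double slash can appear at the seam:

print(add_url_path_component("/api/", "/v1/users"))  # -> '/api/v1/users'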
Example #44
 def is_parent_selected(self):
     for path in self.paths:
         if os.path.split(path.rstrip("/"))[1] == "..":
             return True
     return False
Example #45
import os

path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# os.path.join is portable and avoids fragile backslash escapes like '\\tools\pythontool'
path = os.path.join(path, 'tools', 'pythontool')

for root, dirs, files in os.walk(path):
    for f in files:
        print(f)
        if os.path.splitext(f)[1] == '.py':
            os.system('python ' + os.path.join(root, f))
Example #46
def findFiles(
    paths,
    patterns=None,
    exclPatterns=None,
    dirPatterns=None,
    exclDirPatterns=None,
    recursionDepth=None,
    returnDirs=False,
    patWarn=False,
):
    """Search for files that match a given pattern, returning a list of unique paths.
    
    paths may include files and/or directories.
    - All matching directories in paths, and matching subdirectories of same (to the specified recursion depth)
        are searched for files. Matching directories are also included in the output list if returnDirs is true.
    - All matching files in paths or in searched directories are included in the output list.
    
    One use is to handle a list of files that has been dragged and dropped on an applet.

    Inputs:
    - paths: one or a sequence of paths; files are checked to see if they match
        the specified pattern and directories are searched if they don't exceed the recursion level
    - patterns: one or a sequence of inclusion patterns; each file name must match at least one of these;
        if None or [] then ["*"] is used.
        Patterns are matched using fnmatch, which does unix shell-style matching
        (* for any char sequence, ? for one char).
    - exclPatterns: one or a sequence of exclusion patterns; each file name must not match any of these
    - dirPatterns: one or a sequence of inclusion patterns; each directory name must match at least one of these;
        if None or [] then ["*"] is used.
    - exclDirPatterns: one or a sequence of exclusion patterns; each directory name must not match any of these
    - recursionDepth: recursion level; None or an integer n:
        None means infinite recursion
        n means go down n levels from the root path, for example:
        0 means don't even look inside directories in paths
        1 means look inside directories in paths but no deeper
    - returnDirs: include directories in the returned list?
    - patWarn: print to sys.stderr names of files and directories that don't match the pattern

    Returns a list of unique paths.
    
    Notes:
    - Pattern matching is applied to files and directories in the paths argument,
      as well as files and directories in subdirectories.
    - Duplicate paths are removed
    
    Pattern special characters are those for fnmatch:
    *       match any sequence of 0 or more characters
    ?       match any single character
    [seq]   matches any character in seq
    [!seq]  matches any character not in seq
    """
    # process the inputs
    paths = RO.SeqUtil.asSequence(paths)
    patterns = RO.SeqUtil.asSequence(patterns or "*")
    exclPatterns = RO.SeqUtil.asSequence(exclPatterns or ())
    dirPatterns = RO.SeqUtil.asSequence(dirPatterns or "*")
    exclDirPatterns = RO.SeqUtil.asSequence(exclDirPatterns or ())
    if recursionDepth is None:
        recursionDepth = _Inf()
    else:
        recursionDepth = int(recursionDepth)

    # perform the search
    foundPathList = []
    for path in paths:
        if os.path.isfile(path):
            if _nameMatch(path, patterns, exclPatterns):
                foundPathList.append(path)
            elif patWarn:
                sys.stderr.write("Skipping file %r: no pattern match\n" %
                                 (path, ))
        elif os.path.isdir(path):
            strippedPath = path.rstrip(os.path.sep)
            baseLevel = strippedPath.count(os.path.sep)
            if _nameMatch(path, dirPatterns, exclDirPatterns):
                if returnDirs:
                    foundPathList.append(path)
                for root, dirs, files in os.walk(path):
                    if root == ".":
                        root = ""
                    newDirs = []
                    subLevel = root.count(os.path.sep)
                    if recursionDepth is not None and subLevel - baseLevel >= recursionDepth:
                        del dirs[:]
                    else:
                        for d in dirs:
                            dPath = os.path.join(root, d)
                            if _nameMatch(d, dirPatterns, exclDirPatterns):
                                newDirs.append(d)
                                if returnDirs:
                                    foundPathList.append(dPath)
                            elif patWarn:
                                sys.stderr.write(
                                    "Skipping dir %r: no pattern match\n" %
                                    (dPath, ))
                        if len(dirs) > len(newDirs):
                            dirs[:] = newDirs

                    for f in files:
                        fPath = os.path.join(root, f)
                        if _nameMatch(f, patterns, exclPatterns):
                            foundPathList.append(fPath)
                        elif patWarn:
                            sys.stderr.write(
                                "Skipping file %r: no pattern match\n" %
                                (fPath, ))
            elif patWarn:
                sys.stderr.write("Skipping dir %r: no pattern match\n" %
                                 (path, ))
        elif not os.path.exists(path):
            sys.stderr.write("Warning: file does not exist: %s\n" % path)
        else:
            sys.stderr.write("Skipping non-file, non-directory: %s\n" % path)

    return removeDupPaths(foundPathList)
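The docstring above describes the full search API. A minimal usage sketch follows (an editor's addition; the enclosing function's name is not shown in this excerpt and is assumed here to be findFiles, which the RO.SeqUtil calls suggest):

from RO.OS import findFiles  # assumed import, not shown in this excerpt

# Collect .py files at most one level deep, skip hidden directories,
# and warn on stderr about names that match no pattern.
pyFiles = findFiles(
    paths=["."],
    patterns="*.py",
    exclDirPatterns=".*",
    recursionDepth=1,
    patWarn=True,
)
print(pyFiles)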
Example #47
0
    def _get_directory(self, path, content=True) -> Union[dict, None]:
        """Get a dictionary model or none.

        See the get method for parameter and return type details."""
        data = self._directories.find_one({'path': path})
        if data is None:
            return None

        model = {
            'name': os.path.basename(data['path']),
            'path': self.denormalize_path(data['path']),
            'mimetype': None,
            'type': 'directory',
            'writable': True,
            'created': data['created'],
            'last_modified': data['last_modified'],
            'content': None,
            'format': None,
        }
        if not content:
            return model

        # get content
        # this is fun! :)
        # directories are easier than files

        # children: list of content-less models
        children: List[dict] = []
        # match_regex: regex to match exactly one level past the path
        match_regex = '^' + re.escape(path.rstrip('/') + '/') + r'[^\/]+$'

        subdirectories = self._directories.find(
            {'path': {
                '$regex': match_regex
            }})
        for subdirectory in subdirectories:
            print(subdirectory['path'])
            children.append({
                'name': os.path.basename(subdirectory['path']),
                'path': self.denormalize_path(subdirectory['path']),
                'type': 'directory',
                'created': subdirectory['created'],
                'last_modified': subdirectory['last_modified'],
                'mimetype': None,
                'format': 'json',
                'content': None,
            })

        # description of pipeline:
        # $match: is pretty obvious
        # $sort: sorts individual files by their uploadedDate (newest first)
        # $group: groups documents together
        #   - _id: $filename groups by filename
        #   - item is the directory or file in the returned document ($first:
        #     specifies that we want the first matching document (i.e. newest)
        #     and $$ROOT means that we want the entire document
        # $project: we only care about the file we found in the grouping stage
        pipeline = [
            {
                '$match': {
                    'filename': {
                        '$regex': match_regex
                    }
                }
            },
            {
                '$sort': {
                    'uploadedDate': -1
                }
            },
            {
                '$group': {
                    '_id': '$filename',
                    'file': {
                        '$first': '$$ROOT'
                    }
                }
            },
            {
                '$project': {
                    'file': 1,
                    '_id': 0
                }
            },
        ]
        files_cursor = self._files_metadata.aggregate(pipeline)
        for document in files_cursor:
            # note: since file is a document, not a GridOut, we can't use
            # .attribute, we have to use ['attribute']
            file = document['file']
            metadata = file['metadata']
            print(file['filename'])
            if metadata.get('deleted') is True:
                continue
            children.append({
                'name': os.path.basename(file['filename']),
                'path': self.denormalize_path(file['filename']),
                'type': metadata['type'],
                'created': metadata['created'],
                'last_modified': metadata['last_modified'],
                'mimetype': metadata['mimetype'],
                'format': metadata['format'],
                'content': None,
            })
        children.sort(key=lambda i: i['name'])
        model['content'] = children
        model['format'] = 'json'
        return model
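As a quick, self-contained check of the match_regex comment above (an editor's sketch), the pattern matches paths exactly one level below the given directory:

import re

path = 'notebooks'
match_regex = '^' + re.escape(path.rstrip('/') + '/') + r'[^\/]+$'
assert re.search(match_regex, 'notebooks/a.ipynb')           # direct child matches
assert not re.search(match_regex, 'notebooks/sub/b.ipynb')   # grandchild does not
assert not re.search(match_regex, 'notebooks')               # the directory itself does not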
Example #48
0
 def strip_trailing_slash(path):
     return path.rstrip("/")
Example #49
0
 def format_path(self, path):
     return "%s%s" % (path.rstrip("/"), "/")
Example #50
0
 def get(self, path):
     new_path = path.rstrip('/') or '/'
     self.redirect(new_path)
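Example #50's one-liner is a handy idiom: rstrip('/') or '/' drops trailing slashes but never collapses the root path to an empty string. A quick demonstration (editor's sketch):

for raw in ('/docs/', '/docs', '/', '///'):
    print(repr(raw.rstrip('/') or '/'))
# prints '/docs', '/docs', '/', '/'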
Example #51
0
 def _vcs_listdir(self, path, revision):
     output = self._u_invoke_client('manifest', '--rev', revision)
     files = output.splitlines()
     path = path.rstrip(os.path.sep) + os.path.sep
     return [self._u_rel_path(f, path) for f in files if f.startswith(path)]
Example #52
0
                        action="store_true",
                        default=False,
                        help="Make images public")
    parser.add_argument("--family",
                        dest="family",
                        action="store_true",
                        default=False,
                        help="Make images visible to family")
    parser.add_argument("--friends",
                        dest="friends",
                        action="store_true",
                        default=False,
                        help="Make images visible to friends")

    args = parser.parse_args()
    args.path = [path.rstrip('/') for path in args.path]
    action = args.action[0]

    if action == "sync" and args.delete:
        print "The --delete-missing option makes no sense for the `sync` action"
        sys.exit(0)

    for path in args.path:
        if not os.path.exists(path):
            print "Error! `%s` does not exist" % path
            sys.exit(0)

    uploadr = Uploadr(KEY, SECRET, args.path[0])
    for path in args.path:
        sync(path, uploadr, action, args.delete, args.public, args.family,
             args.friends, args.really)
Example #53
0
def cpp_to_o(path):
    return path.rstrip('cpp').replace('/', '') + 'o'
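Example #53 works only by accident: rstrip() strips any trailing run of the characters 'c' and 'p', not the literal suffix ".cpp"; the dot happens to stop the stripping. A sketch of the pitfall and a suffix-safe variant (cpp_to_o_safe is the editor's name, not from the original source; str.removesuffix requires Python 3.9+):

print('src/main.cpp'.rstrip('cpp'))   # 'src/main.'  -- works by accident
print('backup'.rstrip('cpp'))         # 'backu'      -- strips characters, not a suffix

def cpp_to_o_safe(path):
    # Remove the literal '.cpp' suffix, flatten the path, and append '.o'.
    return path.removesuffix('.cpp').replace('/', '') + '.o'

print(cpp_to_o_safe('src/main.cpp'))  # 'srcmain.o'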
Example #54
0
 def listdir(self, path):
     return self.dir_index[path.rstrip("/")]
Example #55
0
    def get_timeline_events(self, req, start, stop, filters):
        show_files_events = 'files_events' in filters
        show_downloads_events = 'files_downloads_events' in filters
        if not show_files_events and not show_downloads_events:
            return
        can_view_files = 'FILES_VIEW' in req.perm
        can_view_downloads = 'FILES_DOWNLOADS_VIEW' in req.perm
        if not can_view_files and not can_view_downloads:
            return

        type_to_can_view = {
            'download': can_view_downloads,
            'normal': can_view_files
        }
        # Get webdav events
        for event in self._get_events(
                start, stop, show_downloads_events and not show_files_events):
            # Return event.
            # Filter out empty parts at the same time.
            filename = os.path.basename(event['to'])
            method_parts = event['method'].split(':')

            from_path = event['from']
            path = event['to']
            if not path:
                continue
            if len(method_parts) == 3:
                method, download_str, dir_or_file = method_parts
            else:
                # Handling for old-style db data
                method, download_str, dir_or_file = (
                    method_parts[0],
                    'normal',
                    'file' if '.' in filename else 'dir',
                )
                if method == 'MOVE' or method == 'COPY':
                    download_str = 'normal-normal'
                from_path = from_path.rstrip('/') if from_path else ''
                path = path.rstrip('/')

            event_time = event['time']
            author = event['author']
            try:
                if method == 'MOVE':
                    result = self.get_move_event(download_str, dir_or_file,
                                                 type_to_can_view,
                                                 show_downloads_events,
                                                 show_files_events, filename,
                                                 path, from_path)
                    if not result:
                        continue
                    path, event_class, title, description = result
                    yield (event_class, event_time, author,
                           (title, description, path))
                if method == 'COPY':
                    result = self.get_copy_event(download_str, dir_or_file,
                                                 type_to_can_view,
                                                 show_downloads_events,
                                                 show_files_events, filename,
                                                 path, from_path)
                    if not result:
                        continue
                    path, event_class, title, description = result
                    yield (event_class, event_time, author,
                           (title, description, path))
                elif method == 'PUT':
                    result = self.get_put_event(download_str, dir_or_file,
                                                type_to_can_view,
                                                show_downloads_events,
                                                show_files_events, filename,
                                                path)
                    if not result:
                        continue
                    event_class, title, description = result
                    yield (event_class, event_time, author,
                           (title, description, path))
                elif method == 'DELETE':
                    result = self.get_delete_event(req, download_str,
                                                   dir_or_file,
                                                   type_to_can_view,
                                                   show_downloads_events,
                                                   show_files_events, filename,
                                                   path)
                    if not result:
                        continue
                    event_class, title, description = result
                    path = ''
                    yield (event_class, event_time, author,
                           (title, description, path))
            except TracError:
                self.log.warning("Invalid Files event data: %s" % (event, ))
Example #56
0
def mounted(path):
    ''' Return True iff path mounted. '''

    return path.rstrip('/') in mountpoints()
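Example #56 depends on a mountpoints() helper that is not shown. A minimal Linux-only sketch (an assumption, not the original implementation) could parse /proc/mounts, whose second whitespace-separated field is the mount point:

def mountpoints():
    # Hypothetical helper: one mount entry per line in /proc/mounts.
    # (Mount points containing spaces appear octal-escaped, e.g. '\040'.)
    with open('/proc/mounts') as f:
        return [line.split()[1] for line in f]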
Example #57
0
 def __init__(self, path, skip_before=0):
     self._skip_before = datetime.datetime.utcfromtimestamp(skip_before)
     self._path = path.rstrip('/')
Example #58
0
 def folder(self, path="/Shared", **kwargs):
     """Get a Folder object for the specified path"""
     return resources.Folder(self, path=path.rstrip('/'), **kwargs)
Example #59
0
 def isdir(self, path):
     return path.rstrip("/") in self.dir_index
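Examples #54 and #59 both consult a dir_index mapping whose shape is not shown in this excerpt. A plausible self-contained sketch (an editor's assumption): a dict keyed by slash-stripped directory paths, so both lookups tolerate a trailing slash:

class DirIndexDemo:
    # Hypothetical stand-in for the original class; dir_index maps a
    # normalized directory path to the names of its entries.
    def __init__(self):
        self.dir_index = {'': ['docs'], 'docs': ['a.txt', 'b.txt']}

    def listdir(self, path):
        return self.dir_index[path.rstrip('/')]

    def isdir(self, path):
        return path.rstrip('/') in self.dir_index

d = DirIndexDemo()
print(d.isdir('docs/'))    # True
print(d.listdir('docs'))   # ['a.txt', 'b.txt']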