def get_calls(sources, destination):
    result = []
    options, destination = split_destination(destination)
    url = urlparse.urlsplit(destination)
    if url[0] in ('scp', ''):
        netloc, path = url[1], url[2]
        assert path.startswith('/')
        path = path[1:]
        result.append(['scp'] + sources + ['%s:%s' % (netloc, path)])
    if url[0] in ('http', 'https'):
        if destination.endswith('/'):
            destination = destination[:-1]
        default_params = ['curl']
        default_params.extend(options)
        default_params.extend(['-X', 'PUT', '--data-binary'])
        default_params = tuple(default_params)
        for source in sources:
            source_name = os.path.basename(source)
            result.append(
                list(default_params +
                     ('@' + source, '%s/%s' % (destination, source_name))))
    if url[0] in ('sftp', ):
        netloc, path = url[1], url[2]
        assert path.startswith('/')
        for source in sources:
            result.append(
                ['echo', '"put %s"' % source, '|', 'sftp',
                    '-b', '-', "%s:%s" % (netloc, path)])
    return result
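
# Hedged standalone sketch of the scheme/netloc/path split that get_calls above
# relies on (urllib.parse in Python 3; the snippet above uses the Python 2 urlparse module).
from urllib.parse import urlsplit

url = urlsplit('scp://backup.example.com/srv/uploads')
print(url[0], url[1], url[2])   # scp backup.example.com /srv/uploads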
Example #2
 def resolve(self, path):
     if path.startswith("file://"):
         return File(path[7:])
     elif path.startswith(("http://", "https://")):
         return URL(path)
     else:
         return File(path)
Example #3
    def __call__(self, environ, start_response):
        path = environ['PATH_INFO'].strip('/') or 'index.html'

        if path.startswith('static/') or path == 'index.html':
            try:
                data = open(path).read()
                lmt = os.path.getmtime(path)
            except Exception:
                return not_found(start_response)
            lmt = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(lmt))
            headers = [('Last-Modified', lmt)]
            if path.endswith(".js"):
                content_type = "text/javascript"
            elif path.endswith(".css"):
                content_type = "text/css"
            elif path.endswith(".jpg"):
                content_type = "image/jpeg"
                headers += [('Cache-Control', 'max-age=86400')]
            elif path.endswith(".png"):
                content_type = "image/png"
                headers += [('Cache-Control', 'max-age=86400')]
            else:
                content_type = "text/html"

            start_response('200 OK', headers + [('Content-Type', content_type)])
            return [data]

        if path.startswith("socket.io"):
            socketio_manage(environ, {'/cpu': CPUNamespace})
        else:
            return not_found(start_response)
Example #4
 def _handle_results(self):
     if self._worktree_watch is not None:
         for _, path in self._worktree_watch.read():
             if not self._running:
                 break
             if self._force_notify:
                 continue
             path = self._worktree + '/' + self._transform_path(path)
             if (path != self._git_dir
                     and not path.startswith(self._git_dir + '/')
                     and not os.path.isdir(path)):
                 if self._use_check_ignore:
                     self._file_paths.add(path)
                 else:
                     self._force_notify = True
     for _, path in self._git_dir_watch.read():
         if not self._running:
             break
         if self._force_notify:
             continue
         path = self._transform_path(path)
         if path.endswith('.lock'):
             continue
         if path == 'config':
             self._force_config = True
             continue
         if (path == 'head'
                 or path == 'index'
                 or path.startswith('refs/')):
             self._force_notify = True
Example #5
def __check_for_path(path):
    """
    This function checks a string for full file path identifiers.
    """
    return path.startswith("/") or path.startswith("$") or \
           path.startswith(".") or path.startswith("~") or \
           path[0].isalpha()
Example #6
    def doctor(link_dir, path):
        # Don't mess with paths that just refer to another link:
        if path.rstrip()[-1] == '_': return path

        path = path.lstrip()

        # Don't mess with paths that point somewhere in the outside universe:
        if path.startswith('http://'): return ' ' + path

        # Prepend link_dir to path
        if link_dir.startswith('./'): path = link_dir[2:] + '/' + path
        elif link_dir != '.': path = link_dir + '/' + path

        # Prepare dir (start_dir, minus initial './')
        if start_dir == '.': dir = ''
        elif start_dir.startswith('./'): dir = start_dir[2:]
        else: dir = start_dir

        rest=' '
        last_dir = None
        while dir and dir != last_dir:
            if path.startswith(dir + '/'):
                ans = rest + path[len(dir) + 1:]
                #print "doctor(%s) abbr:" % (path.rstrip(),), ans
                return ans
            rest += '../'
            last_dir = dir
            dir, ignore = os.path.split(dir)
        ans = rest + path
        #print "doctor(%s) abs:" % (path.rstrip(),), ans
        return ans
Example #7
def normalize_xpath(path):
    if path.startswith('//'):
        return '.' + path # avoid warnings
    elif path.startswith('.//'):
        return path
    else:
        raise InvalidCheck('Non-absolute XPath is not supported due to implementation issues')
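
# Hedged usage sketch, assuming normalize_xpath and the InvalidCheck exception
# above are in scope (both come from the surrounding test harness).
print(normalize_xpath('//a[@href]'))   # prints .//a[@href]  (prefixed to avoid warnings)
print(normalize_xpath('.//pre/code'))  # returned unchanged
# normalize_xpath('span') raises InvalidCheck: non-absolute XPaths are rejected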
Example #8
def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True):
    """Copy (local) recipe files, including both files included via include/require,
    and files referred to in the SRC_URI variable."""
    import bb.fetch2
    import oe.path

    # FIXME need a warning if the unexpanded SRC_URI value contains variable references

    uris = (d.getVar("SRC_URI", True) or "").split()
    fetch = bb.fetch2.Fetch(uris, d)
    if download:
        fetch.download()

    # Copy local files to target directory and gather any remote files
    bb_dir = os.path.dirname(d.getVar("FILE", True)) + os.sep
    remotes = []
    includes = [
        path for path in d.getVar("BBINCLUDED", True).split() if path.startswith(bb_dir) and os.path.exists(path)
    ]
    for path in fetch.localpaths() + includes:
        # Only import files that are under the meta directory
        if path.startswith(bb_dir):
            if not whole_dir:
                relpath = os.path.relpath(path, bb_dir)
                subdir = os.path.join(tgt_dir, os.path.dirname(relpath))
                if not os.path.exists(subdir):
                    os.makedirs(subdir)
                shutil.copy2(path, os.path.join(tgt_dir, relpath))
        else:
            remotes.append(path)
    # Simply copy whole meta dir, if requested
    if whole_dir:
        shutil.copytree(bb_dir, tgt_dir)

    return remotes
Example #9
def get_absolute_path(path, abspath_mode = ABSPATH_IMAGE):
    """Convert a path into an absolute path using the path conventions
    
    If a path starts with http:, https: or ftp:, leave it unchanged.
    If a path starts with "./", then make the path relative to the
    Default Output Folder.
    If a path starts with "&/", then make the path relative to the
    Default Input Folder.
    If a "path" has no path component then make the path relative to
    the Default Output Folder.
    """
    if abspath_mode == ABSPATH_OUTPUT:
        osep = '.'
        isep = '&'
    elif abspath_mode == ABSPATH_IMAGE:
        osep = '&'
        isep = '.'
    else:
        raise ValueError("Unknown abspath mode: %s"%abspath_mode)
    if is_url_path(path):
        return path
    if (path.startswith(osep+os.path.sep) or
        ("altsep" in os.path.__all__ and os.path.altsep and
         path.startswith(osep+os.path.altsep))):
        return os.path.join(get_default_output_directory(), path[2:])
    elif (path.startswith(isep+os.path.sep) or
          ("altsep" in os.path.__all__ and os.path.altsep and
           path.startswith(isep+os.path.altsep))):
        return os.path.join(get_default_image_directory(), path[2:])
    elif len(os.path.split(path)[0]) == 0:
        return os.path.join(get_default_output_directory(), path)
    else:
        return str(get_proper_case_filename(os.path.abspath(path)))
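
# A minimal standalone sketch of the ABSPATH_OUTPUT convention documented above;
# the helpers used by the real function (get_default_output_directory,
# get_default_image_directory, get_proper_case_filename) are project-specific,
# so plain stand-in values are assumed here.
import os

DEFAULT_OUTPUT = "/data/output"   # assumed stand-in for the Default Output Folder
DEFAULT_INPUT = "/data/input"     # assumed stand-in for the Default Input Folder

def sketch_absolute_path(path):
    if path.startswith("." + os.path.sep):
        return os.path.join(DEFAULT_OUTPUT, path[2:])
    if path.startswith("&" + os.path.sep):
        return os.path.join(DEFAULT_INPUT, path[2:])
    if not os.path.split(path)[0]:
        return os.path.join(DEFAULT_OUTPUT, path)
    return os.path.abspath(path)

print(sketch_absolute_path("./measurements.csv"))   # /data/output/measurements.csv
print(sketch_absolute_path("&/plate1/img.tif"))     # /data/input/plate1/img.tif
print(sketch_absolute_path("summary.csv"))          # /data/output/summary.csv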
Example #10
 def _cut_prefix(self, path):
     assert path.startswith(self.prefix), \
             'Path passed to _cut_prefix does not start with prefix'
     path = path[len(self.prefix):]
     if path.startswith('/'):
         path = path[1:]
     return path
Example #11
 def _full_path(self, path):
     if path.startswith('~'):
         return os.path.expanduser(path)
     elif path.startswith('/'):
         return path
     else:
         return os.path.join(self.plan_base_path, path)
Example #12
 def _path(self, path):
     if path.startswith('sandbox://'):
         path = path.replace('sandbox://', 'temp://xbmcup/' + self._sandbox + '/')
     special = [x for x in self._special if path.startswith(x)]
     if special:
         return os.path.normpath(xbmc.translatePath(path.replace(special[0], 'special://' + special[0].replace(':/', ''))))
     return os.path.normpath(path)
Example #13
def app(environ, start_response):
    def make_response(body, status='200 OK', headers=(),
                      content_type='text/html; charset=UTF-8'):
        start_response(status, [
            ('Content-Type', content_type),
            ('Content-Length', str(len(body))),
        ] + list(headers))
        return [body]

    path = environ['PATH_INFO']

    if path == '/favicon.ico':
        with open(FAVICON, 'rb') as fd:
            return make_response(fd.read(), content_type='image/x-icon')

    elif path.startswith('/pdf/') and len(path) > 5: # len('/pdf/') == 5
        url = normalize_url(path[5:], environ.get('QUERY_STRING'))
        body = HTML(url=url).write_pdf(stylesheets=[STYLESHEET])
        filename = url.rstrip('/').rsplit('/', 1)[-1] or 'out'
        return make_response(body, content_type='application/pdf',
            headers=[('Content-Disposition',
                      'attachment; filename=%s.pdf' % filename)])

    elif path.startswith('/view/'):
        url = normalize_url(path[6:], environ.get('QUERY_STRING'))
        return make_response(render_template(url))

    elif path == '/':
        args = parse_qs(environ.get('QUERY_STRING') or '')
        url = normalize_url(args.get('url', [''])[0])
        return make_response(render_template(url))

    return make_response(b'<h1>Not Found</h1>', status='404 Not Found')
Example #14
    def get_config(self, path):
        if path.startswith("http://"):
            # Do 3 trials to get the kick start
            # TODO: make sure the installer run after network is up
            for x in range(0,3):
                err_msg = ""
                try:
                    response = requests.get(path, timeout=3)
                    if response.ok:
                        return json.loads(response.text)
                    err_msg = response.text
                except Exception as e:
                    err_msg = e
                print >> sys.stderr, "Failed to get the kickstart file at {0}, error msg: {1}".format(path, err_msg)
                print "Failed to get the kickstart file at {0}, retry in a second".format(path)
                time.sleep(1)


            # Something went wrong
            print "Failed to get the kickstart file at {0}, exiting the installer, check the logs for more details".format(path)
            raise Exception(err_msg)
        else:
            if path.startswith("cdrom:/"):
                self.mount_RPMS_cd()
                path = os.path.join(self.cd_path, path.replace("cdrom:/", "", 1))
            return (JsonWrapper(path)).read();
Example #15
 def handle_content(self, creation_method, *args):
     mimetype = self.get_attribute("mimetype",
                                   self.DEFAULTS["content@mimetype"])
     url = self.get_attribute("url", "")
     if url and not self.standalone_xml:
         purl = urlparse(url)
         scheme, netloc, path = purl[:3]
         if scheme == '' and netloc == '':
             if path.startswith("../"):
                 url = path[3:] # make URL relative to package URI
             elif not path.startswith("/"):
                 url = "packaged:/%s" % path
     model = self.get_attribute("model", "")
     encoding = self.get_attribute("encoding", "")
     elt = creation_method(*args + (mimetype, "", url))
     self.do_or_postpone(model, elt._set_content_model)
     elem = self.complete_current()
     if len(elem):
         raise ParserError("no XML tag allowed in content; use &lt;tag>")
     data = elem.text
     if url and data and data.strip():
         raise ParserError("content can not have both url (%s) and data" %
                           url)
     elif data:
         if encoding:
             if encoding == "base64":
                 data = base64.decodestring(data)
             else:
                 raise ParserError("encoding %s is not supported", encoding)
         elt.enter_no_event_section()
         try:
             elt.content_data = data
         finally:
             elt.exit_no_event_section()
     return elt
Example #16
 def _get_rel_path(self, path):
     assert os.path.isabs(path)
     if path.startswith(self._build_root):
         return os.path.relpath(path, self._build_root)
     if path.startswith(self._source_root):
         return os.path.relpath(path, self._source_root)
     raise ValueError("path not under build nor source tree: {0}".format(path))
Example #17
    def get_abs_links(self, url, base_depth):
        if self.base:
            url = self.base
        full_urls = []
        root = urlparse(url)
        root_dir = os.path.split(root.path)[0]
        for link in self.links:
            parsed = urlparse(link)
            if not parsed.netloc: # does it have no protocol or host, i.e relative
                if parsed.path.startswith("/"):
                    parsed = root[0:2] + parsed[2:5] + (None,)
                else:
                    dir = root_dir
                    path = parsed.path
                    while True:
                        if path.startswith("../"):
                            path=path[3:]
                            dir=os.path.split(dir)[0]
                        elif path.startswith("./"):
                            path=path[2:]
                        else:
                            break

                    parsed = root[0:2] + (os.path.join(dir, path),) + parsed[3:5] + (None,)
                new_link = urlunparse(parsed)
                logging.debug("relative %s -> %s"%(link, new_link))
                link=new_link

            else:
                logging.debug("absolute %s"%link)
            full_urls.append(link)
        return [Link(link, base_depth+1) for link in full_urls]
Example #18
 def _fix_path(self, path):
     """ Translates special prefixes. """
     if path.startswith(HOME_DIRECTORY):
         path = os.path.join(self.home_dir, path[len(HOME_DIRECTORY) :])
     elif path.startswith(WORKING_DIRECTORY):
         path = os.path.join(self.work_dir, path[len(WORKING_DIRECTORY) :])
     return path
Example #19
    def parse(self, path):
        u''' source and publish paths correspond one-to-one '''

        path = os.path.realpath(path)

        if path.startswith(self.source_path):
            return path, None

        elif self.publish_path and path.startswith(self.publish_path):
            package_path = path[len(self.publish_path) + 1:]

            if os.path.splitext(path)[1] == '.css':
                # xxx-all-min.css --> xxx
                package_path = os.path.splitext(package_path)[0].split('-')
                if len(package_path) < 3: return (None, None)
                name = '-'.join(package_path[:-2])
                mode = package_path[-2]
                source = os.path.join(self.source_path, name + '.css')
            else:
                source = os.path.join(self.source_path, package_path)
                mode = None

            return source, mode

        # this may be something under lib
        else:
            return None, None
Example #20
    def __call__(self, environ, start_response):
        path = environ['PATH_INFO'].strip('/') or 'index.html'

        if path.startswith('static/') or path == 'index.html':
            try:
                data = open(path).read()
            except Exception:
                return not_found(start_response)

            if path.endswith(".js"):
                content_type = "text/javascript"
            elif path.endswith(".css"):
                content_type = "text/css"
            elif path.endswith(".swf"):
                content_type = "application/x-shockwave-flash"
            else:
                content_type = "text/html"

            start_response('200 OK', [('Content-Type', content_type)])
            return [data]
        if path.startswith("socket.io"):
            environ['scan_ts'] = self.scan_ts
            environ['scan_interval'] = self.scan_interval
            cur_ts = datetime.utcnow()
            socketio_manage(environ, {'/services': ServicesNamespace,
                                      '/sysinfo': SysinfoNamespace,
                                      '/cpu-widget': CPUWidgetNamespace,
                                      '/memory-widget': MemoryWidgetNamespace,
                                      '/network-widget': NetworkWidgetNamespace,
                                      '/disk-widget': DisksWidgetNamespace,
                                      '/logmanager': LogManagerNamespace,
                                      '/pincardmanager': PincardManagerNamespace
            })
            if ((cur_ts - self.scan_ts).total_seconds() > self.scan_interval):
                self.scan_ts = cur_ts
Example #21
    def manage_content(self, xelt, required=True):
        content_tag = self.tag_template % "content"
        content_xelt = xelt.find(content_tag)
        if not required and content_xelt is None:
            return [ "x-advene/none", "", "" ], None, None

        mimetype = content_xelt.get("mimetype",
                                    self.DEFAULTS["content@mimetype"])
        url = content_xelt.get("url", "")
        if url and not self.standalone_xml:
            purl = urlparse(url)
            scheme, netloc, path = purl[:3]
            if scheme == '' and netloc == '':
                if path.startswith("../"):
                    url = path[3:] # make URL relative to package URI
                elif not path.startswith("/"):
                    url = "packaged:/%s" % path
        args = [mimetype , "", url]
        content_model = content_xelt.get("model", "")
        encoding = content_xelt.get("encoding", "")
        if len(content_xelt):
            raise ParserError("%s: for XML contents, use &lt;tag> or CDATA"
                              % xelt.attrib["id"])
        data = content_xelt.text
        if url and data and data.strip():
            raise ParserError("%s: content has both url (%s) and data" %
                              (xelt.attrib["id"], url))
        elif data:
            if encoding:
                if encoding == "base64":
                    data = base64.decodestring(data)
                else:
                    raise ParserError("encoding %s is not supported", encoding)
        return args, content_model, data
Example #22
    def __call__(self, environment, start_response):
        """Handle a WSGI request. This is the entry point for the WSGI
        application.

        Any request paths with the prefix /media/ will be treated as static
        files and served from the media directory specified when constructing
        the class. This is suitable for the simple Python server included with
        the application, but if you are setting up a production server you
        probably want to let the server handle the static files rather than
        making a WSGI request for them.

        :param environment: The WSGI environment containing the request.
        :type environment: dictionary
        :param start_response: The WSGI function to start a response.
        :type start_response: function

        """
        # Get the path and query string.
        path = environment['PATH_INFO']

        # Strong motion events.
        if path.startswith('/events'):
            return self.serve_events(path[8:], start_response)

        # Static media files.
        if path.startswith('/media/'):
            return self.serve_media(path[7:], start_response)

        # No idea what they were after.
        start_response(STATUS_CODE[404], [])
        return ('File not found',)
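
# Standalone sketch of the prefix routing described in the docstring above:
# '/events...' and '/media/...' requests are dispatched by stripping their
# prefixes (the endpoint names here are illustrative, not part of the class).
def route(path):
    if path.startswith('/events'):
        return ('events', path[8:])
    if path.startswith('/media/'):
        return ('media', path[7:])
    return ('not-found', None)

print(route('/events/2011/0001.json'))  # ('events', '2011/0001.json')
print(route('/media/css/site.css'))     # ('media', 'css/site.css')
print(route('/unknown'))                # ('not-found', None)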
Example #23
def list_site_packages_paths():
    site_packages_paths = set([site.USER_SITE])
    try:
        site_packages_paths.update(site.getsitepackages())
    except AttributeError:
        pass
    try:
        user_site = site.getusersitepackages()
        if isinstance(user_site, str):
            site_packages_paths.add(user_site)
        else:
            site_packages_paths.update(user_site)
    except AttributeError:
        pass
    try:
        virtualenv_path = os.environ['VIRTUAL_ENV']
    except KeyError:
        pass
    else:
        virtualenv_src_path = os.path.join(virtualenv_path, 'src')
        site_packages_paths.update(
            path
            for path in sys.path
            if path.startswith(virtualenv_path) and (
                'site-packages' in path or
                path.startswith(virtualenv_src_path)
            )
        )
    return site_packages_paths
Example #24
 def inpath(self, path):
   if path.startswith('@'):
     return self.project.genpath(path[1:])
   elif path.startswith('//'):
     return self.project.inpath(path[2:])
   else:
     return self.project.inpath(self.relpath, path)
Example #25
def path2uri(path):
    r"""
    Converts a path to a URI with the file scheme.

    If a path does not start with a slash (/), it is considered to be an invalid
    path and returned directly.

    >>> path2uri('/path/to/file')
    'file:///path/to/file'
    >>> path2uri('file:///path/to/file')
    'file:///path/to/file'
    >>> path2uri(u'/path/to/file')
    'file:///path/to/file'
    >>> path2uri('invalid/path')
    'invalid/path'
    >>> path2uri('/\xe8\xb7\xaf\xe5\xbe\x84/\xe6\x96\x87\xe4\xbb\xb6')
    'file:///%E8%B7%AF%E5%BE%84/%E6%96%87%E4%BB%B6'
    """
    if path.startswith('~'):
        path = os.path.expanduser(path)
    if not path.startswith('/'):
        return path
    if isinstance(path, unicode):
        path = path.encode('utf8')
    return 'file://' + urllib.pathname2url(path)
Example #26
def extract_docker_layer(img: tarfile.TarFile,
                         layer_id: str,
                         extract_path: str):
    with tarfile.open(fileobj=img.extractfile('%s/layer.tar' % layer_id),
                      errorlevel=0,
                      dereference=True) as layer:

        layer.extractall(path=extract_path)

        log.debug('processing whiteouts')
        for member in layer.getmembers():
            path = member.path
            if path.startswith('.wh.') or '/.wh.' in path:
                if path.startswith('.wh.'):
                    newpath = path[4:]
                else:
                    newpath = path.replace('/.wh.', '/')

                try:
                    log.debug('removing path %s', newpath)
                    os.unlink(path)
                    os.unlink(newpath)
                except OSError as err:
                    if err.errno != errno.ENOENT:
                        raise
Example #27
def sendpkm():

    print 'Note: you must exit the GTS before sending a pkm'
    print '4th Gen Pokemon files are currently unsupported.'
    print 'Enter the path or drag the pkm file here'
    print '(Type Back to go back)'

    while True:
        path = raw_input().strip()

        if path == "Back" or path == "back": return
               
        path = os.path.normpath(path)
        if system() != 'Windows':
            path = path.replace('\\', '')

        if path.startswith('"') or path.startswith("'"):
            path = path[1:]
        if path.endswith('"') or path.endswith("'"):
            path = path[:-1]
        if os.path.exists(path) and path.lower().endswith('.pkm'): break
        else:
            print 'Invalid file name, try again'
            continue
        
    sendingpkm(path)
Example #28
def multisend():

    print 'Note: you must exit the GTS before sending each Pokemon'
    print '4th Gen Pokemon files are currently unsupported.\n'
    print 'Enter the path or drag the pkm file here, then\npress Enter, and enter another path. Finish by typing\nDone then press Enter.'
    print '(Type Back to go back)'

    multi = list()

    while True:
        path = raw_input().strip()

        if path == "Back" or path == "back": return
        
        path = os.path.normpath(path)
        if system() != 'Windows':
            path = path.replace('\\', '')

        if path == 'done' or path == 'Done':
            multisender(multi)
            break

        if path.startswith('"') or path.startswith("'"):
            path = path[1:]
        if path.endswith('"') or path.endswith("'"):
            path = path[:-1]
        if os.path.exists(path) and path.lower().endswith('.pkm'):
            multi.append(path)
        else:
            print 'Invalid file name, try again'
            continue
Example #29
def isRemovable(path):

    if path.startswith('/dev/mapper') or path.startswith('/dev/dm-') or path.startswith('dm-'):
        return False 

    if path.startswith("/dev/"):
        dev = re.match("/dev/(.*)", path).group(1)
    else:
        dev = path
        
    dev = dev.replace("/", "!")

    if dev.startswith("xvd"):
        is_cdrom = False
        f = None
        try:
            f = open(path, 'r')
            if fcntl.ioctl(f, CDROM.CDROM_GET_CAPABILITY) == 0:
                is_cdrom = True
        except: # Any exception implies this is not a CDROM
            pass

        if f is not None:
            f.close()

        if is_cdrom:
            return True

    if os.path.exists("/sys/block/%s/removable" % dev):
        return int(__readOneLineFile__("/sys/block/%s/removable" % dev)) == 1
    else:
        return False
Example #30
    def get_config(self, path):
        """kick start configuration"""
        if path.startswith("http://"):
            # Do 5 trials to get the kick start
            # TODO: make sure the installer run after network is up
            ks_file_error = "Failed to get the kickstart file at {0}".format(path)
            wait = 1
            for _ in range(0, 5):
                err_msg = ""
                try:
                    response = requests.get(path, timeout=3)
                    if response.ok:
                        return json.loads(response.text)
                    err_msg = response.text
                except Exception as e:
                    err_msg = e

                modules.commons.log(modules.commons.LOG_ERROR,
                                    ks_file_error)
                modules.commons.log(modules.commons.LOG_ERROR,
                                    "error msg: {0}".format(err_msg))
                print(ks_file_error)
                print("retry in a second")
                time.sleep(wait)
                wait = wait * 2

            # Something went wrong
            print(ks_file_error)
            print("exiting the installer, check the logs for more details")
            raise Exception(err_msg)
        else:
            if path.startswith("cdrom:/"):
                self.mount_cd()
                path = os.path.join(self.cd_path, path.replace("cdrom:/", "", 1))
            return (JsonWrapper(path)).read()
Example #31
def check_file(path, fix_guards=False):
    """Check whether the file has a correct header guard.

    A header guard can either be a #pragma once, or else a matching set of
        #ifndef PATH_TO_FILE_
        #define PATH_TO_FILE_
        ...
        #endif  // PATH_TO_FILE_
    preprocessor directives, where both '.' and '/' in the path are
    mapped to '_', and a trailing '_' is appended.

    In either the #pragma once case or the header guard case, it is
    assumed that there is no trailing or leading whitespace.

    """

    # Only check .h files
    if path[-2:] != '.h':
        return True

    assert(path.startswith(FUCHSIA_ROOT))
    relative_path = path[len(FUCHSIA_ROOT):].strip('/')
    upper_path = relative_path.upper()
    header_guard = re.sub(disallowed_header_characters, '_', upper_path) + '_'
    header_guard = adjust_for_layer(header_guard)
    all_header_guards[header_guard].append(path)

    ifndef = re.compile('^#ifndef %s$' % header_guard)
    define = re.compile('^#define %s$' % header_guard)
    endif = re.compile('^#endif +// *%s$' % header_guard)

    found_pragma_once = False
    found_ifndef = False
    found_define = False
    found_endif = False

    with open(path, 'r') as f:
        for line in f.readlines():
            match = pragma_once.match(line)
            if match:
                if found_pragma_once:
                    print('%s contains multiple #pragma once' % path)
                    return False
                found_pragma_once = True

            match = ifndef.match(line)
            if match:
                if found_ifndef:
                    print('%s contains multiple ifndef header guards' % path)
                    return False
                found_ifndef = True

            match = define.match(line)
            if match:
                if found_define:
                    print('%s contains multiple define header guards' % path)
                    return False
                found_define = True

            match = endif.match(line)
            if match:
                if found_endif:
                    print('%s contains multiple endif header guards' % path)
                    return False
                found_endif = True

    if found_pragma_once:
        if found_ifndef or found_define or found_endif:
            print('%s contains both #pragma once and header guards' % path)
            return False
        if not fix_guards:
            return True

    if found_ifndef and found_define and found_endif:
        return True

    if not found_ifndef:
        print('%s did not contain ifndef part of its header guard' % path)
    elif not found_define:
        print('%s did not contain define part of its header guard' % path)
    elif not found_endif:
        print('%s did not contain endif part of its header guard' % path)
    elif fix_guards:
        if found_pragma_once:
            print('%s contained #pragma once instead of a header guard' % path)
        else:
            print('%s did not contain a header guard or the header guard did '
                  'not match the file path' % path)
    else:
        print('%s contained neither a proper header guard nor #pragma once' %
              path)

    header_guards_fixed = False
    if fix_guards:
        header_guards_fixed = fix_header_guard(path, header_guard)

    if not header_guards_fixed:
        print('Allowable header guard values are %s' % all_header_guards.keys())

    return False
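
# A small standalone sketch of the path -> header-guard mapping described in the
# docstring above; the real script also strips FUCHSIA_ROOT and applies a
# layer-specific adjustment, and its disallowed-character regex may differ.
import re

def guard_for(relative_path):
    return re.sub(r'[^A-Z0-9_]', '_', relative_path.upper()) + '_'

print(guard_for('src/lib/fxl/strings.h'))  # SRC_LIB_FXL_STRINGS_H_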
Example #32
 def install(self, path, callback):
     if path.startswith(Modules.LIBRARY_ROOT):
         Downloader.download(
             path, lambda t, result: self._on_install(t, result, callback))
     else:
         self._on_install('success', path, callback)
Example #33
 def chdir(self, path):
     if (path.startswith("/")):
         self.workdir = ch.Path(path)
     else:
         self.workdir //= path
Example #34
 def normalizeReturnPath(self, path):
     if path.endswith('/'):
         path = path[:-1]
     if not path.startswith('/'):
         path = '/' + path
     return path
Example #35
 def map(self, path):
     for prefix, replacement in self._map:
         if path.startswith(prefix):
             return path.replace(prefix, replacement, 1)
     return path
Example #36
    def update_zone_from_path(self, name):
        filename = os.path.basename(name)
        path = os.path.dirname(name)

        if not os.path.exists(name):
            # removed file

            if path.startswith(config.ETC_FIREWALLD_ZONES):
                # removed custom zone
                for x in self._zones.keys():
                    obj = self._zones[x]
                    if obj.filename == filename:
                        del self._zones[x]
                        if obj.name in self._builtin_zones:
                            return ("update", self._builtin_zones[obj.name])
                        return ("remove", obj)
            else:
                # removed builtin zone
                for x in self._builtin_zones.keys():
                    obj = self._builtin_zones[x]
                    if obj.filename == filename:
                        del self._builtin_zones[x]
                        if obj.name not in self._zones:
                            # update dbus zone
                            return ("remove", obj)
                        else:
                            # builtin hidden, no update needed
                            return (None, None)

            # zone not known to firewalld, yet (timeout, ..)
            return (None, None)

        # new or updated file

        log.debug1("Loading zone file '%s'", name)
        try:
            obj = zone_reader(filename, path)
        except Exception as msg:
            log.error("Failed to load zone file '%s': %s", filename, msg)
            return (None, None)

        obj.fw_config = self

        if path.startswith(config.ETC_FIREWALLD_ZONES) and \
           len(path) > len(config.ETC_FIREWALLD_ZONES):
            # custom combined zone part
            obj.name = "%s/%s" % (os.path.basename(path),
                                  os.path.basename(filename)[0:-4])

        # new zone
        if obj.name not in self._builtin_zones and obj.name not in self._zones:
            self.add_zone(obj)
            return ("new", obj)

        # updated zone
        if path.startswith(config.ETC_FIREWALLD_ZONES):
            # custom zone update
            if obj.name in self._zones:
                obj.default = self._zones[obj.name].default
                self._zones[obj.name] = obj
            return ("update", obj)
        else:
            if obj.name in self._builtin_zones:
                # builtin zone update
                del self._builtin_zones[obj.name]
                self._builtin_zones[obj.name] = obj

                if obj.name not in self._zones:
                    # update dbus zone
                    return ("update", obj)
                else:
                    # builtin hidden, no update needed
                    return (None, None)

        # zone not known to firewalld, yet (timeout, ..)
        return (None, None)
Example #37
def omit_path(path):
    return any(path.startswith(hidden_path) for hidden_path in hidden_paths)
Example #38
def gen_python_mapping_file(mapping_path):
    # These headers are guaranteed to be included by Python.h. See
    # https://docs.python.org/3/c-api/intro.html#include-files.
    IMPLIED_HEADERS = (
        "<assert.h>",
        "<errno.h>",
        "<limits.h>",
        "<stdio.h>",
        "<stdlib.h>",
        "<string.h>",
    )

    include = sysconfig.get_path("include")
    platinclude = sysconfig.get_path("platinclude")

    with open(mapping_path + ".tmp",
              "w") as imp, tempfile.TemporaryDirectory() as tmpdir:
        imp.write("[\n")
        for header in IMPLIED_HEADERS:
            imp.write(
                f'  {{"include": ["{header}", "public", "<Python.h>", "public"]}},\n'
            )

        build_dir = os.path.join(tmpdir, "build")
        os.mkdir(build_dir)
        source = os.path.join(build_dir, "python.c")
        with open(source, "w") as f:
            f.write("#include <Python.h>")

        commands = [{
            "arguments": [
                "clang",
                "-I",
                include,
                "-I",
                platinclude,
                "-c",
                "python.c",
            ],
            "directory":
            build_dir,
            "file":
            "python.c",
        }]
        with open(os.path.join(build_dir, "compile_commands.json"), "w") as f:
            json.dump(commands, f)

        symbols_dir = os.path.join(tmpdir, "find_all_symbols")
        os.mkdir(symbols_dir)
        subprocess.check_call([
            "find-all-symbols",
            "-p=" + build_dir,
            "--output-dir=" + symbols_dir,
            source,
        ])

        find_all_symbols_db = os.path.join(tmpdir, "find_all_symbols_db.yaml")
        subprocess.check_call([
            "find-all-symbols",
            "-p=" + build_dir,
            "--merge-dir=" + symbols_dir,
            find_all_symbols_db,
        ])

        with open(find_all_symbols_db, "r") as f:
            for document in yaml.safe_load_all(f):
                name = document["Name"]
                path = document["FilePath"]
                if path.startswith(include + "/"):
                    header = path[len(include) + 1:]
                elif path.startswith(platinclude + "/"):
                    header = path[len(platinclude) + 1:]
                else:
                    continue
                if header == "pyconfig.h":
                    # Probably best not to use these.
                    continue
                imp.write(
                    f'  {{"symbol": ["{name}", "private", "<Python.h>", "public"]}},  # From {header}\n'
                )
        # "cpython/object.h" defines struct _typeobject { ... } PyTypeObject.
        # For some reason, include-what-you-mean wants struct _typeobject, but
        # find-all-symbols only reports PyTypeObject. Add it manually.
        imp.write(
            '  {"symbol": ["_typeobject", "private", "<Python.h>", "public"]},  # From cpython/object.h\n'
        )

        imp.write("]\n")

    os.rename(mapping_path + ".tmp", mapping_path)
Example #39
 def exclude_fnc(path):
     return path == self.exclude \
         or path.startswith(self.exclude + os.sep)
Example #40
def clean_path(path):
    if path.startswith("dist/"):
        path = path[len("dist/"):]
    return path
Example #41
def _mode(path: str) -> typing.Literal["ab", "wb"]:
    """Extract the writing mode (overwrite or append) from a path spec"""
    if path.startswith("+"):
        return "ab"
    else:
        return "wb"
Example #42
    def finish(defaultTargets=DEFAULT_TARGETS,
               subDirList=None,
               ignoreRegex=None):
        """Convenience function to replace standard SConstruct boilerplate
        (step 2).

        This function:

        - Sets up installation paths.
        - Tells SCons to only do MD5 checks when timestamps have changed.
        - Sets the "include", "lib", "python", and "tests" targets as the
          defaults to be built when scons is run with no target arguments.

        Parameters
        ----------
        subDirList : `list`
            An explicit list of subdirectories that should be installed.
            By default, all non-hidden subdirectories will be installed.
        defaultTargets : `list`
            A sequence of targets (see `lsst.sconsUtils.state.targets`)
            that should be built when scons is run with no arguments.
        ignoreRegex : `str`
            Regular expression that matches files that should not be installed.

        Returns
        -------
        env : `lsst.sconsUtils.env`
            A SCons Environment.
        """
        if ignoreRegex is None:
            ignoreRegex = r"(~$|\.pyc$|^\.svn$|\.o|\.os$)"
        if subDirList is None:
            subDirList = []
            for path in os.listdir("."):
                if os.path.isdir(path) and not path.startswith("."):
                    subDirList.append(path)
        install = state.env.InstallLSST(state.env["prefix"],
                                        [subDir for subDir in subDirList],
                                        ignoreRegex=ignoreRegex)
        for name, target in state.targets.items():
            state.env.Requires(install, target)
            state.env.Alias(name, target)
        state.env.Requires(state.targets["python"], state.targets["version"])
        declarer = state.env.Declare()
        state.env.Requires(
            declarer,
            install)  # Ensure declaration fires after installation available

        # shebang should be in the list if bin.src exists but the location
        # matters so we can not append it afterwards.
        state.env.Default([
            t for t in defaultTargets if os.path.exists(t) or (
                t == "shebang" and os.path.exists("bin.src"))
        ])
        if "version" in state.targets:
            state.env.Default(state.targets["version"])
        state.env.Requires(state.targets["tests"], state.targets["version"])
        state.env.Decider(
            "MD5-timestamp"
        )  # if timestamps haven't changed, don't do MD5 checks
        #
        # Check if any of the tests failed by looking for *.failed files.
        # Perform this test just before scons exits
        #
        # N.b. the test is written in sh not python as then we can use @ to
        # suppress output
        #
        if "tests" in [str(t) for t in BUILD_TARGETS]:
            testsDir = pipes.quote(os.path.join(os.getcwd(), "tests",
                                                ".tests"))
            checkTestStatus_command = state.env.Command(
                'checkTestStatus', [], """
                @ if [ -d {0} ]; then \
                      nfail=`find {0} -name "*.failed" | wc -l | sed -e 's/ //g'`; \
                      if [ $$nfail -gt 0 ]; then \
                          echo "Failed test output:" >&2; \
                          for f in `find {0} -name "*.failed"`; do \
                              case "$$f" in \
                              *.xml.failed) \
                                echo "Global pytest output is in $$f" >&2; \
                                ;; \
                              *.failed) \
                                cat $$f >&2; \
                                ;; \
                              esac; \
                          done; \
                          echo "The following tests failed:" >&2;\
                          find {0} -name "*.failed" >&2; \
                          echo "$$nfail tests failed" >&2; exit 1; \
                      fi; \
                  fi; \
            """.format(testsDir))

            state.env.Depends(checkTestStatus_command,
                              BUILD_TARGETS)  # this is why the check runs last
            BUILD_TARGETS.extend(checkTestStatus_command)
            state.env.AlwaysBuild(checkTestStatus_command)
Example #43
def _path(path: str) -> str:
    """Extract the path from a path spec (which may have an extra "+" at the front)"""
    if path.startswith("+"):
        path = path[1:]
    return os.path.expanduser(path)
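
# Standalone illustration of the "+path" spec convention shared by _mode
# (example #41) and _path above: a leading "+" selects append mode and is
# stripped before user expansion.
import os

for spec in ("+~/capture.bin", "capture.bin"):
    mode = "ab" if spec.startswith("+") else "wb"                             # what _mode returns
    target = os.path.expanduser(spec[1:] if spec.startswith("+") else spec)   # what _path returns
    print(spec, "->", mode, target)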
Example #44
 def translate_path(self, path):
     if path.startswith(self.node_dir):
         return self.main_dir + path
     else:
         return SimpleHTTPRequestHandler.translate_path(self, path)
Example #45
def _trim_abs_path(path):
    if path.startswith('/'):
        path = path[1:]
    if path.startswith('./'):
        path = path[2:]
    return path
Example #46
def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False,
                    sigs=False, sigsdiff=False, exclude_path=None):
    repo = git.Repo(repopath)
    assert repo.bare == False
    commit = repo.commit(revision1)
    diff = commit.diff(revision2)

    changes = []

    if sigs or sigsdiff:
        for d in diff.iter_change_type('M'):
            if d.a_blob.path == 'siglist.txt':
                changes.append(compare_siglists(d.a_blob, d.b_blob, taskdiff=sigsdiff))
        return changes

    for d in diff.iter_change_type('M'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename == 'latest':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif filename.startswith('latest.'):
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
                changes.append(chg)
        elif path.startswith('images/'):
            filename = os.path.basename(d.a_blob.path)
            if filename in img_monitor_files:
                if filename == 'files-in-image.txt':
                    alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
                    blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
                    filechanges = compare_file_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                elif filename == 'installed-package-names.txt':
                    alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
                    blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
                    filechanges = compare_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                else:
                    chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
                    changes.append(chg)
            elif filename == 'image-info.txt':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif '/image-files/' in path:
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
                changes.append(chg)

    # Look for added preinst/postinst/prerm/postrm
    # (without reporting newly added recipes)
    addedpkgs = []
    addedchanges = []
    for d in diff.iter_change_type('A'):
        path = os.path.dirname(d.b_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.b_blob.path)
            if filename == 'latest':
                addedpkgs.append(path)
            elif filename.startswith('latest.'):
                chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read().decode('utf-8'), True)
                addedchanges.append(chg)
    for chg in addedchanges:
        found = False
        for pkg in addedpkgs:
            if chg.path.startswith(pkg):
                found = True
                break
        if not found:
            changes.append(chg)

    # Look for cleared preinst/postinst/prerm/postrm
    for d in diff.iter_change_type('D'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename != 'latest' and filename.startswith('latest.'):
                chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read().decode('utf-8'), '', True)
                changes.append(chg)

    # Link related changes
    for chg in changes:
        if chg.monitored:
            for chg2 in changes:
                # (Check dirname in the case of fields from recipe info files)
                if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
                    if chg2.fieldname in related_fields.get(chg.fieldname, []):
                        chg.related.append(chg2)
                    elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
                        chg.related.append(chg2)

    # filter out unwanted paths
    if exclude_path:
        for chg in changes:
            if chg.filechanges:
                fchgs = []
                for fchg in chg.filechanges:
                    for epath in exclude_path:
                        if fchg.path.startswith(epath):
                           break
                    else:
                        fchgs.append(fchg)
                chg.filechanges = fchgs

    if report_all:
        return changes
    else:
        return [chg for chg in changes if chg.monitored]
Example #47
 def recognize(self, path):
     if self.styles:
         path_styles = path.style.split(';')
         for s in self.styles:
             if not s in path_styles: return None
     original_path = path
     result = ""
     if self.force_horizontal:
         original_angle = 0.0
     else:
         original_angle = None
     previous_position = None
     while len(path.points):
         found = False
         idx = path.commands[:path.commands.find('Z')]
         if idx in self.database:
             for value, compare_path, alternatives in self.database[idx]:
                 startswith = path.startswith(compare_path, tolerance=self.tolerance, min_scale=self.min_scale, max_scale=self.max_scale)
                 if startswith:
                     angle = startswith
                     if original_angle != None:
                         diff_angle = abs(angle - original_angle) 
                         if diff_angle > math.pi:
                             diff_angle = abs(2*math.pi - diff_angle)
                         if (diff_angle * 180 / math.pi) > self.angle_tolerance_deg:
                             # This character was recognized, but not at the right angle; skip it
                             continue
                     else:
                         # The first character of the path determines the angle of the word.
                         # Problem: handle the alternatives (e.g. a word starting with u OR n requires considering both
                         # possibilities, which can be told apart by the angle).
                         # Instead of analysing every alternative, we check that the position of the next point in the
                         # path is indeed ahead of the character currently being considered.
                         # FIXME: it would be better to analyse all possible alternatives and return the list of those that recognized the whole path
                         positions = projections_points(angle,  path.points[:len(compare_path.points)])
                         if len(path.points) > len(compare_path.points):
                             mean_cur_position = sum(positions)/len(positions)
                             next_point_position = projection_point(angle, path.points[len(compare_path.points)])
                             if next_point_position < mean_cur_position:
                                 #sys.stdout.write((u"rejected character: " + value + "\n").encode("utf-8"))
                                 # The next character would end up behind us, so we must not have picked the right angle,
                                 # i.e. the right character to recognize; keep going and look for another one:
                                 continue
                         original_angle = angle
                         #result = result + "angle(%.2f)" % (original_angle*180/math.pi)
                     if len(alternatives):
                         # There are alternatives for this character; use the l2/l1 ratio
                         # to break the tie.
                         # NOTE: in practice this is only done to distinguish a lowercase l from an uppercase I
                         cur_rapport_l2_sur_l1 = rapport_l2_sur_l1(path)
                         compare_raport_l1_l2 = rapport_l2_sur_l1(compare_path)
                         for alt_value, alt_path in alternatives:
                             alt_rapport_l2_sur_l1 = rapport_l2_sur_l1(alt_path)
                             if abs(cur_rapport_l2_sur_l1-alt_rapport_l2_sur_l1) < abs(cur_rapport_l2_sur_l1-compare_raport_l1_l2):
                                 value = alt_value
                                 compare_raport_l1_l2 = alt_rapport_l2_sur_l1
                     # Compute the positions of the points to determine whether there is a space
                     positions = projections_points(original_angle,  path.points[:len(compare_path.points)])
                     if previous_position != None:
                         distance = min(positions) - previous_position
                     else:
                         distance = 0
                     previous_position = max(positions)
                     #result = result + "loc[%.2f,%.2f] pos[%.2f .. %.2f]" % (path.points[0][0], path.points[0][1], min(positions), max(positions))
                     #result = result + (" distance(%.2f)" % distance)
                     if distance > self.space_width:
                         result = result + " "
                     result = result + value 
                     #result = result + ("(%.1f)" % (angle*180/math.pi))
                     # Now process the rest of the path:
                     path = Path(
                         path.commands[len(compare_path.commands):],
                         path.points[len(compare_path.points):])
                     found = True
                     break;
         if not found:
             break
     if result:
         if len(path.points):
             # Not everything was recognized
             result += "???"
         position = original_path.bbox().center()
         return result, position, original_angle
     else:
         return None
Example #48
 def _find_lock(self):
     for path in os.listdir(self.config_path("inprogress_path")):
         if (path.startswith(self.username + ":")
                 and path.endswith(".ttyrec")):
             return os.path.join(self.config_path("inprogress_path"), path)
     return None
Example #49
    def should_exclude(self, path):
        for exclude_path in self.exclude_paths:
            if path.startswith(exclude_path):
                return True

        return False
Example #50
def expand_path(path, basedir):
    path = os.path.expanduser(path)
    path = os.path.expandvars(path)
    if not path.startswith('/'):
        path = os.path.join(basedir, path)
    return path
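
# Hedged usage sketch, assuming the expand_path helper above is in scope.
print(expand_path('~/notes.txt', '/srv/app'))    # user expansion, already absolute
print(expand_path('conf/app.ini', '/srv/app'))   # /srv/app/conf/app.ini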
Example #51
 def chdir(self, path):
     if (path.startswith("/")):
         self.workdir = path
     else:
         self.workdir += "/" + path
Example #52
def _classify_files(install_paths, package_data, package_prefixes, py_modules,
                    new_py_modules, scripts, new_scripts, data_files,
                    cmake_source_dir, cmake_install_dir):
    assert not os.path.isabs(cmake_source_dir)
    assert cmake_source_dir != "."

    cmake_source_dir = to_unix_path(cmake_source_dir)

    install_root = os.path.join(os.getcwd(), CMAKE_INSTALL_DIR)
    for path in install_paths:
        found_package = False
        found_module = False
        found_script = False

        # if this installed file is not within the project root, complain and
        # exit
        if not to_platform_path(path).startswith(CMAKE_INSTALL_DIR):
            raise SKBuildError(
                ("\n  CMake-installed files must be within the project root.\n"
                 "    Project Root  : {}\n"
                 "    Violating File: {}\n").format(install_root,
                                                    to_platform_path(path)))

        # peel off the 'skbuild' prefix
        path = to_unix_path(os.path.relpath(path, CMAKE_INSTALL_DIR))

        # If the CMake project lives in a sub-directory (e.g src), its
        # include rules are relative to it. If the project is not already
        # installed in a directory, we need to prepend
        # the source directory so that the remaining of the logic
        # can successfully check if the path belongs to a package or
        # if it is a module.
        # TODO(jc) Instead of blindly checking if cmake_install_dir is set
        #          or not, a more elaborated check should be done.
        if (not cmake_install_dir and cmake_source_dir
                and not path.startswith(cmake_source_dir)):
            path = to_unix_path(os.path.join(cmake_source_dir, path))

        # check to see if path is part of a package
        for prefix, package in package_prefixes:
            if path.startswith(prefix + "/"):
                # peel off the package prefix
                path = to_unix_path(os.path.relpath(path, prefix))

                package_file_list = package_data.get(package, [])
                package_file_list.append(path)
                package_data[package] = package_file_list

                found_package = True
                break

        if found_package:
            continue
        # If control reaches this point, then this installed file is not part of
        # a package.

        # check if path is a module
        for module in py_modules:
            if path.replace("/", ".") == ".".join((module, "py")):
                new_py_modules[module] = True
                found_module = True
                break

        if found_module:
            continue
        # If control reaches this point, then this installed file is not a
        # module

        # if the file is a script, mark the corresponding script
        for script in scripts:
            if path == script:
                new_scripts[script] = True
                found_script = True
                break

        if found_script:
            continue
        # If control reaches this point, then this installed file is not a
        # script

        # If control reaches this point, then we have installed files that are
        # not part of a package, not a module, nor a script.  Without any other
        # information, we can only treat it as a generic data file.
        parent_dir = os.path.dirname(path)
        file_set = data_files.get(parent_dir)
        if file_set is None:
            file_set = set()
            data_files[parent_dir] = file_set
        file_set.add(os.path.join(CMAKE_INSTALL_DIR, path))
        del parent_dir, file_set
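A stand-alone illustration of the package-prefix step above, with a hypothetical package layout and a stand-in for to_unix_path: a file installed under a package directory is peeled back to a package-relative path and recorded as package_data.

import os

def _to_unix(p):  # stand-in for to_unix_path
    return p.replace(os.sep, "/")

package_prefixes = [("mypkg/sub", "mypkg.sub"), ("mypkg", "mypkg")]
package_data = {}
path = "mypkg/sub/resources/schema.json"
for prefix, package in package_prefixes:
    if path.startswith(prefix + "/"):
        relative = _to_unix(os.path.relpath(path, prefix))
        package_data.setdefault(package, []).append(relative)
        break
print(package_data)  # {'mypkg.sub': ['resources/schema.json']}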
Beispiel #53
0
def urlize(path):
    if path.startswith("/"):
        return "file://%s" % path
    return path
Beispiel #54
0
def is_prefix(pre_path, path):
    """Return True if pre_path is a path-prefix of path."""
    pre_path = pre_path.strip('.')
    path = path.strip('.')
    return not pre_path or path.startswith(pre_path + '.')
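Usage sketch: the surrounding dots are stripped first, so an empty prefix matches anything, and the trailing '.' in the comparison keeps 'pkg.sub' from matching 'pkg.subpackage'.

print(is_prefix("pkg.sub", "pkg.sub.module"))      # True
print(is_prefix("pkg.sub", "pkg.subpackage.mod"))  # False
print(is_prefix("", "pkg.sub"))                    # True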
Beispiel #55
0
def is_subdir(path, directory):
    path = os.path.abspath(os.path.realpath(path)) + os.sep
    directory = os.path.abspath(os.path.realpath(directory)) + os.sep

    return path.startswith(directory)
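Usage sketch with hypothetical POSIX paths: appending os.sep before the startswith check is what keeps '/srv/application' from counting as a subdirectory of '/srv/app'. A directory is treated as a subdirectory of itself.

print(is_subdir("/srv/app/logs", "/srv/app"))     # True
print(is_subdir("/srv/application", "/srv/app"))  # False
print(is_subdir("/srv/app", "/srv/app"))          # True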
Beispiel #56
0
    def _process_and_upload_environment(self, container_name, env, moved_files,
                                        tht_root):
        """Process the environment and upload to Swift

        The environment at this point should be the result of the merged
        custom user environments. We need to look at the paths in the
        environment and update any that changed when they were uploaded to
        swift.
        """

        file_prefix = "file://"

        if env.get('resource_registry'):
            for name, path in env['resource_registry'].items():
                if not isinstance(path, six.string_types):
                    continue
                if path in moved_files:
                    new_path = moved_files[path]
                    env['resource_registry'][name] = new_path
                elif path.startswith(file_prefix):
                    path = path[len(file_prefix):]
                    if path.startswith(tht_root):
                        path = path[len(tht_root):]
                    # We want to make sure all the paths are relative.
                    if path.startswith("/"):
                        path = path[1:]
                    env['resource_registry'][name] = path

        # Parameters are removed from the environment
        params = env.pop('parameter_defaults', None)

        contents = yaml.safe_dump(env, default_flow_style=False)

        # Until we have a well-defined plan update workflow in tripleo-common
        # we need to manually add an environment in swift for the user's
        # custom environments passed to the deploy command.
        # See bug: https://bugs.launchpad.net/tripleo/+bug/1623431
        # Update plan env.
        swift_path = "user-environment.yaml"
        self.object_client.put_object(container_name, swift_path, contents)

        env = yaml.safe_load(
            self.object_client.get_object(container_name,
                                          constants.PLAN_ENVIRONMENT)[1])

        user_env = {'path': swift_path}
        if user_env not in env['environments']:
            env['environments'].append(user_env)
            yaml_string = yaml.safe_dump(env, default_flow_style=False)
            self.object_client.put_object(container_name,
                                          constants.PLAN_ENVIRONMENT,
                                          yaml_string)

        # Parameters are sent to the update parameters action; this stores
        # them in the plan environment so that the UI can find them.
        if params:
            with utils.TempDirs() as tmp:
                utils.run_ansible_playbook(
                    playbook='cli-update-params.yaml',
                    inventory='localhost,',
                    workdir=tmp,
                    playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
                    verbosity=utils.playbook_verbosity(self=self),
                    extra_vars={"container": container_name},
                    extra_vars_file={"parameters": params})
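A stand-alone sketch of the resource_registry rewriting above, with hypothetical values: file:// URIs under the templates root are reduced to plan-relative paths.

file_prefix = "file://"
tht_root = "/usr/share/openstack-tripleo-heat-templates"
path = "file:///usr/share/openstack-tripleo-heat-templates/network/config.yaml"
if path.startswith(file_prefix):
    path = path[len(file_prefix):]
    if path.startswith(tht_root):
        path = path[len(tht_root):]
    if path.startswith("/"):
        path = path[1:]
print(path)  # network/config.yaml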
Beispiel #57
0
def _prefer_reldirs(base_dir, dirs):
    return [
        os.path.relpath(path) if path.startswith(base_dir) else path
        for path in dirs
    ]
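Usage sketch, assuming the current working directory is /home/user/project: paths under base_dir are rewritten relative to the working directory, everything else is returned unchanged.

dirs = ["/home/user/project/src", "/opt/other/include"]
print(_prefer_reldirs("/home/user/project", dirs))
# ['src', '/opt/other/include']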
Beispiel #58
0
 def normalizePath(self, path):
     if path.startswith('/'):
         path = path[1:]
     if path.endswith('/'):
         path = path[:-1]
     return path
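Usage sketch, assuming handler is an instance of the class defining normalizePath: exactly one leading and one trailing slash are stripped; interior slashes are untouched.

print(handler.normalizePath("/a/b/c/"))  # a/b/c
print(handler.normalizePath("a/b/c"))    # a/b/c
print(handler.normalizePath("//a/"))     # /a  (only one leading slash is removed)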
Beispiel #59
0
 def _validatePath(self, root, path):
     return path.startswith(root)
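A bare startswith check accepts sibling paths as well: with root '/data', the path '/database/secret' passes. A hypothetical tightened variant (a sketch, not the original code) compares against the root with a trailing separator, assuming POSIX paths:

import os

def validate_path(root, path):
    # Accept the root itself or paths strictly below it, instead of any
    # string that merely shares the prefix.
    root = os.path.normpath(root)
    path = os.path.normpath(path)
    return path == root or path.startswith(root + os.sep)

print(validate_path("/data", "/data/reports"))  # True
print(validate_path("/data", "/database"))      # False (plain startswith would say True)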
Beispiel #60
0
    def run(self):
        """Include a file as part of the content of this reST file."""

        # For code or literal include blocks we run the normal include
        if 'literal' in self.options or 'code' in self.options:
            return super(FormatedInclude, self).run()

        if not self.state.document.settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        source = self.state_machine.input_lines.source(
            self.lineno - self.state_machine.input_offset - 1)

        source_dir = os.path.dirname(os.path.abspath(source))

        rel_filename, filename = self.env.relfn2path(self.arguments[0])
        self.arguments[0] = filename
        self.env.note_included(filename)
        path = directives.path(self.arguments[0])

        if path.startswith('<') and path.endswith('>'):
            path = os.path.join(self.standard_include_path, path[1:-1])
        path = os.path.normpath(os.path.join(source_dir, path))

        path = utils.relative_path(None, path)
        path = nodes.reprunicode(path)

        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        e_handler = self.state.document.settings.input_encoding_error_handler
        tab_width = self.options.get('tab-width',
                                     self.state.document.settings.tab_width)
        try:
            self.state.document.settings.record_dependencies.add(path)
            include_file = io.FileInput(source_path=path,
                                        encoding=encoding,
                                        error_handler=e_handler)
        except UnicodeEncodeError:
            raise self.severe(u'Problems with "%s" directive path:\n'
                              'Cannot encode input file path "%s" '
                              '(wrong locale?).' %
                              (self.name, SafeString(path)))
        except IOError as error:
            raise self.severe(u'Problems with "%s" directive path:\n%s.' %
                              (self.name, ErrorString(error)))
        startline = self.options.get('start-line', None)
        endline = self.options.get('end-line', None)
        try:
            if startline or (endline is not None):
                lines = include_file.readlines()
                rawtext = ''.join(lines[startline:endline])
            else:
                rawtext = include_file.read()
        except UnicodeError as error:
            raise self.severe(u'Problem with "%s" directive:\n%s' %
                              (self.name, ErrorString(error)))

        # Format input
        sub = StringSubstituter()
        config = self.state.document.settings.env.config
        sub.init_sub_strings(config)
        rawtext = sub.substitute(rawtext)

        # start-after/end-before: no restrictions on newlines in match-text,
        # and no restrictions on matching inside lines vs. line boundaries
        after_text = self.options.get('start-after', None)
        if after_text:
            # skip content in rawtext before *and incl.* a matching text
            after_index = rawtext.find(after_text)
            if after_index < 0:
                raise self.severe('Problem with "start-after" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[after_index + len(after_text):]
        before_text = self.options.get('end-before', None)
        if before_text:
            # skip content in rawtext after *and incl.* a matching text
            before_index = rawtext.find(before_text)
            if before_index < 0:
                raise self.severe('Problem with "end-before" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[:before_index]

        include_lines = statemachine.string2lines(rawtext,
                                                  tab_width,
                                                  convert_whitespace=True)

        self.state_machine.insert_input(include_lines, path)
        return []
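A minimal stand-alone sketch of the start-after / end-before slicing used above, with hypothetical marker strings; the matched marker text itself is excluded from the result.

rawtext = "intro\n.. BEGIN\nkept line\n.. END\noutro\n"
after_text, before_text = ".. BEGIN", ".. END"
rawtext = rawtext[rawtext.find(after_text) + len(after_text):]
rawtext = rawtext[:rawtext.find(before_text)]
print(repr(rawtext))  # '\nkept line\n'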