Code example #1
File: asset2json.py  Project: Chiur/turbulenz_tools
    def attach_surface(self, primitives, primitive_type, shape=DEFAULT_SHAPE_NAME, name=None):
        """Attach a surface to the JSON representation. Primitive type should be:
                SurfaceLines = 0
                SurfaceTriangles = 1
                SurfaceQuads = 2
        The primitives will be added to the specified `shape`.

        If a `name` is also specified then the primitives will be put into a named surfaces dictionary."""
        # Collapse the primitives down into a flat index list
        num_primitives = len(primitives)
        indices = []
        for p in primitives:
            indices.extend(p)
        if not indices:
            LOG.error('No indices for %s on %s', name, shape)
            return  # nothing to attach; indices[0] below would raise IndexError
        # If each primitive was itself nested (tuples of tuples), flatten once more
        if isinstance(indices[0], (tuple, list)):
            indices = list(itertools_chain(*indices))

        self.__set_shape(shape, 'numPrimitives', num_primitives, name)
        if primitive_type == JsonAsset.SurfaceLines:
            self.__set_shape(shape, 'lines', indices, name)
        elif primitive_type == JsonAsset.SurfaceTriangles:
            self.__set_shape(shape, 'triangles', indices, name)
        elif primitive_type == JsonAsset.SurfaceQuads:
            self.__set_shape(shape, 'quads', indices, name)
        else:
            LOG.error('Unsupported primitive type:%i', primitive_type)
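For reference, a minimal sketch of the index flattening this method performs, assuming the usual alias `from itertools import chain as itertools_chain` (used throughout these examples):

from itertools import chain as itertools_chain

# each primitive is a tuple of vertex indices
triangles = [(0, 1, 2), (2, 3, 0)]
indices = []
for p in triangles:
    indices.extend(p)
print(indices)                            # [0, 1, 2, 2, 3, 0]

# doubly nested primitives are flattened one more level via itertools_chain
nested = [[(0, 1, 2)], [(2, 3, 0)]]
indices = []
for p in nested:
    indices.extend(p)                     # -> [(0, 1, 2), (2, 3, 0)]
print(list(itertools_chain(*indices)))    # [0, 1, 2, 2, 3, 0]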
Code example #2
def formatDict(d: Mapping, indent=4, level=0, limit=None):
    """ Return string representation of mapping in a following format:
        {
            <name1>: str(<value1>)
            <name2>: <self>  # ◄ if <value2> is a self reference
            <name3>: {
                <nestedName1>: str(<nestedValue1>)
                <nestedName2>: str(<nestedValue2>)
                ...
            }
            ...
        }
    """
    def addIndent(s: str, lvl=1):
        # 'indent' is rebound to a whitespace string below, before first use
        return indent * lvl + s

    def iteritems(dct, trimmed=False):
        for name, value in dct.items():
            if value is dct: value = '<self>'  # avoid infinite recursion
            elif isinstance(value, dict):
                value = formatDict(value, level=level + 1)
            yield f"{addIndent('', level + 1)}{name}: {value}"
        if trimmed: yield addIndent('...', level + 1)

    if not d: return '{}'
    indent = ' ' * indent
    shortd = trimDict(d, limit) if limit else d
    return linesep.join(
        itertools_chain(('{',),
                        iteritems(shortd, trimmed=len(d) != len(shortd)),
                        (addIndent('}', level),)))
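A quick usage sketch. Assumed imports for the snippet above: `from os import linesep`, `from itertools import chain as itertools_chain`, `from typing import Mapping`; `trimDict` is a helper from the same project, only reached when `limit` is given:

d = {'a': 1, 'nested': {'b': 2, 'c': 3}}
d['me'] = d                  # direct self reference
print(formatDict(d))
# {
#     a: 1
#     nested: {
#         b: 2
#         c: 3
#     }
#     me: <self>
# }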
Code example #3
def _split_indexes(key_path):
    # No bracketed indexes: the whole path is a single key
    if "[" not in key_path:
        return [key_path]

    # key_path = "foo[0][0]" --> key = "foo", indexes = (0, 0)
    # key_path = "[0][0]"    --> key = "",    indexes = (0, 0)
    parts = key_path.split("[")
    key = parts[0]
    # each remaining part looks like "0]"; drop the "]" and convert to int
    indexes = (int(k[:-1]) for k in parts[1:])
    # NOTE: one branch returns a list, the other a chain iterator,
    # so callers should only iterate over the result
    return itertools_chain([key], indexes)
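Usage sketch (the result is meant to be iterated, not indexed):

print(list(_split_indexes("foo[0][2]")))   # ['foo', 0, 2]
print(list(_split_indexes("bar")))         # ['bar']
print(list(_split_indexes("[1][0]")))      # ['', 1, 0]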
Code example #4
File: __init__.py  Project: Paebbels/PoC
		def __delitem__(self, key):
			try:
				# 'get' + None raises TypeError for an empty or None key,
				# which is translated into the expected KeyError
				k = 'get' + (key or None)
			except TypeError:
				raise KeyError(key)
			del self._data[key]
			# remove the dynamically attached accessor from the parser
			# itself and from every section proxy it holds
			for inst in itertools_chain((self._parser,), self._parser.values()):
				try:
					delattr(inst, k)
				except AttributeError:
					# don't raise since the entry was present in _data, silently
					# clean up
					continue
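The chain walks the parser object and all of its section proxies in a single loop, so the `get<name>` accessor disappears everywhere at once. A minimal standalone sketch of that pattern (the dummy `Parser`/`Proxy` classes and the `getdecimal` name are purely illustrative):

from itertools import chain as itertools_chain

class Proxy:
    pass

class Parser(dict):
    """Maps section names to proxies, configparser-style."""

parser = Parser(section_a=Proxy(), section_b=Proxy())
for inst in itertools_chain((parser,), parser.values()):
    inst.getdecimal = lambda s: s   # attach an accessor everywhere
for inst in itertools_chain((parser,), parser.values()):
    delattr(inst, 'getdecimal')     # ... and remove it everywhere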
Code example #5
File: digits.py  Project: sr-murthy/inttools
def chain(*seqs):
    """
    Generates the sequence obtained by chaining (or concatenating) a
    sequence of sequences, e.g.
    ::
        (1, 2), (3, 4, 5), (6, 7, 8, 9) -> 1, 2, 3, 4, 5, 6, 7, 8, 9

    The arguments can be separate sequences (generators, lists, tuples) or
    an unpacked iterable of such sequences (use * to unpack an iterable
    argument), e.g.
    ::
        digits(12), digits(345), digits(6789)  -> 1, 2, 3, 4, 5, 6, 7, 8, 9
        digits(12), [3, 4, 5], (6, 7, 8, 9)    -> 1, 2, 3, 4, 5, 6, 7, 8, 9
        *[digits(12), [3, 4, 5], (6, 7, 8, 9)] -> 1, 2, 3, 4, 5, 6, 7, 8, 9
    """
    # itertools_chain already flattens one level across the given sequences
    yield from itertools_chain(*seqs)
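Usage, mirroring the docstring examples but with plain tuples and lists in place of the project's `digits` helper (assuming `from itertools import chain as itertools_chain`):

print(list(chain((1, 2), (3, 4, 5), (6, 7, 8, 9))))
# [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(list(chain(*[(1, 2), [3, 4, 5], (6, 7, 8, 9)])))
# [1, 2, 3, 4, 5, 6, 7, 8, 9]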
Code example #6
File: rpCache.py  Project: brsynth/rpCache
 def gen_cache(self, attr_names, args):
     try:
         print("Generating " + " ".join(attr_names) + "...",
               end='',
               flush=True)
         # Choose the generator method according to the first attribute name
         method = getattr(self, '_m_' + attr_names[0])
         # Apply the method, expanding 'args' as positional arguments,
         # and wrap the result in a list
         results = [method(*args)]
         if isinstance(results[0], tuple):
             # a tuple result is unpacked into a flat list of values
             results = list(itertools_chain(results[0]))
         print_OK()
         return results
     except:
         print_FAILED()
         raise
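The core pattern is dispatch by attribute name via `getattr`. A minimal standalone sketch of the same shape (the `Cache` class and `_m_pairs` method are hypothetical, not from rpCache):

from itertools import chain as itertools_chain

class Cache:
    def _m_pairs(self, n):
        return (list(range(n)), list(range(n)))   # a tuple result

    def generate(self, attr_name, args):
        method = getattr(self, '_m_' + attr_name)  # name-based dispatch
        results = [method(*args)]
        if isinstance(results[0], tuple):
            results = list(itertools_chain(results[0]))  # == list(results[0])
        return results

print(Cache().generate('pairs', [3]))   # [[0, 1, 2], [0, 1, 2]]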
Code example #7
def collect(infolder,
            line  = comment_LINE,
            block = comment_BLOCK,
            tags  = WORDS,
            marks = MARKS,
            include=INCLUDE,
            exclude=EXCLUDE,
            overwrite=False):
    # Process block comment marks
    blocks_open, blocks_close = comment_block_comments(block)

    # TODO: Make hidden files OS independent, probably using
    #       https://docs.python.org/3.4/library/tempfile.html ?

    # FIXME: for some reason, if a comment-type ever existed in the TODO
    #        file but all of its posts are later removed, the keyword
    #        still remains there; the current TODO file, for example,
    #        still has the "QUESTIONS" keyword with no posts under it

    # TODO: Add explicit-remove/browsing capabilities of the .*_cache files
    #       (for example: if git reverted changes --> remove hash from cache file)
    #       The best solution would be a complete CLI tool, to read and manage
    #       and use the cutils command line tools

    # Compile regular expression patterns
    pattern1 = re_compile(_COMMENT.format(r'|'.join(map(comment_escape, line)),
                                          blocks_open,
                                          r'|'.join(map(comment_escape, tags)),
                                          r'|'.join(map(comment_escape, marks)),
                                          blocks_close),
                          flags=re_IGNORECASE | re_DOTALL | re_MULTILINE | re_VERBOSE)
    pattern2 = re_compile(r'\n')

    # Get previously generated collection of all posts
    COLLECTED = os_path_join(infolder, '.ccom_todo')
    try:
        with open(COLLECTED, 'rb') as file:
            collected = pickle_load(file)
    except (FileNotFoundError, EOFError):
        collected = table_Table(row=OrderedDict)

    # Clear cache -- remove all non-existing files
    for filepath in collected.rows():
        if not os_path_isfile(filepath):
            del collected[filepath]

    # Exception containers
    except_dirs  = []  # relative path to dir from root
    except_files = []  # relative path to file from root
    except_names = []  # filename (with extension) anywhere
    except_exts  = []  # extension anywhere

    # If 'exclude' is dictionary like object
    try:
        _empty = ()
        # Exceptions relative to root
        for key, container in zip(('folders', 'files'),
                                  (except_dirs, except_files)):
            container.extend(os_path_join(infolder, p) for p in exclude.get(key, _empty))
        # Exceptions anywhere
        for key, container in zip(('names', 'extensions'),
                                  (except_names, except_exts)):
            container.extend(exclude.get(key, _empty))
    # If 'exclude' is an iterable object
    except AttributeError:
        except_names = exclude

    # Include containers
    permit_names = []  # filename (with extension) anywhere
    permit_exts  = []  # extension anywhere

    # If 'include' is dictionary like object
    try:
        _empty = ()
        # Includes anywhere
        for key, container in zip(('names', 'extensions'),
                                  (permit_names, permit_exts)):
            container.extend(include.get(key, _empty))
    # If 'include' is an iterable object
    except AttributeError:
        permit_names = include

    # Scan through all files and folders
    with check_Checker(infolder, file='.ccom_cache') as checker:
        for root, dirs, filenames in os_walk(infolder):
            # If skip this folder and all subfolders
            if root in except_dirs:
                dirs.clear()
                continue
            # Check all files in folder
            for filename in filenames:
                # strip the leading './' (assumes 'infolder' is a relative path)
                filepath = os_path_join(root, filename)[2:]
                # If skip this exact file
                if filepath in except_files:
                    continue
                name, extension = os_path_splitext(filename)
                # If the file or extension is not banned, the file is on the
                # white-list, it changed since it was last checked, and this
                # is not an overwrite call
                if (filename not in except_names and
                    extension not in except_exts and
                    (extension in permit_exts or filename in permit_names) and
                    checker.ischanged(filepath) and
                    not overwrite):
                    with open(filepath, encoding='utf-8') as file:
                        _search(collected, pattern1, pattern2,
                                file.read(), filepath, marks)

    # Save collection of all posts
    with open(COLLECTED, 'wb') as file:
        pickle_dump(collected, file, pickle_HIGHEST_PROTOCOL)

    # Open the todo file and write out the results
    with open('TODO', 'w', encoding='utf-8') as todo:
        # Make it compatible with cver.py
        todo.write('## INFO ##\n'*2)
        # Format TODO file as yaml
        for key in itertools_chain(tags, marks.values()):
            KEY = key.upper()
            try:
                types = collected[KEY].items()
                len_pos = todo.tell()
                # Offset for separator comment and
                # leading and trailing new lines
                todo.write(' '*82)
                todo.write('{}:\n'.format(KEY))
                index = 1
                for filename, posts in types:
                    for i, (linenumber, content) in enumerate(posts, start=index):
                        todo.write(_ITEM.format(msg='\n'.join(content),
                                                index=i,
                                                short=_SHORT,
                                                long=_SHORT*2,
                                                sep='- '*38,
                                                file=filename,
                                                line=linenumber))
                        # updating inside the loop keeps 'i' defined even
                        # when a file has an empty 'posts' list
                        index = i + 1
                todo.write('\n')
                # Move back to tag separator comment
                todo.seek(len_pos)
                todo.write('\n#{:-^78}#\n'.format(
                    ' {} POSTS IN {} FILES '.format(index - 1, len(types))))
                # Move back to the end
                todo.seek(0, 2)
            except KeyError:
                continue
        print('CCOM: placed {!r}'.format(os_path_join(infolder, 'TODO')))
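The TODO writer above reserves 82 characters with `todo.write(' '*82)`, writes the section body while counting posts, then seeks back and overwrites the placeholder with a separator comment carrying the final counts. A minimal sketch of that back-patching trick (hypothetical file name; like the original, it assumes '\n' is written as a single character):

with open('demo.txt', 'w', encoding='utf-8') as f:
    pos = f.tell()
    f.write(' ' * 82)        # reserve: 80-char comment line + 2 newlines
    f.write('TODO:\n')
    posts = 3                # ... write the posts, counting them ...
    f.seek(pos)              # back-patch the reserved slot
    f.write('\n#{:-^78}#\n'.format(' {} POSTS '.format(posts)))
    f.seek(0, 2)             # return to the end before writing more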
Code example #8
def _split_keys(key_path):
    # No dots: the whole path is a single segment
    if "." not in key_path:
        return [key_path]

    # Split on dots, expand each segment's bracketed indexes,
    # and flatten everything into one iterator
    return itertools_chain(*(_split_indexes(k) for k in key_path.split(".")))
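Usage sketch, combining this with `_split_indexes` from example #3:

print(list(_split_keys("foo.bar[0].baz")))   # ['foo', 'bar', 0, 'baz']
print(list(_split_keys("plain")))            # ['plain']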