Example #1
0
 def writeparsetree(self, tree):
     """Render *tree* through parser.show() and attach the dump to the XML
     root as a comment, escaping '--' so the comment stays well-formed."""
     buf = StringIO.StringIO()
     parser.show(buf, tree)
     dump = buf.getvalue().replace("--", " - - ")
     self.root.append(ET.Comment(dump))
                to_subelement(d_el, drum, 'voice')
                to_subelement(d_el, drum, 'drum', 'name')
                to_subelement(d_el, drum, 'stem')
                to_subelement(d_el, drum, 'shortcut')

        if instrument['id'] in channels:
            for channel in channels[instrument['id']].values():
                ch_el = ET.SubElement(el, 'Channel')
                to_attribute(ch_el, channel, 'channel', 'name')
                bank = channel['Bank']
                prog = channel['Prog']
                msb = channel['MSB']
                lsb = channel['LSB']
                sound = channel["MS General sound"]
                assert int(bank) == (int(msb) * 128) + int(lsb)
                ch_el.append(ET.Comment("MIDI: Bank {0}, Prog {1}; MS General: {2}".format(bank, prog, sound)))
                if msb != "0":
                    con_el = ET.SubElement(ch_el, 'controller')
                    con_el.set('ctrl', "0")
                    con_el.set('value', msb)
                    ch_el.append(ET.Comment("Bank MSB"))
                if lsb != "0":
                    con_el = ET.SubElement(ch_el, 'controller')
                    con_el.set('ctrl', "32")
                    con_el.set('value', lsb)
                    ch_el.append(ET.Comment("Bank LSB"))
                ET.SubElement(ch_el, 'program').set('value', channel['Prog'])
                if is_drumset:
                    try: to_comment(ch_el, gs_drumkits[channel['Prog']], 'name')
                    except KeyError: ch_el.append(ET.Comment("Non-GS drum kit"))
                else:
Example #3
0
def main(src, output_filename=None, id=None, diskId=None):
    """Generate a WiX <Fragment> include file (.wxi) for the directory *src*.

    Runs the WiX "heat" harvesting tool over *src*, then post-processes the
    generated XML: prepends a generation comment and a <?define?> processing
    instruction, strips version-control and OS-junk files (.svn, Thumbs.db,
    desktop.ini, .DS_Store, *.pyc), and optionally stamps a DiskId attribute
    on every Component.

    src             -- directory to harvest (trailing slashes are tolerated)
    output_filename -- target .wxi path; defaults to "<id>.wxi" next to this script
    id              -- WiX identifier; defaults to the directory name with
                       '-' and ' ' replaced by '_'
    diskId          -- if given, written as DiskId on every Component element
    """
    add_wix_to_path()

    # Drop trailing path separators so basename() yields the directory name.
    while src[-1] in ('/', '\\'):
        src = src[:-1]
    name = os.path.basename(src)
    id = id or name.replace('-', '_').replace(' ', '_')
    output_filename = output_filename or _adjacent_file(id + ".wxi")

    import subprocess

    def check_call(args):
        # Echo the command line before running it (Python 2 print statement).
        print " ".join(args)
        subprocess.check_call(args)

    #subprocess.check_call('set path'.split(), shell=True)
    #subprocess.check_call('where heat'.split(), shell=True)

    # Harvest the directory into a WiX fragment; -var makes file paths
    # reference the preprocessor variable defined further below.
    check_call([
        'heat', 'dir',
        _adjacent_file(src), '-template', 'fragment', '-sreg', '-scom', '-o',
        output_filename, '-ag', '-cg', id, '-srd', '-var', 'var.' + id, '-dr',
        id, '-nologo'
    ])

    # Keep the WiX namespace as the default namespace on re-serialization.
    ElementTree.register_namespace("",
                                   "http://schemas.microsoft.com/wix/2006/wi")
    tree = ElementTree.parse(output_filename,
                             parser=CommentedTreeBuilder()).getroot()
    tree.insert(
        0, ElementTree.Comment('generated with gen_dir_wxi.py %s\n' % src))
    tree.insert(
        0,
        ElementTree.ProcessingInstruction(
            'define', '%s=%s' % (id, os.path.normpath(src))))
    # Child -> parent lookup, built once up front; ElementTree elements do not
    # know their parent. NOTE(review): getiterator() is the legacy (pre-3.9)
    # spelling of iter() — this snippet is Python 2 era.
    parent_map = dict((c, p) for p in tree.getiterator() for c in p)
    # Remove Components whose File is VCS/OS junk or a compiled .pyc.
    # NOTE(review): if one Component held two matching Files, the second
    # remove() would raise ValueError — presumably heat emits one File per
    # Component here; verify before reusing elsewhere.
    for file in tree.findall(
            ".//{http://schemas.microsoft.com/wix/2006/wi}Component/{http://schemas.microsoft.com/wix/2006/wi}File"
    ):
        file_Source = file.get('Source', '')
        if file_Source.find('.svn') != -1 or os.path.basename(file_Source) in (
                'Thumbs.db', 'desktop.ini',
                '.DS_Store') or file_Source.endswith('.pyc'):
            comp = parent_map[file]
            parent_map[comp].remove(comp)
    # Remove .svn Directory elements together with any DirectoryRef fragments
    # that point at them.
    for dir in tree.findall(
            ".//{http://schemas.microsoft.com/wix/2006/wi}Directory"):
        if dir.get('Name', '') == '.svn':
            for dirref in tree.findall(
                    ".//{http://schemas.microsoft.com/wix/2006/wi}DirectoryRef"
            ):
                if dirref.get('Id', '') == dir.get('Id', ''):
                    frag = parent_map[dirref]
                    parent_map[frag].remove(frag)
            parent_map[dir].remove(dir)
    if diskId:
        for component in tree.findall(
                ".//{http://schemas.microsoft.com/wix/2006/wi}Component"):
            component.attrib['DiskId'] = diskId

    ElementTree.ElementTree(tree).write(output_filename,
                                        xml_declaration=True,
                                        encoding='utf-8')
Example #4
0
			'stars',
			'sun',
			'thermometer-3',
			'wind',
			'umberla',
			'sun-nwave',
			'tree-3' }

# Tally of symbols merged into the combined SVG (presumably incremented by
# the processing loop below — rest of the script not fully shown here).
symbol_count = 0
# Directory holding the source SVG files to combine.
mypath = '/etc/openhab2/html/matrix-theme/original-svgs'

# Plain files only; subdirectories are ignored.
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]

# Register SVG as the default namespace so serialized output carries no prefix.
ET.register_namespace("","http://www.w3.org/2000/svg")
# Root <svg> element that the per-file symbols are merged into.
top = ET.Element('svg', attrib = { 'version':'1.1', 'xmlns:xlink':'http://www.w3.org/1999/xlink', 'x':"0px", 'y':"0px", 'viewBox':"0 0 48 48", 'enable-background':"new 0 0 48 48", 'xml:space':"preserve"})
comment = ET.Comment('Generated by SVG-Combiner')
top.append(comment)

for file in onlyfiles:
	if(file[:1] != '.'):
		print "Processing file: " + file

		r = requests.get('http://127.0.0.1:8080/static/matrix-theme/original-svgs/' + file)
		print r.status_code
		if(len(r.text)>0):
			xml = ET.fromstring(r.text)
			for child in xml:
				if(len(child.getchildren())>0 and 'id' in child.attrib):
					if(child.attrib['id'] in symbols):
						for node in child.findall('.//*[@fill]'):
							if ('stroke' in node.attrib): node.attrib.pop('stroke')
Example #5
0
 def comment(self,s):
     """Append *s* to the document as an XML comment and echo it to the
     logger callback, if one is configured."""
     self.doc.append(ET.Comment(s))
     if self.logger:
         self.logger(s)
Example #6
0
 def add_comment(element, comment):
     """Attach *comment* as an XML comment node at the end of *element*."""
     node = ElementTree.Comment(comment)
     element.append(node)
Example #7
0
def main():
    """Command-line entry point: parse options, generate a disc of randomly
    sized concentric arcs, and emit it as SVG on stdout or rasterize it to a
    PNG file (via the optional cairosvg module)."""

    # Command-line interface definition.
    ap = argparse.ArgumentParser(
        description=
        ('Concentrically arranges randomly sized arcs into a pretty disc shape. Output is '
         'generated as a set of vector shapes in Scalable Vector Graphics (SVG) format and printed '
         'on the standard output stream.'),
        epilog=
        'Report bugs, request features, or provide suggestions via https://github.com/the-real-tokai/macuahuitl/issues',
        add_help=False,
    )

    g = ap.add_argument_group('Startup')
    g.add_argument(
        '-V',
        '--version',
        action='version',
        help="show version number and exit",
        version='%(prog)s {}'.format(__version__),
    )
    g.add_argument('-h',
                   '--help',
                   action='help',
                   help='show this help message and exit')

    g = ap.add_argument_group('Algorithm')
    g.add_argument(
        '--circles',
        metavar='INT',
        type=int,
        help=
        'number of concentric arc elements to generate inside the disc  [:21]',
        default=21)
    g.add_argument('--stroke-width',
                   metavar='FLOAT',
                   type=float,
                   help='width of the generated strokes  [:6]',
                   default=6.0)
    g.add_argument('--gap',
                   metavar='FLOAT',
                   type=float,
                   help='distance between the generated strokes')
    g.add_argument('--inner-radius',
                   metavar='FLOAT',
                   type=float,
                   help='setup inner disc radius to create an annular shape')
    g.add_argument('--hoffset',
                   metavar='FLOAT',
                   type=float,
                   help='shift the whole disc horizontally  [:0.0]',
                   default=0.0)
    g.add_argument('--voffset',
                   metavar='FLOAT',
                   type=float,
                   help='shift the whole disc vertically  [:0.0]',
                   default=0.0)
    g.add_argument(
        '--color',
        metavar='COLOR',
        type=str,
        help='SVG compliant color specification or identifier  [:black]',
        default='black')
    g.add_argument(
        '--random-seed',
        metavar='INT',
        type=int,
        help=
        'fixed initialization of the random number generator for predictable results'
    )
    g.add_argument(
        '--randomize',
        action='store_true',
        help=
        'generate truly random disc layouts; other algorithm values provided via command line parameters are utilized as limits'
    )

    g = ap.add_argument_group('Miscellaneous')
    g.add_argument(
        '--separate-paths',
        action='store_true',
        help=
        'generate separate <path> elements for each arc; automatically implied when animation support is enabled'
    )
    g.add_argument('--outline-mode',
                   help='generate bounding outline circles  [:both]',
                   choices=['both', 'outside', 'inside', 'none'],
                   default='both')
    g.add_argument(
        '--background-color',
        metavar='COLOR',
        type=str,
        help=
        'SVG compliant color specification or identifier; adds a background <rect> to the SVG output'
    )
    g.add_argument(
        '--disc-color',
        metavar='COLOR',
        type=str,
        help=
        'SVG compliant color specification or identifier; fills the background of the generated disc by adding an extra <circle> element'
    )
    g.add_argument(
        '--animation-mode',
        help='enables SVG <animateTransform> support',
        choices=['random', 'bidirectional', 'cascade-in', 'cascade-out'])
    g.add_argument(
        '--animation-duration',
        metavar='FLOAT',
        type=float,
        help=
        'defines base duration of one full 360° arc rotation (in seconds); negative inputs switch to counter-clockwise base direction  [:6.0]',
        default=6.0)
    g.add_argument(
        '--animation-offset',
        metavar='FLOAT',
        type=float,
        help=
        'offset the animation (in seconds) to support rendering to frame sequences for frame based animation formats.  [:0]',
        default=0.0)

    g = ap.add_argument_group('Output')
    g.add_argument(
        '-o',
        '--output',
        metavar='FILENAME',
        type=str,
        help=
        'optionally rasterize the generated vector paths and write the result into a PNG file (requires the `svgcairo\' Python module)'
    )
    g.add_argument(
        '--output-size',
        metavar='INT',
        type=int,
        help=
        'force pixel width and height of the raster image; if omitted the generated SVG viewbox dimensions are used'
    )

    user_input = ap.parse_args()

    #  Initialize…
    #
    # Seeded RNG: a fixed --random-seed makes output reproducible.
    chaos = random.Random(user_input.random_seed)
    circles = user_input.circles
    stroke = abs(user_input.stroke_width) if user_input.stroke_width else 1.0
    gap = user_input.gap if (user_input.gap is not None) else stroke
    radius = abs(user_input.inner_radius) if (user_input.inner_radius
                                              is not None) else stroke
    x = user_input.hoffset
    y = user_input.voffset
    color = user_input.color

    # In --randomize mode the command-line values act as upper bounds for
    # random draws instead of being used directly.
    if user_input.randomize:
        circles = chaos.randrange(0, circles) if circles else 0
        stroke = chaos.uniform(0, stroke)
        stroke = 1.0 if stroke == 0 else stroke
        gap = chaos.uniform(0, gap)
        radius = chaos.uniform(0, radius)
        x = chaos.uniform(-x, x) if x else 0.0
        y = chaos.uniform(-y, y) if y else 0.0
        color = '#{:02x}{:02x}{:02x}'.format(chaos.randrange(0, 255),
                                             chaos.randrange(0, 255),
                                             chaos.randrange(0, 255))
        # TODO: randomize background and disc color too when the respective parameters are used
        #       (needs to respect color harmonies)

    # The inner radius can never fall below the stroke width.
    if radius < stroke:
        radius = stroke

    #  Generate data…
    #
    outlines = []
    arcs = []

    if user_input.outline_mode in ('both', 'inside'):
        outlines.append({'x': x, 'y': y, 'r': radius})
        radius += (gap + stroke)

    # Each iteration appends one randomly placed arc and moves one ring
    # (gap + stroke width) further outwards.
    for _ in range(circles):
        # Calculate angular space requirement for the "round" stroke caps to avoid some overlapping
        sqrd2 = 2.0 * math.pow(radius, 2.0)
        theta = ((2.0 * math.acos((sqrd2 - math.pow(
            (stroke / 2.0), 2.0)) / sqrd2)) * (180.0 / math.pi))

        arcs.append(
            SVGArcPathSegment(offset=chaos.uniform(0, 359.0),
                              angle=chaos.uniform(0, 359.0 - theta),
                              radius=radius,
                              x=x,
                              y=y))
        radius += (gap + stroke)

    if user_input.outline_mode in ('both', 'outside'):
        outlines.append({'x': x, 'y': y, 'r': radius})
    else:
        radius -= (gap + stroke)

    #  Generate SVG/XML…
    #
    # Compact float-to-string helper: rounds floats, passes strings through.
    def _f(v, max_digits=9):
        if isinstance(v, float):
            v = round(v, max_digits)
        return v if isinstance(v, str) else str(v)

    vb_dim = (radius + (stroke * 0.5)) * (
        256.0 / (256.0 - 37.35)
    )  # 37px border for 256x256; a golden ratio in there… somewhere…
    vb_off = _f(vb_dim * -1.0, 2)
    vb_dim = _f(vb_dim * 2.0, 2)
    # Stroke presentation attributes shared by all arcs and outlines.
    config = {'stroke': color, 'stroke-width': _f(stroke), 'fill': 'none'}

    svg = xtree.Element(
        'svg', {
            'width': '100%',
            'height': '100%',
            'xmlns': 'http://www.w3.org/2000/svg',
            'viewBox': '{o} {o} {s} {s}'.format(o=vb_off, s=vb_dim)
        })

    title = xtree.SubElement(svg, 'title')
    title.text = 'A Comitl Artwork'

    if user_input.background_color:
        xtree.SubElement(
            svg, 'rect', {
                'id': 'background',
                'x': vb_off,
                'y': vb_off,
                'width': vb_dim,
                'height': vb_dim,
                'fill': user_input.background_color
            })

    svg_m = xtree.SubElement(svg, 'g', {'id': 'comitl-disc'})

    if user_input.disc_color:
        xtree.SubElement(
            svg_m, 'circle', {
                'id': 'disc-background',
                'cx': _f(x),
                'cy': _f(y),
                'r': _f(radius),
                'fill': user_input.disc_color
            })

    if arcs:
        # Animation implies separate <path> elements (each arc needs its own
        # <animateTransform> child).
        if user_input.separate_paths or user_input.animation_mode:
            svg_ga = xtree.SubElement(svg_m, 'g', {'id': 'arcs'})
            for aid, a in enumerate(arcs):

                svg_arc = xtree.SubElement(
                    svg_ga, 'path', {
                        'id': 'arc-{}'.format(aid + 1),
                        'stroke-linecap': 'round',
                        **config
                    })
                shift = 0.0

                if user_input.animation_mode:
                    # Per-mode rotation duration d; sign encodes direction.
                    if user_input.animation_mode == 'cascade-out':
                        d = user_input.animation_duration * (
                            (aid + 1) * 0.25
                        )  # TODO: 1/4 decay value could be configurable
                    elif user_input.animation_mode == 'cascade-in':
                        d = user_input.animation_duration * (
                            (len(arcs) - aid + 1) * 0.25)
                    else:
                        # limits duration range into a 50% variation window to avoid super fast arcs with values closer to 0
                        d = chaos.uniform(
                            abs(user_input.animation_duration) * 0.5,
                            abs(user_input.animation_duration)
                        )  # TODO: variation could be configurable
                        if user_input.animation_duration < 0:
                            d *= -1  # restore user direction
                        if (user_input.animation_mode
                                == 'bidirectional') and (chaos.random() < 0.5):
                            d *= -1  # switch direction randomly

                    # Static angular offset emulating --animation-offset
                    # seconds of rotation at the chosen speed.
                    shift = (360.0 / d) * user_input.animation_offset

                    xtree.SubElement(
                        svg_arc, 'animateTransform', {
                            'attributeName': 'transform',
                            'type': 'rotate',
                            'from': '{} {} {}'.format(360 if d < 0 else 0, x,
                                                      y),
                            'to': '{} {} {}'.format(0 if d < 0 else 360, x, y),
                            'dur': '{}s'.format(abs(d)),
                            'repeatCount': 'indefinite'
                        })

                a.offset += shift
                svg_arc.set('d', str(a))
        else:
            # Single combined <path> holding every arc segment.
            xtree.SubElement(
                svg_m, 'path', {
                    'id': 'arcs',
                    'd': ''.join(map(str, arcs)),
                    'stroke-linecap': 'round',
                    **config
                })

    if outlines:
        svg_go = xtree.SubElement(svg_m, 'g', {'id': 'outlines'})
        for oid, o in enumerate(outlines):
            xtree.SubElement(
                svg_go, 'circle', {
                    'id': 'outline-{}'.format(oid + 1),
                    'cx': _f(o['x']),
                    'cy': _f(o['y']),
                    'r': _f(o['r']),
                    **config
                })

    svg.append(
        xtree.Comment(
            ' Generator: comitl.py {} (https://github.com/the-real-tokai/macuahuitl) '
            .format(__version__)))

    rawxml = xtree.tostring(svg, encoding='unicode')

    #  Send happy little arcs out into the world…
    #
    if not user_input.output:
        print(rawxml)
    else:
        try:
            from cairosvg import svg2png
            svg2png(bytestring=rawxml,
                    write_to=os.path.realpath(
                        os.path.expanduser(user_input.output)),
                    output_width=user_input.output_size,
                    output_height=user_input.output_size)
        except ImportError as e:
            print(
                'Couldn\'t rasterize nor write a PNG file. Required Python module \'cairosvg\' is not available: {}'
                .format(str(e)),
                file=sys.stderr)
Example #8
0
def write(filename, mesh, binary=True, compression="zlib", header_type=None):
    """Write *mesh* to *filename* in the VTU (VTK UnstructuredGrid XML) format.

    Parameters
    ----------
    filename : str or path-like
        Destination file.
    mesh :
        Mesh object (points, cells, point_data, cell_data, field_data);
        polyhedral cells are supported but cannot be mixed with other
        cell types.
    binary : bool
        Write base64-encoded binary data arrays instead of ASCII.
    compression : str or None
        Block compression for binary arrays: "zlib", "lzma", or falsy for
        uncompressed output.
    header_type : str or None
        VTK header integer type (e.g. "UInt32", "UInt64"). Defaults to
        "UInt32"; when given explicitly it is also recorded as the
        header_type attribute on the root element.

    Note: byte-order normalization below mutates *mesh* (points, cells and
    the data dicts) in place.
    """
    # Writing XML with an etree required first transforming the (potentially large)
    # arrays into string, which are much larger in memory still. This makes this writer
    # very memory hungry. See <https://stackoverflow.com/q/59272477/353337>.
    from .._cxml import etree as ET

    # Check if the mesh contains polyhedral cells, this will require special treatment
    # in certain places.
    is_polyhedron_grid = False
    for c in mesh.cells:
        if c.type[:10] == "polyhedron":
            is_polyhedron_grid = True
            break
    # The current implementation cannot mix polyhedral cells with other cell types.
    # To write such meshes, represent all cells as polyhedra.
    if is_polyhedron_grid:
        for c in mesh.cells:
            if c.type[:10] != "polyhedron":
                raise ValueError(
                    "VTU export cannot mix polyhedral cells with other cell types"
                )

    if not binary:
        logging.warning("VTU ASCII files are only meant for debugging.")

    if mesh.points.shape[1] == 2:
        logging.warning("VTU requires 3D points, but 2D points given. "
                        "Appending 0 third component.")
        mesh.points = np.column_stack([
            mesh.points[:, 0], mesh.points[:, 1],
            np.zeros(mesh.points.shape[0])
        ])

    vtk_file = ET.Element(
        "VTKFile",
        type="UnstructuredGrid",
        version="0.1",
        # Use the native endianness. Not strictly necessary, but this simplifies things
        # a bit.
        byte_order=("LittleEndian"
                    if sys.byteorder == "little" else "BigEndian"),
    )
    # BUG FIX: the previous one-liner assigned the return value of
    # vtk_file.set() (always None) to header_type whenever a header type was
    # explicitly requested, which then tripped an `assert header_type is not
    # None` and never preserved the requested value.
    if header_type is None:
        header_type = "UInt32"
    else:
        vtk_file.set("header_type", header_type)

    if binary and compression:
        # TODO lz4, lzma <https://vtk.org/doc/nightly/html/classvtkDataCompressor.html>
        compressions = {
            "lzma": "vtkLZMADataCompressor",
            "zlib": "vtkZLibDataCompressor",
        }
        assert compression in compressions
        vtk_file.set("compressor", compressions[compression])

    # swap the data to match the system byteorder
    # Don't use byteswap to make sure that the dtype is changed; see
    # <https://github.com/numpy/numpy/issues/10372>.
    points = mesh.points.astype(mesh.points.dtype.newbyteorder("="),
                                copy=False)
    for k, (cell_type, data) in enumerate(mesh.cells):
        # Treatment of polyhedra is different from other types
        if is_polyhedron_grid:
            new_cell_info = []
            for cell_info in data:
                new_face_info = []
                for face_info in cell_info:
                    face_info = np.asarray(face_info)
                    new_face_info.append(
                        face_info.astype(face_info.dtype.newbyteorder("="),
                                         copy=False))
                new_cell_info.append(new_face_info)
            mesh.cells[k] = CellBlock(cell_type, new_cell_info)
        else:
            mesh.cells[k] = CellBlock(
                cell_type, data.astype(data.dtype.newbyteorder("="),
                                       copy=False))
    for key, data in mesh.point_data.items():
        mesh.point_data[key] = data.astype(data.dtype.newbyteorder("="),
                                           copy=False)

    for data in mesh.cell_data.values():
        for k, dat in enumerate(data):
            data[k] = dat.astype(dat.dtype.newbyteorder("="), copy=False)
    for key, data in mesh.field_data.items():
        mesh.field_data[key] = data.astype(data.dtype.newbyteorder("="),
                                           copy=False)

    def numpy_to_xml_array(parent, name, data):
        # Emit *data* as a <DataArray> child of *parent*; the actual payload
        # is written lazily via the text_writer callback attached below.
        vtu_type = numpy_to_vtu_type[data.dtype]
        fmt = "{:.11e}" if vtu_type.startswith("Float") else "{:d}"
        da = ET.SubElement(parent, "DataArray", type=vtu_type, Name=name)
        if len(data.shape) == 2:
            da.set("NumberOfComponents", "{}".format(data.shape[1]))
        if binary:
            da.set("format", "binary")
            if compression:
                # compressed write
                def text_writer(f):
                    max_block_size = 32768
                    data_bytes = data.tobytes()

                    # round up
                    num_blocks = -int(-len(data_bytes) // max_block_size)
                    last_block_size = (len(data_bytes) -
                                       (num_blocks - 1) * max_block_size)

                    # It's too bad that we have to keep all blocks in memory. This is
                    # necessary because the header, written first, needs to know the
                    # lengths of all blocks. Also, the blocks are encoded _after_ having
                    # been concatenated.
                    c = {"lzma": lzma, "zlib": zlib}[compression]
                    compressed_blocks = [
                        # This compress is the slowest part of the writer
                        c.compress(block)
                        for block in _chunk_it(data_bytes, max_block_size)
                    ]

                    # collect header
                    header = np.array(
                        [num_blocks, max_block_size, last_block_size] +
                        [len(b) for b in compressed_blocks],
                        dtype=vtu_to_numpy_type[header_type],
                    )
                    f.write(base64.b64encode(header.tobytes()).decode())
                    f.write(
                        base64.b64encode(b"".join(compressed_blocks)).decode())

            else:
                # uncompressed write
                def text_writer(f):
                    data_bytes = data.tobytes()
                    # collect header
                    header = np.array(len(data_bytes),
                                      dtype=vtu_to_numpy_type[header_type])
                    f.write(
                        base64.b64encode(header.tobytes() +
                                         data_bytes).decode())

        else:
            da.set("format", "ascii")

            def text_writer(f):
                # This write() loop is the bottleneck for the write. Alternatives:
                # savetxt is super slow:
                #   np.savetxt(f, data.reshape(-1), fmt=fmt)
                # joining and writing is a bit faster, but consumes huge amounts of
                # memory:
                #   f.write("\n".join(map(fmt.format, data.reshape(-1))))
                for item in data.reshape(-1):
                    f.write((fmt + "\n").format(item))

        da.text_writer = text_writer

    def _polyhedron_face_cells(face_cells):
        # Define the faces of each cell on the format specfied for VTU Polyhedron cells.
        # These are defined in Mesh.polyhedron_faces, as block data. The block consists
        # of a nested list (outer list represents cell, inner is faces for this cells),
        # where the items of the inner list are the nodes of specific faces.
        #
        # The output format is specified at https://vtk.org/Wiki/VTK/Polyhedron_Support

        # Initialize array for size of data per cell.
        data_size_per_cell = np.zeros(len(face_cells), dtype=int)

        # The data itself is of unknown size, and cannot be initialized
        data = []
        for ci, cell in enumerate(face_cells):
            # Number of faces for this cell
            data.append(len(cell))
            for face in cell:
                # Number of nodes for this face
                data.append(face.size)
                # The nodes themselves
                data += face.tolist()

            data_size_per_cell[ci] = len(data)

        # The returned data corresponds to the faces and faceoffsets fields in the
        # vtu polyhedron data format
        return data, data_size_per_cell.tolist()

    comment = ET.Comment(f"This file was created by meshio v{__version__}")
    vtk_file.insert(1, comment)

    grid = ET.SubElement(vtk_file, "UnstructuredGrid")

    total_num_cells = sum([len(c.data) for c in mesh.cells])
    piece = ET.SubElement(
        grid,
        "Piece",
        NumberOfPoints="{}".format(len(points)),
        NumberOfCells=f"{total_num_cells}",
    )

    # points
    if points is not None:
        pts = ET.SubElement(piece, "Points")
        numpy_to_xml_array(pts, "Points", points)

    if mesh.cells is not None and len(mesh.cells) > 0:
        cls = ET.SubElement(piece, "Cells")

        faces = None
        faceoffsets = None

        if is_polyhedron_grid:
            # The VTK polyhedron format requires both Cell-node connectivity, and a
            # definition of faces. The cell-node relation must be recoved from the
            # cell-face-nodes currently in CellBlocks.
            # NOTE: If polyhedral cells are implemented for more mesh types, this code
            # block may be useful for those as well.
            con = []
            num_nodes_per_cell = []
            for block in mesh.cells:
                for cell in block.data:
                    nodes_this_cell = []
                    for face in cell:
                        nodes_this_cell += face.tolist()
                    unique_nodes = np.unique(nodes_this_cell).tolist()

                    con += unique_nodes
                    num_nodes_per_cell.append(len(unique_nodes))

            connectivity = np.array(con)
            # offsets = np.hstack(([0], np.cumsum(num_nodes_per_cell)[:-1]))
            offsets = np.cumsum(num_nodes_per_cell)

            # Initialize data structures for polyhedral cells
            faces = []
            faceoffsets = []

        else:
            # create connectivity, offset, type arrays
            connectivity = np.concatenate([
                v.data[:,
                       _meshio_to_vtk_order(v.type, v.data.shape[1])].reshape(
                           -1) for v in mesh.cells
            ])

            # offset (points to the first element of the next cell)
            offsets = [
                v.data.shape[1] *
                np.arange(1, v.data.shape[0] + 1, dtype=connectivity.dtype)
                for v in mesh.cells
            ]
            for k in range(1, len(offsets)):
                offsets[k] += offsets[k - 1][-1]
            offsets = np.concatenate(offsets)

        # types
        types_array = []
        for k, v in mesh.cells:
            # For polygon and polyhedron grids, the number of nodes is part of the cell
            # type key. This part must be stripped away.
            special_cells = [
                "polygon",
                "polyhedron",
                "VTK_LAGRANGE_CURVE",
                "VTK_LAGRANGE_TRIANGLE",
                "VTK_LAGRANGE_QUADRILATERAL",
                "VTK_LAGRANGE_TETRAHEDRON",
                "VTK_LAGRANGE_HEXAHEDRON",
                "VTK_LAGRANGE_WEDGE",
                "VTK_LAGRANGE_PYRAMID",
            ]
            key_ = None
            for string in special_cells:
                if k.startswith(string):
                    key_ = string

            if key_ is None:
                # No special treatment
                key_ = k

            # further adaptions for polyhedron
            if k.startswith("polyhedron"):
                # Get face-cell relation on the vtu format. See comments in helper
                # function for more information of how to specify this.
                faces_loc, faceoffsets_loc = _polyhedron_face_cells(v)
                # Adjust offsets to global numbering
                assert faceoffsets is not None
                if len(faceoffsets) > 0:
                    faceoffsets_loc = [
                        fi + faceoffsets[-1] for fi in faceoffsets_loc
                    ]

                assert faces is not None
                faces += faces_loc
                faceoffsets += faceoffsets_loc

            types_array.append(np.full(len(v), meshio_to_vtk_type[key_]))

        types = np.concatenate(
            types_array
            # [np.full(len(v), meshio_to_vtk_type[k]) for k, v in mesh.cells]
        )

        numpy_to_xml_array(cls, "connectivity", connectivity)
        numpy_to_xml_array(cls, "offsets", offsets)
        numpy_to_xml_array(cls, "types", types)

        if is_polyhedron_grid:
            # Also store face-node relation
            numpy_to_xml_array(cls, "faces", np.array(faces, dtype=int))
            numpy_to_xml_array(cls, "faceoffsets",
                               np.array(faceoffsets, dtype=int))

    if mesh.point_data:
        pd = ET.SubElement(piece, "PointData")
        for name, data in mesh.point_data.items():
            numpy_to_xml_array(pd, name, data)

    if mesh.cell_data:
        cd = ET.SubElement(piece, "CellData")
        for name, data in raw_from_cell_data(mesh.cell_data).items():
            numpy_to_xml_array(cd, name, data)

    # write_xml(filename, vtk_file, pretty_xml)
    tree = ET.ElementTree(vtk_file)
    tree.write(filename)
def make_xml(pmx_data, filepath, use_japanese_name, xml_save_versions):
    """Export PMX model metadata to an XML sidecar file.

    Writes the model's names/comments, morphs, bones, display labels,
    materials, rigid bodies and joint constraints from *pmx_data* to
    "<stem>.xml" next to *filepath*.  If that file already exists,
    numbered fallback names ("<stem>1.xml", "<stem>2.xml", ...) are tried,
    up to *xml_save_versions* candidates, so existing files are kept.

    :param pmx_data: parsed PMX model data
    :param filepath: path of the exported PMX file (determines the XML path)
    :param use_japanese_name: prefer Japanese names for Blender-side names
    :param xml_save_versions: how many numbered fallback file names to try
    :returns: dict mapping bone index -> Blender bone name
    """

    # const
    # Decodes to the facial-group legend (Japanese):
    # "expression group  0:unusable 1:eyebrows 2:eyes 3:lips 4:other"
    J_Face_Comment = "\u8868\u60C5\u30B0\u30EB\u30FC\u30D7\u0020\u0030\u003A\u4F7F\u7528\u4E0D\u53EF\u0020\u0031\u003A\u307E\u3086\u0020\u0032\u003A\u76EE\u0020\u0033\u003A\u30EA\u30C3\u30D7\u0020\u0034\u003A\u305D\u306E\u4ED6"

    # filename
    file_root = os.path.splitext(filepath)[0]
    xml_path = file_root + ".xml"

    num = 1
    xml_exist_list = []

    # If the stem already ends in one or two digits, continue numbering
    # from that value instead of restarting at 1.
    a = re.search(r'[0-9]{1,2}$', file_root)
    if a is not None:
        num += int(a.group())
        # BUGFIX: str.rstrip() removes a *set of characters*, not a suffix
        # (e.g. "model121".rstrip("21") -> "model", eating three digits).
        # Slice off exactly the matched suffix instead.
        file_root = file_root[:-len(a.group())]

    for index in range(num, xml_save_versions + 1):
        if not os.path.isfile(xml_path):
            break

        xml_exist_list.append(bpy.path.basename(xml_path))
        xml_path = file_root + str(index) + ".xml"

    save_message = 'Save As "%s"' % bpy.path.basename(xml_path)
    bpy.ops.b2pmxe.message('INVOKE_DEFAULT', type='INFO', line1=save_message)

    # print xml_exist_list
    if len(xml_exist_list):
        print("xml_file is exist:")
        for data in xml_exist_list:
            print("   --> %s" % data)

    #
    # XML
    #
    root = etree.Element('{local}pmxstatus', attrib={'{http://www.w3.org/XML/1998/namespace}lang': 'jp'})

    #
    # Header
    #   Name
    #   Comment

    # Add Info
    infonode = etree.SubElement(root, "pmdinfo")
    infonode.tail = "\r\n"

    # Add Name
    pmx_name = etree.SubElement(infonode, "name")
    pmx_name.text = pmx_data.Name.rstrip()
    pmx_name_e = etree.SubElement(infonode, "name_e")
    pmx_name_e.text = pmx_data.Name_E.rstrip()

    # Add Comment
    pmx_cmment = etree.SubElement(infonode, "comment")
    pmx_cmment.text = pmx_data.Comment.rstrip()
    pmx_cmment_e = etree.SubElement(infonode, "comment_e")
    pmx_cmment_e.text = pmx_data.Comment_E.rstrip()

    #
    # Morphs
    #

    # Add Morph
    morph_root = etree.SubElement(root, "morphs")
    morph_root.tail = "\r\n"
    morph_comment = etree.Comment(J_Face_Comment)
    morph_comment.tail = "\r\n"
    morph_root.append(morph_comment)

    # Morph
    # Name    # morph name
    # Name_E  # morph name English
    # Panel   # [1:Eyebrows 2:Mouth 3:Eye 4:Other 0:System]
    # Type    # [0:Group 1:Vertex 2:Bone 3:UV 4:ExUV1 5:ExUV2 6:ExUV3 7:ExUV4 8:Material]
    # Offsets # offset data
    for (morph_index, pmx_morph) in enumerate(pmx_data.Morphs):
        blender_morph_name = Get_JP_or_EN_Name(pmx_morph.Name.rstrip(), pmx_morph.Name_E.rstrip(), use_japanese_name)

        morph_node = etree.SubElement(morph_root, "morph")
        morph_node.tail = "\r\n"
        # morph_node.set('index' , str(morph_index))
        morph_node.set('group', str(pmx_morph.Panel))
        morph_node.set('name', pmx_morph.Name.rstrip())
        morph_node.set('name_e', pmx_morph.Name_E.rstrip())
        morph_node.set('b_name', blender_morph_name)

    #
    # Bones
    #
    bone_root = etree.SubElement(root, "bones")
    bone_root.tail = "\r\n"

    # First pass: resolve every bone's Blender-side name so later sections
    # (bones, labels, rigids) can reference bones by index.
    blender_bone_list = {}

    for (bone_index, pmx_bone) in enumerate(pmx_data.Bones):
        blender_bone_name = Get_JP_or_EN_Name(pmx_bone.Name, pmx_bone.Name_E, use_japanese_name, bone_mode=True)
        blender_bone_list[bone_index] = (blender_bone_name)

    for (bone_index, pmx_bone) in enumerate(pmx_data.Bones):
        blender_bone_name = blender_bone_list[bone_index]
        bone_node = etree.SubElement(bone_root, "bone")
        bone_node.tail = "\r\n"
        # bone_node.set("index" , str(bone_index))
        bone_node.set("name", pmx_bone.Name)
        bone_node.set("name_e", pmx_bone.Name_E)
        bone_node.set("b_name", blender_bone_name)

        # Bone Status
        bone_node.set("rotatable", str(pmx_bone.Rotatable))
        bone_node.set("movable", str(pmx_bone.Movable))
        bone_node.set("visible", str(pmx_bone.Visible))
        bone_node.set("operational", str(pmx_bone.Operational))
        bone_node.set("ik", str(pmx_bone.UseIK))
        bone_node.set("add_rot", str(pmx_bone.AdditionalRotation))

        if pmx_bone.AdditionalBoneIndex >= 0:
            bone_node.set("target", blender_bone_list[pmx_bone.AdditionalBoneIndex])

        bone_node.set("power", str(pmx_bone.AdditionalPower))
        bone_node.set("add_move", str(pmx_bone.AdditionalMovement))

        if pmx_bone.AdditionalMovement == 1 or pmx_bone.AdditionalRotation == 1:
            # NOTE(review): raises KeyError if AdditionalBoneIndex is -1
            # while one of these flags is set -- confirm PMX data always
            # carries a valid index when the flags are on.
            bone_node.set("target", blender_bone_list[pmx_bone.AdditionalBoneIndex])
            bone_node.set("power", str(pmx_bone.AdditionalPower))

        bone_node.set("fixed_axis", str(pmx_bone.UseFixedAxis))
        bone_node.set("local_axis", str(pmx_bone.UseLocalAxis))

        set_Vector(bone_node, pmx_bone.LocalAxisX, "local_x")
        set_Vector(bone_node, pmx_bone.LocalAxisZ, "local_z")

        bone_node.set("level", str(pmx_bone.Level))
        bone_node.set("after_physical", str(pmx_bone.AfterPhysical))
        # if pmx_bone.AfterPhysical  = 0 unsupport
        # if pmx_bone.ExternalBone = 0   unsupport

    #
    # Labels
    #

    # Add Labels
    labels_root = etree.SubElement(root, "labels")
    labels_root.text = "\r\n"
    labels_root.tail = "\r\n"

    for (label_index, pmx_label) in enumerate(pmx_data.DisplayFrames):
        label_node = etree.SubElement(labels_root, "label")
        label_node.tail = "\r\n"
        # label_node.set("index" , str(label_index))
        label_node.set("name", pmx_label.Name)
        label_node.set("name_e", pmx_label.Name_E)
        label_node.set("type", str(pmx_label.Type))

        # Each member is (kind, index): kind 0 references a bone, anything
        # else references a morph.
        for (index, member) in enumerate(pmx_label.Members):
            member_node = etree.SubElement(label_node, "tab")
            member_node.tail = "\r\n"
            # member_node.set("index" , str(index))

            if member[0] == 0:
                member_node.set("type", "bone")
                member_node.set("name", blender_bone_list[member[1]])

            else:
                member_node.set("type", "morph")
                label_morph_name = Get_JP_or_EN_Name(
                    pmx_data.Morphs[member[1]].Name,
                    pmx_data.Morphs[member[1]].Name_E,
                    use_japanese_name)
                member_node.set("name", label_morph_name)

    #
    # Materials
    #
    material_root = etree.SubElement(root, "materials")
    material_root.tail = "\r\n"

    for (mat_index, pmx_mat) in enumerate(pmx_data.Materials):
        blender_mat_name = Get_JP_or_EN_Name(pmx_mat.Name, pmx_mat.Name_E, use_japanese_name)

        material_node = etree.SubElement(material_root, "material")
        material_node.tail = "\r\n"
        # material_node.set("index",str(mat_index))
        material_node.set("name", pmx_mat.Name)
        material_node.set("name_e", pmx_mat.Name_E)
        material_node.set("b_name", blender_mat_name)
        material_node.set("use_systemtoon", str(pmx_mat.UseSystemToon))

        # Toon: system toon by index, -1 for none, otherwise a texture path.
        if pmx_mat.UseSystemToon == 1:
            material_node.set("toon", str(pmx_mat.ToonIndex))

        elif pmx_mat.ToonIndex < 0:
            material_node.set("toon", "-1")

        else:
            material_node.set("toon", str(pmx_data.Textures[pmx_mat.ToonIndex].Path))

        material_node.set("both", str(pmx_mat.Both))
        material_node.set("ground_shadow", str(pmx_mat.GroundShadow))
        material_node.set("drop_shadow", str(pmx_mat.DropShadow))
        material_node.set("on_shadow", str(pmx_mat.OnShadow))
        material_node.set("on_edge", str(pmx_mat.OnEdge))
        material_node.set("edge_size", str(pmx_mat.EdgeSize))
        material_node.set("power", str(pmx_mat.Power))
        material_edge_color = etree.SubElement(material_node, "edge_color")
        material_edge_color.set("r", str(pmx_mat.EdgeColor.x))
        material_edge_color.set("g", str(pmx_mat.EdgeColor.y))
        material_edge_color.set("b", str(pmx_mat.EdgeColor.z))
        material_edge_color.set("a", str(pmx_mat.EdgeColor.w))
        material_deffuse = etree.SubElement(material_node, "deffuse")
        material_deffuse.set("r", str(pmx_mat.Deffuse.x))
        material_deffuse.set("g", str(pmx_mat.Deffuse.y))
        material_deffuse.set("b", str(pmx_mat.Deffuse.z))
        material_deffuse.set("a", str(pmx_mat.Deffuse.w))
        material_specular = etree.SubElement(material_node, "specular")
        material_specular.set("r", str(pmx_mat.Specular.x))
        material_specular.set("g", str(pmx_mat.Specular.y))
        material_specular.set("b", str(pmx_mat.Specular.z))
        material_ambient = etree.SubElement(material_node, "ambient")
        material_ambient.set("r", str(pmx_mat.Ambient.x))
        material_ambient.set("g", str(pmx_mat.Ambient.y))
        material_ambient.set("b", str(pmx_mat.Ambient.z))

        if pmx_mat.SphereIndex != -1 and len(pmx_data.Textures) > pmx_mat.SphereIndex:
            material_sphere = etree.SubElement(material_node, "sphere")
            material_sphere.set("type", str(pmx_mat.SphereType))
            material_sphere.set("path", str(pmx_data.Textures[pmx_mat.SphereIndex].Path))

    #
    # Rigid
    #
    rigid_root = etree.SubElement(root, "rigid_bodies")
    rigid_root.text = "\r\n"
    rigid_root.tail = "\r\n"

    for (rigid_index, pmx_rigid) in enumerate(pmx_data.Rigids):
        rigid_node = etree.SubElement(rigid_root, "rigid")
        rigid_node.tail = "\r\n"
        # rigid_node.set("index",str(rigid_index))
        rigid_node.set("name", pmx_rigid.Name)
        rigid_node.set("name_e", pmx_rigid.Name_E)

        # Negative bone index means the body is anchored to the world.
        if (pmx_rigid.Bone < 0):
            rigid_node.set("attach", "World")
        else:
            rigid_node.set("attach", blender_bone_list[pmx_rigid.Bone])

        rigid_node.set("type", str(pmx_rigid.PhysicalType))
        rigid_node.set("group", str(pmx_rigid.Group))
        rigid_node.set("groups", str(pmx_rigid.NoCollision))
        rigid_node.set("shape", str(pmx_rigid.BoundType))

        rigid_size = etree.SubElement(rigid_node, "size")
        rigid_size.set("a", str("%.7f" % pmx_rigid.Size[0]))
        rigid_size.set("b", str("%.7f" % pmx_rigid.Size[1]))
        rigid_size.set("c", str("%.7f" % pmx_rigid.Size[2]))

        set_Vector(rigid_node, pmx_rigid.Position, "pos")
        set_Vector_Deg(rigid_node, pmx_rigid.Rotate, "rot")

        rigid_node.set("mass", str("%.7f" % pmx_rigid.Mass))
        rigid_node.set("pos_dump", str("%.7f" % pmx_rigid.PosLoss))
        rigid_node.set("rot_dump", str("%.7f" % pmx_rigid.RotLoss))
        rigid_node.set("restitution", str("%.7f" % pmx_rigid.OpPos))
        rigid_node.set("friction", str("%.7f" % pmx_rigid.Friction))

    #
    # Joint
    #
    joint_root = etree.SubElement(root, "constraints")
    joint_root.text = "\r\n"
    joint_root.tail = "\r\n"

    for (joint_index, pmx_joint) in enumerate(pmx_data.Joints):

        joint_node = etree.SubElement(joint_root, "constraint")
        joint_node.tail = "\r\n"
        # joint_node.set("index",str(joint_index))
        joint_node.set("name", pmx_joint.Name)
        joint_node.set("name_e", pmx_joint.Name_E)
        joint_node.set("body_A", str(pmx_joint.Parent))
        joint_node.set("body_B", str(pmx_joint.Child))

        set_Vector(joint_node, pmx_joint.Position, "pos")
        set_Vector_Deg(joint_node, pmx_joint.Rotate, "rot")

        joint_pos_limit = etree.SubElement(joint_node, "pos_limit")
        set_Vector(joint_pos_limit, pmx_joint.PosLowerLimit, "from")
        set_Vector(joint_pos_limit, pmx_joint.PosUpperLimit, "to")

        joint_rot_limit = etree.SubElement(joint_node, "rot_limit")
        set_Vector_Deg(joint_rot_limit, pmx_joint.RotLowerLimit, "from")
        set_Vector_Deg(joint_rot_limit, pmx_joint.RotUpperLimit, "to")

        set_Vector(joint_node, pmx_joint.PosSpring, "pos_spring")
        set_Vector(joint_node, pmx_joint.RotSpring, "rot_spring")

    tree = etree.ElementTree(root)
    tree.write(xml_path, encoding="utf-8")
    return blender_bone_list
Example #10
0
from collections import OrderedDict
import re
from xml.etree import ElementTree

from . import base

# Splits an ElementTree qualified tag "{namespace}local" into its two parts.
tag_regexp = re.compile("{([^}]*)}(.*)")

# All ElementTree comment nodes share one sentinel .tag object; capture it
# once so the tree walker can recognize comment elements by tag comparison.
ElementTreeCommentType = ElementTree.Comment("asd").tag


class TreeWalker(base.NonRecursiveTreeWalker):  # pylint:disable=unused-variable
    """Given the particular ElementTree representation, this implementation,
    to avoid using recursion, returns "nodes" as tuples with the following
    content:

    1. The current element

    2. The index of the element relative to its parent

    3. A stack of ancestor elements

    4. A flag "text", "tail" or None to indicate if the current node is a
       text node; either the text or tail of the current element (1)
    """
    def getNodeDetails(self, node):
        if isinstance(node, tuple):  # It might be the root Element
            elt, _, _, flag = node
            if flag in ("text", "tail"):
                return base.TEXT, getattr(elt, flag)
            else:
Example #11
0
    def adjust(self):
        """ Export data to GNU Gama xml, adjust the network and read result

            Builds a gama-local 2.0 input document from ``self.points`` and
            ``self.observations``, runs the external ``gama-local`` binary on
            a temp file, then parses the adjustment output XML.

            :returns: tuple (adjusted point dict, worst blunder dict), or
                      (None, None) on any failure
        """
        # gama-local OK?
        if self.gama_path is None:
            logging.error("GNU gama path is None")
            return (None, None)
        # fix = 0 free network
        fix = sum(1 for _, s in self.points if s == 'FIX')
        adj = sum(1 for _, s in self.points if s == 'ADJ')
        if adj == 0 or len(self.observations) < 2:
            # no unknowns or observations
            logging.error("GNU gama no unknowns or not enough observations")
            return (None, None)

        gama_local = ET.Element('gama-local', {'version': '2.0'})
        comment = ET.Comment('Gama XML created by Ulyxes')
        gama_local.append(comment)
        network = ET.SubElement(gama_local, 'network',
            {'axes-xy': 'ne', 'angles': 'left-handed'})
        description = ET.SubElement(network, 'description')
        if self.dimension == 1:
            description.text = 'GNU Gama 1D network'
        elif self.dimension == 2:
            description.text = 'GNU Gama 2D network'
        elif self.dimension == 3:
            description.text = 'GNU Gama 3D network'
        parameters = ET.SubElement(network, 'parameters',
            {'sigma-apr': '1', 'conf-pr': str(self.probability),
            'tol-abs': '1000', 'sigma-act': 'aposteriori',
            'update-constrained-coordinates': 'yes'})
        points_observations = ET.SubElement(network, 'points-observations',
            {'distance-stdev': str(self.stdev_dist)+' '+str(self.stdev_dist1),
            'direction-stdev': str(self.stdev_angle / 3600.0 * 10000.0),
            'angle-stdev': str(math.sqrt(2)*self.stdev_angle/3600.0*10000),
            'zenith-angle-stdev': str(self.stdev_angle/3600.0*10000.0)})
        # emit one <point> per network point; upper-case adj codes (Z/XY/XYZ)
        # mark constrained coordinates for a free network (no fixed points)
        for p, s in self.points:
            attr = {}
            if self.dimension == 1:
                attr['id'] = p['id']
                if 'elev' in p and p['elev'] is not None:
                    attr['z'] = str(p['elev'])
                if s == 'FIX':
                    attr['fix'] = 'z'
                else:
                    if fix == 0:
                        attr['adj'] = 'Z'
                    else:
                        attr['adj'] = 'z'
                tmp = ET.SubElement(points_observations, 'point', attr)
            elif self.dimension == 2:
                attr['id'] = p['id']
                if 'east' in p and 'north' in p and \
                    p['east'] is not None and p['north'] is not None:
                    attr['y'] = str(p['east'])
                    attr['x'] = str(p['north'])
                if s == 'FIX':
                    attr['fix'] = 'xy'
                else:
                    if fix == 0:
                        # free network
                        attr['adj'] = 'XY'
                    else:
                        attr['adj'] = 'xy'
                tmp = ET.SubElement(points_observations, 'point', attr)
            elif self.dimension == 3:
                attr['id'] = p['id']
                if 'east' in p and 'north' in p and \
                    p['east'] is not None and p['north'] is not None:
                    attr['y'] = str(p['east'])
                    attr['x'] = str(p['north'])
                if 'elev' in p and p['elev'] is not None:
                    attr['z'] = str(p['elev'])
                if s == 'FIX':
                    attr['fix'] = 'xyz'
                else:
                    if fix == 0:
                        attr['adj'] = 'XYZ'
                    else:
                        attr['adj'] = 'xyz'
                tmp = ET.SubElement(points_observations, 'point', attr)
        # NOTE(review): assumes the first observation record is a station
        # record; otherwise 'sta' below would be unbound -- confirm input
        # ordering is guaranteed by the caller.
        for o in self.observations:
            if 'station' in o:
                # station record
                attr = {}
                attr['from'] = o['station']
                # instrument height
                ih = 0
                if 'ih' in o:
                    ih = o['ih']
                sta = ET.SubElement(points_observations, 'obs', attr)
            else:
                # observation; th = target height
                th = 0
                if 'th' in o:
                    th = o['th']
                if self.dimension == 2:
                    # horizontal network
                    if 'hz' in o:
                        attr = {}
                        attr['to'] = o['id']
                        attr['val'] = str(o['hz'].GetAngle('GON'))
                        tmp = ET.SubElement(sta, 'direction', attr)
                    if 'distance' in o and 'v' in o:
                        # horizontal distance reduced from slope distance
                        # and zenith angle
                        attr = {}
                        hd = math.sin(o['v'].GetAngle()) * o['distance']
                        attr['to'] = o['id']
                        attr['val'] = str(hd)
                        # BUGFIX: this is a distance observation, not a
                        # direction; the copy-pasted 'direction' tag fed the
                        # length to gama as an angle.
                        tmp = ET.SubElement(sta, 'distance', attr)
                elif self.dimension == 1:
                    # elevations only
                    pass
                elif self.dimension == 3:
                    # 3d
                    if 'hz' in o:
                        attr = {}
                        attr['to'] = o['id']
                        attr['val'] = str(o['hz'].GetAngle('GON'))
                        tmp = ET.SubElement(sta, 'direction', attr)
                    if 'distance' in o:
                        attr = {}
                        attr['to'] = o['id']
                        attr['val'] = str(o['distance'])
                        attr['from_dh'] = str(ih)
                        attr['to_dh'] = str(th)
                        tmp = ET.SubElement(sta, 's-distance', attr)
                    if 'v' in o:
                        attr = {}
                        attr['to'] = o['id']
                        attr['val'] = str(o['v'].GetAngle('GON'))
                        attr['from_dh'] = str(ih)
                        attr['to_dh'] = str(th)
                        tmp = ET.SubElement(sta, 'z-angle', attr)
                else:
                    # unknown dimension
                    logging.error("GNU gama unknown dimension")
                    return (None, None)
        # generate temp file name
        f = tempfile.NamedTemporaryFile()
        tmp_name = f.name
        f.close()
        # write the gama-local input; 'with' guarantees the handle is closed
        # before the external process reads the file
        with open(tmp_name + '.xml', 'w') as f:
            f.write(ET.tostring(gama_local).decode('utf-8'))

        # run gama-local
        status = os.system(self.gama_path + ' ' + tmp_name + '.xml --text ' +
                           tmp_name + '.txt --xml ' + tmp_name + 'out.xml ' +
                           '--cov-band 0')
        if status != 0:
            logging.error("GNU gama failed")
            return (None, None)

        doc = ET.parse(tmp_name + 'out.xml')
        root = doc.getroot()

        if not root.tag.endswith("gama-local-adjustment"):
            #return res
            print("***")
        # namespace prefix shared by every element in the output document
        root_tag = re.sub('gama-local-adjustment$', '', root.tag)
        p = {}  # the single adjusted point from result
        blunder = {'std-residual': 0}
        for child in root:
            if root_tag + "coordinates" == child.tag:
                for gchild in child:
                    if root_tag + "adjusted" == gchild.tag:
                        for ggchild in gchild:
                            if root_tag + "point" == ggchild.tag:
                                for pdata in ggchild:
                                    if root_tag + "id" == pdata.tag:
                                        p['id'] = pdata.text
                                    elif root_tag + "y" == pdata.tag:
                                        p['east'] = float(pdata.text)
                                    elif root_tag + "x" == pdata.tag:
                                        p['north'] = float(pdata.text)
                                    elif root_tag + "z" == pdata.tag:
                                        p['elev'] = float(pdata.text)
                    if root_tag + "orientation-shifts" == gchild.tag:
                        for ggchild in gchild:
                            if root_tag + "orientation" == ggchild.tag:
                                for pdata in ggchild:
                                    if root_tag + "approx" == pdata.tag:
                                        p['approx_ori'] = float(pdata.text)
                                    if root_tag + "adj" == pdata.tag:
                                        p['ori'] = float(pdata.text)
                    if root_tag + "cov-mat" == gchild.tag:
                        # diagonal of the covariance matrix, in fixed order
                        i = 0
                        idx = ['std_east', 'std_north', 'std_elev', 'std_ori']
                        for ggchild in gchild:
                            if root_tag + "flt" == ggchild.tag:
                                p[idx[i]] = math.sqrt(float(ggchild.text))
                                i += 1
            if root_tag + "observations" == child.tag:
                # scan residuals to find the single worst blunder
                for gchild in child:
                    o = {'std-residual': 0}
                    if gchild.tag in (root_tag + "direction",
                                      root_tag + "slope-distance",
                                      root_tag + "zenith-angle"):
                        o["type"] = re.sub('^' + root_tag, '', gchild.tag)
                        for ggchild in gchild:
                            if root_tag + "from" == ggchild.tag:
                                o["from"] = ggchild.text
                            if root_tag + "to" == ggchild.tag:
                                o["to"] = ggchild.text
                            if root_tag + "f" == ggchild.tag:
                                o["f"] = float(ggchild.text)
                            if root_tag + "std-residual" == ggchild.tag:
                                o["std-residual"] = float(ggchild.text)
                    # .get avoids a KeyError for elements that carried a
                    # residual but no 'f' value
                    if o['std-residual'] > self.krit and \
                       o['std-residual'] > blunder['std-residual'] and \
                       o.get('f', 0) > 10:     # extra observations ratio
                        blunder = o

        # remove input xml and output xml
        os.remove(tmp_name + '.xml')
        os.remove(tmp_name + '.txt')
        os.remove(tmp_name + 'out.xml')

        return (p, blunder)
Example #12
0
 def setUp(self):
     """Build the ElementTree comment-node fixture shared by the tests."""
     # Create comment node
     self.comment = etree.Comment('foo')
Example #13
0
def write(filename, mesh, binary=True, compression="zlib", header_type=None):
    """Write *mesh* to *filename* as a VTU (VTK UnstructuredGrid XML) file.

    :param filename: output file path
    :param mesh: mesh object exposing points, cells, point_data, cell_data
        and field_data
    :param binary: base64-encode the data arrays (ASCII is debug-only)
    :param compression: None, "zlib" or "lzma" block compression
    :param header_type: VTU header integer type; defaults to "UInt32"
    """
    # Writing XML with an etree required first transforming the (potentially large)
    # arrays into string, which are much larger in memory still. This makes this writer
    # very memory hungry. See <https://stackoverflow.com/q/59272477/353337>.
    from .._cxml import etree as ET

    if not binary:
        logging.warning("VTU ASCII files are only meant for debugging.")

    if mesh.points.shape[1] == 2:
        # NOTE(review): this mutates the caller's mesh in place -- confirm
        # that side effect is intended.
        logging.warning("VTU requires 3D points, but 2D points given. "
                        "Appending 0 third component.")
        mesh.points = numpy.column_stack([
            mesh.points[:, 0], mesh.points[:, 1],
            numpy.zeros(mesh.points.shape[0])
        ])

    vtk_file = ET.Element(
        "VTKFile",
        type="UnstructuredGrid",
        version="0.1",
        # Use the native endianness. Not strictly necessary, but this simplifies things
        # a bit.
        byte_order=("LittleEndian"
                    if sys.byteorder == "little" else "BigEndian"),
    )
    # BUGFIX: the previous one-liner assigned the return value of
    # vtk_file.set() -- always None -- back to header_type whenever the
    # caller supplied one, which made vtu_to_numpy_type[header_type] below
    # raise a KeyError.
    if header_type is None:
        header_type = "UInt32"
    else:
        vtk_file.set("header_type", header_type)

    if binary and compression:
        # TODO lz4, lzma <https://vtk.org/doc/nightly/html/classvtkDataCompressor.html>
        compressions = {
            "lzma": "vtkLZMADataCompressor",
            "zlib": "vtkZLibDataCompressor",
        }
        assert compression in compressions
        vtk_file.set("compressor", compressions[compression])

    # swap the data to match the system byteorder
    # Don't use byteswap to make sure that the dtype is changed; see
    # <https://github.com/numpy/numpy/issues/10372>.
    points = mesh.points.astype(mesh.points.dtype.newbyteorder("="))
    for key, data in mesh.point_data.items():
        mesh.point_data[key] = data.astype(data.dtype.newbyteorder("="))
    for data in mesh.cell_data.values():
        for k, dat in enumerate(data):
            data[k] = dat.astype(dat.dtype.newbyteorder("="))
    for key, data in mesh.field_data.items():
        mesh.field_data[key] = data.astype(data.dtype.newbyteorder("="))

    def numpy_to_xml_array(parent, name, fmt, data):
        """Append a <DataArray> for *data*, deferring the payload to a
        text_writer callback so huge arrays are serialized lazily."""
        da = ET.SubElement(parent,
                           "DataArray",
                           type=numpy_to_vtu_type[data.dtype],
                           Name=name)
        if len(data.shape) == 2:
            da.set("NumberOfComponents", "{}".format(data.shape[1]))
        if binary:
            da.set("format", "binary")
            if compression:
                # compressed write
                def text_writer(f):
                    max_block_size = 32768
                    # tobytes(): ndarray.tostring() is deprecated (removed
                    # in modern numpy) and was an alias of tobytes()
                    data_bytes = data.tobytes()

                    # round up
                    num_blocks = -int(-len(data_bytes) // max_block_size)
                    last_block_size = (len(data_bytes) -
                                       (num_blocks - 1) * max_block_size)

                    # It's too bad that we have to keep all blocks in memory. This is
                    # necessary because the header, written first, needs to know the
                    # lengths of all blocks. Also, the blocks are encoded _after_ having
                    # been concatenated.
                    c = {"lzma": lzma, "zlib": zlib}[compression]
                    compressed_blocks = [
                        # This compress is the slowest part of the writer
                        c.compress(block)
                        for block in _chunk_it(data_bytes, max_block_size)
                    ]

                    # collect header
                    header = numpy.array(
                        [num_blocks, max_block_size, last_block_size] +
                        [len(b) for b in compressed_blocks],
                        dtype=vtu_to_numpy_type[header_type],
                    )
                    f.write(base64.b64encode(header.tobytes()).decode())
                    f.write(
                        base64.b64encode(b"".join(compressed_blocks)).decode())

            else:
                # uncompressed write
                def text_writer(f):
                    data_bytes = data.tobytes()
                    # collect header
                    header = numpy.array(len(data_bytes),
                                         dtype=vtu_to_numpy_type[header_type])
                    f.write(
                        base64.b64encode(header.tobytes() +
                                         data_bytes).decode())

        else:
            da.set("format", "ascii")

            def text_writer(f):
                # This write() loop is the bottleneck for the write. Alternatives:
                # savetxt is super slow:
                #   numpy.savetxt(f, data.reshape(-1), fmt=fmt)
                # joining and writing is a bit faster, but consumes huge amounts of
                # memory:
                #   f.write("\n".join(map(fmt.format, data.reshape(-1))))
                for item in data.reshape(-1):
                    f.write((fmt + "\n").format(item))

        da.text_writer = text_writer
        return

    comment = ET.Comment(
        "This file was created by meshio v{}".format(__version__))
    # vtk_file has no children yet, so this index-1 insert simply appends
    vtk_file.insert(1, comment)

    grid = ET.SubElement(vtk_file, "UnstructuredGrid")

    total_num_cells = sum(len(c.data) for c in mesh.cells)
    piece = ET.SubElement(
        grid,
        "Piece",
        NumberOfPoints="{}".format(len(points)),
        NumberOfCells="{}".format(total_num_cells),
    )

    # points
    if points is not None:
        pts = ET.SubElement(piece, "Points")
        numpy_to_xml_array(pts, "Points", "{:.11e}", points)

    if mesh.cells is not None:
        cls = ET.SubElement(piece, "Cells")

        # create connectivity, offset, type arrays
        connectivity = numpy.concatenate(
            [v.data.reshape(-1) for v in mesh.cells])

        # offset (points to the first element of the next cell)
        offsets = [
            v.data.shape[1] *
            numpy.arange(1, v.data.shape[0] + 1, dtype=connectivity.dtype)
            for v in mesh.cells
        ]
        for k in range(1, len(offsets)):
            offsets[k] += offsets[k - 1][-1]
        offsets = numpy.concatenate(offsets)

        # types
        types = numpy.concatenate(
            [numpy.full(len(v), meshio_to_vtk_type[k]) for k, v in mesh.cells])

        numpy_to_xml_array(cls, "connectivity", "{:d}", connectivity)
        numpy_to_xml_array(cls, "offsets", "{:d}", offsets)
        numpy_to_xml_array(cls, "types", "{:d}", types)

    if mesh.point_data:
        pd = ET.SubElement(piece, "PointData")
        for name, data in mesh.point_data.items():
            numpy_to_xml_array(pd, name, "{:.11e}", data)

    if mesh.cell_data:
        cd = ET.SubElement(piece, "CellData")
        for name, data in raw_from_cell_data(mesh.cell_data).items():
            numpy_to_xml_array(cd, name, "{:.11e}", data)

    # write_xml(filename, vtk_file, pretty_xml)
    tree = ET.ElementTree(vtk_file)
    tree.write(filename)
Example #14
0
import xml.etree.ElementTree as etree
import xlrd

# Read every row of the first worksheet and dump the values into a small
# XML document.
xlsdoc = xlrd.open_workbook('E:/PythonProject/Python3/019/number.xls')
sheet = xlsdoc.sheet_by_index(0)

row = sheet.nrows

# Comprehension replaces the manual append loop (and drops the ambiguous
# single-letter name "l"); the unused column count was removed.
alldata = [sheet.row_values(i) for i in range(row)]

comment = etree.Comment('数字信息')
root = etree.Element('root')
root.insert(1, comment)  # root is empty here, so this simply appends
son = etree.SubElement(root, 'numbers')
son.text = str(alldata)
xmldoc = etree.ElementTree(root)
xmldoc.write('E:/PythonProject/Python3/019/number.xml',
             encoding='utf-8',
             xml_declaration=True)
Example #15
0
import blob

parser = argparse.ArgumentParser()
parser.add_argument('Files', nargs = '+', help = "Manifest files")
parser.add_argument('--update', action="store_true", help="Edit manifest files with the updated heatshrink params.")
args = parser.parse_args()

update = True if args.update else False

for manifest_path in args.Files:
    if not os.path.isfile(manifest_path):
        print("Invalid path: %s" % manifest_path)
        continue
    tree = ET.parse(manifest_path)
    root = tree.getroot()
    root.insert(0, ET.Comment(blob.Blob.manifest_comment()))
    manifest_entries = root.find("manifest_entries")
    if manifest_entries is None:
        print("Cannot find <manifest_entries> element in manifest \"%s\"" % manifest_path)
        continue
    for entry in manifest_entries:
        if 'hs_window' in entry.attrib and entry.attrib['hs_window'] != '0' and 'hs_lookahead' in entry.attrib and entry.attrib['hs_lookahead'] != 0:
            # Now, do an analysis on the file represented by the entry
            path = entry.attrib['path']
            path = blob.Blob.parse_path(path, manifest_path, "Continuous", "../..")
            if not os.path.isfile(path):
                continue
            print("\nTesting file %s ..." % path)
            smallest = (0, 0, 0)
            for window in range(6, 13):
                for lookahead in range(3, 7):
Example #16
0
    def gen_xml_program (self, filename, table_filepath, table_index = None):
        ''' Generate a series of XML TAGS to program the device using JTAG.
        filename  Output XML file name to use.
        table_filepath   Filename for the binary file that holds the Firmware Table info.
        table_index   Index of a single table to program, or None for all tables.
        Raises AssertionError on an invalid table index or an unwritable file.'''

        def add_basic_xml_attributes (xml_elem, operation, filename, start_sect, num_sect):
            ''' Add the filename, number of sectors and start sectors into a new element.
            For example, xml_elem, 'erase', 'fdt.bin', 0, 3 would generate:
            <erase filename="fdt.bin" num_partition_sectors="3", start_sector="0" /> '''

            # Bug fix: attach the new element to the xml_elem argument instead
            # of silently using the closed-over xml_str variable (every caller
            # happened to pass xml_str, so behavior is unchanged).
            element = ET.SubElement(xml_elem, operation)
            element.set ('filename', filename)
            element.set ('start_sector', str(int(start_sect)))
            element.set ('num_partition_sectors', str(int(num_sect)))
            return element

        # Select the tables to program: all of them, or a single validated index.
        if table_index is None:
            table_list = self.tables
            start_block = 0
        else:
            if not isinstance(table_index, int):
                logging.error("Table index is not valid %s" % (str(table_index)))
                raise AssertionError("Invalid table index")
            if table_index >= len(self.tables) or table_index < 0:
                logging.error("Table index out of range %d >= %d" % (table_index, len(self.tables)))
                raise AssertionError("Invalid table index, too large or small.")
            table_list = [self.tables[table_index]]
            start_block = table_index

        xml_str = ET.Element('jdata')
        xml_str.append(ET.Comment('This is an autogenerated file'))
        xml_str.append(ET.Comment('Sector size used is %d' % (self.block_size)))

        # Serialize the selected tables and write them out as the table binary.
        table_binary = self.tables_to_binary(table_list)
        table_len = len(table_binary)
        try:
            with open(table_filepath , 'wb') as f:
                f.write(table_binary)
        except IOError:
            logging.exception("Unable to open the file '%s'\n" %
                              (table_filepath))
            raise AssertionError("Can't open file %s" % (table_filepath))

        # Floor division: plain '/' would yield a float under Python 3.
        # NOTE(review): a table that is not an exact multiple of the block
        # size is truncated to whole blocks here -- confirm that is intended.
        table_len_blocks = table_len // self.block_size

        # Erase the sectors, then program them.

        table_filename = ntpath.basename(table_filepath)
        add_basic_xml_attributes (xml_str, 'program', table_filename, start_block, table_len_blocks)

        if table_index is None:
            count = 0
        else:
            count = table_index

        for table in table_list:
            logging.info('Checking for files to add to the image for Firmware Descriptor Table %d' % (count))
            count = count + 1
            for entry in table:
                block_start = entry.get_start_block()
                size_blocks = math.ceil (entry.size_in_kb * 1024.0 / float(self.block_size))
                # Erase the whole partition
                if entry.erase:
                    add_basic_xml_attributes (xml_str, 'erase', entry.filename, block_start, size_blocks)

                if len(entry.filename) > 0:
                    # Try to get the file size to only program the file data, if it can't open use the whole partition.
                    logging.debug('Will try to open file %s to check size' % (entry.filename))
                    try:
                        file_size = os.path.getsize(entry.filename)
                        file_size_blocks = math.ceil (float(file_size) / float(self.block_size))
                        if file_size_blocks > size_blocks:
                            logging.error ("The partition size (%d blocks) isn't large enough to hold the file %s (%d blocks)" % (size_blocks, entry.filename, file_size_blocks))
                    except OSError:
                        # Narrowed from a bare 'except:' so unrelated errors
                        # (e.g. KeyboardInterrupt) are not swallowed.
                        logging.debug("Unable to access the file '%s', Can't check if it fits in the partition.\n" % (entry.filename))

                    # Now program only the file data.
                    # NOTE(review): despite the comment above, this programs
                    # size_blocks (the partition size), not file_size_blocks --
                    # confirm which is intended.
                    add_basic_xml_attributes (xml_str, 'program', entry.filename, block_start, size_blocks)

        logging.debug('Completed generating the XML for programming.')
        xml_unformat = ET.tostring(xml_str, 'utf-8')
        # Pretty-print once and reuse the result (the original computed it
        # twice and discarded the first copy).
        pretty_xml = minidom.parseString(xml_unformat).toprettyxml(indent="   ")

        try:
            # Text mode: toprettyxml() returns a str, which raises TypeError
            # when written to a binary-mode file under Python 3.
            with open(filename , 'w') as f:
                f.write(pretty_xml)
        except IOError:
            logging.exception("Unable to open the file '%s'\n" %
                              (filename))
            raise AssertionError("Can't open file %s" % (filename))

        logging.info('Done Generating XML file to program.')
Example #17
0
    def __str__(self):
        """Render the output of the PaloAltoFirewall policy into config.

        Builds the complete PAN-OS XML configuration tree (applications,
        services, security rules, tags, address groups and addresses) from
        the state collected on this object, and returns it pretty-printed.
        """

        # IPv4 addresses are normalized into the policy as IPv6 addresses
        # using ::<ipv4-address>.  The 0.0.0.0-255.255.255.255 range is
        # equivalent to ::0/96 which will only match IPv4 addresses; when
        # negated it will match only IPv6 addresses.
        # Used for address families inet and inet6 when source and
        # destination address are not specified (any any).
        ANY_IPV4_RANGE = "0.0.0.0-255.255.255.255"
        add_any_ipv4 = False

        # name -> network for individual address objects, and
        # group name -> list of member object names.
        address_book_names_dict = {}
        address_book_groups_dict = {}
        for zone in self.addressbook:
            # building individual addresses dictionary
            groups = sorted(self.addressbook[zone])
            for group in groups:
                for address, name in self.addressbook[zone][group]:
                    # Keep the broader (supernet) entry when a name repeats.
                    if name in address_book_names_dict:
                        if address_book_names_dict[name].supernet_of(address):
                            continue
                    address_book_names_dict[name] = address

                # building individual address-group dictionary
                # NOTE(review): this inner loop rebuilds every group's member
                # list once per outer group; each pass produces the same
                # result -- confirm the repetition is intentional.
                for nested_group in groups:
                    group_names = []
                    for address, name in self.addressbook[zone][nested_group]:
                        group_names.append(name)
                    address_book_groups_dict[nested_group] = group_names

            # sort address books and address sets
            address_book_groups_dict = collections.OrderedDict(
                sorted(address_book_groups_dict.items()))

        address_book_keys = sorted(list(address_book_names_dict.keys()),
                                   key=self._SortAddressBookNumCheck)

        # INITIAL CONFIG
        config = etree.Element("config", {
            "version": "8.1.0",
            "urldb": "paloaltonetworks"
        })
        devices = etree.SubElement(config, "devices")
        device_entry = etree.SubElement(devices, "entry",
                                        {"name": "localhost.localdomain"})
        vsys = etree.SubElement(device_entry, "vsys")
        vsys_entry = etree.SubElement(vsys, "entry", {"name": "vsys1"})

        # APPLICATION
        app_entries = etree.Element("application")
        for app_name in self.applications:
            if app_name not in self.application_refs:
                # this is not a custom application.
                continue
            app = self.application_refs[app_name]
            app_entry = etree.SubElement(app_entries, "entry",
                                         {"name": app_name})
            for k in self.application_refs[app_name]:
                if isinstance(app[k], (str)):
                    etree.SubElement(app_entry, k).text = app[k]
                elif isinstance(app[k], (dict)):
                    # Only the "default" sub-dict is rendered; other dict
                    # valued properties are skipped.
                    if k == "default":
                        default_props = etree.SubElement(app_entry, "default")
                    else:
                        continue
                    for prop in app[k]:
                        if k == "default" and prop in [
                                "ident-by-icmp-type", "ident-by-icmp6-type"
                        ]:
                            icmp_type_props = etree.SubElement(
                                default_props, prop)
                            etree.SubElement(icmp_type_props,
                                             "type").text = app[k][prop]
                        else:
                            pass
        vsys_entry.append(app_entries)

        # APPLICATION GROUPS
        etree.SubElement(vsys_entry, "application-group")

        # SERVICES
        vsys_entry.append(etree.Comment(" Services "))
        service = etree.SubElement(vsys_entry, "service")
        for k, v in self.service_map.entries.items():
            entry = etree.SubElement(service, "entry", {"name": v["name"]})
            proto0 = etree.SubElement(entry, "protocol")
            proto = etree.SubElement(proto0, k[2])
            # destination port
            # The tuple is rendered via str() and the surrounding parens,
            # quotes and spaces are stripped to give "p1,p2,...".
            port = etree.SubElement(proto, "port")
            tup = str(k[1])[1:-1]
            if tup[-1] == ",":
                tup = tup[:-1]
            port.text = tup.replace("'", "").replace(", ", ",")
            # source port
            if len(k[0]):
                sport = etree.SubElement(proto, "source-port")
                tup = str(k[0])[1:-1]
                if tup[-1] == ",":
                    tup = tup[:-1]
                sport.text = tup.replace("'", "").replace(", ", ",")

        # RULES
        vsys_entry.append(etree.Comment(" Rules "))
        rulebase = etree.SubElement(vsys_entry, "rulebase")
        security = etree.SubElement(rulebase, "security")
        rules = etree.SubElement(security, "rules")
        tag = etree.Element("tag")

        tag_num = 0

        # pytype: disable=key-error
        # pylint: disable=unused-variable
        for (header, pa_rules, filter_options) in self.pafw_policies:
            # One tag per non-empty header comment; rules from this policy
            # reference it by name.
            tag_name = None
            if header.comment:
                comment = " ".join(header.comment).strip()
                if comment:
                    tag_num += 1
                    # max tag len 127, max zone len 31
                    tag_name = self._TAG_NAME_FORMAT.format(
                        from_zone=filter_options[1],
                        to_zone=filter_options[3],
                        num=tag_num)
                    tag_entry = etree.SubElement(tag, "entry",
                                                 {"name": tag_name})
                    comments = etree.SubElement(tag_entry, "comments")
                    if len(comment) > self._MAX_TAG_COMMENTS_LENGTH:
                        logging.warning(
                            "WARNING: tag %s comments exceeds maximum "
                            "length %d, truncated.", tag_name,
                            self._MAX_TAG_COMMENTS_LENGTH)
                    comments.text = comment[:self._MAX_TAG_COMMENTS_LENGTH]

            # "no-addr-obj" filter option inlines raw networks into rules
            # instead of referencing named address objects.
            no_addr_obj = True if (
                len(filter_options) > 5
                and filter_options[5] == "no-addr-obj") else False

            for name, options in pa_rules.items():
                entry = etree.SubElement(rules, "entry", {"name": name})
                if options["description"]:
                    descr = etree.SubElement(entry, "description")
                    x = " ".join(options["description"])
                    if len(x) > self._MAX_RULE_DESCRIPTION_LENGTH:
                        logging.warning(
                            "WARNING: rule %s description exceeds maximum "
                            "length %d, truncated.", name,
                            self._MAX_RULE_DESCRIPTION_LENGTH)
                    descr.text = x[:self._MAX_RULE_DESCRIPTION_LENGTH]

                to = etree.SubElement(entry, "to")
                for x in options["to_zone"]:
                    member = etree.SubElement(to, "member")
                    member.text = x

                from_ = etree.SubElement(entry, "from")
                for x in options["from_zone"]:
                    member = etree.SubElement(from_, "member")
                    member.text = x

                af = filter_options[4] if len(filter_options) > 4 else "inet"

                max_src_dst = 0
                source = etree.SubElement(entry, "source")
                if not options["source"]:
                    member = etree.SubElement(source, "member")
                    if not options["destination"] and af != "mixed":
                        # only inet and inet6 use the any-ipv4 object
                        member.text = "any-ipv4"
                        add_any_ipv4 = True
                    else:
                        member.text = "any"
                else:
                    for x in options["source"]:
                        if no_addr_obj:
                            for group in address_book_groups_dict[x]:
                                member = etree.SubElement(source, "member")
                                member.text = str(
                                    address_book_names_dict[group])
                                max_src_dst += 1
                        else:
                            member = etree.SubElement(source, "member")
                            member.text = x
                            max_src_dst += 1

                if max_src_dst > self._MAX_RULE_SRC_DST_MEMBERS:
                    raise UnsupportedFilterError(
                        "term %s source members exceeds maximum of %d: %d" %
                        (name, self._MAX_RULE_SRC_DST_MEMBERS, max_src_dst))

                max_src_dst = 0
                dest = etree.SubElement(entry, "destination")
                if not options["destination"]:
                    member = etree.SubElement(dest, "member")
                    if options["source"]:
                        member.text = "any"
                    else:
                        if af != "mixed":
                            # only inet and inet6 use the any-ipv4 object
                            member.text = "any-ipv4"
                            if af == "inet6":
                                # inet6 matches by negating the IPv4-only
                                # any-ipv4 object on both directions.
                                for x in [
                                        "negate-source", "negate-destination"
                                ]:
                                    negate = etree.SubElement(entry, x)
                                    negate.text = "yes"
                        else:
                            member.text = "any"
                else:
                    for x in options["destination"]:
                        if no_addr_obj:
                            for group in address_book_groups_dict[x]:
                                member = etree.SubElement(dest, "member")
                                member.text = str(
                                    address_book_names_dict[group])
                                max_src_dst += 1
                        else:
                            member = etree.SubElement(dest, "member")
                            member.text = x
                            max_src_dst += 1

                if max_src_dst > self._MAX_RULE_SRC_DST_MEMBERS:
                    raise UnsupportedFilterError(
                        "term %s destination members exceeds maximum of %d: %d"
                        % (name, self._MAX_RULE_SRC_DST_MEMBERS, max_src_dst))

                # service section of a policy rule.
                service = etree.SubElement(entry, "service")
                if not options["service"] and not options["application"]:
                    member = etree.SubElement(service, "member")
                    member.text = "any"
                elif not options["service"] and options["application"]:
                    # Adds custom applications.
                    member = etree.SubElement(service, "member")
                    member.text = "application-default"
                else:
                    # Adds services.
                    for x in options["service"]:
                        member = etree.SubElement(service, "member")
                        member.text = x

                # ACTION
                action = etree.SubElement(entry, "action")
                action.text = options["action"]

                # check whether the rule is interzone
                if list(
                        set(options["from_zone"]).difference(
                            options["to_zone"])):
                    type_ = etree.SubElement(entry, "rule-type")
                    type_.text = "interzone"
                elif not options["from_zone"] and not options["to_zone"]:
                    type_ = etree.SubElement(entry, "rule-type")
                    type_.text = "interzone"

                # APPLICATION
                app = etree.SubElement(entry, "application")
                if not options["application"]:
                    member = etree.SubElement(app, "member")
                    member.text = "any"
                else:
                    for x in options["application"]:
                        member = etree.SubElement(app, "member")
                        member.text = x

                if tag_name is not None:
                    rules_tag = etree.SubElement(entry, "tag")
                    member = etree.SubElement(rules_tag, "member")
                    member.text = tag_name

                # LOGGING
                if options["logging"]:
                    if "disable" in options["logging"]:
                        log = etree.SubElement(entry, "log-start")
                        log.text = "no"
                        log = etree.SubElement(entry, "log-end")
                        log.text = "no"
                    if "log-start" in options["logging"]:
                        log = etree.SubElement(entry, "log-start")
                        log.text = "yes"
                    if "log-end" in options["logging"]:
                        log = etree.SubElement(entry, "log-end")
                        log.text = "yes"

        # pytype: enable=key-error

        # NOTE(review): no_addr_obj is the loop variable from the policies
        # loop above -- if self.pafw_policies is empty this raises NameError,
        # and only the LAST policy's option is honored here.  Also
        # address_book_keys becomes a dict here while it is a list above --
        # confirm both are intentional.
        if no_addr_obj:
            address_book_groups_dict = {}
            address_book_keys = {}

        # ADDRESS
        vsys_entry.append(etree.Comment(" Address Groups "))
        addr_group = etree.SubElement(vsys_entry, "address-group")

        for group, address_list in address_book_groups_dict.items():
            entry = etree.SubElement(addr_group, "entry", {"name": group})
            static = etree.SubElement(entry, "static")
            for name in address_list:
                member = etree.SubElement(static, "member")
                member.text = name

        vsys_entry.append(etree.Comment(" Addresses "))
        addr = etree.SubElement(vsys_entry, "address")

        for name in address_book_keys:
            entry = etree.SubElement(addr, "entry", {"name": name})
            desc = etree.SubElement(entry, "description")
            desc.text = name
            ip = etree.SubElement(entry, "ip-netmask")
            ip.text = str(address_book_names_dict[name])

        if add_any_ipv4:
            entry = etree.SubElement(addr, "entry", {"name": "any-ipv4"})
            desc = etree.SubElement(entry, "description")
            desc.text = ("Object to match all IPv4 addresses; "
                         "negate to match all IPv6 addresses.")
            range = etree.SubElement(entry, "ip-range")
            range.text = ANY_IPV4_RANGE

        vsys_entry.append(tag)

        self.config = config
        document = etree.tostring(config, encoding="UTF-8")
        dom = minidom.parseString(document.decode("UTF-8"))

        return dom.toprettyxml(indent=self.INDENT)
Example #18
0
 def add_comment(self, text):
     """Append *text* to this object's element as an XML comment node."""
     comment_node = ET.Comment(text)
     self.element().append(comment_node)
            elem.tail = j
    return elem

def name_parser(name):
    """Split a symbol name on ':' separators.

    Name convention: "[name or id] : [ description ]".
    Returns the raw (unstripped) parts as a list.
    """
    parts = name.split(':')
    return parts

# Command-line arguments: source directory holding the per-authority symbol
# XML files, and the destination path for the combined QGIS style file.
srcdir = sys.argv[1]
dst = sys.argv[2] 

# Root of the generated QGIS style document.
top = ET.Element('qgis_style', version="1")
comment = ET.Comment('geologic symbols for QGis')
top.append(comment)

symbols = ET.SubElement(top, 'symbols')

# Per-authority symbol counters, filled while walking the source tree below.
count_dict = {}

# Status report written alongside the conversion (relative to the CWD).
status_file  = open('../STATUS.md','w') 

for rootdir, dirs, files in os.walk( srcdir ):    
   for filename in files:
      if filename.endswith(".xml"): 
         xmlfile = os.path.join(rootdir, filename)
         auth = os.path.dirname( xmlfile ).split('/')[-1]
         if auth not in count_dict.keys():
            count_dict[auth] = 0
Example #20
0
    def generate(self):
        """Render the single exported task as a JUnit-style XML report.

        Returns a dict describing either the file to write or the text to
        print, depending on whether an output destination was configured.
        Raises RallyException if more than one task result is present.
        """
        if len(self.tasks_results) != 1:
            raise exceptions.RallyException(
                "Only one test suite is expected to be exported.")

        task_result = self.tasks_results[0]
        started_at = dt.datetime.strptime(task_result["created_at"],
                                          "%Y-%m-%dT%H:%M:%S")
        finished_at = dt.datetime.strptime(task_result["updated_at"],
                                           "%Y-%m-%dT%H:%M:%S")

        # Flatten every workload of every subtask into one test-case list,
        # counting SLA failures as we go.
        failure_count = 0
        test_cases = []
        all_workloads = itertools.chain.from_iterable(
            subtask["workloads"] for subtask in task_result["subtasks"])
        for workload in all_workloads:
            class_name, name = workload["name"].split(".", 1)
            case = {
                "id": workload["uuid"],
                "time": "%.2f" % workload["full_duration"],
                "name": name,
                "classname": class_name,
                "timestamp": workload["created_at"]
            }
            if not workload["pass_sla"]:
                failure_count += 1
                case["failure"] = "\n".join(
                    sla["detail"]
                    for sla in workload["sla_results"]["sla"]
                    if not sla["success"])
            test_cases.append(case)

        # Suite-level attributes (all values stringified for XML).
        task = {
            "id": task_result["uuid"],
            "name": "heketi-rally-cases",
            "tests": str(len(test_cases)),
            "errors": "0",
            "skipped": "0",
            "failures": str(failure_count),
            "time": "%.2f" % (finished_at - started_at).total_seconds(),
            "timestamp": task_result["created_at"],
        }

        testsuite = ET.Element("testsuite", task)
        testsuite.append(
            ET.Comment(
                "Report is generated by Rally %s at %s" %
                (version.version_string(), dt.datetime.utcnow().strftime(
                    consts.TimeFormat.ISO8601))))
        for case in test_cases:
            failure_text = case.pop("failure", None)
            case_elem = ET.SubElement(testsuite, "testcase", case)
            if failure_text:
                ET.SubElement(case_elem, "failure").text = failure_text

        utils.prettify_xml(testsuite)
        raw_report = ET.tostring(testsuite, encoding="utf-8").decode("utf-8")

        if not self.output_destination:
            return {"print": raw_report}
        return {
            "files": {
                self.output_destination: raw_report
            },
            "open": "file://" + os.path.abspath(self.output_destination),
        }
Example #21
0
def main(argv):
    """Create a Toaster fixture file from the setup program's output.

    Reads the project's default.xml and layer-index cache, then writes
    FIXTURE_FILE into the project directory populated with settings,
    bitbake versions, releases and layer records for Toaster.
    """
    global top_layers
    global list_layers
    global output_fd
    global root

    parser = argparse.ArgumentParser(
        description=
        'toaster_fixture.py: create Toaster fixture file from setup output')
    parser.add_argument('--project-dir',
                        dest='project_dir',
                        help='Project Directory')
    parser.add_argument('--verbose',
                        '-v',
                        action='store_true',
                        dest='verbose',
                        help='Verbose mode')
    args = parser.parse_args()

    # Core paths
    script_dir = os.path.dirname(os.path.abspath(argv[0]))
    wrlinux_dir = os.path.dirname(script_dir)
    if args.project_dir:
        install_dir = args.project_dir
    else:
        install_dir = os.getcwd()

    # Read setup default.xml data
    default_xml_file = os.path.join(install_dir, 'default.xml')
    if not os.path.exists(default_xml_file):
        print(
            "ERROR: 'default.xml' does not exist. You need to run the 'setup' program."
        )
        exit(-1)
    else:
        remote_base_fetch, remote_base_revision, bitbake_branch, bitbake_path = read_default_xml(
            default_xml_file)
        bitbake_url = os.path.join(remote_base_fetch, bitbake_path)

    # Load layer index cache
    json_cache = os.path.join(install_dir,
                              settings.INDEXES[0]['CACHE'] + '.json')
    read_layer_index_cache(json_cache)

    # Discover the XML directory
    # Prefer the project's mirror-index copy when present.
    xml_dir = os.path.join(wrlinux_dir, 'data/xml')
    if os.path.exists(
            os.path.join(install_dir, 'config', 'mirror-index', 'xml')):
        xml_dir = os.path.join(install_dir, 'config', 'mirror-index', 'xml')

    # Prepare the output file
    output_fd = open(os.path.join(install_dir, FIXTURE_FILE), 'w')
    write_prolog()

    # Write Toaster environment hints
    #   1. Point Toaster to the wrlinux-x directory
    root.append(ET.Comment(' HINT:WRLINUX_DIR="%s" ' % wrlinux_dir))

    # Write default setting overrides
    root.append(ET.Comment(' Set the project default values '))

    append_setting('DEFCONF_DISTRO', settings.DEFAULT_DISTRO, 1)
    append_setting('DEFAULT_RELEASE', remote_base_revision, 2)
    append_setting('DEFCONF_MACHINE', settings.DEFAULT_MACHINE, 4)
    # append custom settings
    # Each call returns the next free primary key.
    setting_pk = CUSTOM_SETTINGS_BASE
    setting_pk = append_setting('DEFCONF_LINUX_KERNEL_TYPE',
                                settings.DEFAULT_KTYPE, setting_pk)
    setting_pk = append_setting('DEFAULT_KTYPE_LIST',
                                'standard preempt-rt tiny', setting_pk)
    setting_pk = append_setting('CUSTOM_LAYERINDEX_SERVER', json_cache,
                                setting_pk)
    setting_pk = append_setting('SETUP_XMLDIR', xml_dir, setting_pk)
    setting_pk = append_setting('SETUP_GITURL', remote_base_fetch, setting_pk)
    setting_pk = append_setting('SETUP_PATH_FILTER',
                                's|layers/[a-zA-Z0-9_-]*||', setting_pk)

    # Write bitbake version
    root.append(
        ET.Comment(
            ' Bitbake versions which correspond to the metadata release '))
    bitbake_pk = 1
    bitbake_pk = append_bitbake(remote_base_revision, bitbake_url,
                                bitbake_branch, bitbake_pk)

    # Write releases
    root.append(ET.Comment(' Releases available '))
    append_releases(
        remote_base_revision, "Wind River Linux " + remote_base_revision, 1,
        remote_base_revision,
        "Toaster will run your builds using the tip of the Wind River Linux '%s' branch."
        % remote_base_revision)

    # Write base default layers
    for layer in settings.BASE_LAYERS.split():
        top_layers.append(layer)

    # Write DEFAULT_MACHINE layer
    add_machine_layers(settings.DEFAULT_MACHINE)

    # Write DEFAULT_DISTRO layer
    add_distro_layers(settings.DEFAULT_DISTRO)

    # Resolve dependent layers, exclude optional layers
    for layer in top_layers:
        add_dependent_layers(layer, INCLUDE_DEFAULT_LAYERS)

    # Write default layer list per release
    root.append(ET.Comment(' Default project layers for each release '))
    default_layers_pk = 1
    default_layers_pk = write_default_layer_release(1, default_layers_pk)

    # Write layer list
    root.append(ET.Comment(' Default layers from wrlinux defaults '))
    layer_pk, layer_version_pk = write_layer_release(1, 1, TYPE_LAYERINDEX)

    write_epilog()
    output_fd.close()

    if args.verbose:
        print("Done:")
        print(
            "  Layers=%d, LayerRelease=%d, LayerVersions=%d, Custom Settings=%d"
            % (len(list_layers), layer_pk, layer_version_pk,
               setting_pk - CUSTOM_SETTINGS_BASE))
        print("  Default Layers=%s" % list_layers)
Example #22
0
def build(root, textures, links_param_revert):
    """Create the <texture> nodes of the scene under *root*.

    Returns a dict mapping texture id -> its properties (including the
    resolved file "reference").
    """
    verbose = config.verbose

    if verbose:
        print("textures_builder_fbx launched")
    root.append(etree.Comment("Textures with ids"))

    textures_id = {}

    # Make sure the export directory and the fallback error texture exist.
    if textures != [] and not os.path.exists(config.filepath +
                                             "export\\textures"):
        os.makedirs(config.filepath + "export\\textures")
    copyfile(config.curr_place + "\\missing_texture.png",
             config.filepath + "export\\textures\\missing_texture.png")

    # Go through all textures in the scene
    for texture in textures:
        tex_id, tex_type, tex_obj = texture.get("value").replace(
            "::", "").split(",")
        rel_reference = texture.find("RelativeFilename").text.strip()
        abs_reference = texture.find("FileName").text.strip()

        properties = tools.getProperties(texture)
        uoff, voff = (properties["Translation"][-3:-1]
                      if "Translation" in properties else ["0", "0"])
        uscaling, vscaling = (properties["Scaling"][-3:-1]
                              if "Scaling" in properties else ["1", "1"])

        # Resolve the file on disk: relative path first, then absolute,
        # falling back to the bundled error texture.
        if rel_reference == "" and abs_reference == "":
            if verbose:
                print("Empty reference for id " + tex_id +
                      ", replacing with error texture")
            reference = "textures\\missing_texture.png"
        elif Path(config.filepath + rel_reference).is_file():
            reference = "textures\\" + tex_id + "." + abs_reference.split(
                ".")[-1]
            copyfile(config.filepath + rel_reference,
                     config.filepath + "export\\" + reference)
        elif Path(abs_reference).is_file():
            reference = "textures\\" + tex_id + "." + abs_reference.split(
                ".")[-1]
            copyfile(abs_reference,
                     config.filepath + "export\\" + reference)
        else:
            if verbose:
                print("Missing texture : " + rel_reference)
            reference = "textures\\missing_texture.png"

        if tex_id not in links_param_revert:
            if verbose:
                print("Texture " + reference +
                      " never used. Not writing it to file.")
        elif reference.lower().endswith(
                (".bmp", ".jpg", ".png", ".tga", ".exr")):
            textures_id[tex_id] = properties
            textures_id[tex_id]["reference"] = reference
            curr_texture = tools.create_obj(root, "texture", "bitmap", tex_id)
            tools.set_value(curr_texture, "string", "filename", reference)

            # Avoid cluttering the final file: skip default 0 offsets and
            # default 1 scaling values.
            if uoff != "0":
                tools.set_value(curr_texture, "float", "uoffset", uoff)
            if voff != "0":
                tools.set_value(curr_texture, "float", "voffset", voff)
            if uscaling != "1":
                tools.set_value(curr_texture, "float", "uscale", uscaling)
            if vscaling != "-1":
                # Textures in 3dsmax have their v values inverted compared
                # to mitsuba, hence the negation.
                tools.set_value(curr_texture, "float", "vscale",
                                str(-float(vscaling)))
        else:
            print("Unknown texture file type : " + reference.split(".")[-1])
            print("for file : " + reference)
    return textures_id
def salvar_partida_completa(c, dados=None):
    """
    Save the match data to a file so it can be recovered later.

    The optional *dados* mapping must follow these restrictions:
        - it is a dict whose keys are all strings;
        - values may be int, float or str;
        - a value may also be a list, in which case every element must
          share the same type, and that type must be int, float or str.

    :param c: handle passed through to the _coletar_* DB helpers.
    :param dados: optional dict of extra match data (see restrictions).
    :return: 0
    """
    peoes = _coletar_peoes(c)          # pawns, from the DB
    tabuleiro = _coletar_tabuleiro(c)  # pawns on the board, from the DB

    jogo = ET.Element('jogo')  # XML root
    jogo.append(ET.Comment("Dados de uma partida de ludo"))

    # For pawns and boards: one container element, then one element per
    # object; each datum becomes a child element whose tag is the dict
    # key and whose 'tipo' attribute records the Python type name so the
    # value can be converted back when the file is loaded.
    elementos_peoes = ET.SubElement(jogo, 'peoes')
    for p in peoes:
        elemento_peao = ET.SubElement(elementos_peoes, 'peao')
        for d in p:
            elemento_dado = ET.SubElement(elemento_peao, d)  # dict key as tag
            elemento_dado.text = str(p[d])                   # dict value as text
            # __name__ is already a str, no extra str() needed
            elemento_dado.set('tipo', type(p[d]).__name__)

    # Same scheme for the board entries.
    elementos_tabuleiro = ET.SubElement(jogo, 'tabuleiros')
    for t in tabuleiro:
        elemento_tabuleiro = ET.SubElement(elementos_tabuleiro, 'tabuleiro')
        for d in t:
            elemento_dado = ET.SubElement(elemento_tabuleiro, d)
            elemento_dado.text = str(t[d])
            elemento_dado.set('tipo', type(t[d]).__name__)

    # Extra data is more general: a list value is flattened into a
    # comma-joined string, ASSUMING all of its elements share one type.
    if dados is not None:
        elemento_dados = ET.SubElement(jogo, 'dados')
        for d in dados:
            valor = dados[d]
            if isinstance(valor, list):
                elemento_dado = ET.SubElement(elemento_dados, d)
                elemento_dado.text = ','.join(str(x) for x in valor)
                elemento_dado.set('tipo', 'list')
                if not valor:
                    # empty list: default the element type to str
                    elemento_dado.set('subtipo', 'str')
                else:
                    # take the type of the first element for the whole list
                    elemento_dado.set('subtipo', type(valor[0]).__name__)

            # exact type check on purpose: it rejects bool (a subclass of
            # int), matching the restrictions in the docstring
            elif type(valor) not in (str, int, float):
                print("Nao consegui armazenar o dado:", d, valor)
            else:
                elemento_dado = ET.SubElement(elemento_dados, d)
                elemento_dado.text = str(valor)
                elemento_dado.set('tipo', type(valor).__name__)

    saida = _formata_xml(jogo)  # pretty-printer (copied from slides 15)
    with open(PATH + ARQUIVO_PARTIDA, "w+") as f:
        f.write(saida)

    return 0
def create_comment_element(text):
    """Return a new XML comment node wrapping *text*."""
    return ElementTree.Comment(text)
Example #25
0
def rupture_to_element(rupture, parent=None):
    """
    Convert a rupture object into an Element object.

    :param rupture:
        must have attributes .rupture, .tag and .seed
    :param parent:
        if None a new element is created, otherwise a sub element is
        attached to the parent.
    :returns: the created 'rupture' Element
    :raises ValueError: if a fault-source rupture has an empty mesh
    """
    if parent is None:
        rup_elem = et.Element('rupture')
    else:
        rup_elem = et.SubElement(parent, 'rupture')

    rup_elem.append(et.Comment('rupture seed=%d' % rupture.seed))

    rup = rupture.rupture
    rup_elem.set('id', rupture.tag)
    rup_elem.set('magnitude', str(rup.magnitude))
    rup_elem.set('strike', str(rup.strike))
    rup_elem.set('dip', str(rup.dip))
    rup_elem.set('rake', str(rup.rake))
    rup_elem.set('tectonicRegion', str(rup.tectonic_region_type))

    if rup.is_from_fault_source:
        # rup is from a simple or complex fault source
        # the rup geometry is represented by a mesh of 3D
        # points
        mesh_elem = et.SubElement(rup_elem, 'mesh')

        # we assume the mesh components (lons, lats, depths)
        # are of uniform shape
        for i, row in enumerate(rup.lons):
            for j, col in enumerate(row):
                node_elem = et.SubElement(mesh_elem, 'node')
                node_elem.set('row', str(i))
                node_elem.set('col', str(j))
                node_elem.set('lon', str(rup.lons[i][j]))
                node_elem.set('lat', str(rup.lats[i][j]))
                node_elem.set('depth', str(rup.depths[i][j]))
        try:
            # if we never entered the loop above, it's possible
            # that i and j will be undefined
            mesh_elem.set('rows', str(i + 1))
            mesh_elem.set('cols', str(j + 1))
        except NameError:
            raise ValueError('Invalid rupture mesh')
    else:
        # rupture is from a multi surface fault source
        if rup.is_multi_surface:
            # the arrays lons, lats and depths contain 4*N elements,
            # where N is the number of planar surfaces contained in the
            # multisurface; each planar surface if characterised by 4
            # vertices top_left, top_right, bottom_left, bottom_right
            assert len(rup.lons) % 4 == 0
            assert len(rup.lons) == len(rup.lats) == len(rup.depths)

            # BUG FIX: use floor division -- on Python 3 `len(...) / 4`
            # is a float and range() would raise TypeError
            for offset in range(len(rup.lons) // 4):
                # looping on the coordinates of the sub surfaces, one
                # planar surface at the time
                start = offset * 4
                end = offset * 4 + 4
                lons = rup.lons[start:end]  # 4 lons of the current surface
                lats = rup.lats[start:end]  # 4 lats of the current surface
                depths = rup.depths[start:end]  # 4 depths

                ps_elem = et.SubElement(rup_elem, 'planarSurface')

                top_left, top_right, bottom_left, bottom_right = \
                    zip(lons, lats, depths)

                for el_name, corner in (('topLeft', top_left),
                                        ('topRight', top_right), ('bottomLeft',
                                                                  bottom_left),
                                        ('bottomRight', bottom_right)):

                    corner_elem = et.SubElement(ps_elem, el_name)
                    corner_elem.set('lon', str(corner[0]))
                    corner_elem.set('lat', str(corner[1]))
                    corner_elem.set('depth', str(corner[2]))

        else:
            # rupture is from a point or area source
            # the rupture geometry is represented by four 3D
            # corner points
            ps_elem = et.SubElement(rup_elem, 'planarSurface')

            # create the corner point elements, in the order of:
            # * top left
            # * top right
            # * bottom left
            # * bottom right
            for el_name, corner in (('topLeft', rup.top_left_corner),
                                    ('topRight', rup.top_right_corner),
                                    ('bottomLeft', rup.bottom_left_corner),
                                    ('bottomRight', rup.bottom_right_corner)):

                corner_elem = et.SubElement(ps_elem, el_name)
                corner_elem.set('lon', str(corner[0]))
                corner_elem.set('lat', str(corner[1]))
                corner_elem.set('depth', str(corner[2]))
    return rup_elem
Example #26
0
 def test_write_comment(self):
     """write_comment() must emit the comment as a '#'-prefixed line."""
     comment_el = ET.Comment('Test comment')
     buf = StringIO()
     self.assertTrue(self.provider.write_comment(buf, comment_el))
     self.assertEqual(buf.getvalue(), '#Test comment\n')
     del comment_el, buf
def to_comment(el, data, key):
    """Append data[key] to *el* as an XML comment, unless the value is
    empty or equal to the module-level ``null`` sentinel."""
    value = data[key]
    if value and value != null:
        el.append(ET.Comment(value))
    def generate_ome_xml_description(cls, axes, shape, dtype, filename=''):
        """
        Generate an OME XML description of the data we're exporting,
        suitable for the image_description tag of the first page.

        axes and shape should be provided in C-order (will be reversed in the XML)

        :param axes: iterable of single-character axis names in C-order;
            must end with "yx" (asserted below).
        :param shape: data shape, same order and length as *axes*.
        :param dtype: a numpy scalar type, given as the class itself or
            an instance of it.
        :param filename: stored in each TiffData/UUID element's FileName.
        :return: the serialized XML document (value of the stream buffer).
        """
        import uuid
        import xml.etree.ElementTree as ET

        # Normalize the inputs
        axes = "".join(axes)
        shape = tuple(shape)
        if not isinstance(dtype, type):
            # an instance was passed in; recover its scalar type class
            dtype = dtype().type

        # Root OME element, tagged with a fresh UUID and schema location.
        ome = ET.Element('OME')
        uuid_str = "urn:uuid:" + str(uuid.uuid1())
        ome.set('UUID', uuid_str)
        ome.set('xmlns:xsi', "http://www.w3.org/2001/XMLSchema-instance")
        ome.set(
            'xsi:schemaLocation',
            "http://www.openmicroscopy.org/Schemas/OME/2015-01 "
            "http://www.openmicroscopy.org/Schemas/OME/2015-01/ome.xsd")

        image = ET.SubElement(ome, 'Image')
        image.set('ID', 'Image:0')
        image.set('Name', 'exported-data')

        pixels = ET.SubElement(image, 'Pixels')
        pixels.set('BigEndian', 'true')
        pixels.set('ID', 'Pixels:0')

        # OME DimensionOrder is Fortran-order (fastest-varying axis
        # first), hence the reversal of the C-order axes string.
        fortran_axes = "".join(reversed(axes)).upper()
        pixels.set('DimensionOrder', fortran_axes)

        for axis, dim in zip(axes.upper(), shape):
            pixels.set('Size' + axis, str(dim))

        # Map numpy scalar types to OME pixel type names.
        types = {
            numpy.uint8: 'uint8',
            numpy.uint16: 'uint16',
            numpy.uint32: 'uint32',
            numpy.int8: 'int8',
            numpy.int16: 'int16',
            numpy.int32: 'int32',
            numpy.float32: 'float',
            numpy.float64: 'double',
            numpy.complex64: 'complex',
            numpy.complex128: 'double-complex'
        }

        # KeyError here means an unsupported dtype was passed in.
        pixels.set('Type', types[dtype])

        # Omit channel information (is that okay?)
        # channel0 = ET.SubElement(pixels, "Channel")
        # channel0.set("ID", "Channel0:0")
        # channel0.set("SamplesPerPixel", "1")

        # One TiffData element per 2D page; the trailing "yx" axes form
        # the page plane, all leading axes index the pages.
        assert axes[-2:] == "yx"
        for page_index, page_ndindex in enumerate(numpy.ndindex(*shape[:-2])):
            tiffdata = ET.SubElement(pixels, "TiffData")
            for axis, index in zip(axes[:-2].upper(), page_ndindex):
                tiffdata.set("First" + axis, str(index))
            tiffdata.set("PlaneCount", "1")
            tiffdata.set("IFD", str(page_index))
            uuid_tag = ET.SubElement(tiffdata, "UUID")
            uuid_tag.text = uuid_str
            uuid_tag.set('FileName', filename)

        from textwrap import dedent
        # NOTE(review): Python 2-only import ('StringIO' module); on
        # Python 3 this would need io.BytesIO, since tree.write() with an
        # encoding emits bytes.
        from StringIO import StringIO
        xml_stream = StringIO()
        # NOTE(review): 'comment' is built but never appended to the tree
        # or the stream -- apparently dead code. The text also embeds its
        # own '<!-- -->' markers, which ET.Comment would wrap again.
        comment = ET.Comment(
            dedent(
                '\
            <!-- Warning: this comment is an OME-XML metadata block, which contains crucial '
                'dimensional parameters and other important metadata. Please edit cautiously '
                '(if at all), and back up the original data before doing so. For more information, '
                'see the OME-TIFF web site: http://ome-xml.org/wiki/OmeTiff. -->'
            ))

        tree = ET.ElementTree(ome)
        tree.write(xml_stream, encoding='utf-8', xml_declaration=True)

        # Pretty-print the generated document when debug logging is on.
        if logger.isEnabledFor(logging.DEBUG):
            import xml.dom.minidom
            reparsed = xml.dom.minidom.parseString(xml_stream.getvalue())
            logger.debug("Generated OME-TIFF metadata:\n" +
                         reparsed.toprettyxml())

        return xml_stream.getvalue()
Example #29
0
    def write_xacro(self):
        """Build the robot's xacro (URDF macro) XML tree and write it to
        '<ID>_robot.xacro' via self.write_file().

        Uses self.getID(), self._dens, self.getLinkNo() and the
        add_link/add_joint/add_transmission helpers. Returns None.
        """
        robot_id = self.getID()  # computed once; reused below

        # Root <robot> element with the xacro namespace.
        root = ET.Element('robot')
        root.set('name', '{}_robot'.format(robot_id))
        root.set('xmlns:xacro', 'http://www.ros.org/wiki/xacro')

        # Constants for robot dimensions.
        root.append(ET.Comment('Constants for robot dimensions'))

        pi = ET.SubElement(root, 'xacro:property')
        pi.set('name', 'PI')
        pi.set('value', '3.1415926535897931')

        dens = ET.SubElement(root, 'xacro:property')
        dens.set('name', 'density')
        dens.set('value', str(self._dens))

        axel_offset = ET.SubElement(root, 'xacro:property')
        axel_offset.set('name', 'axel_offset')
        axel_offset.set('value', '0.05')

        # Import this robot's Gazebo-specific elements.
        find_gazebo = ET.SubElement(root, 'xacro:include')
        find_gazebo.set('filename', '$(find robot_generator)/urdf/{0}_root/{0}_robot.gazebo'.format(robot_id))

        # Import the shared material definitions (fixed path, no
        # formatting needed -- the original .format(ID) was a no-op).
        materials = ET.SubElement(root, 'xacro:include')
        materials.set('filename', '$(find robot_generator)/urdf/materials.xacro')

        # World link to connect the robot to the Gazebo frame of
        # reference with a fixed joint.
        root.append(ET.Comment('World link for fixing robot to Gazebo frame'))

        world_link = ET.SubElement(root, 'link')
        world_link.set('name', 'world')

        joint0 = ET.SubElement(root, 'joint')
        joint0.set('name', 'joint0')
        joint0.set('type', 'fixed')
        ET.SubElement(joint0, 'parent').set('link', 'world')
        ET.SubElement(joint0, 'child').set('link', 'link1')

        # Generated links/joints/transmissions; link 0 is the base and
        # gets no joint or transmission of its own.
        root.append(ET.Comment('Start of generated robot'))
        for k in range(self.getLinkNo()):
            root = self.add_link(root, k)
            if k == 0:
                continue
            root = self.add_joint(root, k)
            root = self.add_transmission(root, k)

        final_string = self.xml_space(root)

        # Write file to a urdf file with the robot's ID. Eg. robot_1.urdf
        file_name = '{}_robot.xacro'.format(robot_id)
        self.write_file(final_string, file_name)
Example #30
0
 def header(self, comment):
     self.rec = ElementTree.Element('add')
     cmt_el = ElementTree.Comment(' Generated on {} {} '.format(
         datetime.date.today(), comment))
     self.rec.append(cmt_el)