Example #1
from lxml import etree, objectify

def root(folder, filename, width, height):
    # The top of this function was cut from the listing; the signature and the
    # annotation/folder/filename lines below are reconstructed from the call
    # in __main__.
    E = objectify.ElementMaker(annotate=False)
    return E.annotation(
            E.folder(folder),
            E.filename(filename),
            E.size(
                E.width(width),
                E.height(height),
                E.depth(3),
                ),
            E.segmented(0)
            )

def instance_to_xml(class_label, xmin, ymin, xmax, ymax):
    E = objectify.ElementMaker(annotate=False)

    return E.object(
            E.name(class_label),
            E.bndbox(
                E.xmin(xmin),
                E.ymin(ymin),
                E.xmax(xmax),
                E.ymax(ymax),
                ),
            )

if __name__ == "__main__":
    annotation = root("folder", "filename", 640, 480)
    label1 = instance_to_xml("wasabi_snack", 34, 45, 100, 100)
    label2 = instance_to_xml("butter_bisuco", 34, 45, 100, 100)
    annotation.append(label1)
    annotation.append(label2)
    etree.ElementTree(annotation).write("{}.xml".format("temp2"))
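
For reference, because the tree here is built with lxml, the final write() call
can also emit an indented file with an XML declaration; a minimal variant of
the last line:

    etree.ElementTree(annotation).write(
        "temp2.xml", pretty_print=True, xml_declaration=True, encoding="utf-8")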


Example #2
def generate_legend(colormaps, output, output_format, orientation, label_color,
                    colorbar_only):

    # set ticklines out
    rcParams['xtick.direction'] = 'out'
    rcParams['ytick.direction'] = 'out'

    lc = len(colormaps)
    t = 0
    has_values = False

    for colormap in colormaps:
        if colormap.title != None:
            t = 0.15
        if colormap.legend != None:
            if colormap.legend.legend_type != "classification":
                has_values = True

    if orientation == 'horizontal':
        t = 0.15
        fig = pyplot.figure(figsize=(4.2, t + 0.8 + (1 * (lc - 1))))
    else:  # default vertical orientation
        fig = pyplot.figure(figsize=(1.5 + (2 * (lc - 1)), 3.2))

    colors = []
    legend_entries = []
    legend_count = 0
    labels = []
    label_index = []

    for colormap in colormaps:
        legend_count += 1
        bounds = []
        ticks = []
        ticklabels = []
        legendcolors = []
        legendlabels = []

        if colormap.legend == None:
            entries = colormap.colormap_entries
            # ensure showTick and showLabel exist if no legend
            for idx in range(0, len(entries)):
                entries[idx].showtick = False
                entries[idx].showlabel = False
        else:
            entries = colormap.legend.legend_entries
            colormap.style = colormap.legend.legend_type
            if colormap.legend.legend_type != "classification":
                # clear colors if not classification
                colors = []
                legend_entries = []

        for legend_entry in entries:
            if legend_entry.transparent == False:
                if colormap.style == "classification":
                    legendcolors.append(legend_entry.color)

                    if legend_entry.tooltip:
                        legendlabels.append(legend_entry.tooltip)
                        labels.append(legend_entry.tooltip)
                    else:
                        legendlabels.append(legend_entry.label)
                        labels.append(legend_entry.label)

                else:
                    if legend_entry.color != None:
                        has_values = True
                        legend_entries.append(legend_entry)
                        colors.append(legend_entry.color)

        if colormap.style != "classification":
            for idx in range(0, len(legend_entries)):
                if legend_entries[idx].showtick == True or legend_entries[
                        idx].showlabel == True or idx == 0 or idx == len(
                            legend_entries) - 1:
                    if colormap.style == "discrete":
                        ticks.append(idx + 0.5)
                    else:
                        if idx == len(legend_entries) - 1:
                            ticks.append(idx + 1)  # add end label
                        else:
                            ticks.append(idx)

                    if legend_entries[idx].showlabel == True:
                        ticklabels.append(legend_entries[idx].label)
                        labels.append(legend_entries[idx].label)
                    elif idx == 0 and colormap.legend.min_label != None:
                        ticklabels.append(colormap.legend.min_label)
                        labels.append(colormap.legend.min_label)
                    elif idx == len(
                            legend_entries
                    ) - 1 and colormap.legend.max_label != None:
                        ticklabels.append(colormap.legend.max_label)
                        labels.append(colormap.legend.max_label)
                    else:
                        ticklabels.append("")
                        labels.append("")
                    label_index.append(idx)

        # Handle +/- INF
        lowerinf = False
        upperinf = False
        if len(bounds) > 0:
            lowerinf = math.isinf(bounds[0])
            upperinf = math.isinf(bounds[-1])
            bounds = [x for x in bounds if math.isinf(x) == False]
            ticks = [x for x in ticks if math.isinf(x) == False]

        # Check for long labels
        longlabels = False
        for legendlabel in legendlabels:
            if len(legendlabel) > 14:
                longlabels = True

        if orientation == 'horizontal':
            if lc == 1:
                bottom = 0.6 - t
            else:
                bottom = 0.90 - ((0.9 / lc) * (legend_count - 1)) - (0.20 / lc)
            height = 0.20 / lc

            # use legend for classifications
            if colormap.style == "classification":
                if lc == 1:
                    fig.set_figheight(3)
                    if longlabels:
                        fig.set_figwidth(3)
                    else:
                        fig.set_figwidth(1.5)
                else:
                    bottom = bottom
                patches = []
                for color in legendcolors:
                    polygon = mpl.patches.Rectangle((0, 0),
                                                    10,
                                                    10,
                                                    facecolor=color)
                    polygon.set_linewidth(0.5)
                    patches.append(polygon)
                if len(legendcolors) < 7 and has_values == False:
                    if lc == 1:
                        fig.set_figheight(1.5)
                if len(legendcolors) <= (15 / lc):
                    col = 1
                    fontsize = 9
                if len(legendcolors) > (15 / lc):
                    if lc == 1:
                        fig.set_figwidth(3)
                    col = 2
                    fontsize = 8
                if len(legendcolors) > (30 / lc):
                    if lc == 1:
                        fig.set_figwidth(4.2)
                    col = 3
                    fontsize = 7
                if has_values == True:
                    if lc == 1:
                        fig.set_figwidth(4.2)
                    legend = fig.legend(
                        patches,
                        legendlabels,
                        bbox_to_anchor=[0.025, bottom + (0.3 / lc)],
                        loc='upper left',
                        ncol=col,
                        fancybox=True,
                        prop={'size': fontsize})
                    legend.get_frame().set_alpha(0)
                else:
                    legend = fig.legend(patches,
                                        legendlabels,
                                        bbox_to_anchor=[0.5, 0.5],
                                        loc='center',
                                        ncol=col,
                                        fancybox=True,
                                        prop={'size': fontsize})
                    legend.get_frame().set_alpha(0.5)
                for text in legend.get_texts():
                    text.set_color(label_color)

            if has_values == True and (colormap.style != "classification"
                                       or colormap.legend == None):
                if colorbar_only:
                    fig.set_figheight(height)
                    fig.set_figwidth(2.56)
                    ax = fig.add_axes([0, 0.03, 0.995, 0.97])
                else:
                    ax = fig.add_axes([0.075, bottom, 0.85, height])
                cmap = mpl.colors.ListedColormap(colors)

                if len(bounds) > 0:
                    norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
                    cb = mpl.colorbar.ColorbarBase(ax,
                                                   cmap=cmap,
                                                   norm=norm,
                                                   ticks=ticks,
                                                   orientation=orientation)
                    cb.ax.set_xticklabels(ticks)
                else:
                    norm = mpl.colors.BoundaryNorm(range(len(colors) + 1),
                                                   cmap.N)
                    cb = mpl.colorbar.ColorbarBase(ax,
                                                   cmap=cmap,
                                                   norm=norm,
                                                   ticks=ticks,
                                                   orientation=orientation)
                    cb.ax.set_xticklabels(ticklabels)

                cb.solids.set_edgecolor("face")

                for tick in cb.ax.xaxis.get_ticklabels():
                    tick.set_fontsize(8)
                    tick.set_color(label_color)
                    if colorbar_only:
                        tick.set_alpha(0)
                if colorbar_only:  # hide ticks if we want to show colorbar only
                    for tickline in cb.ax.xaxis.get_ticklines():
                        tickline.set_alpha(0)

                if colormap.legend != None and len(bounds) > 0:
                    if len(cb.ax.get_xticklabels()) > 0:
                        xticklabels = cb.ax.get_xticklabels()
                        xticklabels = [
                            label.get_text() for label in xticklabels
                        ]
                        # Check for infinity
                        if lowerinf:
                            xticklabels[0] = "<=" + xticklabels[0]
                        if upperinf:
                            xticklabels[-1] = ">=" + xticklabels[-1]

                        # show only those with showLabel
                        for idx in range(0, len(xticklabels)):
                            try:
                                if float(xticklabels[idx]) not in ticklabels:
                                    xticklabels[idx] = ""
                            except ValueError:
                                xticklabels[idx] = ""

                        # Use min/max labels
                        if colormap.legend.min_label != None:
                            xticklabels[0] = colormap.legend.min_label
                        if colormap.legend.max_label != None:
                            xticklabels[-1] = colormap.legend.max_label

                        # use int labels if all values are integers
#                         xticklabels = [int(float(label)) for label in xticklabels if float(label).is_integer()]
                        cb.ax.set_xticklabels(xticklabels)

                if colormap.units != None and colorbar_only == False:
                    fig.text(0.5,
                             bottom - height - (0.20 / lc),
                             colormap.units,
                             fontsize=10,
                             horizontalalignment='center',
                             color=label_color)

            if colormap.title != None and colorbar_only == False:
                if lc == 1:
                    title_loc = 1 - t
                else:
                    title_loc = bottom + height + (0.07 / lc)
                fig.text(0.5,
                         title_loc,
                         colormap.title,
                         fontsize=10,
                         horizontalalignment='center',
                         weight='bold',
                         color=label_color)

        else:  # default vertical orientation
            left = ((1.00 / lc) * legend_count) - (0.73 / lc)
            width = 0.15 / lc

            # use legend for classifications
            if colormap.style == "classification":
                if longlabels and fig.get_figwidth() < 3:
                    fig.set_figwidth(3.2)
                patches = []
                for color in legendcolors:
                    polygon = mpl.patches.Rectangle((0, 0),
                                                    10,
                                                    10,
                                                    facecolor=color)
                    polygon.set_linewidth(0.5)
                    patches.append(polygon)
                if len(legendcolors) < 7 and has_values == False:
                    if lc <= 2:
                        fig.set_figheight(1.5)
                if len(legendcolors) <= 14:
                    col = 1
                    fontsize = 9
                if len(legendcolors) > 14:
                    if lc <= 2:
                        fig.set_figwidth(3.2)
                    col = 2
                    fontsize = 8
                if len(legendcolors) > 28:
                    if lc <= 2:
                        fig.set_figwidth(4.2)
                    col = 3
                    fontsize = 7
                if has_values == True:
                    if lc <= 2:
                        fig.set_figwidth(3.2)
                    legend = fig.legend(
                        patches,
                        legendlabels,
                        bbox_to_anchor=[left - (0.15 / lc), 0.9],
                        loc='upper left',
                        ncol=1,
                        fancybox=True,
                        prop={'size': fontsize})
                    legend.get_frame().set_alpha(0)
                else:
                    legend = fig.legend(patches,
                                        legendlabels,
                                        bbox_to_anchor=[0.5, 0.5],
                                        loc='center',
                                        ncol=col,
                                        fancybox=True,
                                        prop={'size': fontsize})
                    legend.get_frame().set_alpha(0.5)
                for text in legend.get_texts():
                    text.set_color(label_color)

            if has_values == True and (colormap.style != "classification"
                                       or colormap.legend == None):
                if colorbar_only:
                    fig.set_figheight(2.56)
                    fig.set_figwidth(0.2)
                    ax = fig.add_axes([0.02, 0.005, 0.94, 0.995])
                else:
                    ax = fig.add_axes([left, 0.1, width, 0.8])
                cmap = mpl.colors.ListedColormap(colors)

                if len(bounds) > 0:
                    norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
                    cb = mpl.colorbar.ColorbarBase(ax,
                                                   cmap=cmap,
                                                   norm=norm,
                                                   ticks=ticks,
                                                   orientation=orientation)
                    cb.ax.set_yticklabels(ticks)
                else:
                    norm = mpl.colors.BoundaryNorm(range(len(colors) + 1),
                                                   cmap.N)
                    cb = mpl.colorbar.ColorbarBase(ax,
                                                   cmap=cmap,
                                                   norm=norm,
                                                   ticks=ticks,
                                                   orientation=orientation)
                    cb.ax.set_yticklabels(ticklabels)

                cb.solids.set_edgecolor("face")

                for tick in cb.ax.yaxis.get_ticklabels():
                    tick.set_fontsize(10)
                    tick.set_color(label_color)
                    if colorbar_only:
                        tick.set_alpha(0)
                if colorbar_only:  # hide ticks if we want to show colorbar only
                    for tickline in cb.ax.yaxis.get_ticklines():
                        tickline.set_alpha(0)

                if colormap.legend != None and len(bounds) > 0:
                    if len(cb.ax.get_yticklabels()) > 0:
                        yticklabels = cb.ax.get_yticklabels()
                        yticklabels = [
                            label.get_text() for label in yticklabels
                        ]
                        # Check for infinity
                        if lowerinf:
                            yticklabels[0] = "<=" + yticklabels[0]
                        if upperinf:
                            yticklabels[-1] = ">=" + yticklabels[-1]

                        # show only those with showLabel
                        for idx in range(0, len(yticklabels)):
                            try:
                                if float(yticklabels[idx]) not in ticklabels:
                                    yticklabels[idx] = ""
                                else:
                                    if float(yticklabels[idx]).is_integer():
                                        yticklabels[idx] = int(
                                            float(yticklabels[idx]))
                            except ValueError:
                                yticklabels[idx] = ""

                        # Use min/max labels
                        if colormap.legend.min_label != None:
                            yticklabels[0] = colormap.legend.min_label
                        if colormap.legend.max_label != None:
                            yticklabels[-1] = colormap.legend.max_label

                        # use int labels if all values are integers
#                         yticklabels = [int(float(label)) for label in yticklabels if float(label).is_integer()]
                        cb.ax.set_yticklabels(yticklabels)

                if colormap.units != None and colorbar_only == False:
                    fig.text(left + (0.08 / lc),
                             0.01,
                             colormap.units,
                             fontsize=10,
                             horizontalalignment='center',
                             color=label_color)

            if colormap.title != None and colorbar_only == False:
                title_left = left + (0.08 / lc)
                title_top = 0.935
                if colormap.style == "classification":
                    if lc == 1:
                        title_left = 0.5  #center if only one classification legend
                        title_top = 1 - t
                fs = 10
                if len(colormap.title) > 10:
                    fs = 9
                if len(colormap.title) > 14:
                    fs = 8
                if len(colormap.title) > 16:
                    title_words = colormap.title.split(" ")
                    half = len(title_words) // 2
                    if len(title_words) > 2: half += 1
                    title = ""
                    for word in title_words[0:half]:
                        title = title + word + " "
                    title = title + "\n"
                    for word in title_words[half:len(title_words)]:
                        title = title + word + " "
                    colormap.title = title
                fig.text(title_left,
                         title_top,
                         colormap.title,
                         fontsize=fs,
                         horizontalalignment='center',
                         weight='bold',
                         color=label_color)

    fig.savefig(output, transparent=True, format=output_format)

    # Add tooltips to SVG
    if output_format == 'svg' and has_values == True:

        ax = fig.get_axes()[0]  # only supports one axis
        entries = colormaps[0].legend.legend_entries

        for i, entry in enumerate(entries):
            if entry.tooltip:
                text = entry.tooltip
                if colormaps[0].units:
                    text = text + " " + colormaps[0].units
            else:
                text = entry.label
            if orientation == "horizontal":
                position = (float(i) / float(len(entries)), 1)
            else:
                position = (1, float(i) / float(len(entries)))
            ax.annotate(
                text,
                xy=position,
                xytext=position,
                textcoords='offset points',
                color='black',
                ha='center',
                fontsize=10,
                gid='tooltip',
                bbox=dict(boxstyle='round,pad=.3',
                          fc=(1, 1, .9, 1),
                          ec=(.1, .1, .1),
                          lw=1,
                          zorder=1),
            )

        # Set id for the annotations
        for i, t in enumerate(ax.texts):
            t.set_gid('tooltip_%d' % i)

        # Save the figure
        f = StringIO()
        fig.savefig(f, transparent=True, format="svg")

        # Create XML tree from the SVG file
        tree, xmlid = ET.XMLID(f.getvalue())
        tree.set('onload', 'init(evt)')

        # Hide the tooltips
        for i, t in enumerate(ax.texts):
            try:
                el = xmlid['tooltip_%d' % i]
                el.set('visibility', 'hidden')
            except KeyError:
                pass

        # Add mouseover events to color bar
        try:
            el = xmlid['QuadMesh_1']
            elements = list(el)
            elements.pop(0)  # remove definitions
        except KeyError:
            print "Warning: Unable to add tooltips"
            elements = []
        for i, t in enumerate(elements):
            el = elements[i]
            el.set('onmouseover', "ShowTooltip(" + str(i) + ")")
            el.set('onmouseout', "HideTooltip(" + str(i) + ")")

        # This is the script defining the ShowTooltip and HideTooltip functions.
        script = """
            <script type="text/ecmascript">
            <![CDATA[
            
            function init(evt) {
                if ( window.svgDocument == null ) {
                    svgDocument = evt.target.ownerDocument;
                    }
                }
                
            function ShowTooltip(idx) {
                var tip = svgDocument.getElementById('tooltip_'+idx);
                tip.setAttribute('visibility',"visible")
                }
                
            function HideTooltip(idx) {
                var tip = svgDocument.getElementById('tooltip_'+idx);
                tip.setAttribute('visibility',"hidden")
                }
                
            ]]>
            </script>
            """

        # Insert the script at the top of the file and save it.
        tree.insert(0, ET.XML(script))
        ET.ElementTree(tree).write(output)
        print "SVG tooltips added"

    print(output + " generated successfully")
Example #3
    def fo_xsl(self):
        sheet = ET.Element(xsl.stylesheet, version="1.0", nsmap=nsmap(xsl))
        ET.SubElement(sheet, xsl['import'],
                      href=os.path.join(XSLDIR, "fo", "docbook.xsl"))
        return ET.ElementTree(sheet)
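
Example #3 builds an XSL-FO customization layer that simply imports DocBook's
fo/docbook.xsl. The xsl element factory, nsmap() and XSLDIR are helpers from
the surrounding project, so the following is only a rough, self-contained
sketch of the same pattern with plain lxml (the stylesheet directory is a
placeholder):

import os
from lxml import etree as ET

XSL_NS = "http://www.w3.org/1999/XSL/Transform"
XSLDIR = "/usr/share/xml/docbook/stylesheet/docbook-xsl"  # assumed location

def fo_xsl():
    # Build <xsl:stylesheet> with a declared xsl prefix and one <xsl:import>.
    sheet = ET.Element("{%s}stylesheet" % XSL_NS, version="1.0",
                       nsmap={"xsl": XSL_NS})
    ET.SubElement(sheet, "{%s}import" % XSL_NS,
                  href=os.path.join(XSLDIR, "fo", "docbook.xsl"))
    return ET.ElementTree(sheet)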
Example #4
def main(summary_path, repos_xml_path):
    GITHUB_USERNAME = os.environ['GITHUB_USERNAME']
    GITHUB_TOKEN_FILE = os.environ['GITHUB_TOKEN_FILE']
    GITHUB_ORG = os.environ['GITHUB_ORG']

    with open(summary_path) as f:
        repos = json.load(f)

    with open(GITHUB_TOKEN_FILE) as f:
        token = f.read().strip()

    g = github.Github(GITHUB_USERNAME, token, per_page=50)
    gu = g.get_organization(GITHUB_ORG)
    gh_repos = set()

    # check repo states
    for data in repos.values():
        # 1. we don't add repos with broken metadata but we also don't
        # remove existing ones -- we hope maintainers will fix them,
        # or overlays team will remove them
        #
        # 2. remove repos with unsupported VCS -- this means that
        # upstream has switched, and there's no point in keeping
        # an outdated mirror
        #
        # 3. we can't update repos which are broken to the point of
        # being implicitly removed

        data['x-can-create'] = data['x-state'] in ('GOOD', 'BAD_CACHE')
        data['x-can-update'] = data['x-can-create']
        data['x-should-remove'] = data['x-state'] in ('REMOVED', 'UNSUPPORTED')

    # 0. scan all repos
    to_remove = []
    to_update = []
    for i, r in enumerate(gu.get_repos()):
        sys.stderr.write('\r@ scanning [%-3d/%-3d]' % (i+1, gu.public_repos))
        if r.name not in repos or repos[r.name]['x-should-remove']:
            to_remove.append(r)
        else:
            gh_repos.add(r.name)
            if repos[r.name]['x-can-update']:
                to_update.append(r)
            repos[r.name]['x-mirror-sources'] = gh_sources(r)
    sys.stderr.write('\n')

    # 1. delete stale repos
    for r in to_remove:
        sys.stderr.write('* removing %s\n' % r.name)
        r.delete()

    # 2. now create new repos :)
    for r, data in sorted(repos.items()):
        if r not in gh_repos and data['x-can-create']:
            sys.stderr.write('* adding %s\n' % r)
            gr = gu.create_repo(r,
                    description = ' '.join(data.get('description', {}).get('en', '').split()) or github.GithubObject.NotSet,
                    homepage = data.get('homepage') or github.GithubObject.NotSet,
                    has_issues = False,
                    has_wiki = False)
            repos[r]['x-mirror-sources'] = gh_sources(gr)
            to_update.append(gr)

    # 3. write a new repositories.xml for them
    root = et.Element('repositories')
    root.set('version', '1.0')

    for r, data in sorted(repos.items()):
        if 'x-mirror-sources' not in data:
            continue

        rel = et.Element('repo')
        for attr, val in sorted(data.items(), key=dtd_sort_key):
            if attr.startswith('x-'):
                continue
            elif attr == 'source': # replace
                for t, url in data['x-mirror-sources']:
                    subel = et.Element('source')
                    subel.set('type', t)
                    subel.text = url
                    rel.append(subel)
            elif attr in ('quality', 'status'): # attributes
                rel.set(attr, val)
            elif attr in ('name', 'homepage'): # single-value
                subel = et.Element(attr)
                subel.text = val
                rel.append(subel)
            elif attr in ('description', 'longdescription'): # lang-dict
                for l, v in val.items():
                    subel = et.Element(attr)
                    subel.set('lang', l)
                    subel.text = v
                    rel.append(subel)
            elif attr in ('owner', 'feed'): # lists
                for v in val:
                    subel = et.Element(attr)
                    if attr == 'owner':
                        for k, subval in v.items():
                            if k == 'type':
                                subel.set(k, subval)
                            else:
                                subsubel = et.Element(k)
                                subsubel.text = subval
                                subel.append(subsubel)
                    else:
                        subel.text = v
                    rel.append(subel)

        root.append(rel)

    xml = et.ElementTree(root)
    with open(repos_xml_path, 'wb') as f:
        f.write(b'<?xml version="1.0" encoding="UTF-8"?>\n')
        f.write(b'<!DOCTYPE repositories SYSTEM "http://www.gentoo.org/dtd/repositories.dtd">\n')
        xml.write(f, encoding='utf-8', xml_declaration=False)

    print('DELETED_REPOS = %s' % ' '.join(r.name for r in to_remove))
    print('REPOS = %s' % ' '.join(r.name for r in to_update))
Example #5
def instrument_host(context_dict):

    # Dictionary Extractions
    instrument_host_string_list = context_dict['instrument_host_string_list']
    lidvid_reference = context_dict['lidvid_reference']
    Mission = context_dict['Mission']

    # Declarations
    count_inst = 0
    count_targs = 0
    instrument_host_list = []
    #instrument_string_list = []
    #target_string_list = []
    print '\n\n crawl.instrument_host Debug -------------'
    print '--Debug instrument_host_string_list ---------'
    for instrument_host in instrument_host_string_list:
        print 'Instrument Host: {}'.format(instrument_host)

        # Clean data and make URL
        instrument_host = instrument_host.upper()
        instrument_host = replace_all(instrument_host, '::', '_')
        instrument_host_url = 'https://starbase.jpl.nasa.gov/pds4/context-pds4/instrument_host/Product/PDS4_host_{0}.xml'.format(
            instrument_host)

        # Starbase instrument host tree
        instrument_host_tree = etree.ElementTree(
            file=urllib2.urlopen(instrument_host_url))
        instrument_host_root = instrument_host_tree.getroot()

        # Create InstrumentHost model
        h = InstrumentHostForm().save(commit=False)
        h.mission = Mission
        h.raw_data = instrument_host
        h.lid = instrument_host_root[0][0].text
        t = instrument_host_root[2][0].text
        h.title = t.title()  # Starbase error-Title needs to be in title case.
        h.type_of = instrument_host_root[2][1].text
        h.save()

        # Find Associated Instruments and Targets listed in instrument host tree
        for element in instrument_host_tree.findall(lidvid_reference):
            if element.text[21:31] == 'instrument':
                count_inst += 1

                # Clean data and make URL
                instrument = element.text[32:-5]
                instrument_cleaned = instrument.upper()
                instrument_cleaned = replace_all(instrument_cleaned, '.', '__')
                instrument_url = 'https://starbase.jpl.nasa.gov/pds4/context-pds4/instrument/Product/PDS4_inst_{0}.xml'.format(
                    instrument_cleaned)
                print '{0}: {1}'.format(count_inst, instrument)

                # Starbase instrument tree
                instrument_tree = etree.ElementTree(
                    file=urllib2.urlopen(instrument_url))
                instrument_root = instrument_tree.getroot()

                # Create Instrument model
                i = InstrumentForm().save(commit=False)
                i.instrument_host = h
                i.raw_data = instrument
                i.lid = instrument_root[0][0].text
                i.title = instrument_root[2][0].text
                i.type_of = instrument_root[2][1].text
                i.save()

            elif element.text[21:27] == 'target':
                count_targs += 1

                # Clean data and make URL
                target = element.text[28:]
                begin_index = target.index('.') + 1
                target_cleaned = target[begin_index:]
                target_cleaned = target_cleaned.upper()
                target_cleaned = replace_all(target_cleaned, '::', '_')
                target_url = 'https://starbase.jpl.nasa.gov/pds4/1700/PDS4_context_bundle_20161220/target/Product/PDS4_target_{}.xml'.format(
                    target_cleaned)
                print '{0}: {1}'.format(count_targs, target)

                # Starbase target tree
                target_tree = etree.ElementTree(
                    file=urllib2.urlopen(target_url))
                target_root = target_tree.getroot()

                # Create Target model
                t = TargetForm().save(commit=False)
                t.instrument_host = h
                t.raw_data = target
                t.lid = target_root[0][0].text
                t.title = target_root[0][2].text
                t.type_of = target_root[1][1].text
                t.save()

        print '\n\nThere are {0} instruments and {1} targets in {2}.'.format(
            count_inst, count_targs, h.title)

        # Update List
        instrument_host_list.append(h)
    context_dict['instrument_host_list'] = instrument_host_list
    return context_dict
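
replace_all() is an external helper; given how it is called above
(replace_all(s, '::', '_')), it presumably just substitutes every occurrence of
a substring, i.e. roughly:

def replace_all(text, old, new):
    # Hypothetical stand-in for the helper used above.
    return text.replace(old, new)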
Example #6
    def save(self, path):
        doc = etree.ElementTree(self.to_xml())
        # Use a context manager so the file handle is closed after writing.
        with open(path, 'wb') as f:
            doc.write(f,
                      xml_declaration=True,
                      encoding='utf-8',
                      pretty_print=True)
		f = ET.Element("annotations")
		ET.SubElement(f,'filename').text = file.split(".csv")[0] + ".png"
		thesize = ET.SubElement(f,'size')
		ET.SubElement(thesize,'width').text = str(width)
		ET.SubElement(thesize,'height').text = str(height)
		ET.SubElement(thesize,'depth').text = "3"
		for i in range(len(xmin)):

			obj = ET.SubElement(f,'object')
			ET.SubElement(obj,'name').text = "Poma"
			ET.SubElement(obj,'difficult').text = "0"
			bbox = ET.SubElement(obj,'bbox')
			xmin_xml = ET.SubElement(bbox,'xmin')		
			ymin_xml = ET.SubElement(bbox,'ymin')
			xmax_xml = ET.SubElement(bbox,'xmax')
			ymax_xml = ET.SubElement(bbox,'ymax')		
			xmin_xml.text = str(int(xmin[i]))
			xmax_xml.text = str(int(xmax[i]))
			ymin_xml.text = str(int(ymin[i]))
			ymax_xml.text = str(int(ymax[i]))
		filename = os.path.join(save_dir,file.split(".csv")[0] + ".xml")
		tree = ET.ElementTree(f)
		
		tree.write(filename, pretty_print = True)
Example #8
    # Excerpt from a script that queries object geometry from Inkscape and
    # splices one SVG document into another; the leading loop over query names
    # and the tail of the listing were cut off.
    f = subprocess.Popen(
        ["inkscape",
         "--query-%s" % query,
         "--query-id=%s" % id,
         "%s" % file],
        stdout=subprocess.PIPE)
    q[query] = float(f.stdout.read())

# add some margins
q['width'] = q['width'] * 1.3
q['height'] = q['height'] * 1.3

#print q

root = tdoc.getroot()
tout = etree.ElementTree(copy_element(root))
newroot = tout.getroot()
for ch in root.getchildren():
    chcopy = ch.__deepcopy__(-1)
    newroot.append(chcopy)
    if ch.tag == e_defs:
        for defs in fdoc.getroot().getchildren():
            for fi in defs.getchildren():
                ficopy = fi.__deepcopy__(-1)
                newroot.getchildren()[-1].append(ficopy)
    if ch.tag == e_g:
        newroot.getchildren()[-1].attrib["id"] = "original"
        for menu in menus:
            text = etree.Element(e_text, nsmap=NSS)
            text.attrib['x'] = str(q['x'] - q['width'] * 0.2)
            text.attrib['y'] = str(q['y'] + q['height'] *
Example #9
def write_empty_file(filepath: str, root_tag: str) -> None:
    """Write an empty tasks file."""

    root = etree.Element(root_tag)
    save_file(filepath, etree.ElementTree(root))
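
write_empty_file() delegates serialization to a save_file() helper that is not
shown in this listing; assuming lxml, a plausible minimal version is just a
thin wrapper around ElementTree.write():

def save_file(filepath: str, tree: etree._ElementTree) -> None:
    # Hypothetical sketch of the helper assumed above, not the project's code.
    tree.write(filepath, xml_declaration=True, encoding="utf-8",
               pretty_print=True)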
Example #10
    def createContents(self, directory):
        """
        Create the contents.xml file.

        Args:
            directory (str): Directory path to validate existance of files.

        Returns:
            Element Tree
        """
        if self.paths is None:
            self._paths = {}
        contents = etree.Element('contents')

        # Look for and add basemaps
        if not os.path.exists(os.path.join(directory, 'FFM.geojson')):
            raise FileNotFoundError('Missing FFM geojson file.')
        file_attrib, format_attrib = self._getAttributes(
            'modelmaps', "Finite Fault Model Maps ", 'FFM.geojson',
            "text/plain")
        maps = etree.SubElement(contents, 'file', file_attrib)
        grid_caption = etree.SubElement(maps, 'caption')
        caption_str = ("Map representation of the finite fault model ")
        grid_caption.text = etree.CDATA(caption_str)
        etree.SubElement(maps, 'format', format_attrib)

        basemap = self._checkDownload(directory, "*base*.png")
        kmls = self._checkDownload(directory, "*.kml")
        kmzs = self._checkDownload(directory, "*.kmz")
        if len(kmls) > 0:
            self._paths['kmls'] = (kmls[0], "finite_fault.kml")
            file_attrib, format_attrib = self._getAttributes(
                '', '', "finite_fault.kml",
                "application/vnd.google-earth.kml+xml")
            etree.SubElement(maps, 'format', format_attrib)
        if len(kmzs) > 0:
            self._paths['kmzs'] = (kmzs[0], "finite_fault.kmz")
            file_attrib, format_attrib = self._getAttributes(
                '', '', "finite_fault.kmz", "application/vnd.google-earth.kmz")
            etree.SubElement(maps, 'format', format_attrib)
        if len(basemap) > 0:
            self._paths['basemap'] = (basemap[0], "basemap.png")
            file_attrib, format_attrib = self._getAttributes(
                'basemap', "Base Map ", "basemap.png", "image/png")
            etree.SubElement(maps, 'format', format_attrib)

        # Look for body and surface wave plots
        plots = self._checkDownload(directory, "waveplots.zip")
        if len(plots) > 0:
            self._paths['waveplots'] = (plots[0], "waveplots.zip")
            file_attrib, format_attrib = self._getAttributes(
                'waveplots', "Wave Plots ", "waveplots.zip", "application/zip")
            file_tree = etree.SubElement(contents, 'file', file_attrib)
            caption = etree.SubElement(file_tree, 'caption')
            caption.text = etree.CDATA("Body and surface wave plots ")
            etree.SubElement(file_tree, 'format', format_attrib)
        else:
            zipped = self.zip_waveplots(directory)
            if zipped != '':
                self._paths['waveplots'] = (zipped, "waveplots.zip")
                file_attrib, format_attrib = self._getAttributes(
                    'waveplots', "Wave Plots ", "waveplots.zip",
                    "application/zip")
                file_tree = etree.SubElement(contents, 'file', file_attrib)
                caption = etree.SubElement(file_tree, 'caption')
                caption.text = etree.CDATA("Body and surface wave plots ")
                etree.SubElement(file_tree, 'format', format_attrib)

        # CMT solution
        cmt = self._checkDownload(directory, "*CMTSOLUTION*")
        if len(cmt) > 0:
            self._paths['cmtsolution'] = (cmt[0], "CMTSOLUTION")
            file_attrib, format_attrib = self._getAttributes(
                'cmtsolution', "CMT Solution ", "CMTSOLUTION", "text/plain")
            file_tree = etree.SubElement(contents, 'file', file_attrib)
            caption = etree.SubElement(file_tree, 'caption')
            caption.text = etree.CDATA(
                "Full CMT solution for every point in finite fault "
                "region ")
            etree.SubElement(file_tree, 'format', format_attrib)

        # inversion files
        param = self._checkDownload(directory, "*.param")
        fsp = self._checkDownload(directory, "*.fsp")
        if len(fsp) > 0 or len(param) > 0:
            if len(param) > 0:
                self._paths['inpfile1'] = (param[0], "basic_inversion.param")
                file_attrib, format_attrib = self._getAttributes(
                    'inpfiles', "Inversion Parameters ",
                    "basic_inversion.param", "text/plain")
                file_tree = etree.SubElement(contents, 'file', file_attrib)
                caption = etree.SubElement(file_tree, 'caption')
                caption.text = etree.CDATA(
                    "Files of inversion parameters for the finite fault ")
                etree.SubElement(file_tree, 'format', format_attrib)
                # Only register the .fsp entry when an .fsp file is present.
                if len(fsp) > 0:
                    self._paths['inpfile2'] = (fsp[0],
                                               "complete_inversion.fsp")
                    file_attrib, format_attrib = self._getAttributes(
                        'inpfiles', "Inversion Parameters ",
                        "complete_inversion.fsp", "text/plain")
                    etree.SubElement(file_tree, 'format', format_attrib)
            else:
                self._paths['inpfile2'] = (fsp[0], "complete_inversion.fsp")
                file_attrib, format_attrib = self._getAttributes(
                    'inpfiles', "Inversion Parameters ",
                    "complete_inversion.fsp", "text/plain")
                file_tree = etree.SubElement(contents, 'file', file_attrib)
                caption = etree.SubElement(file_tree, 'caption')
                caption.text = etree.CDATA(
                    "Files of inversion parameters for the finite fault ")
                etree.SubElement(file_tree, 'format', format_attrib)

        # Coulomb inp
        coul = self._checkDownload(directory, "*coulomb.inp")
        if len(coul) > 0:
            self._paths['coulomb'] = (coul[0], "coulomb.inp")
            file_attrib, format_attrib = self._getAttributes(
                'coulomb', "Coulomb Input File ", "coulomb.inp", "text/plain")
            file_tree = etree.SubElement(contents, 'file', file_attrib)
            caption = etree.SubElement(file_tree, 'caption')
            caption.text = etree.CDATA(
                "Format necessary for compatibility with Coulomb3 "
                "(http://earthquake.usgs.gov/research/software/coulomb/) ")
            etree.SubElement(file_tree, 'format', format_attrib)

        # Moment rate
        mr_plot = self._checkDownload(directory, "*mr*.png")
        mr_ascii = self._checkDownload(directory, "*.mr")
        if len(mr_ascii) > 0:
            self._paths['momentrate1'] = (mr_ascii[0], "moment_rate.mr")
            file_attrib, format_attrib = self._getAttributes(
                'momentrate', "Moment Rate Function Files ", "moment_rate.mr",
                "text/plain")
            file_tree = etree.SubElement(contents, 'file', file_attrib)
            caption = etree.SubElement(file_tree, 'caption')
            caption.text = etree.CDATA(
                "Files of time vs. moment rate for source time "
                "functions ")
            etree.SubElement(file_tree, 'format', format_attrib)
            # Only register the plot when a matching *mr*.png file is present.
            if len(mr_plot) > 0:
                self._paths['momentrate2'] = (mr_plot[0], "moment_rate.png")
                file_attrib, format_attrib = self._getAttributes(
                    'momentrate', "Moment Rate Function Files ",
                    "moment_rate.png", "image/png")
                etree.SubElement(file_tree, 'format', format_attrib)
        elif len(mr_plot) > 0:
            self._paths['momentrate2'] = (mr_plot[0], "moment_rate.png")
            file_attrib, format_attrib = self._getAttributes(
                'momentrate', "Moment Rate Function Files ", "moment_rate.png",
                "image/png")
            file_tree = etree.SubElement(contents, 'file', file_attrib)
            caption = etree.SubElement(file_tree, 'caption')
            caption.text = etree.CDATA(
                "Files of time vs. moment rate for source time "
                "functions ")
            etree.SubElement(file_tree, 'format', format_attrib)

        # surface displacement
        surf = self._checkDownload(directory, "*.disp")
        if len(surf) > 0:
            self._paths['deformation'] = (surf[0], "surface_deformation.disp")
            file_attrib, format_attrib = self._getAttributes(
                'surface', "Surface Deformation File ",
                "surface_deformation.disp", "text/plain")
            file_tree = etree.SubElement(contents, 'file', file_attrib)
            caption = etree.SubElement(file_tree, 'caption')
            caption.text = etree.CDATA(
                "Surface displacement resulting from finite fault, "
                "calculated using Okada-style deformation codes ")
            etree.SubElement(file_tree, 'format', format_attrib)

        # shakemap polygon
        poly = self._checkDownload(directory, "shakemap_polygon.txt")
        if len(poly) > 0:
            self._paths['shakemap_polygon'] = (poly[0], "shakemap_polygon.txt")
            file_attrib, format_attrib = self._getAttributes(
                'shakemap_polygon', "ShakeMap Rupture Polygon File ",
                "shakemap_polygon.txt", "text/plain")
            file_tree = etree.SubElement(contents, 'file', file_attrib)
            caption = etree.SubElement(file_tree, 'caption')
            caption.text = etree.CDATA(
                "Geometry of finite fault slipped area ")
            etree.SubElement(file_tree, 'format', format_attrib)

        tree = etree.ElementTree(contents)
        self._contents = tree
        return tree
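
createContents() leans heavily on a _checkDownload() helper that is not shown;
from the shell-style patterns passed to it, it presumably just globs the
directory and returns the (possibly empty) list of matches, roughly:

    def _checkDownload(self, directory, pattern):
        # Hypothetical sketch of the helper used above.
        import glob
        return glob.glob(os.path.join(directory, pattern))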
Example #11
def xmlWrite(xmltree, xmlFileName):
    tree = etree.ElementTree(xmltree)
    tree.write(xmlFileName, encoding="UTF-8")
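
A quick usage sketch (the element names here are only illustrative):

root = etree.Element("report")
etree.SubElement(root, "item").text = "42"
xmlWrite(root, "report.xml")  # writes report.xml encoded as UTF-8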
Example #12
            sys.exit(1)

    # Normalize
    for paper in root_being_added.findall('.//paper'):
        for oldnode in paper:
            process(oldnode, informat='xml')

    # Ingest each volume.
    # First, find the XML file.
    collection_file = os.path.join(os.path.dirname(sys.argv[0]), '..', 'data',
                                   'xml', f'{collection_id}.xml')

    if os.path.exists(collection_file):
        existing_tree = etree.parse(collection_file)
    else:
        existing_tree = etree.ElementTree(
            make_simple_element('collection', attrib={'id': collection_id}))

    # Insert each volume
    for i, new_volume in enumerate(root_being_added.findall('volume')):
        new_volume_id = int(new_volume.attrib['id'])
        existing_volume = existing_tree.getroot().find(
            f"./volume[@id='{new_volume_id}']")
        if existing_volume is None:
            new_volume.attrib['ingest-date'] = args.ingest_date

            # Find the insertion point among the other volumes
            insertion_point = 0
            for i, volume in enumerate(existing_tree.getroot()):
                if new_volume_id < int(volume.attrib['id']):
                    break
                insertion_point = i + 1
Example #13
    xmlname = excelpath.split("/")[-1].split(".")[0] + "_New.xml"
    sf = open(savepath + xmlname, "w")
    sf.write(pretty_xml)
    sf.close()


if __name__ == "__main__":
    try:
        path = sys.argv[1]
        if len(sys.argv) == 3:
            xmlStr = open(path).read()
            xmlStr = xmlStr.replace("\t", " ", 1)
            a = xmlStr.find('xmlns="')
            b = xmlStr.find('"', a + len('xmlns="'))
            namespace = xmlStr[a:b + 1].replace("xmlns=", "").replace('"', '')
            xmlStr = xmlStr.replace(" " + xmlStr[a:b + 1], "")
            your_tree = etree.fromstring(xmlStr)
            tree = etree.ElementTree(your_tree)
            for e in your_tree.iter():
                if "vdu-depends-on" in str(tree.getpath(e)):
                    print(tree.getpath(e))
        else:
            if path.split(".")[-1] == "xml":
                xmlToExcel(path)
            elif path.split(".")[-1] == "xlsx":
                excelToXml(path)
            else:
                print("ERROR: Unsupported file")
    except Exception:
        print("\n\nUSAGE: python xl_xml.py /path/to/excel_or_xml_file\n\n"
              "Make sure the file is present in the path specified\n\n\n")
Example #14
def create_role_test_data(orgs, branches):
    from lxml import etree
    import StringIO

    db = current.db
    s3db = current.s3db
    auth = current.auth
    s3mgr = current.manager
    request = current.request

    #----------------------------------------------------------------------
    # Initialize Data & Users
    auth.override = True
    s3db.load_all_models()

    test_dir = os.path.join(current.request.folder, "modules", "tests",
                            "roles", current.deployment_settings.base.template)

    org_file = open(os.path.join(test_dir, "org_organisation.xml"), "rb")
    org_template_string = org_file.read()
    data_file = open(os.path.join(test_dir, "data.xml"), "rb")
    data_template_string = data_file.read()
    org_resource = s3db.resource("org_organisation")
    org_branch_file = open(
        os.path.join(test_dir, "org_organisation_branch.xml"), "rb")
    org_branch_template_string = org_branch_file.read()
    org_branch_resource = s3db.resource("org_organisation_branch")

    user_file = open(os.path.join(test_dir, "users_template.csv"), "rb")
    user_template_string = user_file.read()

    # Ensure that the users are imported correctly
    s3db.configure("auth_user",
                   onaccept=lambda form: auth.s3_link_user(form.vars))
    s3db.add_component("auth_membership", auth_user="******")
    s3mgr.import_prep = auth.s3_membership_import_prep

    user_resource = s3db.resource("auth_user")
    hr_resource = s3db.resource("pr_person")

    user_file = StringIO.StringIO()
    user_stylesheet = os.path.join(current.request.folder, "static", "formats",
                                   "s3csv", "auth", "user.xsl")
    hr_stylesheet = os.path.join(current.request.folder, "static", "formats",
                                 "s3csv", "hrm", "person.xsl")

    for org in orgs:
        for branch in branches:

            # Get the "Other" Orgs
            copy_orgs = list(orgs)
            copy_orgs.remove(org)
            orgx1 = copy_orgs[0]
            orgx2 = copy_orgs[1]

            if branch:
                orgx = "%s-%s" % (org, branch)
            else:
                orgx = org
            #print orgx

            # Create Org & get id
            org_string = org_template_string % dict(org=orgx)
            xmltree = etree.ElementTree(etree.fromstring(org_string))
            success = org_resource.import_xml(xmltree)
            otable = s3db.org_organisation
            org_id = db(otable.name == orgx).select(otable.id).first().id
            auth.user = Storage(organisation_id=org_id)

            # Create Test Data for each Organisation
            data_string = data_template_string % dict(
                org=orgx,
                orgx1=orgx1,
                orgx2=orgx2,
            )
            xmltree = etree.ElementTree(etree.fromstring(data_string))
            success = org_resource.import_xml(xmltree)

            # Create Users for each Organisation
            user_string = user_template_string % dict(org=orgx,
                                                      org_lower=orgx.lower())
            user_file = StringIO.StringIO(user_string)
            success = user_resource.import_xml(user_file,
                                               format="csv",
                                               stylesheet=user_stylesheet)
            user_file = StringIO.StringIO(user_string)
            hr_resource.import_xml(user_file,
                                   format="csv",
                                   stylesheet=hr_stylesheet)

            if branch:
                # Link Branch to Org
                org_branch_string = org_branch_template_string % dict(
                    org=org, branch=branch)
                #print org_branch_string
                xmltree = etree.ElementTree(
                    etree.fromstring(org_branch_string))
                success = org_branch_resource.import_xml(xmltree)
                #print success

    # Import Test Users
    test_user_file = open(os.path.join(test_dir, "test_users.csv"), "rb")
    success = user_resource.import_xml(test_user_file,
                                       format="csv",
                                       stylesheet=user_stylesheet)
    test_user_file = open(os.path.join(test_dir, "test_users.csv"), "rb")
    hr_resource.import_xml(test_user_file,
                           format="csv",
                           stylesheet=hr_stylesheet)

    db.commit()
    auth.override = False
Example #15
        style.append(elem)

color = "black"
colorBackground = "#010101"
prefix = f"ColorPatch.{color.capitalize()}"
style = Element("style", {"name": prefix+"_Background"})
styles_root.append(style)
elem = Element("item", {"name": "android:colorBackground"})
elem.text = colorBackground
style.append(elem)
elem = Element("item", {"name": "colorSurface"})
elem.text = colorBackground
style.append(elem)

objectify.deannotate(styles_root, cleanup_namespaces=True)
etree.ElementTree(styles_root)\
    .write(path_dest_styles_xml, encoding='utf-8', pretty_print=True, xml_declaration=True)


objectify.deannotate(colors_root, cleanup_namespaces=True)
etree.ElementTree(colors_root)\
    .write(path_dest_colors_xml, encoding='utf-8', pretty_print=True, xml_declaration=True)


# kt

primaryStyles = []
secondaryStyles = []
backgroundStyles = []

for elem in styles_root.getchildren():
    name = elem.get("name")
Example #16
def _write_annotation(annotation_path, annotation):
    root = etree.Element("annotation")
    _recursive_create_dict_to_xml(annotation, root)
    # Write the assembled tree to file:
    tree = etree.ElementTree(root)
    tree.write(annotation_path, pretty_print=True, encoding='utf-8')
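
_recursive_create_dict_to_xml() is not included in the listing; assuming the
annotation is a nested dict whose leaves become element text, a rough sketch of
such a helper could look like:

def _recursive_create_dict_to_xml(data, parent):
    # Hypothetical sketch: each key becomes a child element, nested dicts
    # recurse, and any other value is stored as the element's text.
    for key, value in data.items():
        child = etree.SubElement(parent, key)
        if isinstance(value, dict):
            _recursive_create_dict_to_xml(value, child)
        else:
            child.text = str(value)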
Example #17
def render_board(
        cell_size=2.0,
        cell_border=.5,
        outer_border=.75,
        board_rows=9,
        board_columns=12,
        stroke=.05,
        corner_radius_ratio=.15,
        text_scale=.4,
        text_pad=.2,  #height of text relative to cell
        font="04b",
        separator_stroke=.1):
    board_height = board_rows * cell_size \
                    + (board_rows - 1) * cell_border \
                    + outer_border * 2
    board_width = board_columns * cell_size \
                    + (board_columns - 1) * cell_border \
                    + outer_border * 2

    corner_radius = cell_size * corner_radius_ratio

    document_height = board_height * 3 + 2 * outer_border

    style = "fill: none; stroke: black; stroke-width: {stroke}".format(
        **locals())

    def piece_outlines(shape_style=style):
        pieces = elem("g")
        for row in range(board_rows):
            for col in range(board_columns):
                pieces.append(
                    elem("rect",
                         x=col * cell_size + col * cell_border,
                         y=row * cell_size + row * cell_border,
                         height=cell_size,
                         width=cell_size,
                         rx=corner_radius,
                         style=shape_style))
        return pieces

    def piece_labels():
        labels = elem("g")
        text_height = text_scale * cell_size
        for row in range(board_rows):
            for col in range(board_columns):
                letter_label = elem(
                    "text",
                    x=col * cell_size + col * cell_border + text_pad,
                    y=row * cell_size + row * cell_border + text_height +
                    .5 * text_pad,
                    font_size=text_height,
                    font_family="{font}".format(font=font),
                    #text_anchor="left",
                    fill="red",
                    dominant_baseline="top")
                letter_label.text = str(chr(ord('A') + row))
                number_label = elem(
                    "text",
                    x=col * cell_size + col * cell_border + cell_size -
                    text_pad,
                    y=row * cell_size + row * cell_border + cell_size -
                    text_pad,
                    font_size=text_height,
                    font_family="{font}".format(font=font),
                    text_anchor="end",
                    fill="red",
                    #dominant_baseline="bottom"
                )

                separator = elem(
                    "line",
                    style="stroke: red; stroke-width: {separator_stroke}".
                    format(separator_stroke=separator_stroke),
                    x1=col * cell_size + col * cell_border + .75 * cell_size,
                    y1=row * cell_size + row * cell_border + .45 * cell_size,
                    x2=col * cell_size + col * cell_border + .25 * cell_size,
                    y2=row * cell_size + row * cell_border + .54 * cell_size)
                dot = elem(
                    "circle",
                    cx=col * cell_size + col * cell_border + .5 * cell_size,
                    cy=row * cell_size + row * cell_border + .5 * cell_size,
                    r=.05 * cell_size,
                    style="fill: red; stroke: none")
                number_label.text = str(col + 1)
                labels.append(letter_label)
                labels.append(number_label)
                #labels.append(separator)
                #labels.append(dot)
        return labels

    def board_outline():
        return elem("rect",
                    width=board_width,
                    height=board_height,
                    rx=corner_radius,
                    style=style)

    def root_node():
        return elem(
            "svg",
            height="{0}cm".format(board_height),
            width="{0}cm".format(board_width),
            viewBox="0 0 {board_width} {board_height}".format(**locals()),
            xmlns="http://www.w3.org/2000/svg",
            version="1.1",
            baseProfile="full")

    lower_board = et.Element("g")
    lower_board.append(board_outline())
    lower_labels = elem("g",
                        transform="translate({x} {y})".format(x=outer_border,
                                                              y=outer_border))
    lower_labels.append(piece_labels())
    #if you want engraved outlines of the piece shapes
    #lower_labels.append(piece_outlines("fill: none; stroke: red; stroke-width: {stroke}".format(**locals())))
    lower_board.append(lower_labels)

    upper_board = elem("g")
    upper_board.append(board_outline())
    upper_board_pieces = elem("g",
                              transform="translate({x} {y})".format(
                                  x=outer_border, y=outer_border))
    upper_board_pieces.append(piece_outlines())
    #Don't actually want these on the upper board cutout because
    #the holes are going to get discarded
    #upper_board_pieces.append(piece_labels())
    upper_board.append(upper_board_pieces)

    pieces = elem("g",
                  transform="translate({x} {y})".format(x=outer_border,
                                                        y=outer_border))
    pieces.append(piece_outlines())
    pieces.append(piece_labels())

    for node, filename in [(upper_board, 'upper.svg'),
                           (lower_board, 'lower.svg'), (pieces, 'pieces.svg')]:
        root = root_node()
        root.append(node)
        document = et.ElementTree(root)
        with open(filename, 'wb') as f:
            document.write(f, encoding='utf-8', xml_declaration=True)
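
The script above relies on a small elem() helper that is not shown in this excerpt. A minimal sketch of what such a helper might look like, assuming it wraps Element creation, maps snake_case keyword names to hyphenated SVG attribute names (font_size -> font-size), and stringifies values:

# Hypothetical helper, not from the original source: wraps Element creation,
# renames snake_case kwargs to hyphenated SVG attributes, and stringifies values.
import xml.etree.ElementTree as et  # assumed binding; the script may use another


def elem(tag, **attributes):
    node = et.Element(tag)
    for name, value in attributes.items():
        node.set(name.replace("_", "-"), str(value))
    return node
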
Example #18
0
    def __init__(self):
        self.conf = conf.Conf()

        self.platform = platform.Platform()
        self.currentdir = os.path.dirname(os.path.abspath(__file__))
        self.currentLanguage = self.conf.get('GENERAL', 'lang')
        self.language = language.Language(self.currentdir, 'openplotter-avnav',
                                          self.currentLanguage)

        if os.path.dirname(os.path.abspath(__file__))[0:4] == '/usr':
            v = version
        else:
            v = version.version

        wx.Frame.__init__(self, None, title='Avnav' + ' ' + v, size=(800, 444))
        self.SetFont(
            wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL,
                    wx.FONTWEIGHT_NORMAL))
        icon = wx.Icon(self.currentdir + "/data/sailboat24r.png",
                       wx.BITMAP_TYPE_PNG)
        self.SetIcon(icon)
        self.CreateStatusBar()
        font_statusBar = self.GetStatusBar().GetFont()
        font_statusBar.SetWeight(wx.BOLD)
        self.GetStatusBar().SetFont(font_statusBar)

        self.toolbar1 = wx.ToolBar(self, style=wx.TB_TEXT)
        toolHelp = self.toolbar1.AddTool(
            101, _('Help'), wx.Bitmap(self.currentdir + "/data/help.png"))
        self.Bind(wx.EVT_TOOL, self.OnToolHelp, toolHelp)
        if not self.platform.isInstalled('openplotter-doc'):
            self.toolbar1.EnableTool(101, False)
        toolSettings = self.toolbar1.AddTool(
            102, _('Settings'),
            wx.Bitmap(self.currentdir + "/data/settings.png"))
        self.Bind(wx.EVT_TOOL, self.OnToolSettings, toolSettings)
        self.toolbar1.AddSeparator()
        toolAvnav = self.toolbar1.AddTool(
            110, 'Avnav', wx.Bitmap(self.currentdir + "/data/sailboat24r.png"))
        self.Bind(wx.EVT_TOOL, self.OnToolAvnav, toolAvnav)
        toolAvnavSplit = self.toolbar1.AddTool(
            111, 'Avnav split',
            wx.Bitmap(self.currentdir + "/data/sailboath24rs.png"))
        self.Bind(wx.EVT_TOOL, self.OnToolAvnavSplit, toolAvnavSplit)
        self.toolbar1.AddSeparator()
        toolApply = self.toolbar1.AddTool(
            105, _('Apply Changes'),
            wx.Bitmap(self.currentdir + "/data/apply.png"))
        self.Bind(wx.EVT_TOOL, self.OnToolApply, toolApply)
        toolCancel = self.toolbar1.AddTool(
            106, _('Cancel Changes'),
            wx.Bitmap(self.currentdir + "/data/cancel.png"))
        self.Bind(wx.EVT_TOOL, self.OnToolCancel, toolCancel)

        self.notebook = wx.Notebook(self)
        self.notebook.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.onTabChange)
        self.apps = wx.Panel(self.notebook)
        self.settings = wx.Panel(self.notebook)
        self.systemd = wx.Panel(self.notebook)
        #self.output = wx.Panel(self.notebook)
        self.notebook.AddPage(self.settings, _('Settings'))
        self.notebook.AddPage(self.systemd, _('Processes'))
        #self.notebook.AddPage(self.output, '')
        self.il = wx.ImageList(24, 24)
        img0 = self.il.Add(
            wx.Bitmap(self.currentdir + "/data/settings2.png",
                      wx.BITMAP_TYPE_PNG))
        img1 = self.il.Add(
            wx.Bitmap(self.currentdir + "/data/process.png",
                      wx.BITMAP_TYPE_PNG))
        #img2 = self.il.Add(wx.Bitmap(self.currentdir+"/data/output.png", wx.BITMAP_TYPE_PNG))
        self.notebook.AssignImageList(self.il)
        self.notebook.SetPageImage(0, img0)
        self.notebook.SetPageImage(1, img1)
        #self.notebook.SetPageImage(1, img2)

        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(self.toolbar1, 0, wx.EXPAND)
        vbox.Add(self.notebook, 1, wx.EXPAND)
        self.SetSizer(vbox)

        self.appsDict = []

        app = {
            'name': 'AvnavUpdater',
            'included': True,
            'show': '',
            'service': ['avnavupdater'],
            'edit': True,
            'install': '',
            'uninstall': '',
        }
        self.appsDict.append(app)

        app = {
            'name': 'Avnav',
            'included': True,
            'show': '',
            'service': ['avnav'],
            'edit': True,
            'install': '',
            'uninstall': '',
        }
        self.appsDict.append(app)

        self.OCHARTSport = 8082
        self.AVNport = 8080
        self.updatePort = 8085
        self.xmlDocFile = self.conf.home + '/avnav/data/avnav_server.xml'
        self.xmlload = False
        if os.path.exists(self.xmlDocFile):
            self.xmlDoc = et.ElementTree(file=self.xmlDocFile)
            self.xmlload = True

            AVNHttpS = self.xmlDoc.find('.//AVNHttpServer')
            if AVNHttpS != None:
                if 'httpPort' in AVNHttpS.attrib:
                    try:
                        self.AVNport = int(AVNHttpS.attrib['httpPort'] or 8080)
                    except:
                        pass
            sys_ocharts = self.xmlDoc.find('.//system-ocharts')
            if sys_ocharts != None:
                if 'port' in sys_ocharts.attrib:
                    try:
                        self.OCHARTSport = int(sys_ocharts.attrib['port']
                                               or 8082)
                    except:
                        pass
            output = subprocess.check_output([
                'grep', '-F', 'Environment=PORT=',
                '/etc/systemd/system/avnavupdater.service.d/override.conf'
            ]).decode("utf-8").split('=')
            if len(output) == 3:
                try:
                    self.updatePort = int(output[2])
                except:
                    pass

        self.pageSettings()
        self.pageSystemd()
        #self.pageOutput()

        maxi = self.conf.get('GENERAL', 'maximize')
        if maxi == '1': self.Maximize()

        self.Centre()
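
Outside the wx GUI, the port lookup above boils down to finding the AVNHttpServer element in avnav_server.xml and reading its httpPort attribute. A minimal sketch, using a made-up sample document:

# Minimal sketch of the httpPort lookup; the sample XML content is an assumption.
import xml.etree.ElementTree as et

sample = '<AVNServer><AVNHttpServer httpPort="8080"/></AVNServer>'
tree = et.ElementTree(et.fromstring(sample))

AVNport = 8080
AVNHttpS = tree.find('.//AVNHttpServer')
if AVNHttpS is not None and 'httpPort' in AVNHttpS.attrib:
    try:
        AVNport = int(AVNHttpS.attrib['httpPort'] or 8080)
    except ValueError:
        pass
print(AVNport)
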
Example #19
0
    codePoint = int(l[0], 16)
    if codePoint % 256 == 0:
        comment = ET.Comment("%X" % codePoint)
        characterSetNode.append(comment)

    character = ET.SubElement(characterSetNode,
                              "字符",
                              attrib={
                                  "名稱": chr(codePoint),
                                  "註記": "U+%X" % codePoint,
                              })

    operator = l[1]
    if operator == "XXXX":
        ET.SubElement(character, "組字")
    elif operator == "龜":
        ET.SubElement(character, "組字")
    else:
        assemble = ET.SubElement(character, "組字", attrib={
            "運算": operator,
        })
        for operand in l[2:]:
            operandNode = ET.SubElement(assemble,
                                        "字根",
                                        attrib={
                                            "置換": operand,
                                        })
xmlNode = ET.ElementTree(rootNode)
print(ET.tounicode(xmlNode, pretty_print=True))
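
For context, the loop above can be exercised on its own with one made-up input line (hex code point, operator, operands); the root and container tag names here are assumptions, since they are defined outside this excerpt.

# Self-contained sketch; rootNode/characterSetNode tag names are invented for
# illustration and the input line format is assumed.
import lxml.etree as ET

rootNode = ET.Element("資料")                          # assumed root tag
characterSetNode = ET.SubElement(rootNode, "字符集")   # assumed container tag

l = ["4E2D", "⿻", "口", "丨"]                          # U+4E2D with a made-up decomposition
codePoint = int(l[0], 16)
character = ET.SubElement(characterSetNode, "字符",
                          attrib={"名稱": chr(codePoint), "註記": "U+%X" % codePoint})
assemble = ET.SubElement(character, "組字", attrib={"運算": l[1]})
for operand in l[2:]:
    ET.SubElement(assemble, "字根", attrib={"置換": operand})

print(ET.tounicode(ET.ElementTree(rootNode), pretty_print=True))
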
Example #20
0
    def to_mmd(self):
        """
        Parse the content of the NetCDF file, map its discovery
        metadata to MMD, and write the resulting MMD document to disk.
        """

        # Build the lookup table mapping CF/ACDD attribute names to MMD elements
        cf_mmd_lut = self.generate_cf_acdd_mmd_lut()
        # Some mandatory MMD elements have no ACDD equivalent,
        # so add a one-to-one mapping for those
        cf_mmd_lut.update(self.generate_cf_mmd_lut_missing_acdd())
        mmd_required_elements = self.required_mmd_elements()

        ncin = Dataset(self.netcdf_product)

        global_attributes = ncin.ncattrs()
        all_netcdf_variables = [var for var in ncin.variables]

        # Create XML file with namespaces
        ns_map = {
            'mmd': "http://www.met.no/schema/mmd",
            'gml': "http://www.opengis.net/gml"
        }
        root = ET.Element(ET.QName(ns_map['mmd'], 'mmd'), nsmap=ns_map)

        # Write MMD elements from global attributes in NetCDF
        for ga in global_attributes:

            # Check if global attribute is in the Look Up Table
            if ga in cf_mmd_lut.keys():
                # Check if global attribute has a MMD mapping
                if cf_mmd_lut[ga]:
                    all_elements = cf_mmd_lut[ga].split(',')
                    len_elements = len(all_elements)
                    parent_element = root
                    for i, e in enumerate(all_elements):

                        # Check if the element is an attribute to an element
                        if e.startswith('attrib_'):
                            continue

                        # Check if we have iterated to the end of the children
                        elif i == len_elements - 1:
                            value_list = [ncin.getncattr(ga)]
                            # Split some elements by comma into list
                            if ga == 'iso_topic_category':
                                value_list = ncin.getncattr(ga).split(',')
                            for value in value_list:
                                current_element = ET.SubElement(
                                    parent_element, ET.QName(ns_map['mmd'], e))
                                current_element.text = str(value)

                        # Checks to avoid duplication
                        else:

                            # Check if parent element already exist to avoid duplication
                            if root.findall(parent_element.tag):
                                parent_element = root.findall(
                                    parent_element.tag)[0]

                            # Check if current_element already exist to avoid duplication
                            current_element = None
                            for c in parent_element:
                                if c.tag == ET.QName(ns_map['mmd'], e):
                                    current_element = c
                                    break

                            if current_element is None:
                                current_element = ET.SubElement(
                                    parent_element, ET.QName(ns_map['mmd'], e))

                            parent_element = current_element

        # add MMD attribute values from CF and ACDD
        for ga in global_attributes:

            if ga in cf_mmd_lut.keys():
                if cf_mmd_lut[ga]:
                    all_elements = cf_mmd_lut[ga].split(',')
                    len_elements = len(all_elements)
                    parent_element = root

                    for i, e in enumerate(all_elements):
                        if e.startswith('attrib_'):
                            if ga == 'keywords_vocabulary':
                                attrib = e.split('_')[-1]
                                for keywords_element in root.findall(
                                        ET.QName(ns_map['mmd'], 'keywords')):
                                    keywords_element.attrib[
                                        attrib] = ncin.getncattr(ga)
                            elif ga == 'geospatial_bounds_crs':
                                attrib = e.split('_')[-1]
                                for keywords_element in root.findall(
                                        ET.QName(ns_map['mmd'], 'rectangle')):
                                    keywords_element.attrib[
                                        attrib] = ncin.getncattr(ga)
                            elif ga == 'title_lang':
                                attrib = e.split('_')[-1]
                                for title_element in root.findall(
                                        ET.QName(ns_map['mmd'], 'title')):
                                    title_element.attrib[
                                        '{http://www.w3.org/XML/1998/namespace}'
                                        + attrib] = ncin.getncattr(ga)
                            elif ga == 'summary_lang':
                                attrib = e.split('_')[-1]
                                for element in root.findall(
                                        ET.QName(ns_map['mmd'], 'abstract')):
                                    element.attrib[
                                        '{http://www.w3.org/XML/1998/namespace}'
                                        + attrib] = ncin.getncattr(ga)
                            else:
                                print(
                                    "Warning: don't know how to handle attrib: ",
                                    e)

        # Add empty/commented required MMD elements that are not found in the NetCDF file
        for k, v in mmd_required_elements.items():

            # check if the required element is already part of the output MMD (i.e. found in the NetCDF file)
            if not len(root.findall(ET.QName(ns_map['mmd'], k))) > 0:
                print('Did not find required element: {}.'.format(k))
                if not v:
                    root.append(ET.Comment('<mmd:{}></mmd:{}>'.format(k, k)))
                else:
                    root.append(
                        ET.Comment('<mmd:{}>{}</mmd:{}>'.format(k, v, k)))

        # Add OPeNDAP data_access if "netcdf_product" is OPeNDAP url
        if 'dodsC' in self.netcdf_product:
            da_element = ET.SubElement(root,
                                       ET.QName(ns_map['mmd'], 'data_access'))
            type_sub_element = ET.SubElement(da_element,
                                             ET.QName(ns_map['mmd'], 'type'))
            description_sub_element = ET.SubElement(
                da_element, ET.QName(ns_map['mmd'], 'description'))
            resource_sub_element = ET.SubElement(
                da_element, ET.QName(ns_map['mmd'], 'resource'))
            type_sub_element.text = "OPeNDAP"
            description_sub_element.text = "Open-source Project for a Network Data Access Protocol"
            resource_sub_element.text = self.netcdf_product

            # Collect additional data_access entries (OGC WMS and HTTP)
            access_list = []
            _desc = []
            _res = []
            add_wms_data_access = True
            if add_wms_data_access:
                access_list.append('OGC WMS')
                _desc.append(
                    'OGC Web Mapping Service, URI to GetCapabilities Document.'
                )
                _res.append(self.netcdf_product.replace('dodsC', 'wms'))
            add_http_data_access = True
            if add_http_data_access:
                access_list.append('HTTP')
                _desc.append(
                    'Open-source Project for a Network Data Access Protocol.')
                _res.append(self.netcdf_product.replace('dodsC', 'fileServer'))
            for prot_type, desc, res in zip(access_list, _desc, _res):
                dacc = ET.SubElement(root,
                                     ET.QName(ns_map['mmd'], 'data_access'))
                dacc_type = ET.SubElement(dacc,
                                          ET.QName(ns_map['mmd'], 'type'))
                dacc_type.text = prot_type
                dacc_desc = ET.SubElement(
                    dacc, ET.QName(ns_map['mmd'], 'description'))
                dacc_desc.text = str(desc)
                dacc_res = ET.SubElement(dacc,
                                         ET.QName(ns_map['mmd'], 'resource'))
                if 'OGC WMS' in prot_type:
                    wms_layers = ET.SubElement(
                        dacc, ET.QName(ns_map['mmd'], 'wms_layers'))
                    # Don't add variables containing these names to the wms layers
                    skip_layers = ['latitude', 'longitude', 'angle']
                    for w_layer in all_netcdf_variables:
                        if any(skip_layer in w_layer
                               for skip_layer in skip_layers):
                            continue
                        wms_layer = ET.SubElement(
                            wms_layers, ET.QName(ns_map['mmd'], 'wms_layer'))
                        wms_layer.text = w_layer
                    # Need to add get capabilities to the wms resource
                    res += '?service=WMS&version=1.3.0&request=GetCapabilities'
                dacc_res.text = res

        # Add OGC WMS data_access as comment
        root.append(
            ET.Comment(
                str('<mmd:data_access>\n\t<mmd:type>OGC WMS</mmd:type>\n\t<mmd:description>OGC Web '
                    'Mapping Service, URI to GetCapabilities Document.</mmd:description>\n\t'
                    '<mmd:resource></mmd:resource>\n\t<mmd:wms_layers>\n\t\t<mmd:wms_layer>'
                    '</mmd:wms_layer>\n\t</mmd:wms_layers>\n</mmd:data_access>'
                    )))

        # print(ET.tostring(root,pretty_print=True).decode("utf-8"))

        if not self.output_name.endswith('.xml'):
            output_file = str(self.output_path + self.output_name) + '.xml'
        else:
            output_file = str(self.output_path + self.output_name)

        # Re-parse the serialized tree so pretty_print yields clean indentation
        et = ET.ElementTree(
            ET.fromstring(
                ET.tostring(root, pretty_print=True).decode("utf-8")))
        et.write(output_file, pretty_print=True)
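
The core of the mapping above is the expansion of comma-separated lookup-table values into nested mmd elements. A stripped-down sketch with a made-up two-entry table (the real tables come from generate_cf_acdd_mmd_lut() and are not shown here):

# Assumed LUT entries and attribute values, for illustration only.
import lxml.etree as ET

ns_map = {'mmd': "http://www.met.no/schema/mmd"}
cf_mmd_lut = {'title': 'title', 'keywords': 'keywords,keyword'}
global_attributes = {'title': 'My dataset', 'keywords': 'sea_ice'}

root = ET.Element(ET.QName(ns_map['mmd'], 'mmd'), nsmap=ns_map)
for ga, value in global_attributes.items():
    parent_element = root
    all_elements = cf_mmd_lut[ga].split(',')
    for i, e in enumerate(all_elements):
        # a value such as 'keywords,keyword' becomes <mmd:keywords><mmd:keyword>...</...>
        current_element = ET.SubElement(parent_element, ET.QName(ns_map['mmd'], e))
        if i == len(all_elements) - 1:
            current_element.text = value
        parent_element = current_element

print(ET.tostring(root, pretty_print=True).decode("utf-8"))
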
Example #21
0
                            tableElem.attrib["have_exclude_column"] = "1"
                        else:
                            subElem.attrib["is_exclude"] = "0"
                        subElem.attrib["disableflag"] = "0"

        except BaseException as ex:
            exc_info = cF.getExceptionInfo()
            err = u"Exception after executing %s against the database: %s" % (sql, exc_info)
            Logging.getLog().critical(err)
            crs.close()
            conn.rollback()
            conn.close()
            return -1, err
        crs.close()
        conn.close()
    doc = etree.ElementTree(root)
    doc.write("JZJY_column_report_scan.xml", pretty_print=True, xml_declaration=True, encoding='utf-8')
    Logging.getLog().info(u"Finished scanning the centralized trading database")
    return 0, ""


def CheckJZJYColumnSetting():
    Logging.getLog().info(u"Started updating the centralized trading database configuration file")

    # Read the file
    doc = etree.ElementTree(file='JZJY_column_report.xml')
    root = doc.getroot()
    # Back up the interface parameter file
    doc = etree.ElementTree(root)  # root is the root element
    doc.write("JZJY_column_report.xml%s.xml" % time.strftime('%Y%m%d%H%M%S'), pretty_print=True, xml_declaration=True,
              encoding='utf-8')
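
The backup step above follows a simple pattern: parse the existing file, then re-serialize its root to a timestamp-suffixed copy before changing anything. A minimal sketch with a placeholder file name:

# 'some_config.xml' is a placeholder path; adjust to the file being backed up.
import time
from lxml import etree

doc = etree.ElementTree(file='some_config.xml')
root = doc.getroot()
backup = etree.ElementTree(root)
backup.write('some_config.xml%s.xml' % time.strftime('%Y%m%d%H%M%S'),
             pretty_print=True, xml_declaration=True, encoding='utf-8')
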
Example #22
0
def apply_xml(df, file_path, obj_df=None, parameters=None):
    ret = get_python_obj(df, obj_df=obj_df, parameters=parameters)
    from lxml import etree

    prefix = "ocel:"

    root = etree.Element("log")
    global_event = etree.SubElement(root, "global")
    global_event.set("scope", "event")
    for k, v in ret[prefix + "global-event"].items():
        child = etree.SubElement(global_event, "string")
        child.set("key", k.split(prefix)[-1])
        child.set("value", v)
    global_object = etree.SubElement(root, "global")
    global_object.set("scope", "object")
    for k, v in ret[prefix + "global-object"].items():
        child = etree.SubElement(global_object, "string")
        child.set("key", k.split(prefix)[-1])
        child.set("value", v)
    global_log = etree.SubElement(root, "global")
    global_log.set("scope", "log")
    attribute_names = etree.SubElement(global_log, "list")
    attribute_names.set("key", "attribute-names")
    object_types = etree.SubElement(global_log, "list")
    object_types.set("key", "object-types")
    for k in ret[prefix + "global-log"][prefix + "attribute-names"]:
        subel = etree.SubElement(attribute_names, "string")
        subel.set("key", "attribute-name")
        subel.set("value", k)
    for k in ret[prefix + "global-log"][prefix + "object-types"]:
        subel = etree.SubElement(object_types, "string")
        subel.set("key", "object-type")
        subel.set("value", k)
    version = etree.SubElement(global_log, "string")
    version.set("key", "version")
    version.set("value", ret[prefix + "global-log"][prefix + "version"])
    ordering = etree.SubElement(global_log, "string")
    ordering.set("key", "ordering")
    ordering.set("value", ret[prefix + "global-log"][prefix + "ordering"])
    events = etree.SubElement(root, "events")
    for k, v in ret[prefix + "events"].items():
        event = etree.SubElement(events, "event")
        event_id = etree.SubElement(event, "string")
        event_id.set("key", "id")
        event_id.set("value", str(k))
        event_activity = etree.SubElement(event, "string")
        event_activity.set("key", "activity")
        event_activity.set("value", v[prefix + "activity"])
        event_timestamp = etree.SubElement(event, "date")
        event_timestamp.set("key", "timestamp")
        event_timestamp.set("value", v[prefix + "timestamp"].isoformat())
        event_omap = etree.SubElement(event, "list")
        event_omap.set("key", "omap")
        for k2 in v[prefix + "omap"]:
            obj = etree.SubElement(event_omap, "string")
            obj.set("key", "object-id")
            obj.set("value", k2)
        event_vmap = etree.SubElement(event, "list")
        event_vmap.set("key", "vmap")
        for k2, v2 in v[prefix + "vmap"].items():
            attr = etree.SubElement(event_vmap,
                                    get_type(df["event_" + k2].dtype))
            attr.set("key", k2)
            attr.set("value", str(v2))
    objects = etree.SubElement(root, "objects")
    for k, v in ret[prefix + "objects"].items():
        object = etree.SubElement(objects, "object")
        object_id = etree.SubElement(object, "string")
        object_id.set("key", "id")
        object_id.set("value", str(k))
        object_type = etree.SubElement(object, "string")
        object_type.set("key", "type")
        object_type.set("value", v[prefix + "type"])
        object_ovmap = etree.SubElement(object, "list")
        object_ovmap.set("key", "ovmap")
        for k2, v2 in v[prefix + "ovmap"].items():
            if str(v2).lower() != "nan" and str(v2).lower() != "nat":
                object_att = etree.SubElement(
                    object_ovmap, get_type(obj_df["object_" + k2].dtype))
                object_att.set("key", k2)
                object_att.set("value", str(v2))

    tree = etree.ElementTree(root)
    tree.write(file_path,
               pretty_print=True,
               xml_declaration=True,
               encoding="utf-8")
for file_comp4 in f_list:
    #file_comp4='budweiser22234.jpg'
    #if True:
    if os.path.splitext(file_comp4)[1] in ['.jpg', '.JPG', '.jpeg', '.JPEG']:
        basename = os.path.splitext(file_comp4)[0]
        bfind = False
        bmakehead = False
        for line in lines:
            line = line.strip('\r\n')
            line = line.strip('\n')
            line = line.strip(' ')
            name, path, xmin, ymin, objwidth, objheight, angle, sku, skuname = line.split(
                " ", 8)
            if bfind == True:
                if file_comp4 != name:
                    tree = etree.ElementTree(root)
                    savexml = savepath + os.path.splitext(
                        file_comp4)[0] + ".xml"
                    tree.write(savexml, encoding='utf-8', pretty_print=True)
                    #cv2.imwrite(testpath+"test.jpg",img)
                    #cv2.imshow("Canvas", img)
                    break
            if name == file_comp4:
                bfind = True
                if bmakehead == False:
                    bmakehead = True
                    jpgfile = basedir + name
                    img = cv2.imread(jpgfile)
                    print(img.shape)
                    size = img.shape
                    height = size[0]
def main():
    '''Program entry point'''
    knight_info = get_knight_info()
    knight_root = build_tree(knight_info)
    knight_doc = ET.ElementTree(knight_root)
    write_doc(knight_doc)
def run(image_path, gt_file, ann_path):

    classNameList = ["go","stop"]

    files = []
    with open(gt_file) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=' ')
        for row in readCSV:
            cols = []
            for col in row:
                cols.append( int(col) )
            files.append(cols)

    #obj = imageRegionOfInterest(image_path)
    valid_images = [".jpg",".gif",".png",".tga",".jpeg"]

    classes_qtd = []
    images_total_qtd = 0
    images_without_classes_qtd = 0

    xml_list = []
    for filename in os.listdir(image_path):
        name, ext = os.path.splitext(filename)
        if ext.lower() not in valid_images:
            continue
        
        images_total_qtd = images_total_qtd + 1

        #load image
        image = cv2.imread(os.path.join(image_path,filename))    
        annotation = root(image_path, filename, int(image.shape[1]), int(image.shape[0]))

        #found GT
        numberFile = int(name)
        points = []
        for index, item in enumerate(files):
            if item[0] == numberFile:
                points.append([item[2],item[1],item[4],item[3],0 if item[5]==2 else 1])

        if len(points)>0:
            for point in points:
                annotation.append(instance_to_xml(point, classNameList))
                iclass = int(point[4]) 
                while len(classes_qtd) < iclass+1:
                    classes_qtd.append(0)

                classes_qtd[iclass] = classes_qtd[iclass] + 1
        else:
            images_without_classes_qtd = images_without_classes_qtd + 1

        #create xml
        xmlFileName = os.path.join(ann_path,name+".xml")
        print(xmlFileName)
        etree.ElementTree(annotation).write(xmlFileName)

    print('Successfully converted to PASCAL VOC XML.')

    print('Total Images: ', images_total_qtd)
    print('Images without classes: ', images_without_classes_qtd)
    print('Classes: ')
    for q in classes_qtd:
        print(q)



    return 
Example #26
0
def modify(changes, type):
    """
        This function will take a set of changes to be applied to a document,
        apply the changes, and return a set of rollback instructions to be stored
        in the migration database entry.

        Right now, the following formats are assumed with this function:

        Volumes:
        [
          {
            "barcode": "32044123321123",
            "changes": [{
              "xpath": "(if non_casebody) //mets/blah/blah[@BLAH=666]/p[2]",
              "actions": {
                "remove": false,
                "create": false,
                "content": "love them new words",
                "name": "judge",
                "attributes": {
                  "delete": ["name", "name", "name"],
                  "add_update": {
                    "id": "some identifier",
                    "alt_id": "some other identifier"
                  }
                }
              }
            }]
          }
        ]


        Cases:
        [{
          "barcode": "32044123321123",
          "case_id": "32044123321123_002",
          "changes": [{
            "type": "non_casebody/casebody",
            "element_id": "(if casebody) b-123a",
            "xpath": "(if non_casebody) //mets/blah/blah[@BLAH=666]/p[2]",
            "actions": {
              "remove": false,
              "create": false,
              "content": "love them new words",
              "name": "judge",
              "attributes": {
                "delete": ["name", "name", "name"],
                "add_update": {
                  "id": "some identifier",
                  "alt_id": "some other identifier"
                }
              }
            }
          }]
        }]


        Altos:
        [{
          "barcode": "32044123321123",
          "alto_id": "32044123321123_002",
          "changes": [{
              "type": "tag/layout/other",
              "element_id": "(if tag or layout) b-123a",
              "xpath": "(if other) //mets/blah/blah[@BLAH=666]/p[2]",
              "actions": {
                "remove": false,
                "create": false,
                "content": "love them new words",
                "name": "judge",
                "attributes": {
                  "delete": ["name", "name", "name"],
                  "add_update": {
                    "id": "some identifier",
                    "alt_id": "some other identifier"
                  }
                }
              }
            }
          ]
        }]
    """
    if type == 'volume':
        doc = VolumeXML.objects.get(barcode=changes['barcode'])
    elif type == 'case':
        doc = CaseXML.objects.get(barcode=changes['case_id'])
    elif type == 'alto':
        doc = PageXML.objects.get(barcode=changes['alto_id'])

    root = etree.fromstring(doc.orig_xml)
    tree = etree.ElementTree(root)

    element_list = defaultdict()
    for index, change in enumerate(changes['changes']):
        if 'element_id' in change:
            elements = tree.findall(".//*[@id='{}']".format(
                change['element_id']),
                                    namespaces=nsmap)
            if len(elements) != 1:
                elements = tree.findall(".//*[@ID='{}']".format(
                    change['element_id']),
                                        namespaces=nsmap)
        elif 'xpath' in change:
            elements = tree.xpath(change['xpath'], namespaces=nsmap)

        if len(elements) != 1:
            # TODO: FIXME
            # print("Provided xpath must select ONE element. {} elements selected with {} in {}".format(len(elements), xpath, case_changes['case_id']))
            return False

        element_list[index] = elements[0]

    rollback_list = defaultdict()
    # This loops through the changes, applies each one, and records a rollback entry for it
    for index, change in enumerate(changes['changes']):
        element = element_list[index]

        rollback_list_entry = {}

        rollback_list_entry['complete_element'] = xmltodict.parse(
            etree.tostring(element))
        rollback_list_entry['actions'] = {}
        if 'type' in change:
            rollback_list_entry['type'] = change['type']

        if 'remove' in change['actions']:
            if change['actions']['remove'] is True:
                rollback_list_entry['actions']['create'] = True
                rollback_list_entry['parent_path'] = element.getparent()
                rollback_list_entry['parent_index'] = element.getparent(
                ).index(element)
                rollback_list[index] = rollback_list_entry
                element.getparent().remove(element)
                continue

        if 'name' in change['actions']:
            tag_breakdown = re.match('({[^}]+})(.*)', element.tag)
            namespace = tag_breakdown.group(1)
            name = tag_breakdown.group(2)
            if namespace is not None:
                element.tag = "{}{}".format(namespace,
                                            change['actions']['name'])
            else:
                element.tag = change['actions']['name']
            rollback_list_entry['actions']['name'] = name

        if 'content' in change['actions']:
            rollback_list_entry['actions']['content'] = element.text
            element.text = change['actions']['content']

        if 'attributes' in change['actions']:
            rollback_list_entry['actions']['attributes'] = {}
            if 'remove' in change['actions']['attributes']:
                rollback_list_entry['actions']['attributes']['add_update'] = {}
                for attribute in change['actions']['attributes']['remove']:
                    #yes, I do want the whole thing to die if this fails
                    rollback_list_entry['actions']['attributes']['add_update'][
                        attribute] = element.attrib[attribute]
                    del element.attrib[attribute]
            if 'add_update' in change['actions']['attributes']:
                for attribute in change['actions']['attributes']['add_update']:

                    if attribute in element.attrib:
                        if 'add_update' not in rollback_list_entry['actions'][
                                'attributes']:
                            rollback_list_entry['actions']['attributes'][
                                'add_update'] = {}
                        rollback_list_entry['actions']['attributes'][
                            'add_update'][attribute] = element.attrib[
                                attribute]
                    else:
                        if 'remove' not in rollback_list_entry['actions'][
                                'attributes']:
                            rollback_list_entry['actions']['attributes'][
                                'remove'] = []
                        rollback_list_entry['actions']['attributes'][
                            'remove'].append(attribute)

                    element.attrib[attribute] = change['actions'][
                        'attributes']['add_update'][attribute]

        rollback_list[index] = rollback_list_entry

    for index, change in enumerate(changes['changes']):
        element = element_list[index]
        rollback = rollback_list[index]

        if 'element_id' in change:
            for o_tag in rollback['complete_element']:
                if '@ID' in rollback['complete_element'][o_tag]:
                    rollback['element_id'] = element.attrib['ID']
                if '@id' in rollback['complete_element'][o_tag]:
                    rollback['element_id'] = element.attrib['id']
        elif 'xpath' in change:
            rollback['xpath'] = normalize_namespace(
                tree.getelementpath(element))

        rollback['numeric_xpath'] = tree.getpath(element)

        if 'remove' not in change['actions']:
            del rollback['complete_element']

    doc.orig_xml = etree.tostring(root, pretty_print=True).decode("utf-8")
    doc.save()

    return rollback_list
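
A hedged usage sketch of modify(), following the 'Cases' shape documented in the docstring above; the barcode, case_id, and element_id values are placeholders, and the corresponding CaseXML record must already exist in the database for the call to succeed:

# Placeholder change set; mirrors the documented 'Cases' format.
case_changes = {
    "barcode": "32044123321123",
    "case_id": "32044123321123_002",
    "changes": [{
        "type": "casebody",
        "element_id": "b-123a",
        "actions": {
            "content": "love them new words",
            "attributes": {"add_update": {"id": "some identifier"}},
        },
    }],
}
rollback = modify(case_changes, type='case')
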
Example #27
0
    def _create_xml_file(self, number_of_samples, temp_dir=''):
        """
        This function creates an xml file containing the header for the wfmx-file format using
        etree.
        """
        root = ET.Element('DataFile', offset='xxxxxxxxx', version="0.1")
        DataSetsCollection = ET.SubElement(root, 'DataSetsCollection',
                                           xmlns="http://www.tektronix.com")
        DataSets = ET.SubElement(DataSetsCollection, 'DataSets', version="1",
                                 xmlns="http://www.tektronix.com")
        DataDescription = ET.SubElement(DataSets, 'DataDescription')
        NumberSamples = ET.SubElement(DataDescription, 'NumberSamples')
        NumberSamples.text = str(int(number_of_samples))
        SamplesType = ET.SubElement(DataDescription, 'SamplesType')
        SamplesType.text = 'AWGWaveformSample'
        MarkersIncluded = ET.SubElement(DataDescription, 'MarkersIncluded')
        MarkersIncluded.text = 'true'
        NumberFormat = ET.SubElement(DataDescription, 'NumberFormat')
        NumberFormat.text = 'Single'
        Endian = ET.SubElement(DataDescription, 'Endian')
        Endian.text = 'Little'
        Timestamp = ET.SubElement(DataDescription, 'Timestamp')
        Timestamp.text = '2014-10-28T12:59:52.9004865-07:00'
        ProductSpecific = ET.SubElement(DataSets, 'ProductSpecific', name="")
        ReccSamplingRate = ET.SubElement(ProductSpecific, 'ReccSamplingRate', units="Hz")
        ReccSamplingRate.text = str(self.sample_rate)
        ReccAmplitude = ET.SubElement(ProductSpecific, 'ReccAmplitude', units="Volts")
        ReccAmplitude.text = str(0.5)
        ReccOffset = ET.SubElement(ProductSpecific, 'ReccOffset', units="Volts")
        ReccOffset.text = str(0)
        SerialNumber = ET.SubElement(ProductSpecific, 'SerialNumber')
        SoftwareVersion = ET.SubElement(ProductSpecific, 'SoftwareVersion')
        SoftwareVersion.text = '4.0.0075'
        UserNotes = ET.SubElement(ProductSpecific, 'UserNotes')
        OriginalBitDepth = ET.SubElement(ProductSpecific, 'OriginalBitDepth')
        OriginalBitDepth.text = 'EightBit'
        Thumbnail = ET.SubElement(ProductSpecific, 'Thumbnail')
        CreatorProperties = ET.SubElement(ProductSpecific, 'CreatorProperties',
                                          name='Basic Waveform')
        Setup = ET.SubElement(root, 'Setup')

        filepath = os.path.join(temp_dir, 'header.xml')

        ##### This command creates the first version of the file
        tree = ET.ElementTree(root)
        tree.write(filepath, pretty_print=True, xml_declaration=True)

        # Calculate the length of the header:
        # 40 is subtracted because the first line of the file created above is 39 characters
        # long and is not included later, and the trailing newline (\n) is not necessary either.
        # The for loop pads the length to nine digits: xxxxxxxxx
        length_of_header = ''
        size = str(os.path.getsize(filepath) - 40)

        for ii in range(9 - len(size)):
            length_of_header += '0'
        length_of_header += size

        # The header length is written into the file
        # The first line is not included since it is redundant
        # Also the last endline (\n) is excluded
        with open(filepath, "r") as f:
            text = f.read()
        text = text.replace("xxxxxxxxx", length_of_header)
        text = bytes(text, 'UTF-8')
        with open(filepath, "wb") as f:
            f.write(text[39:-1])
"""

s3 = """
    <semantics>
        <cerror>
        <qvar name='a'></qvar>
        <leq/>
        <qvar name='b'></qvar>
        </cerror>
    </semantics>
"""


address = "/home/narya/Dropbox/NTCIR11-Math2-queries-participants.xml" 
doc = etree.parse(address)
formulae = doc.xpath(".//*[local-name() = 'formula']")
for f in formulae:
    idx = f.getparent().getparent()[0].text
    print(idx)
    #if "10" not in idx: continue
    for ann in f.xpath(".//*[local-name() = 'annotation']") + f.xpath(".//*[local-name() = 'annotation-xml']"):
        ann_p = ann.getparent()
        ann_p.remove(ann)
    for sem in f.xpath(".//*[local-name() = 'semantics']"):
        m = MathML2String()
        print(m.convert(etree.ElementTree(sem)))
    print()

#d1 = etree.fromstring(s3.encode("utf-8"))
#print m.convert(etree.ElementTree(d1))
def main(argv=None):
    """
    Attempt to represent the differences in data records flagged with
    'noupdate' between two different versions of the same OpenERP module.

    Print out a complete XML data file that can be loaded in a post-migration
    script using openupgrade::load_xml().

    Known issues:
    - Does not detect if a deleted value belongs to a field
      which has been removed.
    - Ignores forcecreate=False. This hardly occurs, but you should
      check manually for new data records with this tag. Note that
      'True' is the default value for data elements without this tag.
    - Does not take csv data into account (obviously)
    - Is not able to check cross module data
    - etree's pretty_print is not *that* pretty
    - Does not take translations into account (e.g. in the case of
      email templates)
    - Does not handle the shorthand records <menu>, <act_window> etc.,
      although that could be done using the same expansion logic as
      is used in their parsers in openerp/tools/convert.py
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('olddir', metavar='older_module_directory')
    parser.add_argument('newdir', metavar='newer_module_directory')
    arguments = parser.parse_args(argv)

    old_update, old_noupdate = get_records(arguments.olddir)
    new_update, new_noupdate = get_records(arguments.newdir)

    data = etree.Element("data")

    for xml_id, record_new in new_noupdate.items():
        record_old = None
        if xml_id in old_update:
            record_old = old_update[xml_id]
        elif xml_id in old_noupdate:
            record_old = old_noupdate[xml_id]

        if record_old is None:
            continue

        element = etree.Element("record",
                                id=xml_id,
                                model=record_new.attrib['model'])
        record_old_dict = get_node_dict(record_old)
        record_new_dict = get_node_dict(record_new)
        for key in record_old_dict.keys():
            if not record_new.xpath(key):
                # The element is no longer present.
                # Overwrite an existing value with an
                # empty one. Of course, we do not know
                # if this field has actually been removed
                attribs = deepcopy(record_old_dict[key]).attrib
                for attr in ['eval', 'ref']:
                    if attr in attribs:
                        del attribs[attr]
                element.append(etree.Element(record_old_dict[key].tag,
                                             attribs))
            else:
                oldrepr = get_node_value(record_old_dict[key])
                newrepr = get_node_value(record_new_dict[key])

                if oldrepr != newrepr:
                    element.append(deepcopy(record_new_dict[key]))

        for key in record_new_dict.keys():
            if not record_old.xpath(key):
                element.append(deepcopy(record_new_dict[key]))

        if len(element):
            data.append(element)

    openerp = etree.Element("openerp")
    openerp.append(data)
    document = etree.ElementTree(openerp)

    print(etree.tostring(document,
                         pretty_print=True,
                         xml_declaration=True,
                         encoding='utf-8').decode('utf-8'))
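
The helpers get_records(), get_node_dict() and get_node_value() are not part of this excerpt. As a rough indication of what the last two might do, here is a hypothetical sketch: index a record's children by an xpath-like key and compare them through a normalized value tuple.

# Hypothetical helpers, not the real implementations used above.
def get_node_dict(record):
    nodes = {}
    for child in record:
        # key doubles as an xpath usable with record.xpath(key)
        nodes['{}[@name="{}"]'.format(child.tag, child.get('name', ''))] = child
    return nodes


def get_node_value(node):
    return (node.get('eval'), node.get('ref'), (node.text or '').strip())
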
Example #30
0
def create_kmz(container, datadir, oceanfile, logger):
    # we're going to combine all these layers into one KMZ file.
    kmz_contents = []

    # create the kml text
    root = etree.Element("kml")
    nlink = etree.SubElement(root, "NetworkLinkControl")
    nperiod = etree.SubElement(nlink, "minRefreshPeriod")
    nperiod.text = '300'
    document = etree.SubElement(root, 'Document')
    name = etree.SubElement(document, 'name')
    info = container.getMetadata()
    eid = info['input']['event_information']['event_id']
    mag = info['input']['event_information']['magnitude']
    timestr = info['input']['event_information']['origin_time']
    namestr = 'ShakeMap %s M%s %s' % (eid, mag, timestr)
    name.text = namestr
    set_look(document, container)

    # create intensity overlay
    logger.debug('Creating intensity overlay...')
    overlay_image = create_overlay(container, oceanfile, datadir, document)
    kmz_contents += [overlay_image]
    logger.debug('Created intensity overlay image %s' % overlay_image)

    # create station kml
    logger.debug('Creating station KML...')
    triangle_file, circle_file = create_stations(container, datadir, document)
    kmz_contents += [triangle_file, circle_file]
    logger.debug('Created station KML')

    # create contour kml
    logger.debug('Creating contour KML...')
    create_contours(container, document)
    logger.debug('Created contour KML')

    # create epicenter KML
    logger.debug('Creating epicenter KML...')
    create_epicenter(container, document)
    logger.debug('Created epicenter KML')

    # place ShakeMap legend on the screen
    legend_file = place_legend(datadir, document)
    kmz_contents.append(legend_file)

    # Write the uber-kml file
    tree = etree.ElementTree(root)
    kmlfile = os.path.join(datadir, KML_FILE)
    tree.write(kmlfile, encoding='utf-8', xml_declaration=True)
    kmz_contents.append(kmlfile)

    # assemble all the pieces into a KMZ file, and delete source files
    # as we go
    kmzfile = os.path.join(datadir, KMZ_FILE)
    kmzip = zipfile.ZipFile(kmzfile,
                            mode='w',
                            compression=zipfile.ZIP_DEFLATED)
    for kfile in kmz_contents:
        _, arcname = os.path.split(kfile)
        kmzip.write(kfile, arcname=arcname)
        os.remove(kfile)
    kmzip.close()

    logger.debug('Wrote KMZ container file %s' % kmzfile)
    return kmzfile
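
For reference, the final packaging step reduces to writing a KML document and zipping it into a KMZ. A minimal, self-contained sketch without the ShakeMap-specific layers; file names are placeholders:

# Minimal KML-to-KMZ round trip; 'doc.kml' and 'doc.kmz' are placeholder names.
import os
import zipfile
from lxml import etree

root = etree.Element('kml')
document = etree.SubElement(root, 'Document')
name = etree.SubElement(document, 'name')
name.text = 'Minimal KMZ sketch'

kmlfile = 'doc.kml'
etree.ElementTree(root).write(kmlfile, encoding='utf-8', xml_declaration=True)

kmzfile = 'doc.kmz'
with zipfile.ZipFile(kmzfile, mode='w', compression=zipfile.ZIP_DEFLATED) as kmzip:
    kmzip.write(kmlfile, arcname=os.path.basename(kmlfile))
os.remove(kmlfile)
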