Example #1
    def save_fig(self, imout_filename=None):
        """Save .png image and append filename to imout_files"""

        # Filename strings
        frame_str = 'F' + ("%d" %
                           (self.current_source.params.frame_number + 1))
        units_per_intensity_str = 'G' + str(
            self.current_source.params.units_per_intensity).replace('.', 'p')
        offset_str = 'B' + str(self.current_source.params.offset).replace(
            '.', 'p').replace('-', 'n')

        # Save full snapshot
        if imout_filename is None:
            # Generate a default name from the input filename plus the
            # frame/gain/offset tokens computed above
            imout_href = dc_value.hrefvalue(
                quote("%s%s%s%s.png" %
                      (posixpath.splitext(
                          self.current_source.params.input_href.
                          get_bare_unquoted_filename())[0], frame_str,
                       units_per_intensity_str, offset_str)),
                contexthref=self.current_source.params.input_href)
            imout_filename = imout_href.getpath()
            pass
        print("Saving snapshot {s}".format(s=imout_filename))

        self.figure.savefig(imout_filename, dpi=600)
        self.imout_files.append(imout_filename)

        # Save cropped snapshot
        imout_filename_cropped = imout_filename[:-4] + 'cropped.png'
        print("Saving cropped snapshot {s}".format(s=imout_filename_cropped))

        cropped_extent = self.axes.get_window_extent().transformed(
            self.figure.dpi_scale_trans.inverted())
        self.figure.savefig(imout_filename_cropped, bbox_inches=cropped_extent)
        self.imout_files.append(imout_filename_cropped)
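
# --- Illustrative sketch (not part of the original example): the filename
# tokens above encode floating-point parameters by mapping '.' -> 'p' and
# '-' -> 'n' so the values survive in a filesystem-safe form. A minimal
# standalone version of that encoding:
def encode_float_token(prefix, value):
    """Encode a numeric value as a filesystem-safe filename token."""
    return prefix + str(value).replace('.', 'p').replace('-', 'n')

# e.g. encode_float_token('B', -0.25) returns 'Bn0p25'
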
    def hrefxlink(self, context, sourcelink):
        # return xlink:href attribute value for sourcelink
        # in given context

        contextxmldoc = self.finddocument(context.context_node)

        if isinstance(sourcelink, list):
            if len(sourcelink) < 1:
                #raise ValueError("hrefxlink: Empty source node set, context=%s" % (dc_value.hrefvalue.fromelement(contextxmldoc,context.context_node).humanurl()))
                return []
            elif len(sourcelink) > 1:
                raise ValueError(
                    "hrefxlink: Multiple source nodes %s, context=%s" %
                    (",".join([
                        dc_value.hrefvalue.fromelement(
                            self.finddocument(cellc), cellc).humanurl()
                        for cellc in sourcelink
                    ]),
                     dc_value.hrefvalue.fromelement(
                         contextxmldoc, context.context_node).humanurl()))
            sourcelink = sourcelink[0]
            pass

        # create dummy xmldoc in given context
        dummydoc = xmldoc.xmldoc.newdoc(
            "dummy",
            nsmap={"xlink": "http://www.w3.org/1999/xlink"},
            contexthref=contextxmldoc.getcontexthref())

        hrefval = dc_value.hrefvalue(
            sourcelink.attrib["{http://www.w3.org/1999/xlink}href"],
            contexthref=self.finddocument(sourcelink).getcontexthref())
        hrefval.xmlrepr(dummydoc, dummydoc.getroot())
        xlinktext = dummydoc.getattr(dummydoc.getroot(), "xlink:href")
        return xlinktext
def openxmldoc(filename, nsmap=None, use_databrowse=False):
    filenamehref = hrefvalue(filename)
    doc = xmldoc.xmldoc(filenamehref,
                        maintagname=None,
                        nsmap=nsmap,
                        readonly=True,
                        use_databrowse=use_databrowse)
    return doc
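
# Usage sketch (hypothetical filename; the nsmap follows the limatix
# conventions used elsewhere in these examples):
# doc = openxmldoc("measurement.xlg",
#                  nsmap={"dc": "http://limatix.org/datacollect"})
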
def cleanup_dest(input_files, desthref_set, href_set):
    canonpath_set = set([
        canonicalize_path.canonicalize_path(href.getpath())
        for href in href_set if href.isfile()
    ])

    if len(desthref_set) == 0:
        print(
            "cleanup_dest(): Did not find any .xlg or .xlp files containing dc:summary/dc:dest tags"
        )
        pass
    #print("cleanup_dest(): desthref_set = %s" % ([str(desthref) for desthref in desthref_set]))

    for desthref in desthref_set:

        excess_files = []

        destpath = desthref.getpath()
        assert (destpath.endswith('/') or destpath.endswith(os.path.sep))

        for filename in os.listdir(destpath):
            if os.path.isdir(os.path.join(destpath, filename)):
                continue  # We don't currently recurse into subdirectories of dest
            filehref = dc_value.hrefvalue(quote(filename),
                                          contexthref=desthref)
            if filehref not in href_set:
                # This is an excess file
                # Check canonpath
                assert (canonicalize_path.canonicalize_path(filehref.getpath())
                        not in canonpath_set)
                assert (os.path.exists(filehref.getpath()))
                excess_files.append(filehref.getpath())
                pass
            pass
        print("Excess files in %s:" % (destpath))
        print("---------------------------------------------------------")
        for excess_file in excess_files:
            print(excess_file)
            pass

        if len(excess_files) > 0:
            shoulddelete = raw_input(
                "Answer \"YES\" to delete these files --> ")
            if shoulddelete.strip() == "YES":
                print("Deleting files...")
                for excess_file in excess_files:
                    os.remove(excess_file)
                    pass

                pass
            else:
                print("NOT deleting files")
                time.sleep(2)
                pass
            pass
        pass
    pass
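
# --- Illustrative sketch (standalone, no limatix dependency): the core of
# cleanup_dest() is comparing a directory listing against a set of known
# canonical file paths. A minimal equivalent using os.path.realpath() in
# place of canonicalize_path:
import os

def find_excess_files(destpath, known_paths):
    """Return files in destpath whose canonical path is not in known_paths."""
    known_canonical = set(os.path.realpath(p) for p in known_paths)
    excess = []
    for filename in os.listdir(destpath):
        fullpath = os.path.join(destpath, filename)
        if os.path.isdir(fullpath):
            continue  # like the original, do not recurse into subdirectories
        if os.path.realpath(fullpath) not in known_canonical:
            excess.append(fullpath)
    return excess
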
Example #5
def get_unprocessed(input_file_hrefs, cdup, ignore_locking):
    repository_root = dc_value.hrefvalue(pathname2url(cdup) + '/',
                                         contexthref=".")

    input_files = processtrak_cleanup.infiledicts.fromhreflist(
        input_file_hrefs,
        repository_root=repository_root,
        ignore_locking=ignore_locking)

    (completed_set, desthref_set,
     href_set) = processtrak_cleanup.traverse(input_files,
                                              recursive=True,
                                              need_href_set=True,
                                              include_processed=False,
                                              repository_root=repository_root,
                                              ignore_locking=ignore_locking)

    allhrefs_no_dest = [
        href for href in (completed_set | href_set) - desthref_set
        if not href.isabs()
    ]
    allhrefs_rootrel_no_dest = [
        href.attempt_relative_href(pathname2url(cdup) + '/')
        for href in allhrefs_no_dest
    ]
    allurls_rootrel_no_dest = [
        href.attempt_relative_url(pathname2url(cdup) + '/')
        for href in allhrefs_rootrel_no_dest
    ]

    all_inrepository_hrefs_rootrel_no_dest = [
        allhrefs_rootrel_no_dest[urlnum]
        for urlnum in range(len(allurls_rootrel_no_dest))
        if not allurls_rootrel_no_dest[urlnum].startswith('../')
    ]

    # !!!*** getpath() doesn't have the ability to do relative... this is broken***
    allpaths_no_dest = [
        href.getpath() for href in all_inrepository_hrefs_rootrel_no_dest
    ]  # Paths relative to repository root
    xlppaths = [
        path for path in allpaths_no_dest
        if os.path.splitext(path)[1].lower() == ".xlp"
    ]
    unprocessedpaths = [
        path for path in allpaths_no_dest
        if os.path.splitext(path)[1].lower() != ".xlp"
    ]

    unprocessedexistingpaths = [
        path for path in unprocessedpaths if os.path.exists(path)
    ]

    xlpexistingpaths = [path for path in xlppaths if os.path.exists(path)]

    return (unprocessedexistingpaths, xlpexistingpaths)
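
# --- Illustrative sketch: the tail of get_unprocessed() partitions
# repository-relative paths by extension and keeps only those that exist on
# disk. A standalone equivalent:
import os

def partition_existing_by_extension(paths, ext=".xlp"):
    """Split existing paths into (paths without ext, paths with ext)."""
    existing = [path for path in paths if os.path.exists(path)]
    matching = [p for p in existing if os.path.splitext(p)[1].lower() == ext]
    others = [p for p in existing if os.path.splitext(p)[1].lower() != ext]
    return (others, matching)
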
Example #6
def get_processed(input_file_hrefs_unprocessed, input_file_hrefs, cdup,
                  ignore_locking):
    repository_root = dc_value.hrefvalue(pathname2url(cdup) + '/',
                                         contexthref=".")

    input_files_up = processtrak_cleanup.infiledicts.fromhreflist(
        input_file_hrefs_unprocessed,
        repository_root=repository_root,
        ignore_locking=ignore_locking)

    (unprocessed_completed_set, unprocessed_desthref_set,
     unprocessed_href_set) = processtrak_cleanup.traverse(
         input_files_up,
         recursive=True,
         need_href_set=True,
         include_processed=False,
         repository_root=repository_root,
         ignore_locking=ignore_locking)

    input_files_pr = processtrak_cleanup.infiledicts.fromhreflist(
        input_file_hrefs,
        repository_root=repository_root,
        ignore_locking=ignore_locking)

    (completed_set, desthref_set,
     href_set) = processtrak_cleanup.traverse(input_files_pr,
                                              recursive=True,
                                              need_href_set=True,
                                              include_processed=True,
                                              repository_root=repository_root,
                                              ignore_locking=ignore_locking)

    #import pdb
    #pdb.set_trace()

    processed_href_set = (completed_set | href_set) - (
        unprocessed_completed_set | unprocessed_href_set) - desthref_set
    allhrefs_no_dest = [
        href for href in processed_href_set if not href.isabs()
    ]

    allhrefs_rootrel_no_dest = [
        href.attempt_relative_href(pathname2url(cdup) + '/')
        for href in allhrefs_no_dest
    ]
    processedpaths = [href.getpath() for href in allhrefs_rootrel_no_dest
                      ]  # Paths relative to repository root

    processedexistingpaths = [
        path for path in processedpaths if os.path.exists(path)
    ]

    return processedexistingpaths
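
# --- Illustrative note: the processed set above is plain set algebra.
# Everything reachable with include_processed=True, minus everything
# reachable with include_processed=False, minus destination directories:
# processed = (completed | hrefs) - (unproc_completed | unproc_hrefs) - dests
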
def run(_xmldoc,
        _tag,
        dc_dgsfile_href,
        cic_href,
        grow_factor_numericunits=numericunitsvalue(1.2)):

    # dc_dgsfile is dgsfile element under measurement element

    dc_dgsfile_path = dc_dgsfile_href.getpath()

    # cic file is the camera intrinsic calibration
    #cic_file = os.path.join("/dataawareness/Camera_Calibration_files","flir_a35_63201313_calib_20150614_20150517sdh4.cic")
    cic_file = cic_href.getpath()

    # load in calibration
    calib = intrinsic_calibration.intrinsic_calibration.fromfile(cic_file)

    # create undistortion processor
    processor = intrinsic_calibration.undistortion_processor(
        calib, calib.sizex, calib.sizey,
        int(calib.sizex * grow_factor_numericunits.value()),
        int(calib.sizey * grow_factor_numericunits.value()), cv2.INTER_LINEAR,
        1.0)

    # load in .dgs file

    (dgs_metadata, dgs_wfmdict) = dgf.loadsnapshot(dc_dgsfile_path,
                                                   memmapok=True)

    new_wfmdict = dg_undistort.undistort_wfmdict(processor, dgs_wfmdict)

    undistorteddgs = "%s_undistorted.dgs" % (posixpath.splitext(
        dc_dgsfile_href.get_bare_unquoted_filename())[0])
    undistortedhref = hrefvalue(quote(undistorteddgs),
                                contexthref=dc_dgsfile_href)
    dgf_write = dgf.creat(undistortedhref.getpath())
    dgf.startchunk(dgf_write, "SNAPSHOT")

    # provide identical metadata chunk
    dgf.writemetadata(dgf_write, dgs_metadata)

    # Write channels in same order as original waveform (dgs_wfmdict is an ordereddict)
    for wfmname in dgs_wfmdict:
        dgf.writenamedwfm(dgf_write, new_wfmdict[wfmname])
        pass
    dgf.endchunk(dgf_write)  # SNAPSHOT
    dgf.close(dgf_write)

    # ipython interactive execution only works properly if the results
    # are returned at the very bottom of the function

    return {"dc:dgs_undistorted": undistortedhref}
Example #8
def cleanup_dest_find_files(desthref):
    destpath = desthref.getpath()

    filelist = []
    dirdict = {}  # Dictionary by directory href of parent directory href
    for filename in os.listdir(destpath):
        if os.path.isdir(os.path.join(destpath, filename)):
            # Recurse into subdirectory
            filehref = dc_value.hrefvalue(quote(filename + "/"),
                                          contexthref=desthref)

            (subfilelist, subdirdict) = cleanup_dest_find_files(filehref)
            filelist.extend(subfilelist)
            dirdict.update(subdirdict)
            dirdict[filehref] = desthref
            pass
        else:
            filehref = dc_value.hrefvalue(quote(filename),
                                          contexthref=desthref)
            filelist.append((desthref, filehref))
            pass
        pass
    return (filelist, dirdict)
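
# --- Illustrative sketch: cleanup_dest_find_files() is a manual recursive
# directory walk that records each file with its parent directory href. A
# standalone equivalent using os.walk() with plain paths:
import os

def find_files_and_dirs(destpath):
    filelist = []  # (parent directory, file path) pairs
    dirdict = {}  # subdirectory -> parent directory
    for dirpath, dirnames, filenames in os.walk(destpath):
        for dirname in dirnames:
            dirdict[os.path.join(dirpath, dirname)] = dirpath
        for filename in filenames:
            filelist.append((dirpath, os.path.join(dirpath, filename)))
    return (filelist, dirdict)
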
    def href_eval_node(self, contextxmldoc, context, node):
        if isinstance(node, basestring):
            hrefobj = dc_value.hrefvalue(
                node, contexthref=contextxmldoc.getcontexthref())
            return self.href_eval_href(hrefobj)
        elif isinstance(node, collections.Sequence):
            result = []
            for subnode in node:
                result.append(
                    self.href_eval_node(contextxmldoc, context, subnode))
                pass
            return result
        else:  # Should be a single node
            # Work around lxml bug whereby node doesn't have a
            # parent structure that will get you to the document root...
            if node.getparent() is None:
                contextxmldoc.possible_root_ids.add(id(node))
                pass

            hrefobj = dc_value.hrefvalue.fromxml(contextxmldoc, node)
            return self.href_eval_href(hrefobj)
        pass
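
# --- Illustrative sketch: href_eval_node() is a type-dispatched recursion.
# Strings become hrefs directly, sequences recurse element-wise, and anything
# else is treated as a single XML node. A standalone skeleton of the pattern:
def dispatch_node(node, string_fn, leaf_fn):
    if isinstance(node, str):
        return string_fn(node)
    elif isinstance(node, (list, tuple)):
        return [dispatch_node(subnode, string_fn, leaf_fn) for subnode in node]
    else:
        return leaf_fn(node)
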
Example #11
def main(args=None):

    if args is None:
        args = sys.argv
        pass

    positionals = []

    argc = 1
    while argc < len(args):
        arg = args[argc]

        if arg == "-h" or arg == "--help":
            print("""Usage: %s parent.chf input.chx output.chf""" % (args[0]))
            sys.exit(0)
            pass
        elif arg.startswith("-"):
            raise ValueError("Unknown flag: %s" % (arg))
        else:
            positionals.append(arg)
            pass
        argc += 1
        pass

    parent = positionals[0]
    infile = positionals[1]
    outfile = positionals[2]

    dc2_misc.chx2chf(
        dc_value.hrefvalue(pathname2url(parent),
                           contexthref=dc_value.hrefvalue("./")),
        dc_value.hrefvalue(pathname2url(infile),
                           contexthref=dc_value.hrefvalue("./")),
        dc_value.hrefvalue(pathname2url(outfile),
                           contexthref=dc_value.hrefvalue("./")))

    pass
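
# --- Illustrative sketch: the hand-rolled flag loop above could also be
# written with argparse (the positional names are assumptions taken from the
# usage string); pass sys.argv[1:]:
import argparse

def parse_args(argv=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("parent", help="parent .chf file")
    parser.add_argument("infile", help="input .chx file")
    parser.add_argument("outfile", help="output .chf file")
    return parser.parse_args(argv)
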
Example #12
def run(_xmldoc, _tag, channel_name_str, frame_number_int,
        pixelsizex_numericunits, pixelsizey_numericunits,
        dc_dgs_undistorted_href):

    # dc_dgs_undistorted is dgs_undistorted element under measurement element

    dc_dgs_undistorted_path = dc_dgs_undistorted_href.getpath()

    # load in .dgs file
    dgfh = dgf.open(dc_dgs_undistorted_path)
    chunk = dgf.nextchunk(dgfh)
    chunk_metadata, wave_forms, wfmdict = dgf.procSNAPSHOT(dgfh, memmapok=True)

    print("")

    xlen = numeric_input("Enter specimen x-axis length and units --> ")

    # print("Got xlen: %f mm" % (xlen.value("mm")))

    ylen = numeric_input("Enter specimen y-axis length and units --> ")
    # print("Got ylen: %f mm" % (ylen.value("mm")))

    #face_viewed=selection_input({"f":"front","b":"back"},"Enter face viewed",default="f")

    #zpos=numeric_input("Enter z position viewed and units --> ",default=numericunitsvalue("0 m"))

    gotimage = wfmdict[channel_name_str].data[:, ::-1, int(frame_number_int)].T
    #origin_pixels=position_input(gotimage,"Select origin")
    pos1_pixels = position_input(gotimage,
                                 "select point at minimum x, minimum y")
    print(" minimum x, minimum y = (%f,%f)" % (pos1_pixels[0], pos1_pixels[1]))

    pos2_pixels = position_input(gotimage,
                                 "select point at minimum x, maximum y")
    print(" minimum x, maximum y = (%f,%f)" % (pos2_pixels[0], pos2_pixels[1]))

    pos3_pixels = position_input(gotimage,
                                 "select point at maximum x, maximum y")
    print(" maximum x, maximum y = (%f,%f)" % (pos3_pixels[0], pos3_pixels[1]))

    pos4_pixels = position_input(gotimage,
                                 "select point at maximum x, minimum y")
    print(" maximum x, minimum y = (%f,%f)" % (pos4_pixels[0], pos4_pixels[1]))

    new_x_dim = int(xlen.value('m') / pixelsizex_numericunits.value('m'))
    new_y_dim = int(ylen.value('m') / pixelsizey_numericunits.value('m'))

    pts1 = np.array((pos1_pixels, pos2_pixels, pos3_pixels, pos4_pixels),
                    dtype='d')
    pts2 = np.array(((0.0, 0.0), (0.0, new_y_dim - 1),
                     (new_x_dim - 1, new_y_dim - 1), (new_x_dim - 1, 0.0)))

    # Reverse sense of pts2 so our transform doesn't give the image an extra flip, considering minimum x is to the lower left
    pts2[:, 1] = new_y_dim - 1 - pts2[:, 1]

    #vecplusy=pts1[1,:]-pts1[0,:]
    #vecplusx=pts1[3,:]-pts1[0,:]
    #origindist=np.sqrt(np.sum((pts1-origin_pixels)**2.0,1))
    #originidx=np.argmin(origindist)

    #print(origin_pixels)
    #print(origindist)
    #print(originidx)
    #originstrings=("minxminy","minxmaxy","maxxmaxy","maxxminy")

    #if (origindist[originidx] > 30):
    #    print("WARNING... CLICKED ORIGIN DOES NOT SEEM TO LINE UP")
    #    print("WITH A CORNER. USING NEAREST CORNER ANYWAY")
    #    pass

    #pts2=pts2-originshift[originidx]

    transformation_matrix = cv2.getPerspectiveTransform(
        np.float32(pts1), np.float32(pts2))

    #dst = cv2.warpPerspective(im_undistorted,transformation_matrix,(xlen.value('m')/pixelsizex.value('m'),ylen.value('m')/pixelsizey.value('m')))

    new_wfmdict = perspective_wfmdict(
        transformation_matrix, pixelsizex_numericunits,
        pixelsizey_numericunits, new_x_dim, new_y_dim, wfmdict,
        dg_eval.geom(wfmdict[channel_name_str], raw=True))

    orthographicdgs = "%s_orthographic.dgs" % (posixpath.splitext(
        dc_dgs_undistorted_href.get_bare_unquoted_filename())[0])
    orthographichref = hrefvalue(quote(orthographicdgs),
                                 contexthref=dc_dgs_undistorted_href)

    dgf_write = dgf.creat(orthographichref.getpath())
    dgf.startchunk(dgf_write, "SNAPSHOT")

    # provide empty metadata chunk
    EmptyMetadata = {}
    dgf.writemetadata(dgf_write, EmptyMetadata)

    # Write channels in same order as original waveform
    for origwfm in wave_forms:
        dgf.writenamedwfm(dgf_write, new_wfmdict[origwfm.Name])
        pass
    dgf.endchunk(dgf_write)  # SNAPSHOT
    dgf.close(dgf_write)

    dgf.close(dgfh)
    # ipython interactive execution only works properly if the results
    # are returned at the very bottom of the function

    return {
        "dc:dgs_orthographic":
        orthographichref,
        "dc:dgs_orthographic_xlen":
        xlen,
        "dc:dgs_orthographic_ylen":
        ylen,
        "dc:dgs_orthographic_cornerpts_imagepixels_origin_incr_y_then_x":
        arrayvalue(pts1),
        "dc:dgs_orthographic_pixelsizex":
        pixelsizex_numericunits,
        "dc:dgs_orthographic_pixelsizey":
        pixelsizey_numericunits,
    }
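
# --- Illustrative sketch (standalone): the rectification above maps four
# clicked corner pixels onto a regular output grid with
# cv2.getPerspectiveTransform() and would resample with cv2.warpPerspective().
# A synthetic-data equivalent:
import numpy as np
import cv2

def rectify(image, corner_pixels, new_x_dim, new_y_dim):
    """corner_pixels: 4x2, ordered (minx,miny),(minx,maxy),(maxx,maxy),(maxx,miny)."""
    pts1 = np.float32(corner_pixels)
    pts2 = np.float32([(0.0, 0.0), (0.0, new_y_dim - 1),
                       (new_x_dim - 1, new_y_dim - 1), (new_x_dim - 1, 0.0)])
    # Reverse the y sense, as in the original, so minimum x,y lands lower left
    pts2[:, 1] = new_y_dim - 1 - pts2[:, 1]
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    return cv2.warpPerspective(image, matrix, (new_x_dim, new_y_dim))
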
def run(_prxdoc,
        _step,
        _xmldoc,
        _element,
        prx_sheetspec_doc,
        prx_sheetspec,
        prx_outfilenamexpath_doc=None,
        prx_outfilenamexpath=None,
        prx_outfilename_str=None,
        linktag="prx:spreadsheet"):

    docdb = {}
    docdb[_prxdoc.get_filehref()] = _prxdoc
    docdb[_xmldoc.get_filehref()] = _xmldoc

    #sheetspec=prx_sheetspec_xmltree.get_xmldoc()
    docdb[prx_sheetspec_doc.get_filehref(
    )] = prx_sheetspec_doc  # don't wrap it in another xmldoc, so context lookups work properly

    stylesheet = etree.XML(transformation)

    prx_lookupfcn = prx_lookupfcn_ext(docdb, prx_sheetspec, _xmldoc,
                                      _element)  # .getroot())

    # Monkeypatch in nsmap from sheetspec into
    # all xsl:elements of stylesheet with a select attribute that contains
    # dyn:evaluate
    els_to_patch = stylesheet.xpath(
        "//xsl:*[contains(@select,'dyn:evaluate')]",
        namespaces={"xsl": "http://www.w3.org/1999/XSL/Transform"})
    for el in els_to_patch:
        parent = el.getparent()
        index = parent.index(el)

        parent.remove(el)

        # New element, with desired nsmap and copying all attributes
        newel = etree.Element(el.tag,
                              nsmap=prx_sheetspec.nsmap,
                              attrib=el.attrib)
        # Move element's children
        newel[:] = el[:]

        newel.text = el.text
        newel.tail = el.tail

        parent.insert(index, newel)
        pass

    # stylesheetdoc=etree.ElementTree(stylesheet)
    # stylesheetdoc.write("/tmp/foo.xsl")

    transform = etree.XSLT(stylesheet, extensions=prx_lookupfcn.extensions)
    ods = transform(
        etree.XML("<dummy/>")
    )  # Stylesheet calls the sheetspec() function to get the actual sheetspec. This avoids cutting the sheetspec out of its source document.

    # result=etree.tostring(ods)

    resultdoc = xmldoc.xmldoc.frometree(ods,
                                        contexthref=_xmldoc.getcontexthref())

    # evaluate prx_outfilename_str or prx_outfilenamexpath
    if prx_outfilename_str is None:

        namespaces = copy.deepcopy(prx_outfilenamexpath.nsmap)
        if None in namespaces:
            del namespaces[None]  # nsmap param cannot have None
            pass

        prx_outfilename = _xmldoc.xpathcontext(
            _element,
            prx_outfilenamexpath_doc.getattr(prx_outfilenamexpath, "select"),
            namespaces=namespaces)

        if not prx_outfilename.endswith(".ods"):
            raise ValueError("Output spreadsheet requires .ods extension")

        pass
    else:
        if prx_outfilenamexpath is not None:
            raise ValueError(
                "Both prx_outfilenamexpath and prx_outfilename specified (only one at a time is permitted)"
            )
        prx_outfilename = prx_outfilename_str
        pass

    ## put in dest dir if present
    #dest=_xmldoc.xpathsingle("dc:summary/dc:dest",default=None,namespaces={"dc": "http://limatix.org/datacollect"} )

    #if dest is None:
    # Put in same directory as _xmldoc
    outdirhref = _xmldoc.getcontexthref().leafless()
    #    pass
    #else:
    #    outdirhref=dc_value.hrefvalue.fromxml(_xmldoc,dest).leafless()
    #    pass

    prx_outfilehref = dc_value.hrefvalue(quote(prx_outfilename),
                                         contexthref=outdirhref)

    # ods spreadsheet context is a "content.xml" file inside the .ods file interpreted as a directory
    odscontext = dc_value.hrefvalue(quote(prx_outfilename) + "/",
                                    contexthref=outdirhref)
    resultdoc.setcontexthref(
        dc_value.hrefvalue("content.xml",
                           contexthref=odscontext))  # fix up xlink hrefs

    write_output(prx_outfilehref.getpath(), resultdoc.tostring())

    return {linktag: prx_outfilehref}
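
# --- Illustrative sketch (standalone lxml): an element's nsmap is immutable
# in lxml, which is why the monkeypatch above replaces each matched element
# with a rebuilt copy carrying the desired nsmap. The core move:
from lxml import etree

def rebuild_with_nsmap(el, nsmap):
    """Replace el in its tree with a copy that carries the given nsmap."""
    parent = el.getparent()
    index = parent.index(el)
    parent.remove(el)
    newel = etree.Element(el.tag, nsmap=nsmap, attrib=el.attrib)
    newel[:] = el[:]  # move the children
    newel.text = el.text
    newel.tail = el.tail
    parent.insert(index, newel)
    return newel
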
def rununlocked(_dest_href,
                dc_dgsfile_href,
                dc_density_numericunits,
                dc_specificheat_numericunits,
                dc_alphaz_numericunits,
                dc_alphaxy_numericunits,
                dc_nominal_lamina_thickness_numericunits,
                dc_lamina_thickness_numericunits,
                dc_numlayers_numericunits,
                dc_inversion_tile_size_y_numericunits,
                dc_inversion_tile_size_x_numericunits,
                dc_inversion_channel_str,
                dc_inversion_startframe_numericunits,
                dc_flashtime_numericunits,
                dc_inversion_reflectors_str,
                xydownsample_numericunits,
                tikparam_numericunits,
                dc_cadmodel_channel_str,
                dc_scalefactor_x_numericunits=dc_value.numericunitsvalue(1.0, "Unitless"),
                dc_scalefactor_y_numericunits=dc_value.numericunitsvalue(1.0, "Unitless"),
                dc_numplotrows_int=3,
                dc_numplotcols_int=4,
                do_singlestep_bool=True,
                dc_holesadjusted_xmltree=None,
                dc_source_approx_dx_numericunits=None,
                dc_source_approx_dy_numericunits=None):

    tikparam=tikparam_numericunits.value()

    dc_prefix_str="greensinversion_"

    reslist=[]

    if tikparam==0.0:
        tikparam=None  # 0 and disabled are equivalent
        pass

    #rho=float(1.555e3) # kg/m^3
    #c=float(850.0) # J/(kg* deg K)

    rho=dc_density_numericunits.value('kg/m^3')
    c=dc_specificheat_numericunits.value('J/(kg*K)')

    # alpha units are m^2/s
    #alphaz=float(.54e-6) # average value from measurements (Thermal_Properties.ods 11/25/15, averaging in-plane value from 90deg specimen and flash method values)
    alphaz=dc_alphaz_numericunits.value('m^2/s')

    #alphaxy=float(3.00e-6) # best evaluation based on Thermal_Properties.ods 3/19/16 based on 0/90 and quasi-isotropic layups
    alphaxy=dc_alphaxy_numericunits.value('m^2/s')


    # Lamina thickness based on thermal_properties.ods average thickness of 8.05 mm for 3(?) layers of 16 plies
    # nominal_lamina_thickness=8.05e-3/(3.0*16.0)
    nominal_lamina_thickness=dc_nominal_lamina_thickness_numericunits.value('m')

    # Load input file
    # NOTE: When changing input file: 
    #  1. Verify flashtime. Adjust as appropriate
    #  2. Verify startframe. Adjust as appropriate
    #  3. Execute file load code (below) and evaluate
    #    a) XStepMeters (must match dx)
    #    b) YStepMeters (must match dy)
    #    c) TStep (must match dt)
    #    d) bases[2][startframe]-flashtrigtime  (must match t0)
    #    e) bases[2][startframe:endframe].shape[0] (must match nt)
    #  4. Adjust dx, dy, dt, t0, and/or nt to satisfy above criteria
    #  5. Once adjusted, assert()s below should pass. 
    
    inputfile=dc_dgsfile_href.getpath()  # was "/tmp/CA-1_Bottom_2015_11_19_undistorted_orthographic.dgs"
    (inputfile_basename,inputfile_ext) = posixpath.splitext(dc_dgsfile_href.get_bare_unquoted_filename())

    if inputfile_ext==".bz2" or inputfile_ext==".gz":  # .dgs.bz2 or .dgs.gz
        orig_inputfile_basename = inputfile_basename
        inputfile_basename=posixpath.splitext(orig_inputfile_basename)[0]
        inputfile_ext = posixpath.splitext(orig_inputfile_basename)[1] + inputfile_ext
        pass


    #flashtrigtime=0.2 # seconds -- from pequod system
    #flashtime=flashtrigtime+1.0/100.0 # add 1/100th second delay of flash peak (wild guess!)
    flashtime=dc_flashtime_numericunits.value('s')

    #channel="DiffStack"
    channel=dc_inversion_channel_str
    # frame #165: Time relative to trigger = bases[2][165]-flashtrigtime
    #                                      = 0.052869999999999973
    #startframe=13  # zero-based, not one-based
    startframe=int(round(dc_inversion_startframe_numericunits.value('unitless')))

    (junkmd,wfmdict)=dgf.loadsnapshot(inputfile,memmapok=True)

    channel3d = "Proj" + dc_inversion_channel_str[:-4] # Proj + diffstack channel with _tex stripped
    objframe=coordframe()
    (obj, TexChanPrefix) = ndepart_from_dataguzzler_wfm(wfmdict[channel3d],wfmdict,objframe)


    channel_weights=channel+"_weights"
    if channel_weights not in wfmdict:
        channel_weights = None
        pass


    (ndim,DimLen,IniVal,Step,bases)=dg_eval.geom(wfmdict[channel],raw=True)
    (ndim,Coord,Units,AmplCoord,AmplUnits)=dg_eval.axes(wfmdict[channel],raw=True)
    XIniValMeters=dc_value.numericunitsvalue(IniVal[0],Units[0]).value('m')
    YIniValMeters=dc_value.numericunitsvalue(IniVal[1],Units[1]).value('m')

    # Apply scaling factor to XStepMeters (note that Coord, above, is not corrected!!!)
    XStepMeters=dc_value.numericunitsvalue(Step[0],Units[0]).value('m')*dc_scalefactor_x_numericunits.value()
    YStepMeters=dc_value.numericunitsvalue(Step[1],Units[1]).value('m')*dc_scalefactor_y_numericunits.value()
    TStep=Step[2]

    (saturation_fraction,saturation_map)=greensinversion.saturationcheck(wfmdict[channel].data.transpose((2,1,0)),startframe) 
    if saturation_fraction > .2: 
        raise ValueError("greensinversionstep: ERROR: %.1f%% of pixels are saturated at least once beyond start frame!" % (saturation_fraction*100.0))
    if saturation_fraction > .02:
        sys.stderr.write("greensinversionstep: WARNING: %.1f%% of pixels are saturated at least once beyond start frame!\n" % (saturation_fraction*100.0))
        pass

    # Apply spatial downsampling to keep inversion complexity under control
    #xydownsample=2

    xydownsample=int(round(xydownsample_numericunits.value("unitless")))

    # reflectors is a tuple of (z,ny,nx) tuples representing
    # possible z values for reflectors and how many y and x pieces
    # they should be split into.
    # it should be ordered from the back surface towards the
    # front surface. 

    # reflectors is (depth, reflector_ny,reflector_nx)

    # # need pre-calculation of z_bnd to determine reflectors
    # z_bnd=np.arange(nz+1,dtype='d')*dz  # z boundary starts at zero

    # reflectors=( (z_bnd[15],4,4),
    #              (z_bnd[9],4,4),
    #              (z_bnd[5],6,6),
    #              (z_bnd[2],10,10))
    
    reflectors_float=ast.literal_eval(dc_inversion_reflectors_str)
    
    # reflectors can just be reflectors_float but this is here to avoid
    # some temporary recalculations 3/29/16
    reflectors=tuple([ (np.float64(reflector[0]),reflector[1],reflector[2]) for reflector in reflectors_float])
    deepest_tstar = reflectors[0][0]**2.0/(np.pi*alphaz)

    endframe = np.argmin(np.abs(bases[2]-flashtime-deepest_tstar*2.0))   # see also generateinversionsteps() call to timelimitmatrix()

    # step sizes for inversion
    dx=XStepMeters*1.0*xydownsample
    dy=YStepMeters*1.0*xydownsample
    dt=TStep
    t0=bases[2][startframe]-flashtime
    nt=bases[2][startframe:endframe].shape[0]

    dz=nominal_lamina_thickness  # use nominal value so we don't recalculate everything for each sample
    
    # These now satisfied by definition
    #assert(XStepMeters==dx)
    #assert(YStepMeters==dy)
    #assert(TStep==dt)
    #assert(bases[2][startframe]-flashtrigtime==t0)  # Start time matches  NOTE.... CHANGED FROM flashtrigtime to flashtime
    #assert(bases[2][startframe:].shape[0]==nt) # Number of frames match

    # These are parameters for the reconstruction, not the experimental data

    #nz=16   # NOTE: nz*dz should match specimen thickness
    nz=int(round(dc_numlayers_numericunits.value('unitless')))
    
    # size of each tile for tiled inversion
    #maxy=38.0e-3
    #maxx=36.0e-3
    maxy=dc_inversion_tile_size_y_numericunits.value('m')
    maxx=dc_inversion_tile_size_x_numericunits.value('m')

    source_approx_dy=None
    source_approx_dx=None
    
    if dc_source_approx_dy_numericunits is not None:
        source_approx_dy=dc_source_approx_dy_numericunits.value('m')
        pass

    if dc_source_approx_dx_numericunits is not None:
        source_approx_dx=dc_source_approx_dx_numericunits.value('m')
        pass

    greensconvolution_params=read_greensconvolution()

    greensconvolution_params.get_opencl_context("GPU",None)
    
    

    #(kx,ky,kz,
    # ny,nx,
    # z,y,x,
    # zgrid,ygrid,xgrid,
    # z_bnd,y_bnd,x_bnd,
    # flashsourcevecs,
    # reflectorsourcevecs,
    # depths,tstars,
    # conditions,prevconditions,prevscaledconditions,
    # rowselects,
    # inversions,
    # inversionsfull,
    # inverses,
    # nresults,
    # ss_rowselects,
    # ss_inversions,
    # ss_inversionsfull,
    # ss_inverses,
    # ss_nresults)=greensinversion.greensinversion_lookup(cache_dir,rho,c,alphaz,alphaxy,dz,dy,dx,nz,maxy,maxx,t0,dt,nt,reflectors)

    (kx,ky,kz,
     ny,nx,
     y,x,
     ygrid,xgrid,
     y_bnd,x_bnd,
     num_sources_y,num_sources_x,
     trange,
     rowscaling,
     flashsourcecolumnscaling,flashsourcevecs,
     reflectorcolumnscaling,reflectorsourcevecs,
     depths,tstars,
     conditions,prevconditions,prevscaledconditions,
     rowselects,inversions,inversionsfull,inverses,nresults)=greensinversion.setupinversionprob(rho,c,alphaz,alphaxy,dy,dx,maxy,maxx,t0,dt,nt,reflectors,source_approx_dy=source_approx_dy,source_approx_dx=source_approx_dx)


    # can view individual source maps with
    # reflectorsourcevecs[:,0].reshape(ny,nx,nt),
    # e.g. imshow(reflectorsourcevecs[:,5].reshape(ny,nx,nt)[:,:,200])


    #pl.figure(1)
    #pl.clf()
    #pl.imshow(reflectorsourcevecs[0][:,5].reshape(ny,nx,nt)[:,:,200])


    
    #pl.figure(2)
    #pl.clf()
    #pl.imshow(reflectorsourcevecs[1][:,5].reshape(ny,nx,nt)[:,:,200])
    
    #print("Generating inversion steps")
    #
    #(rowselects,inversions,inversionsfull,inverses,nresults)=greensinversion.generateinversionsteps(rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,tstars,ny,nx,trange,depths)

    if do_singlestep_bool:
        print("Generating single-step inversion")

        (ss_rowselects,ss_inversions,ss_inversionsfull,ss_inverses,ss_nresults)=greensinversion.generatesinglestepinversion(rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,tstars,ny,nx,trange,depths)
        pass
    # To plot: 
    # loglog(trange+dt/2,T[20,20,:])
    # imshow(T[:,:,200]

    # Break object into tiles, perform inversion on each tile

    (minyminx_corners,yranges,xranges,contributionprofiles)=greensinversion.build_tiled_rectangle(ny,nx,dy,dx,reflectors,wfmdict[channel].data.transpose((2,1,0)),xydownsample)



    inputmats = [ wfmdict[channel].data[(xidx*xydownsample):((xidx+nx)*xydownsample):xydownsample,(yidx*xydownsample):((yidx+ny)*xydownsample):xydownsample,startframe:endframe].transpose((2,1,0)) for (yidx,xidx) in minyminx_corners ]  # transpose to convert dataguzzler axis ordering (x,y,t) to greensinversion ordering (t,y,x)

    print("Filling holes...")
    inputmats_holesfilled = [ greensinversion.fillholes.fillholes_flat(inputmat) for inputmat in inputmats ]
    print("Done filling holes.")

    parallelevaluate=False   # GPU is currently slightly SLOWER here (WHY?) so we don't use it
    if parallelevaluate:
        inversionevalfunc=greensinversion.inversion.parallelperforminversionsteps
        OpenCL_CTX=greensconvolution_params.get_opencl_context()   #greensinversion.inversion.Get_OpenCL_Context()
        pass
    else:
        inversionevalfunc=greensinversion.inversion.serialperforminversionsteps
        OpenCL_CTX=None
        pass
    
    nextfignum=1

    # tikparam diagnostic plots (multi-step)
    pl.figure(nextfignum)
    pl.clf()
    for inversioncnt in range(len(inversions)):
        pl.plot(inverses[inversioncnt][1])
        pass
    pl.xlabel('Singular value index')
    pl.ylabel('Magnitude')
    nextfignum+=1

    if do_singlestep_bool:
        pl.figure(nextfignum)
        pl.clf()
        pl.plot(ss_inverses[0][1])
        pl.xlabel('Singular value index (single step)')
        pl.ylabel('Magnitude')
        nextfignum+=1
        pass


    # scaled tikparam
    #raise ValueError("foo!")
    
    #z_reference=reflectors[-1][0]  # z coordinate of shallowest reflectors (recall reflectors are deepest first)
    #scaledtikparams=greensinversion.scale_tikparam(tikparam,z_reference,reflectors)

    #if tikparam is not None:
    #    # tikparam scaled diagnostic plot (multi-step)
    #    pl.figure(nextfignum)
    #    pl.clf()
    #    for inversioncnt in range(len(inversions)):
    #        pl.plot(inverses[inversioncnt][1] * (tikparam/scaledtikparams[inversioncnt])) #  * z_values[inversioncnt]/z_reference)
    #        pass
    #        pl.xlabel('Scaled singular value index')
    #        pl.ylabel('Magnitude')
    #        nextfignum+=1
    #    pass
    
    

    (inversioncoeffs_list,errs_list,tikparams_list) = inversionevalfunc(OpenCL_CTX,
                                                                        rowselects,
                                                                        inversions,
                                                                        inversionsfull,
                                                                        inverses,
                                                                        nresults,
                                                                        inputmats_holesfilled,
                                                                        tikparam)
    


    fullinverse=np.zeros((len(reflectors)+1,wfmdict[channel].data.shape[1]//xydownsample,wfmdict[channel].data.shape[0]//xydownsample),dtype='d')
    fullinverse_x_bnd=IniVal[0]-Step[0]*xydownsample/2.0 + np.arange(DimLen[0]//xydownsample+1,dtype='d')*Step[0]*xydownsample
    fullinverse_y_bnd=IniVal[1]-Step[1]*xydownsample/2.0 + np.arange(DimLen[1]//xydownsample+1,dtype='d')*Step[1]*xydownsample
    
    for tile_idx in range(len(minyminx_corners)):
        (yidx,xidx)=minyminx_corners[tile_idx]
        
        fullinverse[:,yidx:(yidx+ny),xidx:(xidx+nx)] += greensinversion.buildconcreteinverse(inversioncoeffs_list[tile_idx],reflectors,ygrid,xgrid,y_bnd,x_bnd,ny,nx,num_sources_y,num_sources_x)*contributionprofiles[tile_idx]
        pass

    # raise ValueError("Debugging!")
        
    if do_singlestep_bool:

        (ss_inversioncoeffs_list,ss_errs_list,ss_tikparams_list) = inversionevalfunc(OpenCL_CTX,
                                                                                     ss_rowselects,
                                                                                     ss_inversions,
                                                                                     ss_inversionsfull,
                                                                                     ss_inverses,
                                                                                     ss_nresults,
                                                                                     inputmats_holesfilled,
                                                                                     tikparam)
        

        ss_fullinverse=np.zeros((len(reflectors)+1,wfmdict[channel].data.shape[1]//xydownsample,wfmdict[channel].data.shape[0]//xydownsample),dtype='d')

        for tile_idx in range(len(minyminx_corners)):
            (yidx,xidx)=minyminx_corners[tile_idx]
        
            ss_fullinverse[:,yidx:(yidx+ny),xidx:(xidx+nx)] += greensinversion.buildconcreteinverse(ss_inversioncoeffs_list[tile_idx],reflectors,ygrid,xgrid,y_bnd,x_bnd,ny,nx,num_sources_y,num_sources_x)*contributionprofiles[tile_idx]
            pass

        # for tile_idx in range(len(minyminx_corners)):
        #    (yidx,xidx)=minyminx_corners[tile_idx]
        #    #
        #    (ss_inversioncoeffs,ss_residual,errs,ss_tikparams)=greensinversion.performinversionsteps(ss_rowselects,ss_inversions,ss_inversionsfull,ss_inverses,ss_nresults,wfmdict[channel].data[(xidx*xydownsample):((xidx+nx)*xydownsample):xydownsample,(yidx*xydownsample):((yidx+ny)*xydownsample):xydownsample,startframe:endframe].transpose((2,1,0)),tikparam) # transpose to convert dataguzzler axis ordering (x,y,t) to greensinversion ordering (t,y,x)
        #    #
        #    ss_concreteinverse=greensinversion.buildconcreteinverse(ss_inversioncoeffs,reflectors,ygrid,xgrid,y_bnd,x_bnd,ny,nx)
        #    # concreteinverse is (len(reflectors)+1,ny,nx)... first layer is surface
        #    # ... accumulate contributions of each tile to full inverse
        #    ss_fullinverse[:,yidx:(yidx+ny),xidx:(xidx+nx)] += ss_concreteinverse*contributionprofiles[tile_idx]
        #    pass
        pass
        
    (fig,subplots,images)=greensinversion.plotconcreteinverse(nextfignum,dc_numplotrows_int,dc_numplotcols_int,saturation_map,fullinverse,reflectors,-10000.0,30000.0,fullinverse_y_bnd,fullinverse_x_bnd,num_sources_y,num_sources_x)
    nextfignum+=1
    if tikparam is None:
        outpng_fname="%s_greensinversion.png" % (inputfile_basename)
        movieoutdirname="%s_greensinversion_movie/" % (inputfile_basename)
        movieoutfilename="%s_greensinversion_movie_depth_%%05.2f.png" % (inputfile_basename)
        pass
    else:
        outpng_fname="%s_greensinversion_tik_%g.png" % (inputfile_basename,tikparam)
        movieoutdirname="%s_greensinversion_tik_%g_movie/" % (inputfile_basename,tikparam)
        movieoutfilename="%s_greensinversion_tik_%g_movie_depth_%%05.2f.png" % (inputfile_basename,tikparam)
        pass

    outpng_href=dc_value.hrefvalue(quote(outpng_fname),_dest_href)
    fig.savefig(outpng_href.getpath())
    reslist.append( (("dc:greensinversion_figure",{ "tikparam": str(tikparam)}), outpng_href))
    
    movieoutdirhref=dc_value.hrefvalue(quote(movieoutdirname),contexthref=_dest_href)
    
    (nextfignum,plots,images,plothrefs,depths) = greensinversion.inversion.plotconcreteinversemovie(nextfignum,movieoutdirhref,movieoutfilename,saturation_map,fullinverse,reflectors,-10000.0,30000.0,fullinverse_y_bnd,fullinverse_x_bnd,num_sources_y,num_sources_x,dpi=300)

        
    if dc_holesadjusted_xmltree is not None:
        for plot in plots:
            ax=plot.gca()
            ax.xaxis.label.set_size(20)
            ax.yaxis.label.set_size(20)
            ax.title.set_size(20)
            pass

        # Add hole drawings for paper
        holesdoc=dc_holesadjusted_xmltree.get_xmldoc()
        for hole in holesdoc.xpath("(dc:hole|dc:annulus)[@num]"):
            numstr=holesdoc.xpathcontext(hole,"@num")[0]
            numnum=re.match(r"""(\d+)""",numstr).group(1)
            if hole.tag.endswith("hole") and len(holesdoc.xpath("dc:annulus[translate(@num,translate(@num,'0123456789',''),'') = '%s']" % (numnum))) > 0: 
                # if there is an annulus with this number, ignore the hole.
                continue

            holecenterx=dc_value.numericunitsvalue.fromxml(holesdoc,holesdoc.child(hole,"dc:xpos"))
            holecentery=dc_value.numericunitsvalue.fromxml(holesdoc,holesdoc.child(hole,"dc:ypos"))
            holediameter=dc_value.numericunitsvalue.fromxml(holesdoc,holesdoc.child(hole,"dc:diameter"))
            holeradius=holediameter/2.0
            holedepth=dc_value.numericunitsvalue.fromxml(holesdoc,holesdoc.child(hole,"dc:depth"))
            for plot in plots:
                ax=plot.gca()
                circ=pl.Circle((holecenterx.inunits('mm').value(),
                                holecentery.inunits('mm').value()),
                               holeradius.inunits('mm').value(),
                               facecolor='none')
                ax.add_artist(circ)
                pass
            pass
        for plotcnt in range(len(plots)):
            # rewrite plot files
            plot=plots[plotcnt]
            plothref=plothrefs[plotcnt]
            
            plot.savefig(plothref.getpath(),dpi=300)

            pass

        pass

    for cnt in range(len(plothrefs)):
        reslist.append( (("dc:greensinversion_movie_frame",{ "tikparam": str(tikparam),"depth":str(depths[cnt])}), plothrefs[cnt]))
        pass

    if do_singlestep_bool:
        (ss_fig,ss_subplots,ss_images)=greensinversion.plotconcreteinverse(nextfignum,dc_numplotrows_int,dc_numplotcols_int,saturation_map,ss_fullinverse,reflectors,-10000.0,30000.0,fullinverse_y_bnd,fullinverse_x_bnd,num_sources_y,num_sources_x)
        nextfignum+=1

        if tikparam is None:
            ss_outpng_fname="%s_ss_greensinversion.png" % (inputfile_basename)
            ss_movieoutdirname="%s_ss_greensinversion_movie/" % (inputfile_basename)
            ss_movieoutfilename="%s_ss_greensinversion_movie_depth_%%05.2f.png" % (inputfile_basename)
            pass
        else: 
            ss_outpng_fname="%s_ss_greensinversion_tik_%g.png" % (inputfile_basename,tikparam)
            ss_movieoutdirname="%s_ss_greensinversion_tik_%g_movie/" % (inputfile_basename,tikparam)
            ss_movieoutfilename="%s_ss_greensinversion_tik_%g_movie_depth_%%05.2f.png" % (inputfile_basename,tikparam)
            pass
        ss_outpng_href=dc_value.hrefvalue(quote(ss_outpng_fname),_dest_href)
        ss_fig.savefig(ss_outpng_href.getpath())
        reslist.append( (("dc:greensinversion_singlestep_figure", {"tikparam": str(tikparam) }), ss_outpng_href) )

        ss_movieoutdirhref=dc_value.hrefvalue(quote(ss_movieoutdirname),contexthref=_dest_href)

        (nextfignum,ss_plots,ss_images,ss_plothrefs,ss_depths) = greensinversion.inversion.plotconcreteinversemovie(nextfignum,ss_movieoutdirhref,ss_movieoutfilename,saturation_map,ss_fullinverse,reflectors,-10000.0,30000.0,fullinverse_y_bnd,fullinverse_x_bnd,num_sources_y,num_sources_x,dpi=300)

        for cnt in range(len(ss_plothrefs)):
            reslist.append( (("dc:ss_greensinversion_movie_frame",{ "tikparam": str(tikparam),"depth":str(ss_depths[cnt])}), ss_plothrefs[cnt]))
            pass
            

        pass
    
        
    outwfmdict={}

    outwfmdict[dc_cadmodel_channel_str]=copy.deepcopy(wfmdict[dc_cadmodel_channel_str])
    SplitTextureChans=dgm.GetMetaDatumWIStr(wfmdict[dc_cadmodel_channel_str],"TextureChans","").split("|")
    PrefixedTextureChans="|".join([ dc_prefix_str + TexChanPrefix + TexChan for TexChan in SplitTextureChans ])

    gi_3d=dg.wfminfo()
    #gi_3d.Name=dc_prefix_str+dc_cadmodel_channel_str
    gi_3d.Name="Proj"+dc_prefix_str+TexChanPrefix+dc_cadmodel_channel_str
    gi_3d.dimlen=np.array((1,),dtype='i8')
    gi_3d.data=np.array((1,),dtype='f')
    dgm.AddMetaDatumWI(gi_3d,dgm.MetaDatum("VRML97GeomRef",dc_cadmodel_channel_str))
    dgm.AddMetaDatumWI(gi_3d,dgm.MetaDatum("X3DGeomRef",dc_cadmodel_channel_str))
    #texchanprefix=gi_3d.Name[:gi_3d.Name.find(dc_unprefixed_texname_str)]
    dgm.AddMetaDatumWI(gi_3d,dgm.MetaDatum("TexChanPrefix",dc_prefix_str+TexChanPrefix))
    dgm.AddMetaDatumWI(gi_3d,dgm.MetaDatum("TextureChans",PrefixedTextureChans))
    outwfmdict[gi_3d.Name]=gi_3d

    outwfm=dg.wfminfo()
    #outwfm.Name="greensinversion"
    outwfm.Name=dc_prefix_str+dc_inversion_channel_str

    outwfmdict[outwfm.Name]=outwfm

    # Shift IniVals according to xydownsample:
    # IniVal[0] is the X coordinate of the center of the corner pixel of both
    # the undownsampled image and the downsampled image,
    # but the downsampled pixel is twice as big, so the corner of the image
    # itself has changed!
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("IniVal1",IniVal[0]))
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("IniVal2",IniVal[1]))
    
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("Step1",XStepMeters*xydownsample))
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("Step2",YStepMeters*xydownsample))
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("Coord1",Coord[0]))
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("Coord2",Coord[1]))
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("Units1",Units[0]))
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("Units2",Units[1]))
    
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("IniVal3",0.0))
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("Step3",1.0))
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("Coord3","Depth Index"))
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("Units3","unitless"))

    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("AmplCoord","Heating intensity"))
    dgm.AddMetaDatumWI(outwfm,dgm.MetaDatum("AmplUnits","J/m^2"))

    # Copy landmark metadata
    LandmarkMD = [ MDName for MDName in list(wfmdict[channel].MetaData.keys()) if MDName.startswith("LANDMARK_") ]
    for LandmarkName in LandmarkMD:
        dgm.AddMetaDatumWI(outwfm,copy.deepcopy(wfmdict[channel].MetaData[LandmarkName]))
        pass

    
    if channel_weights is not None:
        #outwfm_weights=copy.deepcopy(wfmdict[channel_weights])#dg.wfminfo()
        #outwfm_weights.Name="greensinversion_weights"
        outwfm_weights=dg.wfminfo()
        outwfm_weights.Name=dc_prefix_str+dc_inversion_channel_str+"_weights"
        outwfm_weights.data=wfmdict[channel_weights].data[::xydownsample,::xydownsample]
        outwfm_weights.dimlen=np.array(outwfm_weights.data.shape)
        outwfm_weights.ndim=2
        outwfm_weights.MetaData=copy.deepcopy(outwfm.MetaData)
        dgm.AddMetaDatumWI(outwfm_weights,dgm.MetaDatum("AmplCoord","Weighting"))
        dgm.AddMetaDatumWI(outwfm_weights,dgm.MetaDatum("AmplUnits","Unitless"))
        
        
        outwfmdict[outwfm_weights.Name]=outwfm_weights
        pass
    

    if do_singlestep_bool:
        ss_outwfm=copy.deepcopy(outwfm)
        ss_outwfm.Name="ss_greensinversion"
        
        outwfmdict[ss_outwfm.Name]=ss_outwfm
        pass

    
    # dgs file is written in (X,Y,Z) fortran order, so we write
    # dimlen in reverse order and transpose the data
    outwfm.ndim=3
    outwfm.dimlen=np.array(fullinverse.shape[::-1])
    outwfm.data=fullinverse.transpose().astype(np.float32)
    outwfm.NeedData=False
    outwfm.NeedMetaData=False
    outwfm.HaveData=True
    outwfm.HaveMetaData=True

    outwfm_saturationmap=dg.wfminfo()
    outwfm_saturationmap.Name="saturation_map"
    outwfmdict[outwfm_saturationmap.Name]=outwfm_saturationmap
    outwfm_saturationmap.dimlen=np.array(saturation_map.shape[::-1])
    outwfm_saturationmap.data=saturation_map.transpose().astype(np.float32)
    outwfm_saturationmap.ndim=outwfm_saturationmap.dimlen.shape[0]
    outwfm_saturationmap.NeedData=False
    outwfm_saturationmap.NeedMetaData=False
    outwfm_saturationmap.HaveData=True
    outwfm_saturationmap.HaveMetaData=True
    outwfm_saturationmap.MetaData=copy.deepcopy(outwfm.MetaData)
    
    if do_singlestep_bool:
        ss_outwfm.ndim=3
        ss_outwfm.dimlen=np.array(ss_fullinverse.shape[::-1])
        ss_outwfm.data=ss_fullinverse.transpose().astype(np.float32)
        ss_outwfm.NeedData=False
        ss_outwfm.NeedMetaData=False
        ss_outwfm.HaveData=True
        ss_outwfm.HaveMetaData=True
        pass


    if tikparam is None:
        outdgs_fname="%s_greensinversion.dgs" % (inputfile_basename)        
        pass
    else:
        outdgs_fname="%s_greensinversion_tik_%g.dgs" % (inputfile_basename,tikparam)
        pass
    outdgs_href=dc_value.hrefvalue(quote(outdgs_fname),_dest_href)
    dgf.savesnapshot(outdgs_href.getpath(),outwfmdict)

    reslist.append( (("dc:greensinversion_dgsfile",{"tikparam": str(tikparam)}), outdgs_href))
    
    if do_singlestep_bool:
        pass

    # 
    # greensconvolution_params.get_opencl_context()
    # tile_idx=14
    # (yidx,xidx)=minyminx_corners[tile_idx]
    # 
    # inputmats=[wfmdict[channel].data[(xidx*xydownsample):((xidx+nx)*xydownsample):xydownsample,(yidx*xydownsample):((yidx+ny)*xydownsample):xydownsample,startframe:endframe].transpose((2,1,0))]
    # greeninversion.inversion.parallelperforminversionsteps(greensconvolution_params.OpenCL_CTX,rowselects,inversions,inversionsfull,inverses,nresults,inputmats,None)    
    return reslist
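
# --- Illustrative sketch (standalone numpy): the tiled inversion above
# accumulates per-tile results into the full map, weighted by contribution
# profiles so overlapping tiles blend smoothly. The accumulation step alone:
import numpy as np

def accumulate_tiles(full_shape, corners, tile_results, profiles, ny, nx):
    """corners: (yidx, xidx) pairs; tile_results/profiles: (..., ny, nx)."""
    full = np.zeros(full_shape, dtype='d')
    for (yidx, xidx), result, profile in zip(corners, tile_results, profiles):
        full[:, yidx:(yidx + ny), xidx:(xidx + nx)] += result * profile
    return full
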
Example #15
def run(_xmldoc, _tag, _dest_href, frequency_float, leftlong_float,
        rightlong_float, botlat_float, toplat_float, xpixels_int, ypixels_int,
        windowwidth_meters_float):

    latbase = np.arange(ypixels_int, dtype='d') * (
        botlat_float - toplat_float) / ypixels_int + toplat_float

    longbase = np.arange(xpixels_int, dtype='d') * (
        rightlong_float - leftlong_float) / xpixels_int + leftlong_float

    # sys.stderr.write("latbase=%s\n" % (str(latbase)))
    # sys.stderr.write("longbase=%s\n" % (str(longbase)))

    hzlow = _xmldoc.xpathsinglefloat(
        "(dc:measurement/spectrumlog/mergedspectrumlog)[1]/hzlow")
    hzhigh = _xmldoc.xpathsinglefloat(
        "(dc:measurement/spectrumlog/mergedspectrumlog)[1]/hzhigh")
    hzstep = _xmldoc.xpathsinglefloat(
        "(dc:measurement/spectrumlog/mergedspectrumlog)[1]/hzstep")

    freqidx = int(round((frequency_float - hzlow) / hzstep))

    logentries = _xmldoc.xpath(
        "dc:measurement/spectrumlog/mergedspectrumlog[gpscoords]"
    )  # extract all log entries with gps coordinates

    # filter log entries, making sure we have the right number of frequency indices in them
    numfreqs = round((hzhigh - hzlow) / hzstep)

    #print numfreqs
    #print

    uselogentries = [
        logentries[entrynum] for entrynum in range(len(logentries)) if len(
            _xmldoc.xpathsinglecontextstr(logentries[entrynum], "dBs")
            [1:-1].split(',')) == numfreqs
    ]

    # print("Energymap: Checking number of frequency lines filtered %d entries down to %d" % (len(logentries),len(uselogentries)))

    dBss = _xmldoc.xpathcontextnumpystr(uselogentries, "dBs")

    SignalAmplitudes = np.array(
        [dBs[1:-1].split(',')[freqidx] for dBs in dBss], dtype='d')

    gpscoord_strs = _xmldoc.xpathcontextnumpystr(uselogentries, "gpscoords")

    # create list of gps coordinates of log entries
    gpscoords = np.array(
        [ast.literal_eval(gpscoord_str) for gpscoord_str in gpscoord_strs],
        dtype='d')

    # distmtx is distance in meters
    numlong = longbase.shape[0]
    numlat = latbase.shape[0]
    numgps = gpscoords.shape[0]
    #distmtx=np.zeros((numlong,numlat,numgps),dtype='d')
    Rearth = 6378e3
    meters_per_degree_latitude = 2.0 * np.pi * Rearth / 360.0
    radius_at_latitude = Rearth * np.cos(latbase[0] * np.pi / 180.0)
    meters_per_degree_longitude = 2.0 * np.pi * radius_at_latitude / 360.0

    (longmtx, latmtx) = np.meshgrid(longbase, latbase, indexing='ij')
    #sys.stderr.write("\n\ngpscoords shape"+str(gpscoords.shape)+"\n\n")
    longdistmtx = longmtx.reshape(numlong, numlat,
                                  1) - gpscoords[:, 1].reshape(1, 1, numgps)
    latdistmtx = latmtx.reshape(numlong, numlat, 1) - gpscoords[:, 0].reshape(
        1, 1, numgps)
    distmtx = np.sqrt((longdistmtx * meters_per_degree_longitude)**2.0 +
                      (latdistmtx * meters_per_degree_latitude)**2.0)

    #for latcnt in range(latbase.shape[0]):
    #    sys.stderr.write("latcnt=%d/%d\n" % (latcnt,latbase.shape[0]))
    #    for longcnt in range(longbase.shape[0]):
    #
    #        distmtx[latcnt,longcnt,:] = [ great_circle((latbase[latcnt],longbase[longcnt]),gpscoord).meters for gpscoord in gpscoords ]
    #
    #        pass
    #    pass
    # Weight according to a Gaussian basis function
    #Weights=(1.0/(windowwidth_meters_float*np.sqrt(2*np.pi)))*np.exp(-distmtx**2.0/(2.0*windowwidth_meters_float**2.0))
    # Weight according to an exponential basis function
    Weights = (1.0 / (windowwidth_meters_float * np.sqrt(2 * np.pi))) * np.exp(
        -np.abs(distmtx) / (2.0 * windowwidth_meters_float))
    # Weights axis: 0: longcnt
    #               1: latcnt
    #               2: logentry

    #sys.stderr.write("Weights=%s\n" % (str(Weights)))
    #sys.stderr.write("distmtx=%s\n" % (str(distmtx)))
    #sys.stderr.write("Shortest distance=%f meters\n" % (np.min(distmtx)))
    # sum Weight*signal amplitudes... and divide by total weight for each pixel
    amplmtx = np.tensordot(Weights, SignalAmplitudes,
                           (2, 0)) / np.sum(Weights, 2)

    #outpng="%s_9.0f%fl%fr%fb%ft%f.png" % (os.path.splitext(os.path.split(_xmldoc.filehref.getpath())[1])[0],hzlow+hzstep*freqidx,leftlong.value(),rightlong.value(),botlat.value(),toplat.value())

    outpng = "%s_9.0f%fl%fr%fb%ft%f.png" % (
        posixpath.splitext(_xmldoc.filehref.get_bare_unquoted_filename())[0],
        hzlow + hzstep * freqidx, leftlong_float, rightlong_float,
        botlat_float, toplat_float)

    #outpng_path=os.path.join(os.path.split(rflogpath)[0],outpng)
    outpng_href = hrefvalue(outpng, contexthref=_dest_href)

    #outpng_href=hrefvalue(urllib.pathname2url(outpng_path),contextdir=_xmldoc.getcontextdir())

    outkml = "%s_9.0f%fl%fr%fb%ft%f.kml" % (
        os.path.splitext(os.path.split(
            _xmldoc.filehref.getpath())[1])[0], hzlow + hzstep * freqidx,
        leftlong_float, rightlong_float, botlat_float, toplat_float)
    outkml_href = hrefvalue(
        outkml, _dest_href)  #os.path.join(os.path.split(rflogpath)[0],outkml)

    PILimg = scipy.misc.toimage(amplmtx.transpose(),
                                cmin=amplmtx.min(),
                                cmax=amplmtx.max())

    infotext = "min=%3.0f dB; max=%3.0f dB f=%.1f MHz" % (
        amplmtx.min(), amplmtx.max(), frequency_float / 1.e6)
    draw = PIL.ImageDraw.Draw(PILimg)
    draw.text((0, 0), infotext, 255,
              font=PIL.ImageFont.load_default())  #(255,255,255)
    draw.text((0, 18), infotext, 0, font=PIL.ImageFont.load_default())

    PILimg.save(outpng_href.getpath())

    # Generate kml file
    rflogs = _xmldoc.xpath("rflog")  # extract all rflog entries
    kmlgpscoordinates = []  # list of coordinate strings
    for rflog in rflogs:
        rflogentries = _xmldoc.xpathcontext(
            rflog, "mergedspectrumlog[gpscoords]"
        )  # extract all log entries with gps coordinates
        rflogentry_gpscoord_strs = _xmldoc.xpathcontextnumpystr(
            rflogentries, "gpscoords")

        # create list of gps coordinates of log entries
        rflogentry_gpscoords = np.array([
            ast.literal_eval(gpscoord_str)
            for gpscoord_str in rflogentry_gpscoord_strs
        ],
                                        dtype='d')

        kmlgpscoordinates.append("\n".join([
            "%.12f,%.12f,0.0" % (gpscoord[1], gpscoord[0])
            for gpscoord in rflogentry_gpscoords
        ]))
        pass

    kmlgpstracks = ""
    for kmlgpscoordstr in kmlgpscoordinates:
        kmlgpstracks += r"""
    <Placemark>
      <name>GPS Track</name>
      <description>GPS Track</description>
      <styleUrl>#trackstyle</styleUrl>
      <LineString>
        <altitudeMode>clampToGround</altitudeMode>
        <coordinates>%s
        </coordinates>
      </LineString>
    </Placemark>
""" % (kmlgpscoordstr)
        pass
        #kmlgpscoordinates="\n".join(["%.12f,%.12f,0.0" % (gpscoord[1],gpscoord[0]) for gpscoord in gpscoords])  # kmls take longitude, latitude, altitude
    outkmfh = open(outkml_href.getpath(), "w")
    outkmfh.write(r"""<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
  <Folder>
    <name>%s</name>
    <description>%s</description>
    <Style id="trackstyle">
      <LineStyle>
        <color>ff0000ff</color>
        <width>3</width>
      </LineStyle>
    </Style>
    <GroundOverlay>
      <name>%f Hz</name>
      <description>%f Hz</description>
      <color>7f00ff00</color> <!-- ABGR: alpha 7f blue 00 green ff red 00 -->
      <Icon>
        <href>%s</href>
      </Icon>
      <LatLonBox>
        <north>%.15f</north>
        <south>%.15f</south>
        <east>%.15f</east>
        <west>%.15f</west>
      </LatLonBox>
    </GroundOverlay>
    %s
  </Folder>
    </kml>""" % (infotext, infotext, hzlow + hzstep * freqidx,
                 hzlow + hzstep * freqidx,
                 outpng_href.attempt_relative_url(outkml_href), toplat_float,
                 botlat_float, rightlong_float, leftlong_float, kmlgpstracks))
    outkmfh.close()
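    # (KML <coordinates> take longitude,latitude,altitude -- hence the
    # gpscoord[1],gpscoord[0] swap in the placemark strings above -- and the
    # LatLonBox maps toplat/botlat/rightlong/leftlong to
    # north/south/east/west respectively)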

    metadata = {
        "frequency": str(hzlow + hzstep * freqidx),
        "leftlong": str(leftlong_float),
        "rightlong": str(rightlong_float),
        "botlat": str(botlat_float),
        "toplat": str(toplat_float)
    }

    return {
        "amplitudematrix": (metadata, repr(amplmtx)),
        "imagemap": (metadata, outpng_href),
        "kmlmap": (metadata, outkml_href)
    }
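# A minimal, self-contained sketch of the kernel-weighted gridding performed
# above, assuming only numpy. The 1/(w*sqrt(2*pi)) prefactor on the weights
# cancels in the weighted average, so it is omitted; all names here are
# illustrative rather than part of the original example.
def grid_amplitudes_sketch(longbase, latbase, gpscoords, amplitudes,
                           meters_per_degree_longitude,
                           meters_per_degree_latitude, width_meters):
    import numpy as np
    (longmtx, latmtx) = np.meshgrid(longbase, latbase, indexing='ij')
    # flat-earth distance from every grid point to every GPS fix
    longdist = (longmtx[..., np.newaxis] -
                gpscoords[:, 1]) * meters_per_degree_longitude
    latdist = (latmtx[..., np.newaxis] -
               gpscoords[:, 0]) * meters_per_degree_latitude
    dist = np.sqrt(longdist**2.0 + latdist**2.0)
    # exponential basis function, matching the example above
    weights = np.exp(-dist / (2.0 * width_meters))
    # weighted average of the amplitudes at each grid point
    return np.tensordot(weights, amplitudes, (2, 0)) / np.sum(weights, 2)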
def traverse_one(infiles,
                 infileobj,
                 pending,
                 completed,
                 dests,
                 hrefs,
                 recursive=False,
                 include_processed=True,
                 repository_root=None,
                 ignore_locking=False):
    # go through infile, searching for links

    assert (infileobj.href in pending)

    if infileobj.ftype == infileobj.IFT_OTHERUNK:
        pending.remove(infileobj.href)
        completed.add(infileobj.href)
        return  # cannot contain links

    infileobj.xmldocu.lock_ro()

    try:
        # print("traverse_one: ftype=%d" % (infileobj.ftype))
        if infileobj.ftype == infileobj.IFT_XLG:
            # .XLG file has implicit link to its .XLP file
            barefilename = infileobj.href.get_bare_unquoted_filename()
            (barename, ext) = posixpath.splitext(barefilename)
            if ext == ".xlg" and include_processed:
                xlpfile = barename + ".xlp"
                xlphref = dc_value.hrefvalue(quote(xlpfile),
                                             contexthref=infileobj.href)
                if hrefs is not None:
                    hrefs.add(xlphref)
                    pass
                if recursive:
                    add_to_traverse(repository_root,
                                    infiles,
                                    pending,
                                    completed,
                                    xlphref,
                                    ignore_locking=ignore_locking)
                    pass
                pass
            pass

        if infileobj.ftype == infileobj.IFT_XLG or (
                infileobj.ftype == infileobj.IFT_XLP and include_processed):
            # XLG and XLP files can have dest references
            # and we are tracking those
            # print("got xlg or xlp. infileobj.href=%s" % (infileobj.href.humanurl()))
            desttags = infileobj.xmldocu.xpath("//dc:dest[@xlink:href]")
            for desttag in desttags:
                #print("got desttag!")
                desthref = dc_value.hrefvalue.fromxml(infileobj.xmldocu,
                                                      desttag)
                if check_inside_root(
                        repository_root, desthref
                ):  # we don't add hrefs outside the specified root
                    dests.add(desthref)
                    pass
                pass
            pass

        if infileobj.ftype == infileobj.IFT_PRX:
            # .PRX file has implicit links to its input and output files

            # ... We follow links to .xlp files whether or not the recursive flag is set as long as we are doing include_processed

            (prx_inputfiles_element,
             prx_inputfiles_with_hrefs) = processtrak_common.getinputfiles(
                 infileobj.xmldocu)
            prx_outputdict = processtrak_common.build_outputdict(
                infileobj.xmldocu, prx_inputfiles_with_hrefs, ignore_locking)

            for prx_inputfile_href in prx_outputdict:
                if hrefs is not None:
                    if check_inside_root(
                            repository_root, prx_inputfile_href
                    ):  # we don't add hrefs outside the specified root
                        hrefs.add(prx_inputfile_href.fragless())
                        if include_processed:
                            if check_inside_root(
                                    repository_root,
                                    prx_outputdict[prx_inputfile_href].
                                    outputfilehref.fragless()
                            ):  # we don't add hrefs outside the specified root
                                hrefs.add(prx_outputdict[prx_inputfile_href].
                                          outputfilehref.fragless())
                                pass
                            pass
                        pass
                    pass

                if recursive:
                    add_to_traverse(repository_root,
                                    infiles,
                                    pending,
                                    completed,
                                    prx_inputfile_href.fragless(),
                                    ignore_locking=ignore_locking)
                    pass

                # follow link to output whether or not recursive is set
                if include_processed:
                    add_to_traverse(repository_root,
                                    infiles,
                                    pending,
                                    completed,
                                    prx_outputdict[prx_inputfile_href].
                                    outputfilehref.fragless(),
                                    ignore_locking=ignore_locking)
                    pass

                pass
            pass

        # Now go through all explicit links if we need hrefs
        #   ... unless we not including processed output and this is an .xlp file
        if (hrefs is not None
                or recursive) and (include_processed
                                   or infileobj.ftype != infileobj.IFT_XLP):
            if include_processed:
                all_links = infileobj.xmldocu.xpath("//*[@xlink:href]")
                pass
            else:
                all_links = infileobj.xmldocu.xpath(
                    "//*[not(self::prx:outputfile) and @xlink:href]",
                    namespaces={
                        "prx":
                        "http://limatix.org/processtrak/processinginstructions"
                    })
                pass

            for link in all_links:
                href = dc_value.hrefvalue.fromxml(infileobj.xmldocu,
                                                  link).fragless()
                if href.ismem():
                    continue  # ignore mem:// hrefs
                if check_inside_root(repository_root, href):
                    if hrefs is not None:
                        hrefs.add(href)
                        pass
                    if recursive:
                        add_to_traverse(repository_root,
                                        infiles,
                                        pending,
                                        completed,
                                        href,
                                        ignore_locking=ignore_locking)
                        pass
                    pass
                pass
            pass
        pass

    finally:
        infileobj.xmldocu.unlock_ro()
        pass

    pending.remove(infileobj.href)
    completed.add(infileobj.href)

    pass
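# The pending/completed sets manipulated above are one step of a standard
# worklist traversal; the driving loop is not part of this example, but
# presumably resembles this sketch (all names illustrative):
def traverse_all_sketch(start_hrefs, visit_one):
    # visit_one(href, pending, completed) must queue newly discovered links
    # into pending and then move href from pending to completed, exactly as
    # traverse_one() does above
    pending = set(start_hrefs)
    completed = set()
    while len(pending) > 0:
        href = next(iter(pending))
        visit_one(href, pending, completed)
        pass
    return completed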
Example #17
0
def add(args):
    argc = 0
    positionals = []
    all = False
    dryrun = False
    ignore_locking = False

    while argc < len(args):
        arg = args[argc]

        if arg == '-a':
            all = True
            pass
        elif arg == '-h' or arg == "--help":
            add_usage()
            sys.exit(0)
        elif arg == "--dry-run":
            dryrun = True
            pass
        elif arg == "--ignore-locking":
            ignore_locking = True
            pass
        elif arg.startswith('-'):
            raise ValueError("Unknown parameter: \"%s\"" % (arg))
        else:
            positionals.append(arg)
            pass
        argc += 1
        pass

    repo = Repo(".", search_parent_directories=True)

    (rootpath, cdup, prefix) = git_dir_context(repo)

    if "processed" in repo.active_branch.name:
        sys.stderr.write(
            "Will not add raw input files/scripts/etc. to processed\nbranch.\nSwitch branches with \"git checkout\" first!\n"
        )
        sys.exit(1)
        pass

    to_consider = [
        os.path.join(prefix, positional) for positional in positionals
    ]

    if all:
        autofound_files = find_recursive_xlg_prx_py(cdup)

        to_consider.extend(autofound_files)

        pass

    # fixup e.g. './filename.xlg' into 'filename.xlg' to avoid inconsistent references
    pathname_fixup = [
        input_file_name if os.path.split(input_file_name)[0] != '.' else
        os.path.split(input_file_name)[1] for input_file_name in to_consider
    ]

    input_file_hrefs = [
        dc_value.hrefvalue(pathname2url(input_file_name), contexthref=".")
        for input_file_name in pathname_fixup
    ]

    #import pdb
    #pdb.set_trace()

    (unprocessedpaths, xlppaths) = get_unprocessed(input_file_hrefs, cdup,
                                                   ignore_locking)

    print("Adding paths for commit:")
    for unprocessedpath in unprocessedpaths:
        print("   %s" % (unprocessedpath))
        pass
    print(" ")
    if not dryrun:
        # If we add too many paths in one step,
        # we get an 'argument list too long'
        #repo.git.add(unprocessedpaths)

        for pos in range((len(unprocessedpaths) + 9) // 10):
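            # each slice covers paths [pos*10, pos*10+10); the (n+9)//10
            # bound is ceiling division, so a final partial chunk is issued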
            repo.git.add(unprocessedpaths[(pos * 10):((pos * 10) + 10)])
            pass
        pass

    if len(xlppaths) > 0:
        print("Omitted processed output:")
        for xlppath in xlppaths:
            print("   %s" % (xlppath))
            pass
        pass

    print("\nNow run \"git commit\"")
    pass
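# The hand-rolled option loop in add() above (and repeated in
# add_processed() below) could equivalently be written with argparse; a
# sketch under that assumption, not the example's actual interface:
import argparse

def parse_add_args_sketch(args):
    parser = argparse.ArgumentParser(prog="limatix-git add")
    parser.add_argument("-a", dest="all", action="store_true")
    parser.add_argument("--dry-run", dest="dryrun", action="store_true")
    parser.add_argument("--ignore-locking", action="store_true")
    parser.add_argument("positionals", nargs="*")
    return parser.parse_args(args)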
Example #18
0
def add_processed(args):
    argc = 0
    positionals = []
    all = False
    dryrun = False
    ignore_locking = False

    while argc < len(args):
        arg = args[argc]

        if arg == '-a':
            all = True
            pass
        elif arg == '-h' or arg == "--help":
            add_processed_usage()
            sys.exit(0)
        elif arg == "--dry-run":
            dryrun = True
            pass
        elif arg == "--ignore-locking":
            ignore_locking = True
            pass
        elif arg.startswith('-'):
            raise ValueError("Unknown parameter: \"%s\"" % (arg))
        else:
            positionals.append(arg)
            pass
        argc += 1
        pass

    repo = Repo(".", search_parent_directories=True)

    (rootpath, cdup, prefix) = git_dir_context(repo)

    to_consider = [
        os.path.join(prefix, positional) for positional in positionals
    ]

    autofound_files = find_recursive_xlg_prx_py(cdup)
    to_consider_unprocessed = to_consider + autofound_files

    if all:
        to_consider.extend(autofound_files)
        pass

    # fixup e.g. './filename.xlg' into 'filename.xlg' to avoid inconsistent references
    to_consider_pathname_fixup = [
        input_file_name if os.path.split(input_file_name)[0] != '.' else
        os.path.split(input_file_name)[1] for input_file_name in to_consider
    ]
    to_consider_unprocessed_pathname_fixup = [
        input_file_name if os.path.split(input_file_name)[0] != '.' else
        os.path.split(input_file_name)[1]
        for input_file_name in to_consider_unprocessed
    ]

    input_file_hrefs_unprocessed = [
        dc_value.hrefvalue(
            pathname2url(input_file_name),
            contexthref=dc_value.hrefvalue(pathname2url(cdup) + '/'))
        for input_file_name in to_consider_unprocessed_pathname_fixup
    ]

    input_file_hrefs = [
        dc_value.hrefvalue(
            pathname2url(input_file_name),
            contexthref=dc_value.hrefvalue(pathname2url(cdup) + '/'))
        for input_file_name in to_consider_pathname_fixup
    ]

    (unprocessedpaths,
     xlppaths) = get_unprocessed(input_file_hrefs_unprocessed, cdup,
                                 ignore_locking)

    # Check that all unprocessedpaths are unmodified
    unprocessedpaths_fixup = [
        input_file_name if os.path.split(input_file_name)[0] != '.' else
        os.path.split(input_file_name)[1]
        for input_file_name in unprocessedpaths
    ]
    if len(unprocessedpaths_fixup) > 0:

        modified_unprocessed = repo.index.diff(None,
                                               paths=unprocessedpaths_fixup)
        if len(modified_unprocessed) > 0:
            sys.stderr.write("Modifed raw input files present:\n")
            for diff in modified_unprocessed:
                fname = diff.a_path
                sys.stderr.write("   %s\n" % (fname))
                pass
            sys.stderr.write(
                "\nAdd these to non-processed branch with git checkout <unprocessed_branch>;limatix-git add -a;git commit\n"
            )
            sys.exit(1)
            pass

        # Check for unprocessedabspaths that match untracked files.
        # Sort paths out by filename so that we can relatively
        # quickly match them with samepath()
        untracked_byname = {}
        for untracked in repo.untracked_files:
            untracked_fname = os.path.split(untracked)[1]
            if not untracked_fname in untracked_byname:
                untracked_byname[untracked_fname] = []
                pass
            untracked_byname[untracked_fname].append(
                (untracked, os.path.join(rootpath, untracked)))
            pass

        unprocessed_byname = {}
        for unprocessed in unprocessedpaths_fixup:
            unprocessed_fname = os.path.split(unprocessed)[1]
            if not unprocessed_fname in unprocessed_byname:
                unprocessed_byname[unprocessed_fname] = []
                pass
            unprocessed_byname[unprocessed_fname].append(unprocessed)
            pass
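        # (these grouping loops are the plain-dict equivalent of
        # collections.defaultdict(list); with a defaultdict the membership
        # test and empty-list initialization would be unnecessary)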
        # Find matches between the two
        commonnames = frozenset(untracked_byname.keys()).intersection(
            frozenset(unprocessed_byname.keys()))

        untracked_unprocessed = []
        for commonname in commonnames:
            for (untracked,
                 untracked_fullpath) in untracked_byname[commonname]:
                for unprocessed in unprocessed_byname[commonname]:
                    if (os.path.samefile(untracked_fullpath, unprocessed)):
                        if len(untracked_unprocessed) == 0:
                            sys.stderr.write(
                                "Untracked raw input files present:\n")
                            pass
                        untracked_unprocessed.append(untracked)
                        sys.stderr.write("   %s\n" % (untracked))
                        pass
                    pass
                pass
            pass
        if len(untracked_unprocessed) > 0:
            sys.stderr.write(
                "\nAdd these to non-processed branch with git checkout <unprocessed_branch>;limatix-git add -a;git commit\n"
            )
            sys.exit(1)

        pass

    if not "processed" in repo.active_branch.name:
        sys.stderr.write(
            "Will not add processed output to\nbranch without \"processed\" in its name.\nSwitch to a different branch with \"git checkout\" or Create a\nnew branch with \"git checkout -b\" to store\nprocessed output first!\n"
        )
        sys.exit(1)
        pass

    processedpaths = get_processed(input_file_hrefs_unprocessed,
                                   input_file_hrefs, cdup, ignore_locking)

    print("Adding paths for commit:")
    for processedpath in processedpaths:
        print("   %s" % (processedpath))
        pass
    print(" ")
    if not dryrun:
        repo.git.add(processedpaths)
        pass
    print("\nNow run \"git commit\"")
    pass
def rununlocked(_dest_href,
                dc_dgsfile_href,
                dc_density_numericunits,
                dc_specificheat_numericunits,
                dc_alphaz_numericunits,
                dc_alphaxy_numericunits,
                dc_nominal_lamina_thickness_numericunits,
                dc_lamina_thickness_numericunits,
                dc_numlayers_numericunits,
                dc_inversion_tile_size_y_numericunits,
                dc_inversion_tile_size_x_numericunits,
                dc_inversion_channel_str,
                dc_inversion_startframe_numericunits,
                dc_flashtime_numericunits,
                dc_inversion_reflectors_str,
                xydownsample_numericunits,
                tikparam_numericunits,
                dc_cadmodel_channel_str,
                dc_scalefactor_x_numericunits=dc_value.numericunitsvalue(
                    1.0, "Unitless"),
                dc_scalefactor_y_numericunits=dc_value.numericunitsvalue(
                    1.0, "Unitless"),
                dc_numplotrows_int=3,
                dc_numplotcols_int=4,
                do_singlestep_bool=True,
                dc_holesadjusted_xmltree=None,
                dc_source_approx_dx_numericunits=None,
                dc_source_approx_dy_numericunits=None):

    tikparam = tikparam_numericunits.value()

    dc_prefix_str = "greensinversion_"

    reslist = []

    if tikparam == 0.0:
        tikparam = None  # 0 and disabled are equivalent
        pass

    #rho=float(1.555e3) # kg/m^3
    #c=float(850.0) # J/(kg* deg K)

    rho = dc_density_numericunits.value('kg/m^3')
    c = dc_specificheat_numericunits.value('J/(kg*K)')

    # alpha units are m^2/s
    #alphaz=float(.54e-6) # average value from measurements (Thermal_Properties.ods 11/25/15, averaging in-plane value from 90deg specimen and flash method values)
    alphaz = dc_alphaz_numericunits.value('m^2/s')

    #alphaxy=float(3.00e-6) # best evaluation based on Thermal_Properties.ods 3/19/16 based on 0/90 and quasi-isotropic layups
    alphaxy = dc_alphaxy_numericunits.value('m^2/s')

    # Lamina thickness based on thermal_properties.ods average thickness of 8.05 mm for 3(?) layers of 16 plies
    # nominal_lamina_thickness=8.05e-3/(3.0*16.0)
    nominal_lamina_thickness = dc_nominal_lamina_thickness_numericunits.value(
        'm')

    # Load input file
    # NOTE: When changing input file:
    #  1. Verify flashtime. Adjust as appropriate
    #  2. Verify startframe. Adjust as appropriate
    #  3. Execute file load code (below) and evaluate
    #    a) XStepMeters (must match dx)
    #    b) YStepMeters (must match dy)
    #    c) TStep (must match dt)
    #    d) bases[2][startframe]-flashtrigtime  (must match t0)
    #    e) bases[2][startframe:endframe].shape[0] (must match nt)
    #  4. Adjust dx, dy, dt, t0, and/or nt to satisfy above criteria
    #  5. Once adjusted, assert()s below should pass.

    inputfile = dc_dgsfile_href.getpath(
    )  # was "/tmp/CA-1_Bottom_2015_11_19_undistorted_orthographic.dgs"
    (inputfile_basename, inputfile_ext) = posixpath.splitext(
        dc_dgsfile_href.get_bare_unquoted_filename())

    if inputfile_ext == ".bz2" or inputfile_ext == ".gz":  # .dgs.bz2 or .dgs.gz
        orig_inputfile_basename = inputfile_basename
        inputfile_basename = posixpath.splitext(orig_inputfile_basename)[0]
        inputfile_ext = posixpath.splitext(
            orig_inputfile_basename)[1] + inputfile_ext
        pass
    #flashtrigtime=0.2 # seconds -- from pequod system
    #flashtime=flashtrigtime+1.0/100.0 # add 1/100th second delay of flash peak (wild guess!)
    flashtime = dc_flashtime_numericunits.value('s')

    #channel="DiffStack"
    channel = dc_inversion_channel_str
    # frame #165: Time relative to trigger = bases[2][165]-flashtrigtime
    #                                      = 0.052869999999999973
    #startframe=13  # zero-based, not one-based
    startframe = int(
        round(dc_inversion_startframe_numericunits.value('unitless')))

    (junkmd, wfmdict) = dgf.loadsnapshot(inputfile, memmapok=True)

    channel3d = "Proj" + dc_inversion_channel_str[:
                                                  -4]  # Proj + diffstack channel with _tex stripped
    objframe = coordframe()
    (obj,
     TexChanPrefix) = ndepart_from_dataguzzler_wfm(wfmdict[channel3d], wfmdict,
                                                   objframe)

    channel_weights = channel + "_weights"
    if channel_weights not in wfmdict:
        channel_weights = None
        pass

    (ndim, DimLen, IniVal, Step, bases) = dg_eval.geom(wfmdict[channel],
                                                       raw=True)
    (ndim, Coord, Units, AmplCoord, AmplUnits) = dg_eval.axes(wfmdict[channel],
                                                              raw=True)
    XIniValMeters = dc_value.numericunitsvalue(IniVal[0], Units[0]).value('m')
    YIniValMeters = dc_value.numericunitsvalue(IniVal[1], Units[1]).value('m')

    # Apply scaling factor to XStepMeters (note that Coord, above, is not corrected!!!)
    XStepMeters = dc_value.numericunitsvalue(
        Step[0], Units[0]).value('m') * dc_scalefactor_x_numericunits.value()
    YStepMeters = dc_value.numericunitsvalue(
        Step[1], Units[1]).value('m') * dc_scalefactor_y_numericunits.value()
    TStep = Step[2]

    (saturation_fraction, saturation_map) = greensinversion.saturationcheck(
        wfmdict[channel].data.transpose((2, 1, 0)), startframe)
    if saturation_fraction > .2:
        raise ValueError(
            "greensinversionstep: ERROR: %.1f%% of pixels are saturated at least once beyond start frame!"
            % (saturation_fraction * 100.0))
    if saturation_fraction > .02:
        sys.stderr.write(
            "greensinversionstep: WARNING: %.1f%% of pixels are saturated at least once beyond start frame!\n"
            % (saturation_fraction * 100.0))
        pass

    # Apply spatial downsampling to keep inversion complexity under control
    #xydownsample=2

    xydownsample = int(round(xydownsample_numericunits.value("unitless")))

    # reflectors is a tuple of (z,ny,nx) tuples representing
    # possible z values for reflectors and how many y and x pieces
    # they should be split into.
    # it should be ordered from the back surface towards the
    # front surface.

    # reflectors is (depth, reflector_ny,reflector_nx)

    # # need pre-calculation of z_bnd to determine reflectors
    # z_bnd=np.arange(nz+1,dtype='d')*dz  # z boundary starts at zero

    # reflectors=( (z_bnd[15],4,4),
    #              (z_bnd[9],4,4),
    #              (z_bnd[5],6,6),
    #              (z_bnd[2],10,10))

    reflectors_float = ast.literal_eval(dc_inversion_reflectors_str)

    # reflectors can just be reflectors_float but this is here to avoid
    # some temporary recalculations 3/29/16
    reflectors = tuple([(np.float64(reflector[0]), reflector[1], reflector[2])
                        for reflector in reflectors_float])
    deepest_tstar = reflectors[0][0]**2.0 / (np.pi * alphaz)

    endframe = np.argmin(
        np.abs(bases[2] - flashtime - deepest_tstar * 2.0)
    )  # see also generateinversionsteps() call to timelimitmatrix()
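    # deepest_tstar is the characteristic diffusion time t* = z**2/(pi*alphaz)
    # of the deepest reflector; the endframe chosen above keeps frames out to
    # roughly flashtime + 2*t*, consistent with the timelimitmatrix() note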

    # step sizes for inversion
    dx = XStepMeters * 1.0 * xydownsample
    dy = YStepMeters * 1.0 * xydownsample
    dt = TStep
    t0 = bases[2][startframe] - flashtime
    nt = bases[2][startframe:endframe].shape[0]

    dz = nominal_lamina_thickness  # use nominal value so we don't recalculate everything for each sample

    # These now satisfied by definition
    #assert(XStepMeters==dx)
    #assert(YStepMeters==dy)
    #assert(TStep==dt)
    #assert(bases[2][startframe]-flashtrigtime==t0)  # Start time matches  NOTE.... CHANGED FROM flashtrigtime to flashtime
    #assert(bases[2][startframe:].shape[0]==nt) # Number of frames match

    # These are parameters for the reconstruction, not the experimental data

    #nz=16   # NOTE: nz*dz should match specimen thickness
    nz = int(round(dc_numlayers_numericunits.value('unitless')))

    # size of each tile for tiled inversion
    #maxy=38.0e-3
    #maxx=36.0e-3
    maxy = dc_inversion_tile_size_y_numericunits.value('m')
    maxx = dc_inversion_tile_size_x_numericunits.value('m')

    source_approx_dy = None
    source_approx_dx = None

    if dc_source_approx_dy_numericunits is not None:
        source_approx_dy = dc_source_approx_dy_numericunits.value('m')
        pass

    if dc_source_approx_dx_numericunits is not None:
        source_approx_dx = dc_source_approx_dx_numericunits.value('m')
        pass

    greensconvolution_params = read_greensconvolution()

    greensconvolution_params.get_opencl_context("GPU", None)

    #(kx,ky,kz,
    # ny,nx,
    # z,y,x,
    # zgrid,ygrid,xgrid,
    # z_bnd,y_bnd,x_bnd,
    # flashsourcevecs,
    # reflectorsourcevecs,
    # depths,tstars,
    # conditions,prevconditions,prevscaledconditions,
    # rowselects,
    # inversions,
    # inversionsfull,
    # inverses,
    # nresults,
    # ss_rowselects,
    # ss_inversions,
    # ss_inversionsfull,
    # ss_inverses,
    # ss_nresults)=greensinversion.greensinversion_lookup(cache_dir,rho,c,alphaz,alphaxy,dz,dy,dx,nz,maxy,maxx,t0,dt,nt,reflectors)

    kx = alphaxy * rho * c
    ky = alphaxy * rho * c
    kz = alphaz * rho * c

    trange = t0 + np.arange(nt, dtype='d') * dt

    gi_params = (rho, c, alphaz, alphaxy, dy, dx, maxy, maxx, t0, dt, nt,
                 reflectors, trange, greensconvolution_params)

    flat_gi_grid = build_gi_grid(dy, maxy, dx, maxx)
    (ny, nx, y, x, ygrid, xgrid, y_bnd, x_bnd) = flat_gi_grid

    num_sources_y = 2
    num_sources_x = 2

    if source_approx_dy is not None or source_approx_dx is not None:
        (num_sources_y,
         num_sources_x) = greensinversion.num_sources(y, x, y_bnd, x_bnd,
                                                      source_approx_dy,
                                                      source_approx_dx)
        pass

    # can view individual source maps with
    # reflectorsourcevecs[:,0].reshape(ny,nx,nt),
    # e.g. imshow(reflectorsourcevecs[:,5].reshape(ny,nx,nt)[:,:,200])

    #pl.figure(1)
    #pl.clf()
    #pl.imshow(reflectorsourcevecs[0][:,5].reshape(ny,nx,nt)[:,:,200])

    #pl.figure(2)
    #pl.clf()
    #pl.imshow(reflectorsourcevecs[1][:,5].reshape(ny,nx,nt)[:,:,200])

    # To plot:
    # loglog(trange+dt/2,T[20,20,:])
    # imshow(T[:,:,200]

    # Break object into tiles, perform inversion on each tile

    (minyminx_corners, yranges, xranges,
     contributionprofiles) = greensinversion.build_tiled_rectangle(
         ny, nx, dy, dx, reflectors, wfmdict[channel].data.transpose(
             (2, 1, 0)), xydownsample)

    inputmats = [
        wfmdict[channel].data[(xidx *
                               xydownsample):((xidx + nx) *
                                              xydownsample):xydownsample,
                              (yidx *
                               xydownsample):((yidx + ny) *
                                              xydownsample):xydownsample,
                              startframe:endframe].transpose((2, 1, 0))
        for (yidx, xidx) in minyminx_corners
    ]  # transpose to convert dataguzzler axis ordering (x,y,t) to greensinversion ordering (t,y,x)
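    # (shape check: each slice above is (nx, ny, nt) in dataguzzler (x,y,t)
    # order and becomes (nt, ny, nx) after the (2, 1, 0) transpose)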

    print("Filling holes...")
    inputmats_holesfilled = [
        greensinversion.fillholes.fillholes_flat(inputmat)
        for inputmat in inputmats
    ]
    print("Done filling holes.")

    parallelevaluate = False  # GPU is currently slightly SLOWER here (WHY?) so we don't use it
    if parallelevaluate:
        inversionevalfunc = greensinversion.inversion.parallelperforminversionsteps
        OpenCL_CTX = greensconvolution_params.get_opencl_context(
        )  #greensinversion.inversion.Get_OpenCL_Context()
        pass
    else:
        inversionevalfunc = greensinversion.inversion.serialperforminversionsteps
        OpenCL_CTX = None
        pass

    print("Evaluating curvatures")
    hires_factor = 2
    curvmat = obj.implpart.surfaces[
        0].intrinsicparameterization.interpolate_curvature(
            obj.implpart.surfaces[0],
            wfmdict[channel].data.shape[1] / xydownsample,
            wfmdict[channel].data.shape[0] / xydownsample)

    # curvmat is an ny x nx x 2x2 matrix (in the surface's uv
    # parameterization) representing the shape operator

    curvmat_hires = obj.implpart.surfaces[
        0].intrinsicparameterization.interpolate_curvature(
            obj.implpart.surfaces[0],
            wfmdict[channel].data.shape[1] * hires_factor // xydownsample,
            wfmdict[channel].data.shape[0] * hires_factor // xydownsample)

    # Set unknown curvatures to zero
    curvmat[np.isnan(curvmat)] = 0.0
    curvmat_hires[np.isnan(curvmat_hires)] = 0.0

    # These are only nominal physical sizes (in terms of nominal dx and dy of parameterization)
    curvmat_sizex = dx * wfmdict[channel].data.shape[0] / xydownsample
    curvmat_sizey = dy * wfmdict[channel].data.shape[1] / xydownsample

    print("Determining maximum principal curvatures")
    #maxabs_princcurvs = np.max(np.abs(eigvals_broadcast_nans(curvmat)),2)
    maxabs_princcurvs = np.max(np.abs(fast2x2evals(curvmat)), 2)

    print("Evaluating step sizes")
    stepsizemat = obj.implpart.surfaces[
        0].intrinsicparameterization.interpolate_stepsizes(
            obj.implpart.surfaces[0],
            wfmdict[channel].data.shape[1] // xydownsample,
            wfmdict[channel].data.shape[0] // xydownsample)

    stepsizemat_hires = obj.implpart.surfaces[
        0].intrinsicparameterization.interpolate_stepsizes(
            obj.implpart.surfaces[0],
            wfmdict[channel].data.shape[1] * hires_factor // xydownsample,
            wfmdict[channel].data.shape[0] * hires_factor // xydownsample)

    # Fill in invalid stepsizes with dx,dy
    ssm_xy_nelem = stepsizemat.shape[0] * stepsizemat.shape[1]
    ssm_nan_dx = np.isnan(stepsizemat.reshape(ssm_xy_nelem, 2)[:, 0])
    stepsizemat.reshape(ssm_xy_nelem, 2)[ssm_nan_dx, 0] = dx
    ssm_nan_dy = np.isnan(stepsizemat.reshape(ssm_xy_nelem, 2)[:, 1])
    stepsizemat.reshape(ssm_xy_nelem, 2)[ssm_nan_dy, 1] = dy

    ssm_hires_xy_nelem = stepsizemat_hires.shape[0] * stepsizemat_hires.shape[1]
    ssm_hires_nan_dx = np.isnan(
        stepsizemat_hires.reshape(ssm_hires_xy_nelem, 2)[:, 0])
    stepsizemat_hires.reshape(ssm_hires_xy_nelem, 2)[ssm_hires_nan_dx,
                                                     0] = dx / hires_factor
    ssm_hires_nan_dy = np.isnan(
        stepsizemat_hires.reshape(ssm_hires_xy_nelem, 2)[:, 1])
    stepsizemat_hires.reshape(ssm_hires_xy_nelem, 2)[ssm_hires_nan_dy,
                                                     1] = dy / hires_factor
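    # (the reshape(...)[mask, col] = value assignments above write through a
    # view, which is valid here because the stepsize arrays are contiguous;
    # reshape would silently return a copy otherwise and the fills would be
    # lost)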

    minimal_curvature = maxabs_princcurvs < 1.0 / (20 * reflectors[0][0])
    nominal_scaling = (
        (np.abs((stepsizemat[:, :, 0] - dx) / dx) < 0.05)
        &  # less than 5% scaling error using nominal scaling factors
        (np.abs((stepsizemat[:, :, 1] - dy) / dy) < 0.05))

    use_flat = minimal_curvature & nominal_scaling
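    # i.e. a texel is treated as flat when its radius of curvature exceeds
    # 20x the deepest reflector depth and its local step sizes are within
    # 5% of the nominal dx,dy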

    # scaled tikparam
    #raise ValueError("foo!")

    #z_reference=reflectors[-1][0]  # z coordinate of shallowest reflectors (recall reflectors are deepest first)
    #scaledtikparams=greensinversion.scale_tikparam(tikparam,z_reference,reflectors)

    #if tikparam is not None:
    #    # tikparam scaled diagnostic plot (multi-step)
    #    pl.figure(nextfignum)
    #    pl.clf()
    #    for inversioncnt in range(len(inversions)):
    #        pl.plot(inverses[inversioncnt][1] * (tikparam/scaledtikparams[inversioncnt])) #  * z_values[inversioncnt]/z_reference)
    #        pass
    #        pl.xlabel('Scaled singular value index')
    #        pl.ylabel('Magnitude')
    #        nextfignum+=1
    #    pass

    fullinverse = np.zeros(
        (len(reflectors) + 1, wfmdict[channel].data.shape[1] // xydownsample,
         wfmdict[channel].data.shape[0] // xydownsample),
        dtype='d')
    fullinverse_x_bnd = IniVal[0] - Step[0] * xydownsample / 2.0 + np.arange(
        DimLen[0] // xydownsample + 1, dtype='d') * Step[0] * xydownsample
    fullinverse_y_bnd = IniVal[1] - Step[1] * xydownsample / 2.0 + np.arange(
        DimLen[1] // xydownsample + 1, dtype='d') * Step[1] * xydownsample

    flat_tile = [
        use_flat[yidx:(yidx + ny), xidx:(xidx + nx)].all()
        for (yidx, xidx) in minyminx_corners
    ]
    valid_tile = [
        not ((np.isnan(curvmat[yidx:(yidx + ny),
                               xidx:(xidx + nx), :, :])).any())
        for (yidx, xidx) in minyminx_corners
    ]

    if channel_weights is None:
        # Assume all tiles have nonzero weights
        weighted_tile = [True for (yidx, xidx) in minyminx_corners]
        pass
    else:
        # True only for tiles with a non-zero weight

        weights_data = wfmdict[
            channel_weights].data[::xydownsample, ::xydownsample].T
        weighted_tile = [
            (weights_data[yidx:(yidx + ny), xidx:(xidx + nx)] > 0.0).any()
            for (yidx, xidx) in minyminx_corners
        ]

        pass

    #eval_linelength_avgcurvature_mirroredbox = lambda boxu1,boxv1,boxu2,boxv2,u1,v1,u2,v2: obj.implpart.surfaces[0].intrinsicparameterization.linelength_avgcurvature_mirroredbox_meshbased(obj.implpart.surfaces[0],curvmat_hires,stepsizemat_hires,obj.implpart.surfaces[0].intrinsicparameterization.lowerleft_meaningfulunits[0],obj.implpart.surfaces[0].intrinsicparameterization.lowerleft_meaningfulunits[1],curvmat_sizex*1.0/curvmat_hires.shape[1],curvmat_sizey*1.0/curvmat_hires.shape[0],boxu1,boxv1,boxu2,boxv2,dx,dy,u1,v1,u2,v2)
    #boxu1=-0.000447803
    #boxv1=-0.000447803
    #boxu2=0.021643787
    #boxv2=0.023434997
    #u1=-0.00014926799999999998
    #v1=-0.00014926799999999998
    #u2=-0.001343408
    #v2=-0.000746338
    #if np.isnan(eval_linelength_avgcurvature_mirroredbox(boxu1,boxv1,boxu2,boxv2,u1,v1,u2,v2)).any():
    #    raise ValueError("NAN")
    #break linelength_avgcurvature_mirroredbox_meshbased_c_one

    print("Defining flat surface inversion")
    # (rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,depths,tstars,conditions,prevconditions,prevscaledconditions,rowselects,inversions,inversionsfull,inverses,nresults)
    flat_inversion = define_flat_inversion(gi_params, flat_gi_grid,
                                           num_sources_y, num_sources_x)

    if do_singlestep_bool:
        print("Generating single-step inversion")

        # unpack the flat inversion so its source vectors and related
        # arrays are defined for the single-step inversion below
        (rowscaling, flashsourcecolumnscaling, flashsourcevecs,
         reflectorcolumnscaling, reflectorsourcevecs, depths, tstars,
         conditions, prevconditions, prevscaledconditions, rowselects,
         inversions, inversionsfull, inverses, nresults) = flat_inversion

        (ss_rowselects, ss_inversions, ss_inversionsfull, ss_inverses,
         ss_nresults) = greensinversion.generatesinglestepinversion(
             rowscaling, flashsourcecolumnscaling, flashsourcevecs,
             reflectorcolumnscaling, reflectorsourcevecs, tstars, ny, nx,
             trange, depths)
        pass

    nextfignum = 1

    # tikparam diagnostic plots (multi-step)
    if False:
        pl.figure(nextfignum)
        pl.clf()
        for inversioncnt in range(len(inversions)):
            pl.plot(inverses[inversioncnt][1])
            pass
        pl.xlabel('Singular value index')
        pl.ylabel('Magnitude')
        nextfignum += 1

        if do_singlestep_bool:
            pl.figure(nextfignum)
            pl.clf()
            pl.plot(ss_inverses[0][1])
            pl.xlabel('Singular value index (single step)')
            pl.ylabel('Magnitude')
            nextfignum += 1
            pass
        pass

    print("Iterating over %d tiles" % (len(minyminx_corners)))

    for tile_idx in range(len(minyminx_corners)):
        (yidx, xidx) = minyminx_corners[tile_idx]

        print("Tile %d/%d" % (tile_idx, len(minyminx_corners)))

        #if tile_idx==27:
        #    raise ValueError("FOO!")

        inputmat = inputmats_holesfilled[tile_idx]

        if (flat_tile[tile_idx]
            ) or not valid_tile[tile_idx] or not weighted_tile[
                tile_idx]:  #  or tile idx > 75 or tile_idx != 27 # or tile_idx < 26 or tile_idx > 28
            (ny, nx, y, x, ygrid, xgrid, y_bnd, x_bnd) = flat_gi_grid
            (rowscaling, flashsourcecolumnscaling, flashsourcevecs,
             reflectorcolumnscaling, reflectorsourcevecs, depths, tstars,
             conditions, prevconditions, prevscaledconditions, rowselects,
             inversions, inversionsfull, inverses, nresults) = flat_inversion
            print("inverting with flat_inversion")
            pass
        elif valid_tile[tile_idx]:
            # build grid at this location
            gi_grid = build_gi_grid(dy,
                                    maxy,
                                    dx,
                                    maxx,
                                    firstcentery=IniVal[1] + yidx * dy,
                                    firstcenterx=IniVal[0] + xidx * dx)

            (ny, nx, y, x, ygrid, xgrid, y_bnd, x_bnd) = gi_grid

            try:
                (rowscaling, flashsourcecolumnscaling, flashsourcevecs,
                 reflectorcolumnscaling, reflectorsourcevecs, depths, tstars,
                 conditions, prevconditions, prevscaledconditions, rowselects,
                 inversions, inversionsfull, inverses,
                 nresults) = define_curved_inversion(
                     gi_params,
                     gi_grid,
                     obj,
                     curvmat[yidx:(yidx + ny), xidx:(xidx + nx)],
                     stepsizemat[yidx:(yidx + ny), xidx:(xidx + nx)],
                     curvmat_hires,
                     stepsizemat_hires,
                     curvmat_sizex,
                     curvmat_sizey,
                     num_sources_y=num_sources_y,
                     num_sources_x=num_sources_x)
                pass
            except NotANumberError as e:
                sys.stderr.write(
                    "WARNING: Found NAN in sourcevecs... using flat (tile idx %d; yidx=%d, xidx=%d): %s\n"
                    % (tile_idx, yidx, xidx, str(e)))
                (ny, nx, y, x, ygrid, xgrid, y_bnd, x_bnd) = flat_gi_grid
                (rowscaling, flashsourcecolumnscaling, flashsourcevecs,
                 reflectorcolumnscaling, reflectorsourcevecs, depths, tstars,
                 conditions, prevconditions, prevscaledconditions, rowselects,
                 inversions, inversionsfull, inverses,
                 nresults) = flat_inversion
                #raise #!!!
                pass

            print("inverting with curved_inversion")
            pass
        else:
            continue

        (inversioncoeffs_list, errs_list, tikparams_list) = inversionevalfunc(
            OpenCL_CTX, rowselects, inversions, inversionsfull, inverses,
            nresults, [inputmat], tikparam)

        fullinverse[:, yidx:(yidx + ny),
                    xidx:(xidx + nx)] += greensinversion.buildconcreteinverse(
                        inversioncoeffs_list[0], reflectors, ygrid, xgrid,
                        y_bnd, x_bnd, ny, nx, num_sources_y,
                        num_sources_x) * contributionprofiles[tile_idx]

        pass

    # raise ValueError("Debugging!")

    print("Performing assumed-flat inversion")
    print("Iterating over %d tiles" % (len(minyminx_corners)))
    flatfullinverse = np.zeros(
        (len(reflectors) + 1, wfmdict[channel].data.shape[1] // xydownsample,
         wfmdict[channel].data.shape[0] // xydownsample),
        dtype='d')

    for tile_idx in range(len(minyminx_corners)):
        (yidx, xidx) = minyminx_corners[tile_idx]
        print("Tile %d/%d" % (tile_idx, len(minyminx_corners)))

        inputmat = inputmats_holesfilled[tile_idx]

        (ny, nx, y, x, ygrid, xgrid, y_bnd, x_bnd) = flat_gi_grid
        (rowscaling, flashsourcecolumnscaling, flashsourcevecs,
         reflectorcolumnscaling, reflectorsourcevecs, depths, tstars,
         conditions, prevconditions, prevscaledconditions, rowselects,
         inversions, inversionsfull, inverses, nresults) = flat_inversion

        (inversioncoeffs_list, errs_list, tikparams_list) = inversionevalfunc(
            OpenCL_CTX, rowselects, inversions, inversionsfull, inverses,
            nresults, [inputmat], tikparam)
        flatfullinverse[:, yidx:(yidx + ny), xidx:(
            xidx + nx)] += greensinversion.buildconcreteinverse(
                inversioncoeffs_list[0],
                reflectors,
                ygrid,
                xgrid,
                y_bnd,
                x_bnd,
                ny,
                nx,
                num_sources_y=num_sources_y,
                num_sources_x=num_sources_x) * contributionprofiles[tile_idx]
        pass

    if do_singlestep_bool:

        (ss_inversioncoeffs_list,
         ss_errs_list, ss_tikparams_list) = inversionevalfunc(
             OpenCL_CTX, ss_rowselects, ss_inversions, ss_inversionsfull,
             ss_inverses, ss_nresults, inputmats_holesfilled, tikparam)

        ss_fullinverse = np.zeros(
            (len(reflectors) + 1, wfmdict[channel].data.shape[1] //
             xydownsample, wfmdict[channel].data.shape[0] // xydownsample),
            dtype='d')

        for tile_idx in range(len(minyminx_corners)):
            (yidx, xidx) = minyminx_corners[tile_idx]

            ss_fullinverse[:, yidx:(yidx + ny), xidx:(
                xidx + nx)] += greensinversion.buildconcreteinverse(
                    ss_inversioncoeffs_list[tile_idx], reflectors, ygrid,
                    xgrid, y_bnd, x_bnd, ny, nx, num_sources_y,
                    num_sources_x) * contributionprofiles[tile_idx]
            pass

        # for tile_idx in range(len(minyminx_corners)):
        #    (yidx,xidx)=minyminx_corners[tile_idx]
        #    #
        #    (ss_inversioncoeffs,ss_residual,errs,ss_tikparams)=greensinversion.performinversionsteps(ss_rowselects,ss_inversions,ss_inversionsfull,ss_inverses,ss_nresults,wfmdict[channel].data[(xidx*xydownsample):((xidx+nx)*xydownsample):xydownsample,(yidx*xydownsample):((yidx+ny)*xydownsample):xydownsample,startframe:endframe].transpose((2,1,0)),tikparam) # transpose to convert dataguzzler axis ordering (x,y,t) to greensinversion ordering (t,y,x)
        #    #
        #    ss_concreteinverse=greensinversion.buildconcreteinverse(ss_inversioncoeffs,reflectors,ygrid,xgrid,y_bnd,x_bnd,ny,nx)
        #    # concreteinverse is (len(reflectors)+1,ny,nx)... first layer is surface
        #    # ... accumulate contributions of each tile to full inverse
        #    ss_fullinverse[:,yidx:(yidx+ny),xidx:(xidx+nx)] += ss_concreteinverse*contributionprofiles[tile_idx]
        #    pass
        pass

    if tikparam is None:
        outpng_fname = "%s_greensinversion.png" % (inputfile_basename)
        movieoutdirname = "%s_greensinversion_movie/" % (inputfile_basename)
        movieoutfilename = "%s_greensinversion_movie_depth_%%05.2f.png" % (
            inputfile_basename)
        outpngflat_fname = "%s_greensinversionflat.png" % (inputfile_basename)
        movieoutflatfilename = "%s_greensinversionflat_movie_depth_%%05.2f.png" % (
            inputfile_basename)
        pass
    else:
        outpng_fname = "%s_greensinversion_tik_%g.png" % (inputfile_basename,
                                                          tikparam)
        movieoutdirname = "%s_greensinversion_tik_%g_movie/" % (
            inputfile_basename, tikparam)
        movieoutfilename = "%s_greensinversion_tik_%g_movie_depth_%%05.2f.png" % (
            inputfile_basename, tikparam)
        outpngflat_fname = "%s_greensinversionflat_tik_%g.png" % (
            inputfile_basename, tikparam)
        movieoutflatfilename = "%s_greensinversionflat_tik_%g_movie_depth_%%05.2f.png" % (
            inputfile_basename, tikparam)
        pass

    (fig, subplots, images) = greensinversion.plotconcreteinverse(
        nextfignum, dc_numplotrows_int, dc_numplotcols_int, saturation_map,
        fullinverse, reflectors, -10000.0, 30000.0, fullinverse_y_bnd,
        fullinverse_x_bnd, num_sources_y, num_sources_x)
    nextfignum += 1

    outpng_href = dc_value.hrefvalue(quote(outpng_fname), _dest_href)
    fig.savefig(outpng_href.getpath())
    reslist.append((("dc:greensinversion_figure", {
        "tikparam": str(tikparam)
    }), outpng_href))

    (fig, subplots, images) = greensinversion.plotconcreteinverse(
        nextfignum, dc_numplotrows_int, dc_numplotcols_int, saturation_map,
        flatfullinverse, reflectors, -10000.0, 30000.0, fullinverse_y_bnd,
        fullinverse_x_bnd, num_sources_y, num_sources_x)
    nextfignum += 1

    outpngflat_href = dc_value.hrefvalue(quote(outpngflat_fname), _dest_href)
    fig.savefig(outpngflat_href.getpath())
    reslist.append((("dc:greensinversion_figure", {
        "tikparam": str(tikparam)
    }), outpngflat_href))

    movieoutdirhref = dc_value.hrefvalue(quote(movieoutdirname),
                                         contexthref=_dest_href)

    (nextfignum, plots, images, plothrefs,
     depths) = greensinversion.inversion.plotconcreteinversemovie(
         nextfignum,
         movieoutdirhref,
         movieoutfilename,
         saturation_map,
         fullinverse,
         reflectors,
         -10000.0,
         30000.0,
         fullinverse_y_bnd,
         fullinverse_x_bnd,
         num_sources_y,
         num_sources_x,
         dpi=300)

    for cnt in range(len(plothrefs)):
        reslist.append((("dc:greensinversion_movie_frame", {
            "tikparam": str(tikparam),
            "depth": str(depths[cnt])
        }), plothrefs[cnt]))
        pass

    (nextfignum, plots, images, plotflathrefs,
     depths) = greensinversion.inversion.plotconcreteinversemovie(
         nextfignum,
         movieoutdirhref,
         movieoutflatfilename,
         saturation_map,
         flatfullinverse,
         reflectors,
         -10000.0,
         30000.0,
         fullinverse_y_bnd,
         fullinverse_x_bnd,
         num_sources_y,
         num_sources_x,
         dpi=300)

    for cnt in range(len(plotflathrefs)):
        reslist.append((("dc:greensinversionflat_movie_frame", {
            "tikparam": str(tikparam),
            "depth": str(depths[cnt])
        }), plotflathrefs[cnt]))
        pass

    if do_singlestep_bool:
        (ss_fig, ss_subplots, ss_images) = greensinversion.plotconcreteinverse(
            nextfignum, dc_numplotrows_int, dc_numplotcols_int, saturation_map,
            ss_fullinverse, reflectors, -10000.0, 30000.0, fullinverse_y_bnd,
            fullinverse_x_bnd, num_sources_y, num_sources_x)
        nextfignum += 1

        if tikparam is None:
            ss_outpng_fname = "%s_ss_greensinversion.png" % (
                inputfile_basename)
            ss_movieoutdirname = "%s_ss_greensinversion_movie/" % (
                inputfile_basename)
            ss_movieoutfilename = "%s_ss_greensinversion_movie_depth_%%05.2f.png" % (
                inputfile_basename)
            pass
        else:
            ss_outpng_fname = "%s_ss_greensinversion_tik_%g.png" % (
                inputfile_basename, tikparam)
            ss_movieoutdirname = "%s_ss_greensinversion_tik_%g_movie/" % (
                inputfile_basename, tikparam)
            ss_movieoutfilename = "%s_ss_greensinversion_tik_%g_movie_depth_%%05.2f.png" % (
                inputfile_basename, tikparam)
            pass
        ss_outpng_href = dc_value.hrefvalue(quote(ss_outpng_fname), _dest_href)
        ss_fig.savefig(ss_outpng_href.getpath())
        reslist.append((("dc:greensinversion_singlestep_figure", {
            "tikparam": str(tikparam)
        }), ss_outpng_href))

        ss_movieoutdirhref = dc_value.hrefvalue(quote(ss_movieoutdirname),
                                                contexthref=_dest_href)

        (nextfignum, ss_plots, ss_images, ss_plothrefs,
         ss_depths) = greensinversion.inversion.plotconcreteinversemovie(
             nextfignum,
             ss_movieoutdirhref,
             ss_movieoutfilename,
             saturation_map,
             ss_fullinverse,
             reflectors,
             -10000.0,
             30000.0,
             fullinverse_y_bnd,
             fullinverse_x_bnd,
             num_sources_y,
             num_sources_x,
             dpi=300)

        for cnt in range(len(ss_plothrefs)):
            reslist.append((("dc:ss_greensinversion_movie_frame", {
                "tikparam": str(tikparam),
                "depth": str(ss_depths[cnt])
            }), ss_plothrefs[cnt]))
            pass

        pass

    outwfmdict = {}

    outwfmdict[dc_cadmodel_channel_str] = copy.deepcopy(
        wfmdict[dc_cadmodel_channel_str])
    SplitTextureChans = dgm.GetMetaDatumWIStr(wfmdict[dc_cadmodel_channel_str],
                                              "TextureChans", "").split("|")
    PrefixedTextureChans = "|".join([
        dc_prefix_str + TexChanPrefix + TexChan
        for TexChan in SplitTextureChans
    ])
    PrefixedFlatTextureChans = "|".join([
        dc_prefix_str + "_flat" + TexChanPrefix + TexChan
        for TexChan in SplitTextureChans
    ])

    gi_3d = dg.wfminfo()
    #gi_3d.Name=dc_prefix_str+dc_cadmodel_channel_str
    gi_3d.Name = "Proj" + dc_prefix_str + TexChanPrefix + dc_cadmodel_channel_str
    gi_3d.dimlen = np.array((1, ), dtype='i8')
    gi_3d.data = np.array((1, ), dtype='f')
    dgm.AddMetaDatumWI(gi_3d,
                       dgm.MetaDatum("VRML97GeomRef", dc_cadmodel_channel_str))
    dgm.AddMetaDatumWI(gi_3d,
                       dgm.MetaDatum("X3DGeomRef", dc_cadmodel_channel_str))
    #texchanprefix=gi_3d.Name[:gi_3d.Name.find(dc_unprefixed_texname_str)]
    dgm.AddMetaDatumWI(
        gi_3d, dgm.MetaDatum("TexChanPrefix", dc_prefix_str + TexChanPrefix))
    dgm.AddMetaDatumWI(gi_3d,
                       dgm.MetaDatum("TextureChans", PrefixedTextureChans))
    outwfmdict[gi_3d.Name] = gi_3d

    giflat_3d = dg.wfminfo()
    #gi_3d.Name=dc_prefix_str+dc_cadmodel_channel_str
    giflat_3d.Name = "Proj" + dc_prefix_str + "flat_" + TexChanPrefix + dc_cadmodel_channel_str
    giflat_3d.dimlen = np.array((1, ), dtype='i8')
    giflat_3d.data = np.array((1, ), dtype='f')
    dgm.AddMetaDatumWI(giflat_3d,
                       dgm.MetaDatum("VRML97GeomRef", dc_cadmodel_channel_str))
    dgm.AddMetaDatumWI(giflat_3d,
                       dgm.MetaDatum("X3DGeomRef", dc_cadmodel_channel_str))
    #texchanprefix=giflat_3d.Name[:gi_3d.Name.find(dc_unprefixed_texname_str)]
    dgm.AddMetaDatumWI(
        giflat_3d,
        dgm.MetaDatum("TexChanPrefix",
                      dc_prefix_str + "_flat" + TexChanPrefix))
    dgm.AddMetaDatumWI(giflat_3d,
                       dgm.MetaDatum("TextureChans", PrefixedFlatTextureChans))
    outwfmdict[giflat_3d.Name] = giflat_3d
    #outwfm_flat.Name=dc_prefix_str+dc_inversion_channel_str+"flat"

    outwfm = dg.wfminfo()
    #outwfm.Name="greensinversion"
    outwfm.Name = dc_prefix_str + dc_inversion_channel_str

    outwfmdict[outwfm.Name] = outwfm

    outwfm_flat = dg.wfminfo()
    #outwfm.Name="greensinversion"
    outwfm_flat.Name = dc_prefix_str + "_flat" + dc_inversion_channel_str

    outwfmdict[outwfm_flat.Name] = outwfm_flat

    # Shift IniVals according to xydownsample:
    # IniVal[0] is the X coordinate of the center of the corner pixel of the
    # undownsampled image and also of the downsampled image, but the
    # downsampled pixel is xydownsample times as big, so the corner of the
    # image itself has changed!
    dgm.AddMetaDatumWI(outwfm, dgm.MetaDatum("IniVal1", IniVal[0]))
    dgm.AddMetaDatumWI(outwfm, dgm.MetaDatum("IniVal2", IniVal[1]))

    dgm.AddMetaDatumWI(outwfm,
                       dgm.MetaDatum("Step1", XStepMeters * xydownsample))
    dgm.AddMetaDatumWI(outwfm,
                       dgm.MetaDatum("Step2", YStepMeters * xydownsample))
    dgm.AddMetaDatumWI(outwfm, dgm.MetaDatum("Coord1", Coord[0]))
    dgm.AddMetaDatumWI(outwfm, dgm.MetaDatum("Coord2", Coord[1]))
    dgm.AddMetaDatumWI(outwfm, dgm.MetaDatum("Units1", Units[0]))
    dgm.AddMetaDatumWI(outwfm, dgm.MetaDatum("Units2", Units[1]))

    dgm.AddMetaDatumWI(outwfm, dgm.MetaDatum("IniVal3", 0.0))
    dgm.AddMetaDatumWI(outwfm, dgm.MetaDatum("Step3", 1.0))
    dgm.AddMetaDatumWI(outwfm, dgm.MetaDatum("Coord3", "Depth Index"))
    dgm.AddMetaDatumWI(outwfm, dgm.MetaDatum("Units3", "unitless"))

    dgm.AddMetaDatumWI(outwfm, dgm.MetaDatum("AmplCoord", "Heating intensity"))
    dgm.AddMetaDatumWI(outwfm, dgm.MetaDatum("AmplUnits", "J/m^2"))

    # Copy landmark metadata
    LandmarkMD = [
        MDName for MDName in list(wfmdict[channel].MetaData.keys())
        if MDName.startswith("LANDMARK_")
    ]
    for LandmarkName in LandmarkMD:
        dgm.AddMetaDatumWI(
            outwfm, copy.deepcopy(wfmdict[channel].MetaData[LandmarkName]))
        pass

    outwfm_flat.MetaData = copy.deepcopy(outwfm.MetaData)

    if channel_weights is not None:
        #outwfm_weights=copy.deepcopy(wfmdict[channel_weights])#dg.wfminfo()
        #outwfm_weights.Name="greensinversion_weights"
        outwfm_weights = dg.wfminfo()
        outwfm_weights.Name = dc_prefix_str + dc_inversion_channel_str + "_weights"
        outwfm_weights.data = wfmdict[
            channel_weights].data[::xydownsample, ::xydownsample]
        outwfm_weights.dimlen = np.array(outwfm_weights.data.shape)
        outwfm_weights.ndim = 2
        outwfm_weights.MetaData = copy.deepcopy(outwfm.MetaData)
        dgm.AddMetaDatumWI(outwfm_weights,
                           dgm.MetaDatum("AmplCoord", "Weighting"))
        dgm.AddMetaDatumWI(outwfm_weights,
                           dgm.MetaDatum("AmplUnits", "Unitless"))

        outwfmdict[outwfm_weights.Name] = outwfm_weights
        pass

    if do_singlestep_bool:
        ss_outwfm = copy.deepcopy(outwfm)
        ss_outwfm.Name = "ss_greensinversion"

        outwfmdict[ss_outwfm.Name] = ss_outwfm
        pass

    # dgs file is written in (X,Y,Z) fortran order, so we write
    # dimlen in reverse order and transpose the data
    outwfm.ndim = 3
    outwfm.dimlen = np.array(fullinverse.shape[::-1])
    outwfm.data = fullinverse.transpose().astype(np.float32)
    outwfm.NeedData = False
    outwfm.NeedMetaData = False
    outwfm.HaveData = True
    outwfm.HaveMetaData = True
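    # (e.g. fullinverse has shape (len(reflectors)+1, ny, nx), so dimlen is
    # written as (nx, ny, nlayers) and .transpose() reverses all axes to
    # match the Fortran-order file layout)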

    outwfm_flat.ndim = 3
    outwfm_flat.dimlen = np.array(flatfullinverse.shape[::-1])
    outwfm_flat.data = flatfullinverse.transpose().astype(np.float32)
    outwfm_flat.NeedData = False
    outwfm_flat.NeedMetaData = False
    outwfm_flat.HaveData = True
    outwfm_flat.HaveMetaData = True

    outwfm_saturationmap = dg.wfminfo()
    outwfm_saturationmap.Name = "saturation_map"
    outwfmdict[outwfm_saturationmap.Name] = outwfm_saturationmap
    outwfm_saturationmap.dimlen = np.array(saturation_map.shape[::-1])
    outwfm_saturationmap.data = saturation_map.transpose().astype(np.float32)
    outwfm_saturationmap.ndim = outwfm_saturationmap.dimlen.shape[0]
    outwfm_saturationmap.NeedData = False
    outwfm_saturationmap.NeedMetaData = False
    outwfm_saturationmap.HaveData = True
    outwfm_saturationmap.HaveMetaData = True
    outwfm_saturationmap.MetaData = copy.deepcopy(outwfm.MetaData)

    if do_singlestep_bool:
        ss_outwfm.ndim = 3
        ss_outwfm.dimlen = np.array(ss_fullinverse.shape[::-1])
        ss_outwfm.data = ss_fullinverse.transpose().astype(np.float32)
        ss_outwfm.NeedData = False
        ss_outwfm.NeedMetaData = False
        ss_outwfm.HaveData = True
        ss_outwfm.HaveMetaData = True
        pass

    if tikparam is None:
        outdgs_fname = "%s_greensinversion.dgs" % (inputfile_basename)
        pass
    else:
        outdgs_fname = "%s_greensinversion_tik_%g.dgs" % (inputfile_basename,
                                                          tikparam)
        pass
    outdgs_href = dc_value.hrefvalue(quote(outdgs_fname), _dest_href)
    dgf.savesnapshot(outdgs_href.getpath(), outwfmdict)

    reslist.append((("dc:greensinversion_dgsfile", {
        "tikparam": str(tikparam)
    }), outdgs_href))

    if do_singlestep_bool:
        # ss_outwfm was added to outwfmdict above, so the single-step result
        # was already written with the snapshot; nothing more to record here.
        pass

    #
    # greensconvolution_params.get_opencl_context()
    # tile_idx=14
    # (yidx,xidx)=minyminx_corners[tile_idx]
    #
    # inputmats=[wfmdict[channel].data[(xidx*xydownsample):((xidx+nx)*xydownsample):xydownsample,(yidx*xydownsample):((yidx+ny)*xydownsample):xydownsample,startframe:endframe].transpose((2,1,0))]
    # greeninversion.inversion.parallelperforminversionsteps(greensconvolution_params.OpenCL_CTX,rowselects,inversions,inversionsfull,inverses,nresults,inputmats,None)
    return reslist
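
The (X,Y,Z) Fortran-order convention used when writing the .dgs file above amounts to reversing dimlen and transposing the data; a minimal NumPy-only sketch of that round trip (the array shape here is made up):

import numpy as np

# C-ordered volume indexed (Z, Y, X), as fullinverse is above
vol = np.arange(2 * 3 * 4, dtype=np.float32).reshape(2, 3, 4)

dimlen = np.array(vol.shape[::-1])  # recorded as (X, Y, Z)
data = vol.transpose()  # reversed axes: data[x, y, z] == vol[z, y, x]

assert tuple(dimlen) == data.shape
assert data[3, 2, 1] == vol[1, 2, 3]
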
Example #20
0
def main(args=None):
    if args is None:
        args=sys.argv
        pass
    
    fname=None
    argc=1
    date=None
    connect_dg=False
    configfiles=[]   # note... this list is accessed from runcheckliststep so that the same config files can be passed to the sub-checklist

    while argc < len(args):
        arg=args[argc]
        
        if arg=="-d":
            connect_dg=True
            pass
        elif arg=="--date":
            argc+=1
            date=args[argc]
            pass
        
        elif arg=="-f":
            argc+=1
            configfiles.append(args[argc])
            pass
        elif arg=="--gtk3":
            # arg handled with imports, above
            pass
        elif arg=='-h' or arg=="--help":
            print("""Usage: %s [-f <config.dcc>] [--date 201x-xx-xx] [-d] checklist.chx (or plan.plx)...
            
Flags:
  -f config.dcc       Load datacollect config
  -d                  Connect to dataguzzler
  --gtk3              Use gtk3 instead of gtk2
""" % (args[0]))
            sys.exit(0)
            pass
        elif arg[0]=='-':
            raise ValueError("Unknown command line switch %s" % (arg))
        else:
            fname=arg
            pass
        argc+=1
        pass
    
    if fname is None:
        sys.stderr.write("Must provide at least one positional parameter. -h for help\n")
        sys.exit(1)
        pass
        
    iohandlers={}  #=dg_io.io()
    if connect_dg:
        if dg_io is not None:
            dg_io.addhandler(iohandlers)
            pass
        else:
            sys.stderr.write("dc_checklist: dg_io not available (ImportError)\n")
            pass
        pass
    
    paramdb=pdb.paramdb(iohandlers)
    
    # define parameters
    # paramdb.addparam("clinfo",stringv,build=lambda param: xmldoc.synced(param))  # clinfo auto-added to checklist-private paramdb
    paramdb.addparam("specimen",stringv,build=lambda param: xmldoc.synced(param))
    paramdb.addparam("perfby",stringv,build=lambda param: xmldoc.synced(param))
    #paramdb.addparam("date",stringv,build=lambda param: xmldoc.synced(param))
    #paramdb.addparam("date",datesetv,build=lambda param: xmldoc.synced_accumulating_dates(param))
    paramdb.addparam("date",accumulatingdatesetv,build=lambda param: xmldoc.synced(param))
    # paramdb.addparam("dest",stringv,build=lambda param: xmldoc.synced(param)) # clinfo auto-added to checklist-private paramdb
    paramdb.addparam("notes",stringv,build=lambda param: xmldoc.synced(param))
    
    # FIXES:
    #  * Some of the above parameters could be made capable of syncing with
    #    dataguzzler, BUT
    #  * if they are, loading a completed checklist may pull the new parameter
    #    values in (!), which may not be what you expect or want, especially
    #    if someone else is running dataguzzler on that computer.
    #  * So if this change is made, we should require an explicit command line
    #    argument in order to connect to dataguzzler.
    
    # auto-set date 
    if date is not None:
        paramdb["date"].requestvalstr_sync(date)
        pass
    elif paramdb["date"].dcvalue.isblank():
        curdate=datetime.date.today()
        paramdb["date"].requestvalstr_sync(curdate.isoformat())
        pass

    for configfile in configfiles:
        dc2_misc.load_config(configfile,paramdb,iohandlers,createparamserver)
        #configfh=file(configfile)
        #configstr=configfh.read()
        #exec(configstr,globals(),{"paramdb":paramdb,"dgio":iohandler,"createparamserver":createparamserver})
        #configfh.close()
        pass

    # Connect to dbus (barcode reader)
    dbuslink=dc_dbus_barcode(paramdb)

    # create context from fname and directory
    (direc,filename)=os.path.split(fname)

    if direc=="":
        direc="."
        pass
    dirhref=dc_value.hrefvalue(pathname2url(direc)+posixpath.sep,
                               contexthref=dc_value.hrefvalue("./"))
    filehref=dc_value.hrefvalue(pathname2url(fname),contexthref=dirhref)
    
    try: 
        standalone_checklist.open_checklist(filehref,paramdb,iohandlers)
        pass
    except:  # deliberately broad: drop into the debugger on any failure
        (exctype,excvalue)=sys.exc_info()[:2]
        sys.stderr.write("%s opening checklist: %s\n" % (exctype.__name__,str(excvalue)))
        traceback.print_exc()
        import pdb as pythondb
        pythondb.post_mortem()
        pass
 
    gtk.main()
    pass
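
All of these main() functions use the same hand-rolled argv walk; a stripped-down, self-contained sketch of the pattern (the option names here are illustrative only):

def parse_args(args):
    """Hand-rolled option walk, as in the main() functions above."""
    argc = 1
    options = {}
    positionals = []
    while argc < len(args):
        arg = args[argc]
        if arg == "--date":  # option with a value consumes the next arg
            argc += 1
            options["date"] = args[argc]
        elif arg == "-d":  # boolean flag
            options["connect"] = True
        elif arg.startswith("-"):
            raise ValueError("Unknown command line switch %s" % (arg,))
        else:
            positionals.append(arg)
        argc += 1
    return options, positionals

# e.g. parse_args(["prog", "--date", "2024-01-01", "file.chx"])
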
Example #21
0
def main(args=None):
    if args is None:
        args = sys.argv
        pass

    argc = 1
    input_file_names = []
    show_steps = False
    verbose = False
    cleanup_provenance = False
    cleanup_dest_files = False
    cleanup_obsolete = False
    recursive = False
    repository_root = None
    remove_steps = []

    while argc < len(args):
        arg = args[argc]
        if arg == "-h" or arg == "--help":
            usage()
            sys.exit(0)
            pass
        elif arg == "--steps":
            show_steps = True
            pass
        elif arg == "-b":
            # cleanup obsolete content
            cleanup_obsolete = True
            pass
        elif arg == "-s":
            argc += 1
            remove_steps.append(args[argc])
        elif arg == "-p":
            # cleanup provenance
            cleanup_provenance = True
            pass
        elif arg == "-d":
            # cleanup dest
            cleanup_dest_files = True
            pass
        elif arg == "-r":
            # recursive
            recursive = True
            pass
        elif arg == "-v":
            verbose = True  # ***!!! Does not currently do anything
            pass
        elif arg == "--root":
            argc += 1
            repository_root = dc_value.hrefvalue(args[argc], contexthref=".")
            pass
        else:
            input_file_names.append(arg)
            pass
        argc += 1
        pass

    if len(input_file_names) < 1:
        usage()
        sys.exit(0)
        pass

    input_file_hrefs = [
        dc_value.hrefvalue(pathname2url(input_file_name),
                           contexthref=dc_value.hrefvalue("./"))
        for input_file_name in input_file_names
    ]

    input_files = processtrak_cleanup.infiledicts.fromhreflist(
        input_file_hrefs)

    #import pdb
    #pdb.set_trace()

    if recursive and repository_root is None:
        raise ValueError("Recursive flag set and --root unset")

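    # Ask traverse() for the href_set only when -d (cleanup_dest_files) will
    # need it; when -b or -s will run, defer it to the second traverse() call
    # further below.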
    (completed_set, desthref_set, href_set) = processtrak_cleanup.traverse(
        input_files,
        recursive=recursive,
        need_href_set=not (cleanup_obsolete or len(remove_steps) > 0)
        and cleanup_dest_files,
        repository_root=repository_root)

    if show_steps:
        for xlpfilehref in input_files.xlp:
            print("Showing steps in %s..." % (xlpfilehref.humanurl()))
            xlpdocu = input_files.xlp[xlpfilehref].xmldocu
            xlpdocu.lock_ro()
            try:
                show_steps_in_xlp(xlpdocu)
                pass
            finally:
                xlpdocu.unlock_ro()
                pass

            pass

    if cleanup_obsolete:

        for xlpfilehref in input_files.xlp:
            print("Cleaning up obsolete tags in %s..." %
                  (xlpfilehref.humanurl()))
            xlpdocu = input_files.xlp[xlpfilehref].xmldocu
            xlpdocu.set_readonly(False)
            xlpdocu.lock_rw()
            try:
                cleanup_obsolete_tags(xlpdocu)

                pass
            finally:
                xlpdocu.unlock_rw()
                xlpdocu.set_readonly(True)
                pass

            pass

        pass

    for remove_step in remove_steps:
        for xlpfilehref in input_files.xlp:
            print("Removing output from step %s in %s..." %
                  (remove_step, xlpfilehref.humanurl()))
            xlpdocu = input_files.xlp[xlpfilehref].xmldocu
            xlpdocu.set_readonly(False)
            xlpdocu.lock_rw()
            try:
                remove_step_output(xlpdocu, remove_step)

                pass
            finally:
                xlpdocu.unlock_rw()
                xlpdocu.set_readonly(True)
                pass

            pass

        pass

    if cleanup_obsolete or len(remove_steps) > 0:
        # Re-call traverse(), this time getting the href_set if needed; we
        # don't need to recurse again because the first call already did that
        (completed_set, desthref_set, href_set) = processtrak_cleanup.traverse(
            input_files,
            recursive=False,
            need_href_set=cleanup_dest_files,
            repository_root=repository_root)

        pass

    if cleanup_dest_files:
        cleanup_dest(input_files, desthref_set, href_set)
        pass

    if cleanup_provenance:
        # import pdb
        # pdb.set_trace()

        for xlpfilehref in input_files.xlp:
            print("Cleaning up provenance in %s..." % (xlpfilehref.humanurl()))
            xlpdocu = input_files.xlp[xlpfilehref].xmldocu
            xlpdocu.set_readonly(False)
            xlpdocu.lock_rw()
            try:
                msg = provenance.cleanobsoleteprovenance(xlpdocu)
                pass
            finally:
                xlpdocu.unlock_rw()
                xlpdocu.set_readonly(True)
                pass
            print("Cleanup provenance: %s" % (msg))
            pass
        pass
    pass
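
Each cleanup pass above repeats the same lock discipline: set_readonly(False) and lock_rw() before the work, then unlock_rw() and set_readonly(True) in a finally block. A context-manager sketch of that discipline (the wrapper itself is hypothetical; the method names are the ones used above):

from contextlib import contextmanager

@contextmanager
def rw_locked(xmldocu):
    # Hypothetical helper wrapping the lock/restore idiom used above.
    xmldocu.set_readonly(False)
    xmldocu.lock_rw()
    try:
        yield xmldocu
    finally:
        xmldocu.unlock_rw()
        xmldocu.set_readonly(True)

# usage sketch:
# with rw_locked(input_files.xlp[xlpfilehref].xmldocu) as doc:
#     cleanup_obsolete_tags(doc)
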
def traverse_one(infiles,
                 infileobj,
                 pending,
                 completed,
                 dests,
                 hrefs,
                 recursive=False):
    # go through infile, searching for links

    assert (infileobj.href in pending)

    if infileobj.ftype == infileobj.IFT_OTHERUNK:
        pending.remove(infileobj.href)
        completed.add(infileobj.href)
        return  # cannot contain links

    infileobj.xmldocu.lock_ro()

    try:
        # print("traverse_one: ftype=%d" % (infileobj.ftype))
        if infileobj.ftype == infileobj.IFT_XLG:
            # .XLG file has implicit link to its .XLP file
            barefilename = infileobj.href.get_bare_unquoted_filename()
            (barename, ext) = posixpath.splitext(barefilename)
            if ext == ".xlg":
                xlpfile = barename + ".xlp"
                xlphref = dc_value.hrefvalue(quote(xlpfile),
                                             contexthref=infileobj.href)
                if hrefs is not None:
                    hrefs.add(xlphref)
                    pass
                if recursive:
                    add_to_traverse(infiles, pending, completed, xlphref)
                    pass
                pass
            pass

        if infileobj.ftype == infileobj.IFT_XLG or infileobj.ftype == infileobj.IFT_XLP:
            # XLG and XLP files can have dest references
            # and we are tracking those
            # print("got xlg or xlp. infileobj.href=%s" % (infileobj.href.humanurl()))
            desttags = infileobj.xmldocu.xpath(
                "dc:summary/dc:dest[@xlink:href]")
            for desttag in desttags:
                #print("got desttag!")
                desthref = dc_value.hrefvalue.fromxml(infileobj.xmldocu,
                                                      desttag)
                dests.add(desthref)

                pass
            pass

        if infileobj.ftype == infileobj.IFT_PRX:
            # .PRX file has implicit links to its input and output files

            # ... We follow links to .xlp files whether or not the recursive flag is set

            prx_inputfiles_with_hrefs = processtrak_common.getinputfiles(
                infileobj.xmldocu)
            prx_outputdict = processtrak_common.build_outputdict(
                infileobj.xmldocu, prx_inputfiles_with_hrefs)

            for prx_inputfile_href in prx_outputdict:
                if hrefs is not None:
                    hrefs.add(prx_inputfile_href)
                    hrefs.add(
                        prx_outputdict[prx_inputfile_href].outputfilehref)
                    pass

                if recursive:
                    add_to_traverse(infiles, pending, completed,
                                    prx_inputfile_href)
                    pass

                # follow link to output whether or not recursive is set
                add_to_traverse(
                    infiles, pending, completed,
                    prx_outputdict[prx_inputfile_href].outputfilehref)
                pass
            pass

        # Now go through all explicit links if we need hrefs

        if hrefs is not None or recursive:
            all_links = infileobj.xmldocu.xpath("//*[@xlink:href]")

            for link in all_links:
                href = dc_value.hrefvalue.fromxml(infileobj.xmldocu, link)
                if hrefs is not None:
                    hrefs.add(href)
                    pass
                if recursive:
                    add_to_traverse(infiles, pending, completed, href)
                    pass
                pass
            pass
        pass

    finally:
        infileobj.xmldocu.unlock_ro()
        pass

    pending.remove(infileobj.href)
    completed.add(infileobj.href)

    pass
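
traverse_one() above is one visit in a standard worklist traversal: take an href out of pending, collect the links it contains, and move it to completed. A minimal self-contained sketch of the driving loop over a plain link graph (the dict-based graph is an assumption; the real code walks files):

def traverse(start_nodes, links):
    """Worklist traversal; traverse_one() above plays the role of one visit."""
    pending = set(start_nodes)
    completed = set()
    while pending:
        node = pending.pop()
        completed.add(node)
        for neighbor in links.get(node, ()):
            if neighbor not in completed:
                pending.add(neighbor)  # newly discovered link target
    return completed

# e.g. traverse(["a.xlg"], {"a.xlg": ["a.xlp"], "a.xlp": ["out.dgs"]})
# -> {"a.xlg", "a.xlp", "out.dgs"}
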
Example #23
0
def main(args=None):
    if args is None:
        args = sys.argv
        pass

    argv_inputfileurls = set([])

    overall_starttime = timestamp.now().isoformat()

    argc = 1
    positionals = []
    stepnames = []
    filters = []
    allsteps = False
    liststeps = False
    listfiles = False
    ignore_locking = False
    status = False
    needed = False
    paramdebug = False
    debugmode = False
    ipythonmodelist = [
        False
    ]  # ipythonmode is wrapped in a list so that called functions can mutate it and the change persists

    while argc < len(args):
        arg = args[argc]

        if arg == "-s":  # -s <step>: Only run this (or these) steps
            stepnames.append(args[argc + 1])
            argc += 1
            pass
        elif arg == "--steps":  # just list steps
            liststeps = True
            pass
        elif arg == "-l":  # -l <filter>: Apply additional filtering constraint
            # to elements operated on
            filters.append(args[argc + 1])
            argc += 1
            pass
        elif arg == "-f":  # -f <inputfile>: Operate only on the specified input url
            argv_inputfileurls.add(args[argc + 1])
            argc += 1
            pass
        elif arg == "--files":  # just list files
            listfiles = True
            pass
        elif arg == "--status":
            status = True
            pass
        elif arg == "--needed":
            needed = True
            pass
        elif arg == "-a":  # run all steps
            allsteps = True
            pass
        elif arg == "-i":  # enable ipython qtconsole mode
            ipythonmodelist.pop()
            ipythonmodelist.append(True)
            pass
        elif arg == "-d":  # enable debugging mode
            debugmode = True
            pass
        elif arg == "--ignore-locking":  # disable file locking
            ignore_locking = True
            pass
        elif arg == '--gtk3':
            # handled at imports, above
            pass
        elif arg == '--gtk2':
            # handled at imports, above
            pass
        #elif arg=="-p":  # insert path into search path for steps
        #    processtrak_procstep.steppath.insert(1,args[argc+1])
        #    argc+=1
        #    pass
        elif arg == '-h' or arg == "--help":
            usage()
            sys.exit(0)
            pass
        elif arg == "--param-debug":
            paramdebug = True
            pass
        elif arg[0] == '-':
            raise ValueError("Unknown command line switch %s" % (arg))
        else:
            positionals.append(arg)
            pass
        argc += 1
        pass

    if len(positionals) > 1:
        raise ValueError(
            "Too many positional parameters (see -h for command line help")

    if len(positionals) < 1:
        usage()
        sys.exit(0)
        pass

    prxfile = positionals[0]
    prxfilehref = dcv.hrefvalue(pathname2url(prxfile),
                                contexthref=dcv.hrefvalue("."))

    # prxdoc is loaded into memory once, so we don't use locking on it.
    prxdoc = xmldoc.xmldoc.loadhref(
        prxfilehref,
        nsmap=processtrak_common.prx_nsmap,
        readonly=True,
        use_locking=False,
        debug=True
    )  #!!!*** Should turn debug mode off eventually... it will speed things up
    # prxdoc.merge_namespace("prx",)
    assert (prxdoc.gettag(prxdoc.getroot()) == "prx:processinginstructions")

    # See if a specific hostname was specified
    hostname = prxdoc.xpathsinglestr("prx:hostname", default=None)

    if hostname is not None and hostname.split(
            ".")[0] != provenance.determinehostname().split(".")[0]:
        sys.stderr.write(
            "Hostname mismatch: %s in <prx:hostname> tag vs. this computer is %s.\nPlease adjust <prx:hostname> tag to match if you really want to run on this computer.\nRemove <prx:hostname> completely if this should be allowed to run on any computer.\n"
            % (hostname.split(".")[0],
               provenance.determinehostname().split(".")[0]))
        sys.exit(1)
        pass

    all_step_elements = [None] + prxdoc.xpath("prx:step")

    if allsteps or liststeps or status:
        steps = list(all_step_elements)  # same full list as built above
        pass
    else:  # Convert list of step names into list of step elements
        steps = [
            processtrak_prxdoc.findstep(prxdoc, stepname)
            for stepname in stepnames
        ]
        pass

    if liststeps:
        print("")
        print("List of steps for -s option")
        print("---------------------------")
        for step_el in steps:
            if step_el is None:  # "None" means the copyinput step
                print(
                    "copyinput (WILL OVERWRITE CURRENT OUTPUT) or mergeinput")
                pass
            else:
                print(processtrak_prxdoc.getstepname(prxdoc, step_el))
                pass
            pass

        sys.exit(0)
        pass

    (inputfiles_element,
     inputfiles_with_hrefs) = processtrak_common.getinputfiles(prxdoc)

    if listfiles:
        print("")
        print("List of input file urls for -f option")
        print("-------------------------------------")
        for (inputfile, inputfilehref) in inputfiles_with_hrefs:
            print(inputfilehref.humanurl())
            pass

        sys.exit(0)
        pass

    for argv_inputfileurl in argv_inputfileurls:
        if argv_inputfileurl not in [
                inputfilehref.humanurl()
                for (inputfile, inputfilehref) in inputfiles_with_hrefs
        ]:
            sys.stderr.write(
                "Specified input file url %s is not listed in %s\nTry listing available input file urls with --files.\n"
                % (argv_inputfileurl, prxfilehref.absurl()))
            sys.exit(1)
            pass
        pass
    # inputfile = prxdoc.xpathsinglestr("prx:inputfile")

    # If any input files are specified on the command line, use only
    # those input files

    useinputfiles_with_hrefs = [
        (inputfile, inputfile_href)
        for (inputfile, inputfile_href) in inputfiles_with_hrefs
        if len(argv_inputfileurls) == 0
        or inputfile_href.humanurl() in argv_inputfileurls
    ]

    if status:
        # print out status information
        processtrak_status.print_status(useinputfiles_with_hrefs, prxdoc,
                                        prxfilehref, steps, ignore_locking)
        sys.exit(0)
        pass

    if len(steps) == 0:
        print(
            "Nothing to do! (try specifying a step with -s <step> or all steps with -a);\nlist steps with --steps\n"
        )
        sys.exit(0)
        pass

    #print("steps=%s" % str(steps))

    # Build dictionary by input file of output files
    outputdict = processtrak_common.build_outputdict(prxdoc,
                                                     inputfiles_with_hrefs,
                                                     ignore_locking)

    # Run the specified steps, on the specified files
    if needed:
        processtrak_common.outputdict_run_needed_steps(
            prxdoc, prxfilehref, outputdict, inputfiles_element,
            useinputfiles_with_hrefs, all_step_elements, steps, filters,
            overall_starttime, debugmode, stdouthandler, stderrhandler,
            ipythonmodelist, paramdebug)
        pass
    else:
        processtrak_common.outputdict_run_steps(
            prxdoc, outputdict, inputfiles_element, useinputfiles_with_hrefs,
            steps, filters, overall_starttime, debugmode, stdouthandler,
            stderrhandler, ipythonmodelist, paramdebug)
        pass

    pass
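
The <prx:hostname> gate near the top of main() compares only the first dotted component of each hostname; a minimal sketch of that comparison (socket.getfqdn() stands in here for provenance.determinehostname(), which is an assumption):

import socket

def hostname_matches(required):
    """True when this machine's short hostname matches the required one."""
    return required.split(".")[0] == socket.getfqdn().split(".")[0]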