Code Example #1
 def get_source(self, environment, template):
     from PYME.IO import unifiedIO
     try:
         if os.path.exists(os.path.join(os.path.dirname(__file__), template)):
             source = unifiedIO.read(os.path.join(os.path.dirname(__file__), template)).decode('utf-8')
         else:
             source = unifiedIO.read(template).decode('utf-8')
     except Exception:  # avoid a bare except; let KeyboardInterrupt etc. propagate
         logger.exception('Error loading template')
         raise jinja2.TemplateNotFound(template)
     return source, template, lambda: False
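For context, get_source is the hook jinja2 calls on a custom loader when resolving a template name; it must return a (source, filename, uptodate) triple, and returning `lambda: False` disables up-to-date caching. A minimal, self-contained sketch of wiring such a loader into an Environment (the UnifiedLoader class name is hypothetical):

    import os
    import jinja2
    from PYME.IO import unifiedIO

    class UnifiedLoader(jinja2.BaseLoader):  # hypothetical name
        def get_source(self, environment, template):
            local = os.path.join(os.path.dirname(__file__), template)
            try:
                path = local if os.path.exists(local) else template
                source = unifiedIO.read(path).decode('utf-8')
            except Exception:
                raise jinja2.TemplateNotFound(template)
            return source, template, lambda: False  # never treated as cached

    env = jinja2.Environment(loader=UnifiedLoader())
    template = env.get_template('status.html')  # template name is illustrative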
Code Example #2
File: views.py Project: b3nroll1ns/python-microscopy
def run_template(request):
    from PYME import config
    from PYME.IO import unifiedIO
    from PYME.recipes.modules import ModuleCollection

    if config.get('PYMERuleserver-use', True):
        from PYME.cluster.HTTPRulePusher import RecipePusher
    else:
        from PYME.cluster.HTTPTaskPusher import RecipePusher

    recipeURI = 'pyme-cluster://%s/%s' % (
        server_filter, request.POST.get('recipeURL').lstrip('/'))
    output_directory = 'pyme-cluster://%s/%s' % (
        server_filter, request.POST.get('recipeOutputPath').lstrip('/'))

    recipe_text = unifiedIO.read(recipeURI)
    recipe = ModuleCollection.fromYAML(recipe_text)

    for file_input in recipe.file_inputs:
        input_url = 'pyme-cluster://%s/%s' % (
            server_filter, request.POST.get('%sURL' % file_input).lstrip('/'))
        recipe_text = recipe_text.replace('{' + file_input + '}', input_url)

    pusher = RecipePusher(recipe=recipe_text, output_dir=output_directory)

    fileNames = request.POST.getlist('files', [])
    pusher.fileTasksForInputs(input=fileNames)

    return HttpResponseRedirect('/status/queues/')
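The replace step above fills brace-delimited placeholders in the recipe YAML with user-supplied cluster URIs. A minimal illustration (the recipe fragment and names are hypothetical):

    recipe_text = "shift_map_path: '{shiftmap}'\n"
    input_url = 'pyme-cluster://local/shiftmaps/map0.h5'
    recipe_text = recipe_text.replace('{shiftmap}', input_url)
    # shift_map_path now points at the user-supplied URI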
Code Example #3
    def __init__(self, recipe=None, recipeURI=None, output_dir=None):
        from PYME.recipes.modules import ModuleCollection
        if recipe:
            if isinstance(recipe, string_types):
                self.recipe_text = recipe
                self.recipe = ModuleCollection.fromYAML(recipe)
            else:
                self.recipe_text = recipe.toYAML()
                self.recipe = recipe

            self.recipeURI = None
        else:
            self.recipe = None
            if recipeURI is None:
                raise ValueError(
                    'recipeURI must be defined if no recipe given')
            else:
                from PYME.IO import unifiedIO
                self.recipeURI = recipeURI
                self.recipe = ModuleCollection.fromYAML(
                    unifiedIO.read(recipeURI))

        self.output_dir = output_dir

        self.taskQueueURI = _getTaskQueueURI()

        #generate a queue ID as a hash of the recipe and the current time
        to_hash = self.recipeURI if self.recipeURI else self.recipe_text
        try:  # hashlib requires bytes on py3
            to_hash = to_hash.encode()
        except TypeError:  # encoding without a string argument, i.e. already bytes
            pass
        h = hashlib.md5(to_hash)
        h.update(str(time.time()).encode())
        self.queueID = h.hexdigest()  # hexdigest returns str
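The queue ID is an MD5 of the recipe (or its URI) salted with the current time, so pushing the same recipe twice yields distinct queues. A standalone Python 3 sketch of the same idea:

    import hashlib
    import time

    recipe_text = 'recipe yaml ...'  # illustrative
    h = hashlib.md5(recipe_text.encode())
    h.update(str(time.time()).encode())  # salt with the current time
    queue_id = h.hexdigest()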
Code Example #4
def load_json(filename):
    import json
    from PYME.IO import unifiedIO
    mdh = NestedClassMDHandler()
    mdh.update(json.loads(unifiedIO.read(filename)))

    return mdh
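A hypothetical call (the URI is illustrative; anything unifiedIO.read accepts, whether a local path or a pyme-cluster:// URI, should work):

    mdh = load_json('pyme-cluster:///example_folder/series/metadata.json')
    voxel_x = mdh['voxelsize.x']  # entries are addressed with dotted keys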
Code Example #5
def load_shiftmap(uri):
    """
    helper function to handle I/O of two versions of shiftmaps. Note that HDF is preferred.
    :param uri: str
        path or url to shiftmap-containing file (hdf, or [less ideal] json)
    :return: dict
        shiftmap
    """
    from PYME.IO import unifiedIO, tabular
    from PYME.IO.MetaDataHandler import HDFMDHandler
    import tables
    import json

    try:  # try loading shift map as hdf file
        with unifiedIO.local_or_temp_filename(uri) as f:
            t = tables.open_file(f)
            shift_map_source = tabular.HDFSource(t, 'shift_map')  # todo - is there a cleaner way to do this?
            shift_map_source.mdh = HDFMDHandler(t)

        # build dict of dicts so we can easily rebuild shiftfield objects in multiview.calc_shifts_for_points
        shift_map = {'shiftModel': shift_map_source.mdh['Multiview.shift_map.model']}
        legend = shift_map_source.mdh['Multiview.shift_map.legend']
        for l in legend.keys():
            keys = shift_map_source.keys()
            shift_map[l] = dict(zip(keys, [shift_map_source[k][legend[l]] for k in keys]))

        t.close()
    except tables.HDF5ExtError:  # file is probably saved as json (legacy)
        s = unifiedIO.read(uri)
        shift_map = json.loads(s)

    return shift_map
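For reference, the returned dict pairs the model name with one per-key dict per legend entry, roughly as follows (channel names and values are illustrative):

    shift_map = {
        'shiftModel': 'some.shiftmodel.Class',  # hypothetical model name
        'Chan0': {'ax': 0.1, 'ay': -0.2},       # one dict per legend entry
    }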
Code Example #6
    def execute(self, namespace):
        from PYME.Analysis.points.astigmatism import astigTools
        from PYME.IO import unifiedIO
        import json

        inp = namespace[self.input_name]

        if 'mdh' not in dir(inp):
            raise RuntimeError('MapAstigZ needs metadata')

        if self.astigmatism_calibration_location == '':  # grab calibration from the metadata
            calibration_location = inp.mdh['Analysis.AstigmatismMapID']
        else:
            calibration_location = self.astigmatism_calibration_location

        s = unifiedIO.read(calibration_location)

        astig_calibrations = json.loads(s)

        mapped = tabular.MappingFilter(inp)

        z, zerr = astigTools.lookup_astig_z(mapped,
                                            astig_calibrations,
                                            self.rough_knot_spacing,
                                            plot=False)

        mapped.addColumn('astigmatic_z', z)
        mapped.addColumn('astigmatic_z_lookup_error', zerr)
        mapped.setMapping('z', 'astigmatic_z + z')

        mapped.mdh = inp.mdh
        mapped.mdh[
            'Analysis.astigmatism_calibration_used'] = calibration_location

        namespace[self.output_name] = mapped
Code Example #7
File: views.py Project: Hannahmar/python-microscopy
def run_template(request):
    from PYME import config
    from PYME.IO import unifiedIO
    from PYME.recipes import Recipe
    from PYME.recipes import modules
    from PYME.cluster.rules import RecipeRule

    recipeURI = 'pyme-cluster://%s/%s' % (
        server_filter, request.POST.get('recipeURL').lstrip('/'))
    output_directory = 'pyme-cluster://%s/%s' % (
        server_filter, request.POST.get('recipeOutputPath').lstrip('/'))

    recipe_text = unifiedIO.read(recipeURI).decode('utf-8')
    recipe = Recipe.fromYAML(recipe_text)

    # handle templated userfile inputs - these will be loaded by e.g. unifiedIO later
    for file_input in recipe.file_inputs:
        input_url = 'pyme-cluster://%s/%s' % (
            server_filter, request.POST.get('%sURL' % file_input).lstrip('/'))
        recipe_text = recipe_text.replace('{' + file_input + '}', input_url)

    rule = RecipeRule(recipe=recipe_text,
                      output_dir=output_directory,
                      inputs={'input': request.POST.getlist('files', [])})
    rule.push()

    return HttpResponseRedirect('/status/queues/')
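Compared with example #2, the newer RecipeRule bundles the input file list into the rule itself instead of a separate fileTasksForInputs call. A hypothetical push with explicit values:

    rule = RecipeRule(recipe=recipe_text,
                      output_dir='pyme-cluster://local/analysis',  # illustrative
                      inputs={'input': ['pyme-cluster://local/data/series_0.h5']})
    rule.push()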
Code Example #8
    def __init__(self, recipe=None, recipeURI=None):
        from PYME.recipes.modules import ModuleCollection
        if recipe:
            if isinstance(recipe, string_types):
                self.recipe_text = recipe
                self.recipe = ModuleCollection.fromYAML(recipe)
            else:
                self.recipe_text = recipe.toYAML()
                self.recipe = recipe

            self.recipeURI = None
        else:
            self.recipe = None
            if recipeURI is None:
                raise ValueError('recipeURI must be defined if no recipe given')
            else:
                from PYME.IO import unifiedIO
                self.recipeURI = recipeURI
                self.recipe = ModuleCollection.fromYAML(unifiedIO.read(recipeURI))

        self.taskQueueURI = _getTaskQueueURI()

        #generate a queue ID as a hash of the recipe and the current time
        h = hashlib.md5((self.recipeURI if self.recipeURI else self.recipe_text).encode())  # hashlib requires bytes on py3
        h.update(str(time.time()).encode())
        self.queueID = h.hexdigest()
Code Example #9
File: views.py Project: b3nroll1ns/python-microscopy
def view_svg(request):
    from PYME.IO import unifiedIO
    from PYME.recipes.modules import ModuleCollection
    from PYME.recipes import recipeLayout

    recipeURI = ('pyme-cluster://%s/' %
                 server_filter) + request.GET.get('recipeURL').lstrip('/')

    recipe = ModuleCollection.fromYAML(unifiedIO.read(recipeURI))

    svg = recipeLayout.to_svg(recipe.dependancyGraph())

    return HttpResponse(svg, content_type='image/svg+xml')
Code Example #10
File: views.py Project: b3nroll1ns/python-microscopy
def extra_inputs(request):
    from PYME.IO import unifiedIO
    from PYME.recipes.modules import ModuleCollection

    recipeURI = ('pyme-cluster://%s/' %
                 server_filter) + request.GET.get('recipeURL').lstrip('/')

    recipe = ModuleCollection.fromYAML(unifiedIO.read(recipeURI))

    return render(request, 'recipes/extra_inputs.html', {
        'file_inputs': recipe.file_inputs,
        'serverfilter': server_filter
    })
Code Example #11
    def __init__(self,
                 seriesName,
                 analysisMetadata,
                 resultsFilename=None,
                 startAt=0,
                 dataSourceModule=None,
                 serverfilter=clusterIO.local_serverfilter,
                 **kwargs):
        from PYME.IO import MetaDataHandler
        from PYME.Analysis import MetaData
        from PYME.IO.FileUtils.nameUtils import genClusterResultFileName
        from PYME.IO import unifiedIO

        unifiedIO.assert_uri_ok(seriesName)

        if resultsFilename is None:
            resultsFilename = genClusterResultFileName(seriesName)

        resultsFilename = verify_cluster_results_filename(resultsFilename)
        logger.info('Results file: ' + resultsFilename)

        resultsMdh = MetaDataHandler.NestedClassMDHandler()
        # NB - anything passed in analysis MDH will wipe out corresponding entries in the series metadata
        resultsMdh.update(
            json.loads(unifiedIO.read(seriesName + '/metadata.json')))
        resultsMdh.update(analysisMetadata)

        resultsMdh['EstimatedLaserOnFrameNo'] = resultsMdh.getOrDefault(
            'EstimatedLaserOnFrameNo',
            resultsMdh.getOrDefault('Analysis.StartAt', 0))
        MetaData.fixEMGain(resultsMdh)

        self._setup(seriesName, resultsMdh, resultsFilename, startAt,
                    serverfilter)

        #load data source
        if dataSourceModule is None:
            DataSource = DataSources.getDataSourceForFilename(seriesName)
        else:
            DataSource = __import__(
                'PYME.IO.DataSources.' + dataSourceModule,
                fromlist=['PYME', 'io',
                          'DataSources']).DataSource  #import our data source

        self.ds = DataSource(seriesName)

        logger.debug('DataSource.__class__: %s' % self.ds.__class__)

        Rule.__init__(self, **kwargs)
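Per the NB comment, later update calls win, so analysis metadata overrides matching entries from the series metadata. A minimal illustration:

    from PYME.IO import MetaDataHandler

    mdh = MetaDataHandler.NestedClassMDHandler()
    mdh.update({'Analysis.StartAt': 0})   # from the series metadata.json
    mdh.update({'Analysis.StartAt': 30})  # analysisMetadata entry wins
    assert mdh['Analysis.StartAt'] == 30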
Code Example #12
def createFitTaskFromTaskDef(task):
    """
    Creates a fit task from a new-style json task definition
    Parameters
    ----------
    task : dict
        The parsed task definition. As the task definition will need to be parsed by the worker before we get here,
        we expect this to take the form of a python dictionary.

    Returns
    -------

    a fitTask instance

    """
    from PYME.IO import MetaDataHandler

    dataSourceID = task['inputs']['frames']
    frameIndex = int(task['taskdef']['frameIndex'])

    #logger.debug('Creating a task for %s - frame %d' % (dataSourceID, frameIndex))

    md = task['taskdef']['metadata']

    #sort out our metadata
    #TODO - Move this somewhere saner - e.g. a helper function in the MetaDataHandler module
    mdh = MetaDataHandler.NestedClassMDHandler()
    if isinstance(md, dict):
        #metadata was parsed with the enclosing json
        mdh.update(md)
    elif isinstance(md, six.string_types):
        if md.startswith('{'):
            #metadata is a quoted json dump
            import json
            mdh.update(json.loads(md))
        else:
            #metadata entry is a filename/URI
            from PYME.IO import unifiedIO
            if md.endswith('.json'):
                import json
                mdh.update(json.loads(unifiedIO.read(md)))
            else:
                raise NotImplementedError(
                    'Loading metadata from a URI in task description is not yet supported'
                )

    return fitTask(dataSourceID=dataSourceID,
                   frameIndex=frameIndex,
                   metadata=mdh)
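The metadata entry may arrive as an already-parsed dict, a quoted JSON dump, or a .json URI. A hypothetical task definition exercising the quoted-JSON branch:

    task = {
        'inputs': {'frames': 'pyme-cluster:///data/series.h5'},  # illustrative URI
        'taskdef': {
            'frameIndex': '42',                    # coerced with int()
            'metadata': '{"voxelsize.x": 0.07}',   # quoted JSON dump
        },
    }
    fit_task = createFitTaskFromTaskDef(task)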
Code Example #13
def launch_localize(analysisMDH, seriesName):
    """
    Pushes an analysis task for a given series to the distributor

    Parameters
    ----------
    analysisMDH : dictionary-like
        MetaDataHandler describing the analysis tasks to launch
    seriesName : str
        cluster path, e.g. pyme-cluster:///example_folder/series
    Returns
    -------

    """
    #import logging
    import json
    #from PYME.ParallelTasks import HTTPTaskPusher
    from PYME.IO import MetaDataHandler
    from PYME.Analysis import MetaData
    from PYME.IO.FileUtils.nameUtils import genClusterResultFileName
    from PYME.IO import unifiedIO

    unifiedIO.assert_uri_ok(seriesName)

    resultsFilename = verify_cluster_results_filename(
        genClusterResultFileName(seriesName))
    logger.info('Results file: ' + resultsFilename)

    resultsMdh = MetaDataHandler.NestedClassMDHandler()
    # NB - anything passed in analysis MDH will wipe out corresponding entries in the series metadata
    resultsMdh.update(json.loads(unifiedIO.read(seriesName +
                                                '/metadata.json')))
    resultsMdh.update(analysisMDH)

    resultsMdh['EstimatedLaserOnFrameNo'] = resultsMdh.getOrDefault(
        'EstimatedLaserOnFrameNo',
        resultsMdh.getOrDefault('Analysis.StartAt', 0))
    MetaData.fixEMGain(resultsMdh)
    # resultsMdh['DataFileID'] = fileID.genDataSourceID(image.dataSource)

    # TODO - do we need to keep track of the pushers in some way (we currently rely on the fact that the pushing thread
    # will hold a reference
    pusher = HTTPRulePusher(dataSourceID=seriesName,
                            metadata=resultsMdh,
                            resultsFilename=resultsFilename)

    logging.debug('Queue created')
Code Example #14
    def execute(self, namespace):
        from PYME.Analysis.points import multiview
        from PYME.IO import unifiedIO
        from PYME.IO.MetaDataHandler import HDFMDHandler
        import tables
        import json

        inp = namespace[self.input_name]

        if 'mdh' not in dir(inp):
            raise RuntimeError('ShiftCorrect needs metadata')

        if self.shift_map_path == '':  # grab shftmap from the metadata
            loc = inp.mdh['Shiftmap']
        else:
            loc = self.shift_map_path

        try:  # try loading shift map as hdf file
            with unifiedIO.local_or_temp_filename(loc) as f:
                t = tables.open_file(f)
                shift_map_source = tabular.HDFSource(
                    t,
                    'shift_map')  # todo - is there a cleaner way to do this?
                shift_map_source.mdh = HDFMDHandler(t)

            # build dict of dicts so we can easily rebuild shiftfield objects in multiview.calc_shifts_for_points
            shift_map = {
                'shiftModel': shift_map_source.mdh['Multiview.shift_map.model']
            }
            legend = shift_map_source.mdh['Multiview.shift_map.legend']
            for l in legend.keys():
                keys = shift_map_source.keys()
                shift_map[l] = dict(
                    zip(keys, [shift_map_source[k][legend[l]] for k in keys]))

            t.close()
        except tables.HDF5ExtError:  # file is probably saved as json (legacy)
            s = unifiedIO.read(loc)  # use the resolved location, not self.shift_map_path, which may be empty
            shift_map = json.loads(s)

        mapped = tabular.MappingFilter(inp)

        multiview.apply_shifts_to_points(mapped, shift_map)
        # propagate metadata
        mapped.mdh = inp.mdh
        mapped.mdh['Multiview.shift_map.location'] = loc

        namespace[self.output_name] = mapped
Code Example #15
    def update_from_file(self, filename):
        """
        Update the contents of the recipe from a .yaml file

        WARNING: This function will likely be REMOVED WITHOUT NOTICE. It is a quick hack to get the prototype web-based
        recipe editor working, but will be surplus to requirements once we have a proper recipe manager in the web based
        editor. It's logically obtuse to consider something the same recipe once you've completely replaced it with a
        recipe that has been loaded from file. It is much more sensible to create a new recipe instance when loading
        a recipe from file, and this is the recommended approach.

        Parameters
        ----------
        filename: str
            filename or PYME-CLUSTER:// URI

        """
        from PYME.IO import unifiedIO

        self.update_from_yaml(unifiedIO.read(filename).decode())
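A hypothetical call (local paths and pyme-cluster:// URIs both resolve through unifiedIO):

    recipe.update_from_file('pyme-cluster:///recipes/localisation.yaml')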
Code Example #16
    def computeLoop(self):
        while self._loop_alive:
            #loop over tasks - we pop each task and then delete it after processing
            #to keep memory usage down

            queueURL, taskDescr = self.inputQueue.get()
            if taskDescr['type'] == 'localization':
                try:
                    task = remFitBuf.createFitTaskFromTaskDef(taskDescr)
                    res = task()

                    self.resultsQueue.put((queueURL, taskDescr, res))

                except Exception:  # avoid a bare except; let KeyboardInterrupt etc. propagate
                    import traceback
                    traceback.print_exc()
                    tb = traceback.format_exc()
                    logger.exception(tb)
                    self.resultsQueue.put(
                        (queueURL, taskDescr, TaskError(taskDescr, tb)))
                    #self.resultsQueue.put((queueURL, taskDescr, None))

            elif taskDescr['type'] == 'recipe':
                from PYME.recipes import Recipe
                from PYME.recipes import modules

                try:
                    taskdefRef = taskDescr.get('taskdefRef', None)
                    if taskdefRef:  #recipe is defined in a file - go find it
                        recipe_yaml = unifiedIO.read(taskdefRef)

                    else:  #recipe is defined in the task
                        recipe_yaml = taskDescr['taskdef']['recipe']

                    recipe = Recipe.fromYAML(recipe_yaml)

                    #load recipe inputs
                    logging.debug(taskDescr)
                    for key, url in taskDescr['inputs'].items():
                        logging.debug('RECIPE: loading %s as %s' % (url, key))
                        recipe.loadInput(url, key)

                    #print recipe.namespace
                    recipe.execute()

                    #save results
                    context = {
                        'data_root': clusterIO.local_dataroot,
                        'task_id': taskDescr['id'].split('~')[0]
                    }

                    #update context with file stub and input directory
                    try:
                        principle_input = taskDescr['inputs'][
                            'input']  #default input
                        context['file_stub'] = os.path.splitext(
                            os.path.basename(principle_input))[0]
                        context['input_dir'] = unifiedIO.dirname(
                            principle_input)
                    except KeyError:
                        pass

                    try:
                        od = taskDescr['output_dir']
                        # make sure we have a trailing slash
                        # TODO - this should be fine for most windows use cases, as you should generally
                        # use POSIX urls for the cluster/cluster of one, but might need checking
                        if not od.endswith('/'):
                            od = od + '/'

                        context['output_dir'] = unifiedIO.dirname(od)
                    except KeyError:
                        pass

                    #print taskDescr['inputs']
                    #print context

                    #abuse outputs as context
                    outputs = taskDescr.get('outputs', None)
                    if outputs is not None:
                        context.update(outputs)
                    #print context, context['input_dir']
                    recipe.save(context)

                    self.resultsQueue.put((queueURL, taskDescr, True))

                except Exception:
                    import traceback
                    traceback.print_exc()
                    tb = traceback.format_exc()
                    logger.exception(tb)
                    self.resultsQueue.put(
                        (queueURL, taskDescr, TaskError(taskDescr, tb)))
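The context dict drives output-path templating in recipe.save. After the updates above it might look like this (all values are illustrative):

    context = {
        'data_root': '/srv/pyme-data',        # clusterIO.local_dataroot
        'task_id': 'abc123',                  # id, truncated before the '~'
        'file_stub': 'series_0',              # default input basename, sans extension
        'input_dir': 'pyme-cluster:///data',
        'output_dir': 'pyme-cluster:///analysis',
    }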
Code Example #17
    def _findAndParseMetadata(self, filename):
        """Try and find and load a .xml or .md metadata file that might be ascociated
        with a given image filename. See the relevant metadatahandler classes
        for details."""
        import xml.parsers.expat
        from PYME.IO import unifiedIO

        if self.mdh is not None:
            return  #we already have metadata (probably passed in on command line)

        mdf = None
        xmlfn = os.path.splitext(filename)[0] + '.xml'
        xmlfnmc = os.path.splitext(filename)[0].split('__')[0] + '.xml'
        if os.path.exists(xmlfn):
            try:
                self.mdh = MetaDataHandler.NestedClassMDHandler(
                    MetaData.TIRFDefault)
                self.mdh.copyEntriesFrom(MetaDataHandler.XMLMDHandler(xmlfn))
                mdf = xmlfn
            except xml.parsers.expat.ExpatError:
                #fix for bug in which PYME .md was written with a .xml extension
                self.mdh = MetaDataHandler.NestedClassMDHandler(
                    MetaData.BareBones)
                self.mdh.copyEntriesFrom(
                    MetaDataHandler.SimpleMDHandler(xmlfn))
                mdf = xmlfn

        elif os.path.exists(
                xmlfnmc):  #this is a single colour channel of a pair
            self.mdh = MetaDataHandler.NestedClassMDHandler(
                MetaData.TIRFDefault)
            self.mdh.copyEntriesFrom(MetaDataHandler.XMLMDHandler(xmlfnmc))
            mdf = xmlfnmc
        else:
            self.mdh = MetaDataHandler.NestedClassMDHandler(MetaData.BareBones)

            #check for simple metadata (python code with an .md extension which
            #fills a dictionary called md)
            mdfn = os.path.splitext(filename)[0] + '.md'
            jsonfn = os.path.splitext(filename)[0] + '.json'
            if os.path.exists(mdfn):
                self.mdh.copyEntriesFrom(MetaDataHandler.SimpleMDHandler(mdfn))
                mdf = mdfn
            elif os.path.exists(jsonfn):
                import json
                with open(jsonfn, 'r') as f:
                    mdd = json.load(f)
                    self.mdh.update(mdd)
            elif filename.endswith('.lsm'):
                #read lsm metadata
                from PYME.contrib.gohlke.tifffile import TIFFfile
                tf = TIFFfile(filename)
                lsm_info = tf[0].cz_lsm_scan_information
                self.mdh['voxelsize.x'] = lsm_info['line_spacing']
                self.mdh['voxelsize.y'] = lsm_info['line_spacing']
                self.mdh['voxelsize.z'] = lsm_info['plane_spacing']

                def lsm_pop(basename, dic):
                    for k, v in dic.items():
                        if isinstance(v, list):
                            #print k, v
                            for i, l_i in enumerate(v):
                                #print i, l_i, basename
                                lsm_pop(
                                    basename + k + '.' + k[:-1] + '%i.' % i,
                                    l_i)

                        else:
                            self.mdh[basename + k] = v

                lsm_pop('LSM.', lsm_info)

            elif filename.endswith('.tif'):
                #look for OME data...
                from PYME.contrib.gohlke.tifffile import TIFFfile
                tf = TIFFfile(filename)

                if tf.is_ome:
                    try:
                        omemdh = MetaDataHandler.OMEXMLMDHandler(
                            tf.pages[0].tags['image_description'].value)

                        self.mdh.copyEntriesFrom(omemdh)
                    except IndexError:
                        pass

            elif filename.endswith('.dcimg'):  #Bewersdorf lab Biplane
                # FIXME load seriesXX.json for seriesXX_chunkXX.dcimg files more elegantly
                jsonfn = filename[:-22] + '.json'

                import json
                try:
                    mdd = json.loads(unifiedIO.read(jsonfn))
                    self.mdh.update(mdd)

                except IOError:
                    pass

            elif filename.endswith('.dbl'):  #Bewersdorf lab STED
                mdfn = filename[:-4] + '.txt'
                entrydict = {}

                try:  #try to read in extra metadata if possible
                    with unifiedIO.openFile(mdfn, 'r') as mf:
                        for line in mf:
                            s = line.split(':')
                            if len(s) == 2:
                                entrydict[s[0]] = s[1]

                except IOError:
                    pass


#                vx, vy = entrydict['Pixel size (um)'].split('x')
#                self.mdh['voxelsize.x'] = float(vx)
#                self.mdh['voxelsize.y'] = float(vy)
#                self.mdh['voxelsize.z'] = 0.2 #FIXME for stacks ...
#
#                sx, sy = entrydict['Image format'].split('x')
#                self.mdh['Camera.ROIWidth'] = int(sx)
#                self.mdh['Camera.ROIHeight'] = int(sy)
#
#                self.mdh['NumImages'] = int(entrydict['# Images'])

                with unifiedIO.openFile(filename) as df:
                    s = df.read(8)
                    Z, X, Y, T = numpy.frombuffer(s, '>u2')  # frombuffer: fromstring is deprecated for binary data
                    s = df.read(16)
                    depth, width, height, elapsed = numpy.frombuffer(s, '<f4')

                    self.mdh['voxelsize.x'] = width / X
                    self.mdh['voxelsize.y'] = height / Y
                    self.mdh['voxelsize.z'] = depth

                    self.mdh['Camera.ROIWidth'] = X
                    self.mdh['Camera.ROIHeight'] = Y
                    self.mdh['NumImages'] = Z * T

                def _sanitise_key(key):
                    k = key.replace('#', 'Num')
                    k = k.replace('(%)', '')
                    k = k.replace('(', '')
                    k = k.replace(')', '')
                    k = k.replace('.', '')
                    k = k.replace('/', '')
                    k = k.replace('?', '')
                    k = k.replace(' ', '')
                    if not k[0].isalpha():
                        k = 's' + k
                    return k

                for k, v in entrydict.items():
                    self.mdh['STED.%s' % _sanitise_key(k)] = v

            #else: #try bioformats
            #    OMEXML = bioformats.get_omexml_metadata(filename).encode('utf8')
            #    OMEmd = MetaDataHandler.OMEXMLMDHandler(OMEXML)
            #    self.mdh.copyEntriesFrom(OMEmd)

        if self.haveGUI and not ('voxelsize.x' in self.mdh.keys()
                                 and 'voxelsize.y' in self.mdh.keys()):
            from PYME.DSView.voxSizeDialog import VoxSizeDialog

            dlg = VoxSizeDialog(None)
            dlg.ShowModal()

            self.mdh.setEntry('voxelsize.x', dlg.GetVoxX())
            self.mdh.setEntry('voxelsize.y', dlg.GetVoxY())
            self.mdh.setEntry('voxelsize.z', dlg.GetVoxZ())

        return mdf
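The .dbl header read above is four big-endian uint16 values (Z, X, Y, T) followed by four little-endian float32 values (depth, width, height, elapsed). A standalone sketch parsing a synthetic header (values are illustrative):

    import io
    import numpy as np

    hdr = (np.array([5, 256, 256, 1], '>u2').tobytes()
           + np.array([0.2, 10.0, 10.0, 3.5], '<f4').tobytes())
    f = io.BytesIO(hdr)
    Z, X, Y, T = np.frombuffer(f.read(8), '>u2')
    depth, width, height, elapsed = np.frombuffer(f.read(16), '<f4')
    print(X, Y, Z * T)  # 256 256 5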