Example #1
    def _loadBioformats(self, filename):
        #from PYME.IO.FileUtils import readTiff
        from PYME.IO.DataSources import BioformatsDataSource

        try:
            import bioformats
        except ImportError:
            logger.exception(
                'Error importing bioformats - is the python-bioformats module installed?'
            )
            raise

        #mdfn = self.FindAndParseMetadata(filename)
        print("Bioformats:loading data")
        self.dataSource = BioformatsDataSource.DataSource(filename, None)
        self.mdh = MetaDataHandler.NestedClassMDHandler(MetaData.BareBones)

        print("Bioformats:loading metadata")
        OMEXML = bioformats.get_omexml_metadata(filename).encode('utf8')
        print("Bioformats:parsing metadata")
        OMEmd = MetaDataHandler.OMEXMLMDHandler(OMEXML)
        self.mdh.copyEntriesFrom(OMEmd)
        print("Bioformats:done")

        print(self.dataSource.shape)
        self.dataSource = BufferedDataSource.DataSource(
            self.dataSource, min(self.dataSource.getNumSlices(), 50))
        self.data = self.dataSource  #this will get replaced with a wrapped version

        print(self.data.shape)

        #from PYME.ParallelTasks.relativeFiles import getRelFilename
        self.seriesName = getRelFilename(filename)

        self.mode = 'default'
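
Note: python-bioformats wraps the Java Bio-Formats library, so loaders like the one above only work once a JVM is running. A minimal sketch of the setup this assumes, using the documented javabridge companion package (file name hypothetical):

    import javabridge
    import bioformats

    # start the JVM that python-bioformats requires before any bioformats calls
    javabridge.start_vm(class_path=bioformats.JARS)
    try:
        omexml = bioformats.get_omexml_metadata('example.tif')  # hypothetical file
    finally:
        javabridge.kill_vm()  # shut the JVM down when done (it cannot be restarted in-process)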
Example #2
    def _loadh5(self, filename):
        """Load PYMEs semi-custom HDF5 image data format. Offloads all the
        hard work to the HDFDataSource class"""
        import tables
        from PYME.IO.DataSources import HDFDataSource, BGSDataSource
        from PYME.IO import tabular

        self.dataSource = HDFDataSource.DataSource(filename, None)
        #chain on a background subtraction data source, so we can easily do
        #background subtraction in the GUI the same way as in the analysis
        self.data = BGSDataSource.DataSource(
            self.dataSource)  #this will get replaced with a wrapped version

        if 'MetaData' in self.dataSource.h5File.root:  #should always be true
            self.mdh = MetaData.TIRFDefault
            self.mdh.copyEntriesFrom(
                MetaDataHandler.HDFMDHandler(self.dataSource.h5File))
        else:
            self.mdh = MetaData.TIRFDefault
            import wx
            wx.MessageBox(
                "Carrying on with defaults - no guarantees it'll work well",
                'ERROR: No metadata found in file ...', wx.OK)
            print(
                "ERROR: No metadata found in file ... Carrying on with defaults - no guarantees it'll work well"
            )

        #attempt to estimate any missing parameters from the data itself
        try:
            MetaData.fillInBlanks(self.mdh, self.dataSource)
        except:
            logger.exception('Error attempting to populate missing metadata')

        #calculate the name to use when we do batch analysis on this
        #from PYME.IO.FileUtils.nameUtils import getRelFilename
        self.seriesName = getRelFilename(filename)

        #try and find a previously performed analysis
        fns = filename.split(os.path.sep)
        cand = os.path.sep.join(fns[:-2] + [
            'analysis',
        ] + fns[-2:]) + 'r'
        print(cand)
        if False:  #os.path.exists(cand):
            h5Results = tables.open_file(cand)

            if 'FitResults' in dir(h5Results.root):
                self.fitResults = h5Results.root.FitResults[:]
                self.resultsSource = tabular.H5RSource(h5Results)

                self.resultsMdh = MetaData.TIRFDefault
                self.resultsMdh.copyEntriesFrom(
                    MetaDataHandler.HDFMDHandler(h5Results))

        self.events = self.dataSource.getEvents()

        self.mode = 'LM'
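
For reference, the candidate-path construction above inserts an 'analysis' directory two levels up and appends 'r' to the extension. A quick sketch with a hypothetical POSIX path:

    import os

    fns = '/data/2020_01_01/series_0.h5'.split(os.path.sep)  # hypothetical filename
    cand = os.path.sep.join(fns[:-2] + ['analysis'] + fns[-2:]) + 'r'
    # cand == '/data/analysis/2020_01_01/series_0.h5r'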
Example #3
    def _loadNPY(self, filename):
        """Load numpy .npy data.
        
       
        """
        from PYME.IO import unifiedIO
        mdfn = self._findAndParseMetadata(filename)

        with unifiedIO.local_or_temp_filename(filename) as fn:
            self.data = numpy.load(fn)

        #from PYME.ParallelTasks.relativeFiles import getRelFilename
        self.seriesName = getRelFilename(filename)

        self.mode = 'default'
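
A file this loader accepts is a plain .npy array, e.g. written with numpy.save (shape and dtype hypothetical):

    import numpy

    numpy.save('stack.npy', numpy.zeros((64, 64, 10), dtype='float32'))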
Example #4
    def _loadImageSeries(self, filename):
        #from PYME.IO.FileUtils import readTiff
        from PYME.IO.DataSources import ImageSeriesDataSource

        self.dataSource = ImageSeriesDataSource.DataSource(filename, None)
        self.dataSource = BufferedDataSource.DataSource(
            self.dataSource, min(self.dataSource.getNumSlices(), 50))
        self.data = self.dataSource  #this will get replaced with a wrapped version
        #self.data = readTiff.read3DTiff(filename)

        self._findAndParseMetadata(filename)

        #from PYME.ParallelTasks.relativeFiles import getRelFilename
        self.seriesName = getRelFilename(filename)

        self.mode = 'default'
Example #5
    def _loadDCIMG(self, filename):
        from PYME.IO.DataSources import DcimgDataSource, MultiviewDataSource

        self._findAndParseMetadata(filename)

        self.dataSource = DcimgDataSource.DataSource(filename)

        if 'Multiview.NumROIs' in self.mdh.keys():
            self.dataSource = MultiviewDataSource.DataSource(
                self.dataSource, self.mdh)

        self.data = self.dataSource  #this will get replaced with a wrapped version

        self.seriesName = getRelFilename(filename)

        self.mode = 'default'
Example #6
    def _loadPZF(self, filename):
        """Load .pzf data.


        """
        from PYME.IO import unifiedIO
        from PYME.IO import PZFFormat
        mdfn = self._findAndParseMetadata(filename)

        with unifiedIO.openFile(filename) as f:
            self.data = PZFFormat.loads(f.read())[0]

        #from PYME.ParallelTasks.relativeFiles import getRelFilename
        self.seriesName = getRelFilename(filename)

        self.mode = 'default'
Example #7
    def _loadPSF(self, filename):
        """Load PYME .psf data.
        
        .psf files consist of a tuple containing the data and the voxelsize.
        """
        from PYME.IO import unifiedIO
        with unifiedIO.local_or_temp_filename(filename) as fn:
            #the (data, voxelsize) tuple is stored as a pickled object array,
            #so modern numpy needs allow_pickle=True to read it
            self.data, vox = numpy.load(fn, allow_pickle=True)
        self.mdh = MetaDataHandler.NestedClassMDHandler(MetaData.ConfocDefault)

        self.mdh.setEntry('voxelsize.x', vox.x)
        self.mdh.setEntry('voxelsize.y', vox.y)
        self.mdh.setEntry('voxelsize.z', vox.z)

        #from PYME.ParallelTasks.relativeFiles import getRelFilename
        self.seriesName = getRelFilename(filename)

        self.mode = 'psf'
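
Conversely, a .psf file is just a pickled (data, voxelsize) tuple written with numpy.save. A sketch using a stand-in voxel-size type (the real PYME type may differ; sizes hypothetical):

    import collections
    import numpy

    Voxelsize = collections.namedtuple('Voxelsize', ['x', 'y', 'z'])  # stand-in with .x/.y/.z attributes

    arr = numpy.empty(2, dtype=object)  # build the (data, voxelsize) pair as an object array
    arr[0] = numpy.zeros((61, 61, 31), dtype='float32')  # hypothetical PSF stack
    arr[1] = Voxelsize(0.07, 0.07, 0.2)

    # open the file ourselves so numpy.save doesn't append a '.npy' extension
    with open('measured.psf', 'wb') as f:
        numpy.save(f, arr)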
Example #8
    def _loadDBL(self, filename):
        """Load Bewersdorf custom STED data.
       
        """
        mdfn = self._findAndParseMetadata(filename)

        self.data = numpy.memmap(filename,
                                 dtype='<f4',
                                 mode='r',
                                 offset=128,
                                 shape=(self.mdh['Camera.ROIWidth'],
                                        self.mdh['Camera.ROIHeight'],
                                        self.mdh['NumImages']),
                                 order='F')

        #from PYME.ParallelTasks.relativeFiles import getRelFilename
        self.seriesName = getRelFilename(filename)

        self.mode = 'default'
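
The memmap above implies a fixed layout: a 128-byte header followed by little-endian float32 pixels in Fortran order. Under that assumption the expected file size can be sanity-checked before mapping (names as in the method above):

    import os

    n_bytes = 128 + 4 * mdh['Camera.ROIWidth'] * mdh['Camera.ROIHeight'] * mdh['NumImages']
    assert os.path.getsize(filename) == n_bytes, 'file does not match the assumed .dbl layout'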
Example #9
    def StartSpooling(self,
                      fn=None,
                      stack=None,
                      compLevel=None,
                      zDwellTime=None,
                      doPreflightCheck=True,
                      maxFrames=sys.maxsize,
                      pzf_compression_settings=None,
                      cluster_h5=None):
        """Start spooling
        """

        # these settings were previously managed by the GUI, but are now managed by the controller;
        # still allow them to be passed in, but default to using our internal values
        compLevel = self.hdf_compression_level if compLevel is None else compLevel
        pzf_compression_settings = self.pzf_compression_settings if pzf_compression_settings is None else pzf_compression_settings
        stack = self.z_stepped if stack is None else stack
        cluster_h5 = self.cluster_h5 if cluster_h5 is None else cluster_h5
        fn = self.seriesName if fn in ['', None] else fn
        zDwellTime = self.z_dwell if zDwellTime is None else zDwellTime

        #make directories as needed
        if not (self.spoolType == 'Cluster'):
            dirname = os.path.split(self._get_queue_name(fn))[0]
            if not os.path.exists(dirname):
                os.makedirs(dirname)

        if self._checkOutputExists(
                fn):  #check to see if data with the same name exists
            self.seriesCounter += 1
            self.seriesName = self._GenSeriesName()

            raise IOError('Output file already exists')

        if stack:
            protocol = self.protocolZ
            if zDwellTime is not None:
                protocol.dwellTime = zDwellTime
            print(protocol)
        else:
            protocol = self.protocol

        if doPreflightCheck and not preflight.ShowPreflightResults(
                None, protocol.PreflightCheck()):
            return  #bail if we failed the preflight check and the user didn't choose to continue

        #fix timing when using fake camera
        if self.scope.cam.__class__.__name__ == 'FakeCamera':
            fakeCycleTime = self.scope.cam.GetIntegTime()
        else:
            fakeCycleTime = None

        frameShape = (self.scope.cam.GetPicWidth(),
                      self.scope.cam.GetPicHeight())

        if self.spoolType == 'Queue':
            from PYME.Acquire import QueueSpooler
            self.queueName = getRelFilename(self._get_queue_name(fn))
            self.spooler = QueueSpooler.Spooler(
                self.queueName,
                self.scope.frameWrangler.onFrame,
                frameShape=frameShape,
                protocol=protocol,
                guiUpdateCallback=self._ProgressUpate,
                complevel=compLevel,
                fakeCamCycleTime=fakeCycleTime,
                maxFrames=maxFrames)
        elif self.spoolType == 'Cluster':
            from PYME.Acquire import HTTPSpooler
            self.queueName = self._get_queue_name(fn, pcs=(not cluster_h5))
            self.spooler = HTTPSpooler.Spooler(
                self.queueName,
                self.scope.frameWrangler.onFrame,
                frameShape=frameShape,
                protocol=protocol,
                guiUpdateCallback=self._ProgressUpate,
                complevel=compLevel,
                fakeCamCycleTime=fakeCycleTime,
                maxFrames=maxFrames,
                compressionSettings=pzf_compression_settings,
                aggregate_h5=cluster_h5)

        else:
            from PYME.Acquire import HDFSpooler
            self.spooler = HDFSpooler.Spooler(
                self._get_queue_name(fn),
                self.scope.frameWrangler.onFrame,
                frameShape=frameShape,
                protocol=protocol,
                guiUpdateCallback=self._ProgressUpate,
                complevel=compLevel,
                fakeCamCycleTime=fakeCycleTime,
                maxFrames=maxFrames)

        #TODO - sample info is probably better handled with a metadata hook
        #if sampInf:
        #    try:
        #        sampleInformation.getSampleData(self, self.spooler.md)
        #    except:
        #        #the connection to the database will timeout if not present
        #        #FIXME: catch the right exception (or delegate handling to sampleInformation module)
        #        pass

        self.spooler.onSpoolStop.connect(self.SpoolStopped)
        self.spooler.StartSpool()

        self.onSpoolStart.send(self)

        #return a function which can be called to indicate if we are done
        return lambda: not self.spooler.spoolOn
Example #10
    def start_spooling(self, fn=None, settings=None, preflight_mode='interactive'):
        """

        Parameters
        ----------
        fn : str, optional
            fn can be hardcoded here; otherwise this defers to the seriesName
            property, which will create one if need be.
        settings : dict
            keys should be `SpoolController` attributes or properties with
            setters. Not all keys need be present; example keys include:
                method : str
                    One of 'File', 'Cluster', or 'Queue' (py2 only)
                hdf_compression_level : int
                    zlib compression level that pytables should use (spool to
                    file and queue)
                z_stepped : bool
                    toggle z-stepping during acquisition
                z_dwell : int
                    number of frames to acquire at each z level (predicated on
                    `SpoolController.z_stepped` being True)
                cluster_h5 : bool
                    Toggle spooling to single h5 file on cluster rather than pzf
                    file per frame. Only applicable to 'Cluster' `method` and
                    preferred for PYMEClusterOfOne.
                pzf_compression_settings : dict
                    Compression settings relevant for 'Cluster' `method` if
                    `cluster_h5` is False. See HTTPSpooler.defaultCompSettings.
                protocol_name : str
                    Note that passing the protocol name will force a (re)load of
                    the protocol file (even if it is already selected).
                max_frames : int, optional
                    point at which to end the series automatically, by default
                    sys.maxsize
                subdirectory : str, optional
                    Directory within current set directory to spool this series. The
                    directory will be created if it doesn't already exist.
                extra_metadata : dict, optional
                    metadata to supplement this series with entries which are
                    known prior to acquisition but have no handlers to hook
                    into the start metadata
        preflight_mode : str (default='interactive')
            What to do when the preflight check fails. Options are 'interactive' (display a dialog and prompt the
            user), 'warn' (log a warning and continue), 'abort' (log an error and abort), and 'skip' (skip the check
            entirely). The first is suitable for interactive acquisition, whereas one of the latter modes is likely
            better for automated spooling via the action manager.
        """
        # these settings were previously managed by the GUI, but are now managed by the
        # controller; still allow them to be passed in, but default to internals
        settings = {} if settings is None else settings

        fn = self.seriesName if fn in ['', None] else fn
        stack = settings.get('z_stepped', self.z_stepped)
        compLevel = settings.get('hdf_compression_level', self.hdf_compression_level)
        pzf_compression_settings = settings.get('pzf_compression_settings', self.pzf_compression_settings)
        cluster_h5 = settings.get('cluster_h5', self.cluster_h5)
        maxFrames = settings.get('max_frames', sys.maxsize)
        stack_settings = settings.get('stack_settings', None)
        
        
        # try stack settings for z_dwell, then aq settings.
        # precedence is settings > stack_settings > self.z_dwell
        # The reasoning for allowing the dwell time to be set in either the spooling or stack settings is to allow
        # API users to choose which is most coherent for their use case (it would seem logical to put dwell time with
        # the other stack settings, but this becomes problematic when sharing stack settings across modalities - e.g.
        # PALM/STORM and widefield stacks which are likely to share most of the stack settings but have greatly different
        # z dwell times). PYMEAcquire specifies it in the spooling/series settings by default to allow shared usage
        # between modalities.
        if stack_settings:
            if isinstance(stack_settings, dict):
                z_dwell = stack_settings.get('DwellFrames', self.z_dwell)
            else:
                # have a StackSettings object
                # TODO - fix this to be a bit more sane and not use private attributes etc ...
                z_dwell = stack_settings._dwell_frames
                # z_dwell defaults to -1 (meaning 'ignore') in StackSettings objects if a value is not
                # explicitly provided. In this case, use our internal value instead. The reason for the 'ignore'
                # special value is to allow the same StackSettings object to be used for widefield stacks and
                # localization series (where sharing everything except dwell time makes sense).
                if z_dwell < 1:
                    z_dwell = self.z_dwell
        else:
            z_dwell = self.z_dwell
        
        z_dwell = settings.get('z_dwell', z_dwell)
        
        if (stack_settings is not None) and (not isinstance(stack_settings, stackSettings.StackSettings)):
            # let us pass stack settings as a dict, constructing a StackSettings instance as needed
            stack_settings = stackSettings.StackSettings(**dict(stack_settings))
        
        protocol_name = settings.get('protocol_name')
        if protocol_name is None:
            protocol, protocol_z = self.protocol, self.protocolZ
        else:
            pmod = prot.get_protocol(protocol_name)
            protocol, protocol_z = pmod.PROTOCOL, pmod.PROTOCOL_STACK

        subdirectory = settings.get('subdirectory', None)
        # make directories as needed; switch to makedirs(dir, exist_ok=True) once py2 support is dropped
        if (self.spoolType != 'Cluster') and (not os.path.exists(self.get_dirname(subdirectory))):
            os.makedirs(self.get_dirname(subdirectory))

        if self._checkOutputExists(fn): #check to see if data with the same name exists
            self.seriesCounter +=1
            self.seriesName = self._GenSeriesName()
            
            raise IOError('A series with the same name already exists')

        if stack:
            protocol = protocol_z
            protocol.dwellTime = z_dwell
            #print(protocol)

        if (preflight_mode != 'skip') and not preflight.ShowPreflightResults(protocol.PreflightCheck(), preflight_mode):
            return #bail if we failed the preflight check and the user didn't choose to continue
            
          
        #fix timing when using fake camera
        if self.scope.cam.__class__.__name__ == 'FakeCamera':
            fakeCycleTime = self.scope.cam.GetIntegTime()
        else:
            fakeCycleTime = None
            
        frameShape = (self.scope.cam.GetPicWidth(), self.scope.cam.GetPicHeight())
        
        if self.spoolType == 'Queue':
            from PYME.Acquire import QueueSpooler
            self.queueName = getRelFilename(self._get_queue_name(fn, subdirectory=subdirectory))
            self.spooler = QueueSpooler.Spooler(self.queueName, self.scope.frameWrangler.onFrame, 
                                                frameShape = frameShape, protocol=protocol, 
                                                guiUpdateCallback=self._ProgressUpate, complevel=compLevel, 
                                                fakeCamCycleTime=fakeCycleTime, maxFrames=maxFrames, stack_settings=stack_settings)
        elif self.spoolType == 'Cluster':
            from PYME.Acquire import HTTPSpooler
            self.queueName = self._get_queue_name(fn, pcs=(not cluster_h5), 
                                                  subdirectory=subdirectory)
            self.spooler = HTTPSpooler.Spooler(self.queueName, self.scope.frameWrangler.onFrame,
                                               frameShape = frameShape, protocol=protocol,
                                               guiUpdateCallback=self._ProgressUpate,
                                               fakeCamCycleTime=fakeCycleTime, maxFrames=maxFrames,
                                               compressionSettings=pzf_compression_settings, aggregate_h5=cluster_h5, stack_settings=stack_settings)
           
        else:
            from PYME.Acquire import HDFSpooler
            self.spooler = HDFSpooler.Spooler(self._get_queue_name(fn, subdirectory=subdirectory),
                                              self.scope.frameWrangler.onFrame,
                                              frameShape = frameShape, protocol=protocol, 
                                              guiUpdateCallback=self._ProgressUpate, complevel=compLevel, 
                                              fakeCamCycleTime=fakeCycleTime, maxFrames=maxFrames, stack_settings=stack_settings)

        #TODO - sample info is probably better handled with a metadata hook
        #if sampInf:
        #    try:
        #        sampleInformation.getSampleData(self, self.spooler.md)
        #    except:
        #        #the connection to the database will timeout if not present
        #        #FIXME: catch the right exception (or delegate handling to sampleInformation module)
        #        pass
        extra_metadata = settings.get('extra_metadata')
        if extra_metadata is not None:
            self.spooler.md.mergeEntriesFrom(MetaDataHandler.DictMDHandler(extra_metadata))

        # stop the frameWrangler before we start spooling
        # this serves to ensure that a) we don't accidentally spool frames which were in the camera buffer when we hit start
        # and b) we get a nice clean timestamp for when the actual frames start (after any protocol init tasks)
        # it might also slightly improve performance.
        self.scope.frameWrangler.stop()
        
        try:
            self.spooler.onSpoolStop.connect(self.SpoolStopped)
            self.spooler.StartSpool()
        except:
            self.spooler.abort()
            raise

        # restart frame wrangler
        self.scope.frameWrangler.Prepare()
        self.scope.frameWrangler.start()
        
        self.onSpoolStart.send(self)
        
        #return a function which can be called to indicate if we are done
        return lambda : self.spooler.spool_complete
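
As a usage sketch, starting a z-stepped series with everything passed through the settings dict (instance name and metadata entry hypothetical; keys as documented above):

    done = spool_controller.start_spooling(
        settings={
            'z_stepped': True,
            'z_dwell': 50,           # frames per z step
            'hdf_compression_level': 2,
            'max_frames': 10000,
            'subdirectory': 'test_series',
            'extra_metadata': {'Sample.Notes': 'bead slide'},  # hypothetical entry
        },
        preflight_mode='warn')

    # the returned callable reports whether spooling has finished
    if done():
        print('series complete')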
Example #11
    def _loadTiff(self, filename):
        #from PYME.IO.FileUtils import readTiff
        from PYME.IO.DataSources import TiffDataSource, BGSDataSource

        mdfn = self._findAndParseMetadata(filename)

        self.dataSource = TiffDataSource.DataSource(filename, None)
        print(self.dataSource.shape)
        self.dataSource = BufferedDataSource.DataSource(
            self.dataSource, min(self.dataSource.getNumSlices(), 50))
        self.data = self.dataSource  #this will get replaced with a wrapped version

        if self.dataSource.getNumSlices() > 500:  #this is likely to be a localization data set
            #chain on a background subtraction data source, so we can easily do
            #background subtraction in the GUI the same way as in the analysis
            self.data = BGSDataSource.DataSource(self.dataSource)  #this will get replaced with a wrapped version

        print(self.data.shape)

        #if we have a multi channel data set, try and pull in all the channels
        if 'ChannelFiles' in self.mdh.getEntryNames() and len(self.mdh['ChannelFiles']) != self.data.shape[3]:
            try:
                from PYME.IO.dataWrap import ListWrap
                #pull in all channels

                chans = []

                for cf in self.mdh.getEntry('ChannelFiles'):
                    cfn = os.path.join(os.path.split(filename)[0], cf)

                    ds = TiffDataSource.DataSource(cfn, None)
                    ds = BufferedDataSource.DataSource(
                        ds, min(ds.getNumSlices(), 50))

                    chans.append(ds)

                self.data = ListWrap(chans)  #this will get replaced with a wrapped version

                self.filename = mdfn
            except:
                pass

        elif 'ChannelNames' in self.mdh.getEntryNames() and len(self.mdh['ChannelNames']) == self.data.getNumSlices():
            from PYME.IO.dataWrap import ListWrap
            chans = [
                numpy.atleast_3d(self.data.getSlice(i))
                for i in range(len(self.mdh['ChannelNames']))
            ]
            self.data = ListWrap(chans)
        elif filename.endswith('.lsm') and 'LSM.images_number_channels' in self.mdh.keys() and self.mdh['LSM.images_number_channels'] > 1:
            from PYME.IO.dataWrap import ListWrap
            nChans = self.mdh['LSM.images_number_channels']

            chans = []

            for n in range(nChans):
                ds = TiffDataSource.DataSource(filename, None, n)
                ds = BufferedDataSource.DataSource(ds,
                                                   min(ds.getNumSlices(), 50))

                chans.append(ds)

            self.data = ListWrap(chans)

        #from PYME.ParallelTasks.relativeFiles import getRelFilename
        self.seriesName = getRelFilename(filename)

        self.mode = 'default'

        if self.mdh.getOrDefault('ImageType', '') == 'PSF':
            self.mode = 'psf'
        elif self.dataSource.getNumSlices() > 5000:
            #likely to want to localize this
            self.mode = 'LM'
Example #12
    def _loadBioformats(self, filename):
        try:
            import bioformats
        except ImportError:
            logger.exception(
                'Error importing bioformats - is the python-bioformats module installed?'
            )
            raise

        from PYME.IO.DataSources import BioformatsDataSource
        series_num = None

        if '?' in filename:
            #we have a query string to pick the series
            from six.moves import urllib
            filename, query = filename.split('?')

            try:
                series_num = int(urllib.parse.parse_qs(query)['series'][0])
            except KeyError:
                pass

        #mdfn = self.FindAndParseMetadata(filename)
        print("Bioformats:loading data")
        bioformats_file = BioformatsDataSource.BioformatsFile(filename)
        if series_num is None and bioformats_file.series_count > 1:
            print('File has multiple series, need to pick one.')

            if self.haveGUI:
                import wx
                dlg = wx.SingleChoiceDialog(None, 'Series', 'Select a series',
                                            bioformats_file.series_names)
                if dlg.ShowModal() == wx.ID_OK:
                    series_num = dlg.GetSelection()
            else:
                logger.warning('No GUI, using 0th series.')

        self.dataSource = BioformatsDataSource.DataSource(bioformats_file,
                                                          series=series_num)
        self.mdh = MetaDataHandler.NestedClassMDHandler(MetaData.BareBones)

        # NOTE: We are triple-loading metadata. BioformatsDataSource.BioformatsFile.rdr has metadata (hard to access),
        # BioformatsDataSource.BioformatsFile has metadata, and now we are loading the same metadata again here.
        print("Bioformats:loading metadata")
        OMEXML = bioformats.get_omexml_metadata(filename).encode('utf8')
        print("Bioformats:parsing metadata")
        OMEmd = MetaDataHandler.OMEXMLMDHandler(OMEXML)
        self.mdh.copyEntriesFrom(OMEmd)
        print("Bioformats:done")

        #fix voxelsizes if not specified in OME metadata
        if self.haveGUI and ((self.mdh['voxelsize.x'] < 0) or
                             (self.mdh['voxelsize.y'] < 0)):
            from PYME.DSView.voxSizeDialog import VoxSizeDialog

            dlg = VoxSizeDialog(None)
            dlg.ShowModal()

            self.mdh.setEntry('voxelsize.x', dlg.GetVoxX())
            self.mdh.setEntry('voxelsize.y', dlg.GetVoxY())
            self.mdh.setEntry('voxelsize.z', dlg.GetVoxZ())

            dlg.Destroy()

        print(self.dataSource.shape)
        self.dataSource = BufferedDataSource.DataSource(
            self.dataSource, min(self.dataSource.getNumSlices(), 50))
        self.data = self.dataSource  #this will get replaced with a wrapped version

        print(self.data.shape)

        #from PYME.ParallelTasks.relativeFiles import getRelFilename
        self.seriesName = getRelFilename(filename)

        self.mode = 'default'
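
The query-string handling above means a series can be selected non-interactively by appending it to the filename; a sketch (instance and file hypothetical):

    # load the third series (index 2) without triggering the GUI chooser
    image._loadBioformats('cells.lif?series=2')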