Example #1
class PyFaceProgress(IProgress):
    def initialize(self, title, max_index):
        self.current_idx = 0
        self.progress = ProgressDialog(title="Characterizing %d peaks on current image" % max_index,
                                       max=int(max_index), show_time=True, can_cancel=False)
        self.progress.open()
        
    def increment(self):
        self.current_idx+=1
        self.progress.update(self.current_idx)
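
A minimal, runnable sketch of how an adapter like the one above might be driven. Hedged: the driving loop and the 50-unit workload are illustrative, not from the original source; only pyface.api.ProgressDialog is assumed, and PyFaceProgress is the class defined above.

from pyface.api import ProgressDialog

reporter = PyFaceProgress()
reporter.initialize("peaks", 50)   # opens the dialog at 0 of 50
for _ in range(50):
    reporter.increment()           # one tick per characterized peak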
Example #2
	def open_project(self, path):
		with open(path, 'rb') as fp:
			if fp.read(15) != 'Spacetime\nJSON\n':
				raise ValueError('not a valid Spacetime project file')
			data = json.load(fp)

		progress = ProgressDialog(title="Open", message="Loading project", max=len(data)+1, can_cancel=False, parent=self.context.uiparent)
		progress.open()
		with self.context.canvas.hold_delayed():
			self.tabs[0].from_serialized(data.pop(0)[1])
			tabs = [self.tabs[0]]
			progress.update(1)
			# FIXME: check version number and emit warning
			for p, (id, props) in enumerate(data):
				try:
					tab = self.get_new_tab(self.moduleloader.get_class_by_id(id))
					tab.from_serialized(props)
					tabs.append(tab)
				except KeyError:
					support.Message.show(title='Warning', message='Warning: incompatible project file', desc='Ignoring unknown graph id "{0}". Project might not be completely functional.'.format(id))
				progress.update(2+p)
			self.tabs = tabs
			self.project_path = path
			wx.CallAfter(self.clear_project_modified)
			wx.CallAfter(lambda: (progress.update(progress.max), progress.close()))
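
Example #2 defers the final update and close with wx.CallAfter so both run on the wx event loop after the current handler returns, letting the dialog repaint before it is dismissed. A hedged sketch of that idiom in isolation (the work loop is illustrative):

import wx
from pyface.api import ProgressDialog

progress = ProgressDialog(title="Open", message="Loading", max=10)
progress.open()
for i in range(10):
    progress.update(i + 1)   # one unit of work per step
# defer the final repaint and teardown until the event loop is idle
wx.CallAfter(lambda: (progress.update(progress.max), progress.close()))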
Example #3
 def locate_peaks(self):
     peaks=[]
     progress = ProgressDialog(title="Peak finder progress", message="Finding peaks on %s images"%self.numfiles, max=self.numfiles, show_time=True, can_cancel=False)
     progress.open()
     for idx in xrange(self.numfiles):
         self.controller.set_active_index(idx)
         self.data = self.controller.get_active_image()[:]
         self.CC = cv_funcs.xcorr(self.template, self.data)
         # peak finder needs peaks greater than 1.  Multiply by 255 to scale them.
         pks=pc.two_dim_findpeaks(self.CC*255, peak_width=self.peak_width, medfilt_radius=None)
         pks[:,2]=pks[:,2]/255.
         peaks.append(pks)
         progress.update(idx+1)
     self.peaks=peaks
     self.redraw_plots()
Example #4
    def _get_misc_data(self):
        if not self.raw:
            return
        if self.show_gui:
            # progress dialog with indefinite progress bar
            prog = ProgressDialog(title="Loading SQD data...",
                                  message="Loading stim channel data from SQD "
                                  "file ...")
            prog.open()
            prog.update(0)
        else:
            prog = None

        try:
            data, times = self.raw[self.misc_chs]
        except Exception as err:
            if self.show_gui:
                error(None, "Error reading SQD data file: %s (Check the "
                      "terminal output for details)" % str(err),
                      "Error Reading SQD File")
            raise
        finally:
            if self.show_gui:
                prog.close()
        return data
Example #5
	def do_reload(self, info):
		app = info.ui.context['object']
		progress = ProgressDialog(title="Reload", message="Reloading data", max=len(app.tabs)+1, can_cancel=True, parent=app.context.uiparent)
		progress.open()
		with app.drawmgr.hold():
			for i, tab in enumerate(app.tabs):
				tab.reload = True
				cont, skip = progress.update(i+1)
				if not cont or skip:
					break
		progress.update(progress.max)
		progress.close()
Example #6
    def _create_fsaverage_fired(self):
        # progress dialog with indefinite progress bar
        title = "Creating FsAverage ..."
        message = "Copying fsaverage files ..."
        prog = ProgressDialog(title=title, message=message)
        prog.open()
        prog.update(0)

        try:
            self.model.create_fsaverage()
        except Exception as err:
            error(None, str(err), "Error Creating FsAverage")
            raise
        finally:
            prog.close()
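
Examples #4 and #6 construct ProgressDialog without max, which Pyface shows as an indefinite (pulsing) bar. A hedged sketch of that pattern as a reusable helper; with_busy_dialog is illustrative, not part of any API shown here:

from pyface.api import ProgressDialog

def with_busy_dialog(work, title, message):
    # No max: the bar pulses while work() runs.
    prog = ProgressDialog(title=title, message=message)
    prog.open()
    prog.update(0)
    try:
        return work()
    finally:
        prog.close()   # dismiss the dialog even if work() raises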
Example #7
 def locate_peaks(self):
     from hyperspy import peak_char as pc
     peaks=[]
     """from hyperspy.misc.progressbar import ProgressBar, \
         Percentage, RotatingMarker, ETA, Bar
     widgets = ['Locating peaks: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
                ' ', ETA()]
     pbar = ProgressBar(widgets=widgets, maxval=100).start()"""
     progress = ProgressDialog(title="Peak finder progress", message="Finding peaks on %s images"%self.numfiles, max=self.numfiles, show_time=True, can_cancel=False)
     progress.open()
     for idx in xrange(self.numfiles):
         self.CC = cv_funcs.xcorr(self.sig.data[self.tmp_img_idx,
                                                self.top:self.top+self.tmp_size,
                                                self.left:self.left+self.tmp_size],
                                            self.sig.data[idx,:,:])
         # peak finder needs peaks greater than 1.  Multiply by 255 to scale them.
         pks=pc.two_dim_findpeaks(self.CC*255, peak_width=self.peak_width, medfilt_radius=None)
         pks[:,2]=pks[:,2]/255.
         peaks.append(pks)
         progress.update(idx+1)
     self.peaks=peaks
Example #8
	def retime_files(self):
		if not self.gui_active:
			return
		chunksize = self.parent.probe_progress_chunksize
		chunkcount = int(math.ceil(float(len(self.files)) / chunksize))
		if chunkcount > 1:
			progress = ProgressDialog(title="Images", message="Loading images", max=chunkcount, can_cancel=False, parent=self.context.uiparent)
		else:
			progress = gui.support.DummyProgressDialog()

		progress.open()
		with cache.Cache('image_metadata') as c:
			for i, f in enumerate(self.files):
				if i % chunksize == 0:
					progress.update(i / chunksize)
				f.timestamp, f.exposure = self.get_timestamp(c, i, f.path)
			
		self.files = self.sort_files(self.files)
		progress.update(progress.max)
		progress.close()
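
Examples #8 and #9 step the dialog once per chunk of files rather than once per file, which keeps GUI overhead bounded for large directories. A hedged sketch of the chunk arithmetic they share (chunk_steps is illustrative):

import math

def chunk_steps(n_items, chunksize):
    # The dialog's max is the number of chunks, not the number of items.
    chunkcount = int(math.ceil(float(n_items) / chunksize))
    for i in range(n_items):
        if i % chunksize == 0:
            yield i // chunksize   # one progress step per chunk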
Example #9
	def get_files(self, reload=False):
		chunksize = self.parent.probe_progress_chunksize

		if reload or not self.files:
			filenames = glob.glob(os.path.join(self.directory, self.pattern))
			chunkcount = int(math.ceil(float(len(filenames)) / chunksize))
			if chunkcount > 1:
				progress = ProgressDialog(title="Images", message="Loading images", max=chunkcount, can_cancel=False, parent=self.parent.context.uiparent)
			else:
				progress = gui.support.DummyProgressDialog()

			files = []
			with cache.Cache('image_metadata') as c:
				progress.open()
				for i in range(chunkcount):
					eiter = enumerate(filenames[chunksize*i:chunksize*(i+1)], chunksize*i)
					files.extend(ImageFile.probefile(files, c, i, fn, self.get_timestamp) for (i, fn) in eiter)
					progress.update(i)
				progress.update(progress.max)
				progress.close()
			self.files = self.sort_files(files)
Example #10
    def _create_fsaverage_fired(self):
        # progress dialog with indefinite progress bar
        title = "Creating FsAverage ..."
        message = "Copying fsaverage files ..."
        prog = ProgressDialog(title=title, message=message)
        prog.open()
        prog.update(0)

        try:
            self.model.create_fsaverage()
        except Exception as err:
            error(None, str(err), "Error Creating FsAverage")
            raise
        finally:
            prog.close()
Example #11
def task_func(t):
    progress = ProgressDialog(title="progress", message="counting to %d"%t, max=t, show_time=True, can_cancel=True)
    progress.open()

    for i in range(0,t+1):
        time.sleep(1)
        print(i)
        (cont, skip) = progress.update(i)
        if not cont or skip:
            break

    progress.update(t)
Example #12
def task_func(t):
    progress = ProgressDialog(title="progress",
                              message="counting to %d" % t,
                              max=t,
                              show_time=True,
                              can_cancel=True)
    progress.open()

    for i in range(0, t + 1):
        time.sleep(1)
        print(i)
        (cont, skip) = progress.update(i)
        if not cont or skip:
            break

    progress.update(t)
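
The two task_func variants above illustrate the cancellation protocol: update() on a dialog created with can_cancel=True returns a (cont, skip) pair, and cont goes False once the user presses Cancel. A hedged, reusable wrapper around that protocol (run_cancellable is illustrative):

from pyface.api import ProgressDialog

def run_cancellable(items, process, title="Working"):
    progress = ProgressDialog(title=title, max=len(items), can_cancel=True)
    progress.open()
    try:
        for i, item in enumerate(items):
            cont, skip = progress.update(i)   # cont is False after Cancel
            if not cont or skip:
                break
            process(item)
        else:
            progress.update(len(items))       # fill the bar on completion
    finally:
        progress.close()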
Example #13
    def _get_misc_data(self):
        if not self.raw:
            return
        if self.show_gui:
            # progress dialog with indefinite progress bar
            prog = ProgressDialog(title="Loading SQD data...",
                                  message="Loading stim channel data from SQD "
                                  "file ...")
            prog.open()
            prog.update(0)
        else:
            prog = None

        try:
            data, times = self.raw[self.misc_chs]
        except Exception as err:
            if self.show_gui:
                error(None, str(err), "Error Reading SQD File")
            raise
        finally:
            if self.show_gui:
                prog.close()
        return data
Example #14
 def run_model(self, modeldata):
     self.simulation = True
     self.md = modeldata
     self.c_stock = numpy.empty(shape=(0, 10), dtype=numpy.float32)
     self.c_change = numpy.empty(shape=(0, 10), dtype=numpy.float32)
     self.co2_yield = numpy.empty(shape=(0, 3), dtype=numpy.float32)
     self.timemap = defaultdict(list)
     self.area_timemap = defaultdict(list)
     samplesize = self.md.sample_size
     msg = "Simulating %d samples for %d timesteps" % (
         samplesize, self.md.simulation_length)
     progress = ProgressDialog(title="Simulation",
                               message=msg,
                               max=samplesize,
                               show_time=True,
                               can_cancel=True)
     progress.open()
     timesteps = self.md.simulation_length
     self.timestep_length = self.md.timestep_length
     self.ml_run = True
     self.infall = {}
     self.initial_mode = self.md.initial_mode
     if self.initial_mode == 'steady state':
         self.initial_def = self.md.steady_state
     else:
         self.initial_def = self.md.initial_litter
     timemsg = None
     for j in range(samplesize):
         (cont, skip) = progress.update(j)
         if not cont or skip:
             break
         self.draw = True
         self.curr_yr_ind = 0
         self.curr_month_ind = 0
         for k in range(timesteps):
             self._predict_timestep(j, k)
         self.ml_run = False
     self._fill_moment_results()
     progress.update(samplesize)
     if timemsg is not None:
         error(timemsg, title='Error handling timesteps', buttons=['OK'])
     return self.c_stock, self.c_change, self.co2_yield
Example #15
    def _render_animation_fired(self):
        self.stop = True
        n_frames_render = self.render_to_frame - self.render_from_frame
        # prepare the render window
        renwin = self._figure.scene.render_window
        aa_frames = renwin.aa_frames
        renwin.aa_frames = 8
        renwin.alpha_bit_planes = 1
        # turn on off screen rendering
        #renwin.off_screen_rendering = True
        # set size of window
        if self.fix_image_size:
            orig_size = renwin.size
            renwin.size = self.image_size
        # render the frames
        progress = ProgressDialog(title="Rendering", max=n_frames_render, 
                                  show_time=True, can_cancel=True)
        progress.open()
        self.is_rendering_animation = True
        for frame in range(self.render_from_frame, self.render_to_frame + 1):
            # move animation to desired frame, this will also render the scene
            self.current_frame = frame
            # prepare window to image writer
            render = tvtk.WindowToImageFilter(input=renwin, magnification=1)#, input_buffer_type='rgba')
            if not self.fix_image_size:
                render.magnification = self.magnification
            exporter = tvtk.PNGWriter(file_name=path.join(self.render_directory, self.render_name_pattern % frame))

            configure_input(exporter,render)
            exporter.write()
            do_continue, skip = progress.update(frame - self.render_from_frame)
            if not do_continue:
                break
        # reset the render window to old values
        renwin.aa_frames = aa_frames
        if self.fix_image_size:
            renwin.size = orig_size
        #renwin.off_screen_rendering = False
        self.is_rendering_animation = False
        progress.close()
Example #16
    def _get_misc_data(self):
        if not self.raw:
            return
        if self.show_gui:
            # progress dialog with indefinite progress bar
            prog = ProgressDialog(title="Loading SQD data...",
                                  message="Loading stim channel data from SQD "
                                  "file ...")
            prog.open()
            prog.update(0)
        else:
            prog = None

        try:
            data, times = self.raw[self.misc_chs]
        except Exception as err:
            if self.show_gui:
                error(None, str(err), "Error Reading SQD File")
            raise
        finally:
            if self.show_gui:
                prog.close()
        return data
Example #17
 def run_model(self, modeldata):
     self.simulation = True
     self.md = modeldata
     self.c_stock = numpy.empty(shape=(0,10), dtype=numpy.float32)
     self.c_change = numpy.empty(shape=(0,10), dtype=numpy.float32)
     self.co2_yield = numpy.empty(shape=(0,3), dtype=numpy.float32)
     self.timemap = defaultdict(list)
     self.area_timemap = defaultdict(list)
     samplesize = self.md.sample_size
     msg = "Simulating %d samples for %d timesteps" % (samplesize,
                                                 self.md.simulation_length)
     progress = ProgressDialog(title="Simulation", message=msg,
                               max=samplesize, show_time=True,
                               can_cancel=True)
     progress.open()
     timesteps = self.md.simulation_length
     self.timestep_length = self.md.timestep_length
     self.ml_run = True
     self.infall = {}
     self.initial_mode = self.md.initial_mode
     if self.initial_mode=='steady state':
         self.initial_def = self.md.steady_state
     else:
         self.initial_def = self.md.initial_litter
     timemsg = None
     for j in range(samplesize):
         (cont, skip) = progress.update(j)
         if not cont or skip:
             break
         self.draw = True
         self.curr_yr_ind = 0
         self.curr_month_ind = 0
         for k in range(timesteps):
             self._predict_timestep(j, k)
         self.ml_run = False
     self._fill_moment_results()
     progress.update(samplesize)
     if timemsg is not None:
         error(timemsg, title='Error handling timesteps',
               buttons=['OK'])
     return self.c_stock, self.c_change, self.co2_yield
Example #18
    def __getitem__(self, factor_slices):
        '''
        Access to the output_array using factor level indices.

        This method enables access to the values using the syntax

        output_sub_array = pstudy[ f1_level_idx, f2_level_idx, ... ]

        Here the standard numpy indices, including slices and ellipses, can be used.
        '''

        # map the slices within the levels2run array
        # to the indices of the expanded input_table
        #
        n_sims = self._get_slice_n_sims(factor_slices)
        progress = ProgressDialog(title='simulation progress',
                                  message='running %d simulations' % n_sims,
                                  max=n_sims,
                                  show_time=True,
                                  can_cancel=True)
        progress.open()

        run_idx_list = self.levels2run[factor_slices].flatten()
        runs_levels = self.input_table[run_idx_list]
        runs_levels_idx = self.run2levels[run_idx_list]

        # start the computation for each of the runs
        #
        sim_idx = 0
        for run_levels, run_levels_idx in zip(runs_levels, runs_levels_idx):

            # check to see if this run is already in the cache
            #
            outputs = self._output_cache.get(tuple(run_levels), None)
            if outputs is None:

                print 'new simulation', sim_idx

                # Set the factor values of the run in
                # the simulation model
                #
                for factor_name, factor, level in zip(self.factor_names,
                                                      self.factor_list,
                                                      run_levels):
                    level = factor.get_level_value(level)
                    setattr(self.sim_model, factor_name, level)

                # Perform the simulation
                #
                outputs = self.sim_model.peval()

                self.output_array[tuple(run_levels_idx)] = outputs
                self._output_cache[tuple(run_levels)] = outputs

            else:
                print 'cached simulation', sim_idx

            # let the progress bar interfere
            #
            (cont, skip) = progress.update(sim_idx)
            if not cont or skip:
                break
            sim_idx += 1

        progress.update(n_sims)
        return self.output_array[factor_slices]
Example #19
    def read_data(self):
        """ Obtain x_locations, y_locations, data_locations, traces in a context

            Returns:
            ---------
            context: DataContext

        """

        # Check if the filename is valid for reading data
        if not self.file_handle:
            return None

        # Set the file reader at the first char.
        if self.file_handle.closed:
            self.file_handle = file(self.filename, 'rb')

        # Setup a progress dialog
        progress = ProgressDialog(title='Reading Segy Files',
                                  message='Reading Segy Files',
                                  max=100, show_time=True, can_cancel=True)
        progress.open()

        # Skip the card_image_header and binary header
        self.file_handle.seek(Segy.CARD_IMAGE_HEADER_LEN +
                              Segy.BINARY_HEADER_LEN)
        progress.update(1)

        # Check if data lengths are correct.
        x_data_len = struct.calcsize(self.x_format)
        y_data_len = struct.calcsize(self.y_format)
        inline_data_len = struct.calcsize(self.inline_format)
        crossline_data_len = struct.calcsize(self.crossline_format)

        if not (x_data_len == y_data_len and
                y_data_len == inline_data_len and
                inline_data_len == crossline_data_len):
            logger.error('SegyReader: Mismatch in format lengths')
            return None

        if self.scale_format != '':
            scale_data_len = struct.calcsize(self.scale_format)
            if scale_data_len != x_data_len:
                logger.error('SegyReader: Mismatch in format lengths')
                return None

        # Get trace header data of 240 bytes.
        header_data = self.file_handle.read(Segy.TRACE_HEADER_LEN)
        traces, read_error = [], False
        previous_update = 1
        while header_data != '' and not read_error:
            trace = self._read_trace(header_data, x_data_len)
            if trace is None:
                logger.error('SegyReader: Error in reading a trace')
                read_error = True
            else:
                traces.append(trace)
                header_data = self.file_handle.read(Segy.TRACE_HEADER_LEN)

            progress_pc = 1 + int(98.0*float(len(traces))/
                                  float(self.trace_count))
            if progress_pc - previous_update > 1:
                cont_val, skip_val = progress.update(progress_pc)
                previous_update = progress_pc

                # If the user has cancelled the action then stop the import
                # immediately
                if skip_val or not cont_val:
                    del traces
                    self.file_handle.close()
                    return None

        self.file_handle.close()
        progress.update(100)

        if read_error:
            del traces
            return None
        else:
            arr_descriptor = {'names': ('x','y','inline','crossline',
                                        'scale_factor', 'trace'),
                              'formats': ('f4', 'f4', 'f4', 'f4', 'f4',
                                          str(self.samples_per_trace)+'f4')
                              }
            traces = array(traces, dtype=arr_descriptor)
            filesplit = os.path.split(self.filename)
            name = str(os.path.splitext(filesplit[1])[0]).translate(trans_table)
            return DataContext(
                name=name,
                _bindings={'traces':traces['trace'],
                           'x_locations':traces['x'],
                           'y_locations':traces['y'],
                           'inline_values':traces['inline'],
                           'crossline_values':traces['crossline'],
                           'scale_factors':traces['scale_factor']})
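
Example #19 (like #23 below) throttles dialog traffic: trace progress is mapped to a 1-99 percentage and update() is only called when the value has advanced by more than one point, which matters inside a tight read loop. A hedged standalone version of that throttle (update_percent is illustrative):

def update_percent(progress, n_done, n_total, previous_pc):
    # Map completed work to 1..99 and skip redundant dialog updates.
    pc = 1 + int(98.0 * float(n_done) / float(n_total))
    if pc - previous_pc > 1:
        cont, skip = progress.update(pc)
        return pc, (cont and not skip)
    return previous_pc, True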
Example #20
    def __getitem__(self, factor_slices):
        '''
        Access to the output_array using factor level indices.

        This method enables access to the values using the syntax

        output_sub_array = pstudy[ f1_level_idx, f2_level_idx, ... ]

        Here the standard numpy indices, including slices and ellipses, can be used.
        '''

        # map the slices within the levels2run array
        # to the indices of the expanded input_table
        #
        n_sims = self._get_slice_n_sims(factor_slices)
        progress = ProgressDialog(title='simulation progress',
                                  message='running %d simulations' % n_sims,
                                  max=n_sims,
                                  show_time=True,
                                  can_cancel=True)
        progress.open()

        run_idx_list = self.levels2run[factor_slices].flatten()
        runs_levels = self.input_table[run_idx_list]
        runs_levels_idx = self.run2levels[run_idx_list]

        # start the computation for each of the runs
        #
        sim_idx = 0
        for run_levels, run_levels_idx in zip(runs_levels, runs_levels_idx):

            # check to see if this run is already in the cache
            #
            outputs = self._output_cache.get(tuple(run_levels), None)
            if outputs is None:

                print 'new simulation', sim_idx

                # Set the factor values of the run in
                # the simulation model
                #
                for factor_name, factor, level in zip(self.factor_names,
                                                      self.factor_list,
                                                      run_levels):
                    level = factor.get_level_value(level)
                    setattr(self.sim_model, factor_name, level)

                # Perform the simulation
                #
                outputs = self.sim_model.peval()

                self.output_array[tuple(run_levels_idx)] = outputs
                self._output_cache[tuple(run_levels)] = outputs

            else:
                print 'cached simulation', sim_idx

            # let the progress bar interfere
            #
            (cont, skip) = progress.update(sim_idx)
            if not cont or skip:
                break
            sim_idx += 1

        progress.update(n_sims)
        return self.output_array[factor_slices]
Example #21
 def characterize(self, target_locations=None, 
                  target_neighborhood=20, 
                  medfilt_radius=5):
     #print "Main thread?" 
     #print Application.instance().is_main_thread()
     # disable the UI while we're running
     self._toggle_UI(False)
     try:
         # wipe out old results
         self.chest.removeNode('/cell_peaks')        
     except:
         # any errors will be because the table doesn't exist. That's OK.
         pass        
     # locate peaks on the average image to use as target locations.
     #   also determines the number of peaks, which in turn determines
     #   the table columns.
     target_locations = pc.two_dim_findpeaks(self._get_average_image())[:, :2]
     self.numpeaks = int(target_locations.shape[0])
     # generate a list of column names
     names = [('x%i, y%i, dx%i, dy%i, h%i, o%i, e%i, sx%i, sy%i' % ((x,)*9)).split(', ') 
              for x in xrange(self.numpeaks)]
     # flatten that from a list of lists to a simple list
     names = [item for sublist in names for item in sublist]
     # make tuples of each column name and 'f8' for the data type
     dtypes = zip(names, ['f8', ] * self.numpeaks*9)
     # prepend the filename and index columns
     dtypes = [('filename', '|S250'), ('file_idx', 'i4'), ('omit', 'bool')] + dtypes
     # create an empty recarray with our data type
     desc = np.recarray((0,), dtype=dtypes)
     # create the table using the empty description recarray
     self.chest.createTable(self.chest.root,
                            'cell_peaks', description=desc)        
     # for each file in the cell_data group, run analysis.
     nodes = self.chest.listNodes('/cells')
     node_names = [node.name for node in nodes]
     progress = ProgressDialog(title="Peak characterization progress", 
                               message="Characterizing peaks on %d images"%(len(node_names)-2),
                               max=len(node_names)-1, show_time=True, can_cancel=False)
     progress.open()
     file_progress=0
     for node in node_names:
         # exclude some nodes
         if node == 'template':
             continue
         cell_data = self.get_cell_set(node)
         data = np.zeros((cell_data.shape[0]),dtype=dtypes)
         data['filename'] = node
         data['file_idx'] = np.arange(cell_data.shape[0])
         # might want to tweak this loop or cythonize for speed...
         attribs = self._peak_attribs_stack(cell_data,
                         peak_width=self.peak_width, 
                         target_locations=target_locations,
                         target_neighborhood=target_neighborhood,
                         medfilt_radius=medfilt_radius)
         attribs = attribs.T
         # for each column name, copy in the data
         for name_idx in xrange(len(names)):
             data[names[name_idx]] = attribs[:, name_idx]
         # add the data to the table
         self.chest.root.cell_peaks.append(data)
         self.chest.root.cell_peaks.flush()
         file_progress+=1
         progress.update(file_progress)            
     # add an attribute for the total number of peaks recorded
     self.chest.setNodeAttr('/cell_peaks','number_of_peaks', self.numpeaks)
     self.chest.root.cell_peaks.flush()
     self.chest.flush()
     self._can_show_peak_ids = True
     self.parent.image_controller.update_peak_map_choices()
     self._progress_value = 0
     self.log_action(action="Characterize peaks", 
                     target_locations=target_locations, 
                     target_neighborhood=target_neighborhood, 
                     medfilt_radius=medfilt_radius)
     self._toggle_UI(True)
Example #22
	def do_export_movie(self, info):
		context = info.ui.context['object'].context
		
		moviedialog = context.app.export_movie_dialog
		try:
			if not moviedialog.run().result:
				return
		except RuntimeError as e:
			support.Message.show(parent=context.uiparent, message='Nothing to animate', desc=str(e))
			return

		dlg = wx.FileDialog(
			info.ui.control,
			"Save movie",
			context.prefs.get_path('export_movie'),
			"movie." + moviedialog.format,
			"*.{0}|*.{0}|All files (*.*)|*.*".format(moviedialog.format),
			wx.SAVE|wx.OVERWRITE_PROMPT
		)

		if dlg.ShowModal() != wx.ID_OK:
			return
		context.prefs.set_path('export_movie', dlg.GetDirectory())

		# preparations, no harm is done if something goes wrong here
		movie = stdout_cb = stdout = None
		oldfig = context.plot.figure
		progress = ProgressDialog(title="Movie", message="Building movie", max=moviedialog.get_framecount()+2, can_cancel=True, show_time=True, parent=context.uiparent)
		newfig = matplotlib.figure.Figure((moviedialog.frame_width / moviedialog.dpi, moviedialog.frame_height / moviedialog.dpi), moviedialog.dpi)
		canvas = FigureCanvasAgg(newfig)
		drawmgr = context.canvas.relocate(redraw=newfig.canvas.draw)

		finalpath = dlg.GetPath()
		if '%' in finalpath: # to support stuff like -f image2 -c:v png file_%02.png
			temppath = False
		else:
			temppath = finalpath + '.temp'

		class UserCanceled(Exception): pass
		class FFmpegFailed(Exception): pass

		# now the real thing starts. we have to clean up properly
		try:
			progress.open()
			context.plot.relocate(newfig)
			movie = util.FFmpegEncode(
				temppath or finalpath,
				moviedialog.format,
				moviedialog.codec,
				moviedialog.frame_rate,
				(moviedialog.frame_width, moviedialog.frame_height),
				moviedialog.ffmpeg_options.split(),
			)
			stdout_cb = movie.spawnstdoutthread()
			drawmgr.rebuild()
			progress.update(1)

			iters = tuple(i() for i in moviedialog.get_animate_functions())
			frameiter = enumerate(itertools.izip_longest(*iters))
			while True:
				with drawmgr.hold():
					try:
						frameno, void = frameiter.next()
					except StopIteration:
						progress.update(progress.max-1)
						break
				movie.writeframe(newfig.canvas.tostring_rgb())
				(cont, skip) = progress.update(frameno+2)
				if not cont or skip:
					raise UserCanceled()
			
			ret = movie.close()	
			if ret != 0:
				raise FFmpegFailed('ffmpeg returned {0}'.format(ret))
			stdout = stdout_cb()
			stdout_cb = None
			if temppath:
				shutil.move(temppath, finalpath)
			progress.update(progress.max)

		except UserCanceled:
			movie.abort()
			return
		except:
			if movie:
				movie.close()
			if stdout_cb:
				try:
					stdout = stdout_cb()
				except:
					pass
			stdout_cb = None

			support.Message.show(
				parent=context.uiparent, 
				message='Movie export failed', title='Exception occurred',
				desc='Something went wrong while exporting the movie. Detailed debugging information can be found below.',
				bt="FFMPEG output:\n{0}\n\nBACKTRACE:\n{1}".format(stdout, traceback.format_exc())
			)
			return
		finally:
			if stdout_cb:
				stdout = stdout_cb()
			progress.close()
			try:
				os.remove(temppath)
			except:
				pass
			context.plot.relocate(oldfig)
			context.canvas.rebuild()

		support.Message.show(
			parent=context.uiparent,
			title='Movie complete', message='Movie complete',
			desc='The movie has been successfully generated.\nFor debugging purposes, the full FFmpeg output can be found below.',
			bt=stdout
		)
Example #23
    def read_data(self):
        """ Obtain x_locations, y_locations, data_locations, traces in a context

            Returns:
            ---------
            context: DataContext

        """

        # Check if the filename is valid for reading data
        if not self.file_handle:
            return None

        # Set the file reader at the first char.
        if self.file_handle.closed:
            self.file_handle = file(self.filename, 'rb')

        # Setup a progress dialog
        progress = ProgressDialog(title='Reading Segy Files',
                                  message='Reading Segy Files',
                                  max=100,
                                  show_time=True,
                                  can_cancel=True)
        progress.open()

        # Skip the card_image_header and binary header
        self.file_handle.seek(Segy.CARD_IMAGE_HEADER_LEN +
                              Segy.BINARY_HEADER_LEN)
        progress.update(1)

        # Check if data lengths are correct.
        x_data_len = struct.calcsize(self.x_format)
        y_data_len = struct.calcsize(self.y_format)
        inline_data_len = struct.calcsize(self.inline_format)
        crossline_data_len = struct.calcsize(self.crossline_format)

        if not (x_data_len == y_data_len and y_data_len == inline_data_len
                and inline_data_len == crossline_data_len):
            logger.error('SegyReader: Mismatch in format lengths')
            return None

        if self.scale_format != '':
            scale_data_len = struct.calcsize(self.scale_format)
            if scale_data_len != x_data_len:
                logger.error('SegyReader: Mismatch in format lengths')
                return None

        # Get trace header data of 240 bytes.
        header_data = self.file_handle.read(Segy.TRACE_HEADER_LEN)
        traces, read_error = [], False
        previous_update = 1
        while header_data != '' and not read_error:
            trace = self._read_trace(header_data, x_data_len)
            if trace is None:
                logger.error('SegyReader: Error in reading a trace')
                read_error = True
            else:
                traces.append(trace)
                header_data = self.file_handle.read(Segy.TRACE_HEADER_LEN)

            progress_pc = 1 + int(
                98.0 * float(len(traces)) / float(self.trace_count))
            if progress_pc - previous_update > 1:
                cont_val, skip_val = progress.update(progress_pc)
                previous_update = progress_pc

                # If the user has cancelled the action then stop the import
                # immediately
                if skip_val or not cont_val:
                    del traces
                    self.file_handle.close()
                    return None

        self.file_handle.close()
        progress.update(100)

        if read_error:
            del traces
            return None
        else:
            arr_descriptor = {
                'names':
                ('x', 'y', 'inline', 'crossline', 'scale_factor', 'trace'),
                'formats': ('f4', 'f4', 'f4', 'f4', 'f4',
                            str(self.samples_per_trace) + 'f4')
            }
            traces = array(traces, dtype=arr_descriptor)
            filesplit = os.path.split(self.filename)
            name = str(os.path.splitext(
                filesplit[1])[0]).translate(trans_table)
            return DataContext(name=name,
                               _bindings={
                                   'traces': traces['trace'],
                                   'x_locations': traces['x'],
                                   'y_locations': traces['y'],
                                   'inline_values': traces['inline'],
                                   'crossline_values': traces['crossline'],
                                   'scale_factors': traces['scale_factor']
                               })
Example #24
 def initialize(self, title, max_index):
     self.progress = ProgressDialog(title="Characterizing %d peaks on current image"%max_index, 
                       max=int(max_index), show_time=True, can_cancel=False)
     self.progress.open()
Example #25
 def _progress_dialog_default(self):
     return ProgressDialog(
         min=0,
         max=100,
     )
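
Example #25's _progress_dialog_default follows the Traits naming convention _<trait>_default: Traits calls it lazily the first time the trait is read. A hedged sketch of a surrounding class (assumed for illustration; not part of the original snippet):

from traits.api import HasTraits, Instance
from pyface.api import ProgressDialog

class Worker(HasTraits):
    progress_dialog = Instance(ProgressDialog)

    def _progress_dialog_default(self):
        # built lazily on first access to self.progress_dialog
        return ProgressDialog(min=0, max=100)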
Example #26
 def _peak_attribs_stack(self, stack, peak_width, target_locations=None,
                        target_neighborhood=20, medfilt_radius=5,
                        mask = True):
     """
     Characterizes the peaks in a stack of images.
 
         Parameters:
         ----------
 
         peak_width : int (required)
                 expected peak width.  Too big, and you'll include other peaks
                 in measurements.
 
         target_locations : numpy array (n x 2)
                 array of n target locations.  If left as None, will create 
                 target locations by locating peaks on the average image of the stack.
                 default is None (peaks detected from average image)
 
         target_neighborhood : int
                 pixel neighborhood to limit peak search to.  Peaks outside the
                 square defined by 2x this value around the peak will be excluded
                 from any fitting.  
 
         medfilt_radius : int (optional)
                 median filter window to apply to smooth the data
                 (see scipy.signal.medfilt)
                 if 0, no filter will be applied.
                 default is set to 5
 
        Returns:
        -------
        2D  numpy array:
         - One column per image
         - 9 rows per peak located
             0,1 - location
             2,3 - difference between location and target location
             4 - height
             5 - orientation
             6 - eccentricity
             7,8 - skew X, Y, respectively
 
     """
     avgImage=np.average(stack,axis=0)
     if target_locations is None:
         # get peak locations from the average image
         # an initial value for the peak width of 11 pixels works
         #   OK to find initial peaks.  We determine a proper value
         #   soon.
         target_locations=pc.two_dim_findpeaks(avgImage, 10)
     
     peak_width = 0.75*pc.min_peak_distance(target_locations)
     if peak_width < 10:
         peak_width = 10        
 
     if mask:
         mask = pc.draw_mask(avgImage.shape,
                             peak_width/2.0,
                             target_locations)            
         stack *= mask
     # get all peaks on all images
     peaks=pc.stack_coords(stack, peak_width=peak_width)
     # two loops here - outer loop loops over images (i index)
     # inner loop loops over target peak locations (j index)
     peak_locations=np.array([[pc.best_match(peaks[:,:,i], 
                                          target_locations[j,:2], 
                                          target_neighborhood) \
                               for i in xrange(peaks.shape[2])] \
                               for j in xrange(target_locations.shape[0])])
 
     # pre-allocate result array.  9 rows for each peak, 1 column for each image
     rlt = np.zeros((9*peak_locations.shape[0],stack.shape[0]))
     rlt_tmp = np.zeros((peak_locations.shape[0],7))
     
     progress = ProgressDialog(title="Peak characterization progress", 
                               message="Characterizing peaks on %d cells"%stack.shape[0], 
                               max=int(stack.shape[0]), show_time=True, can_cancel=False)
     progress.open()
     
     for i in xrange(stack.shape[0]):
         progress.update(int(i+1))
         rlt_tmp=pc.peak_attribs_image(stack[i,:,:], 
                                    target_locations=peak_locations[:,i,:], 
                                    peak_width=peak_width, 
                                    medfilt_radius=medfilt_radius, 
                                    )
         diff_coords=target_locations[:,:2]-rlt_tmp[:,:2]
         for j in xrange(target_locations.shape[0]):
             # peak position
             rlt[ j*9   : j*9+2 ,i] = rlt_tmp[j,:2]
             # difference in peak position relative to average
             rlt[ j*9+2 : j*9+4 ,i] = diff_coords[j]
             # height
             rlt[ j*9+4         ,i]=rlt_tmp[j,2]
             # orientation
             rlt[ j*9+5         ,i]=rlt_tmp[j,3]
             # eccentricity
             rlt[ j*9+6         ,i]=rlt_tmp[j,4]
             # skew (x and y)
             rlt[ j*9+7 : j*9+9 ,i]=rlt_tmp[j,5:]
     return rlt
Example #27
def peak_attribs_image(image, peak_width=None, target_locations=None, medfilt_radius=5):
    """
    Characterizes the peaks in an image.

        Parameters:
        ----------

        peak_width : int (optional)
                expected peak width.  Affects characteristic fitting window.
                Too big, and you'll include other peaks in the measurement.  
                Too small, and you'll get spurious peaks around your peaks.
                Default is None (attempts to auto-detect)

        target_locations : numpy array (n x 2)
                array of n target locations.  If left as None, will create 
                target locations by locating peaks on the average image of the stack.
                default is None (peaks detected from average image)

        medfilt_radius : int (optional)
                median filter window to apply to smooth the data
                (see scipy.signal.medfilt)
                if 0, no filter will be applied.
                default is set to 5

        Returns:
        -------

        2D numpy array:
        - One row per peak
        - 9 columns:
          0,1 - location
          2 - height
          3,4 - long and short axis length
          5 - orientation
          6 - eccentricity
          7,8 - skew

    """
    try:
        import cv
    except:
        try:
            import cv2.cv as cv
        except:
            print 'Module %s:' % sys.modules[__name__]
            print 'OpenCV is not available, the peak characterization functions will not work.'
            return None
    if medfilt_radius:
        image=medfilt(image,medfilt_radius)
    if target_locations is None:
        # target locations should be a list of arrays.  Each element in the 
        #    list corresponds to a recursion level in peak finding.  The peak 
        #    width can change with each recursion.
        target_locations=two_dim_findpeaks(image, peak_width=peak_width, medfilt_radius=5,coords_list=[])
    imsize=image.shape[0]
    
    total_peaks = np.array([arr.shape[0] for arr in target_locations]).sum()
    rlt=np.zeros((total_peaks,9))
    
    # TODO: this should be abstracted to use whatever graphical 
    #       or command-line environment we're in.
    progress = ProgressDialog(title="Peak characterization progress", 
                              message="Characterizing %d peaks on current image"%total_peaks, 
                              max=int(total_peaks), show_time=True, can_cancel=False)
    progress.open()
    
    rlt_offset=0
    progress_ctr=0

    for recursed_peak_array in target_locations:
        peak_width = recursed_peak_array[0][3]
        r = int(np.ceil(peak_width / 2))
        roi=np.zeros((r*2,r*2))
        mask = draw_mask((r*2,r*2), r, [(r,r)])
        for loc in xrange(recursed_peak_array.shape[0]):
            c=recursed_peak_array[loc]
            peak_left=c[0]-r
            peak_top=c[1]-r
            peak_right=c[0]+r
            peak_bottom=c[1]+r
            # skip peaks that are too close to edges.
            if (peak_right)>image.shape[1]+r/4 or (peak_bottom)>image.shape[0]+r/4:
                progress_ctr+=1
                continue
            # extract the ROI around the peak, clipped to the image bounds
            x = np.array(np.arange(peak_left, peak_right),dtype=np.integer)
            y = np.array(np.arange(peak_top, peak_bottom),dtype=np.integer)
            xv,yv = np.meshgrid(x,y)
            roi[:,:] = image[yv.clip(0,image.shape[0]-1),
                             xv.clip(0,image.shape[1]-1)] * mask
            ms=cv.Moments(cv.fromarray(roi))
            # output from get_characteristics is:
            # x, y, height, long_axis, short_axis, orientation, eccentricity, skew_x, skew_y
            rlt[rlt_offset] = get_characteristics(ms)
        
            # order for these is:
            # amp, xShift, yShift, xWidth, height, yWidth, Rotation
            #  WTF???  Why is this different from return order!?
            # I'm a control freak...
            limit_min = [True, True, True, True, True, True, True]
            limit_max = [True, True, True, True, True, True, True]
            
            # 30 pixels seems like a hell of a lot for a peak...
            max_width=30
            max_height = 1.2*np.max(roi)
            ctr = np.array(roi.shape)/2
            min_height = np.mean(image)/1.5
            
            x = rlt[rlt_offset][0]
            y = rlt[rlt_offset][1]
            amp = image[peak_top+y,peak_left+x]
            long_axis = rlt[rlt_offset][3]
            short_axis = rlt[rlt_offset][4] 
            orientation = rlt[rlt_offset][5]
            height = 0
            params = [amp, x, y, long_axis, height, short_axis, orientation]
            
            minpars = [min_height, x-2, y-2, 0, 0, 0, 0]
            maxpars = [max_height, x+2, y+2, max_width, 0, max_width, 360]
            
            # TODO: could use existing moments or parameters to speed up...
            
            amp, fit_x, fit_y, width_x, height, width_y, rot = gaussfit(roi,
                                        limitedmin=limit_min, 
                                        limitedmax=limit_max,
                                        maxpars=maxpars,
                                        minpars=minpars,
                                        params=params
                                        )
            # x and y are the locations within the ROI.  Add the coordinates of 
            #    the top-left corner of our ROI on the global image.
            rlt[rlt_offset,:2] = (np.array([peak_top,peak_left]) + np.array([fit_y,fit_x]))
            #rlt[loc,:2] = np.array([y,x])
            # insert the height
            rlt[rlt_offset,2]=amp+height
            # TODO: compare this with OpenCV moment calculation above:
            #  (this is using the gaussfitter value)
            rlt[rlt_offset,5]=rot
            rlt_offset+=1
            progress_ctr+=1
            progress.update(int(progress_ctr))
    # chuck outliers based on median of height
    d = np.abs(rlt[:,2] - np.median(rlt[:,2]))
    mdev = np.median(d)
    s = d/mdev if mdev else 0
    # kill outliers based on height
    rlt=rlt[np.logical_and(s<10, rlt[:,2]<image.max()*1.3)]
    # kill peaks that are too close to other peaks
    # the minimum width is 1.5 times the smallest peak width.
    min_width = 1.5*target_locations[-1][0][3]
    #rlt = kill_duplicates(rlt,min_width)
    return rlt
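
The outlier rejection near the end of Example #27 is a median-absolute-deviation filter on peak heights. A hedged standalone version of that step (mad_keep_mask is illustrative; the threshold of 10 is taken from the example):

import numpy as np

def mad_keep_mask(heights, threshold=10):
    # True for entries within `threshold` median absolute deviations
    # of the median height, mirroring the filtering step above.
    d = np.abs(heights - np.median(heights))
    mdev = np.median(d)
    s = d / mdev if mdev else np.zeros_like(d)
    return s < threshold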