Code Example #1
    def analyze(self):
        if self.p1.image.ndim == 2:
            self.viewstack()
        self.p1.play(0)
        df = pd.DataFrame()
        peakCount = []
        
        roiSPosx, roiSPosy = self.p1.roi.pos()
        rows = 22
        columns = 8
        with pg.ProgressDialog("Scanning...", 0, rows*columns) as dlg:        
            for i in range(columns):
                for j in range(rows):
                    time, data = self.p1.roiCurve1.getData()
                    df[str((i,j))] = pd.Series(data)                
                    roiPosx, roiPosy = self.p1.roi.pos()
                    roiPosx += 7.5
                    self.p1.roi.setPos([roiPosx,roiPosy])
                    dlg += 1
                roiPosx = roiSPosx
                roiPosy += 20
                self.p1.roi.setPos([roiPosx,roiPosy])
    
            self.win = pg.GraphicsWindow(title="pyqtgraph example: Linked Views")
            self.win.resize(800,800)
            self.win.scene().sigMouseClicked.connect(self.plotzoom)
            
            plotlist = [[]]
            count = 0

        with pg.ProgressDialog("Analyzing...", 0, 20*8) as dlg:               
            for i,x in enumerate(df.keys()):
                if i%10 == 0:
                    self.win.nextRow()
                plotTemp = self.win.addPlot()
                plotTemp.plot(df[x],pen = pg.intColor(i, 100))
                plotTemp.hideAxis('bottom')
                plotTemp.hideAxis('left')
                plotlist.append(plotTemp)
                
                try:
                    datastart, dataend, datalimit = pfz.findpeaks(df[x],12)
                    count += len(datastart)
                    peakCount.append(len(datastart))
                    point = pg.PlotDataItem(x = datastart, y = df[x][datastart], 
                                            pen = None, symbol = 'o', 
                                            symbolBrush = pg.intColor(i, 100),
                                            symbolSize=6)
                    plotTemp.addItem(point)
                except IndexError:
                    pass
                except ValueError:
                    pass
                dlg += 1                    
                
            
        self.p1.roi.setPos([roiSPosx,roiSPosy])
        print('Events per file per ZMW = ' + str(count/(len(df.keys()))))
        filebase = str(os.path.splitext(self.datafilename)[0])
        np.savetxt(filebase + 'peakCount.txt', peakCount, delimiter = ',')
Code Example #2
File: ui.py Project: afcarl/ccfviewer
def download(url, dest, chunksize=1000000):
    """Download a file from *url* and save it to *dest*, while displaying a
    progress bar.
    """
    req = urlopen(url)
    size = req.info().get('content-length')
    size = 0 if size is None else int(size)
    tmpdst = dest + '.partial'
    fh = open(tmpdst, 'wb')
    with pg.ProgressDialog("Downloading\n%s" % url, maximum=size,
                           nested=True) as dlg:
        try:
            tot = 0
            while True:
                chunk = req.read(chunksize)
                if not chunk:  # read() returns b'' at EOF; comparing to '' never matches bytes
                    break
                fh.write(chunk)
                tot += len(chunk)
                dlg.setValue(tot)
                if dlg.wasCanceled():
                    raise Exception("User cancelled download.")
            fh.close()
            os.rename(tmpdst, dest)
        finally:
            if os.path.isfile(tmpdst):
                os.remove(tmpdst)
Code Example #3
File: progressbar.py Project: shixnya/aisynphys
    def __init__(self, message, maximum, update_interval=0.1, mode=None):
        self.message = message
        self.maximum = maximum
        self.value = 0

        if mode is None:
            mode = interactive_mode()

        self.mode = mode
        self.update_dt = update_interval
        self._last_update = 0
        self._need_update = True

        if self.mode == 'qt':
            import pyqtgraph as pg
            self.dlg = pg.ProgressDialog(self.message, maximum=1000)
        elif self.mode == 'file':
            print(message)
            sys.stdout.flush()
            # logging to file; don't need frequent updates.
            self.update_dt = 30.0
        else:
            print(message)
            sys.stdout.flush()
            self.last_line_len = 0
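
The fields stored above (`update_dt`, `_last_update`, `_need_update`) suggest time-based throttling: redraws are skipped when they arrive faster than `update_interval`. A minimal self-contained sketch of that pattern for the text mode, assuming an `update()` method along these lines (the real aisynphys ProgressBar may differ):

import sys
import time

class ThrottledProgress:
    """Text-mode progress reporter that redraws at most once per update_dt."""

    def __init__(self, message, maximum, update_interval=0.1):
        self.message = message
        self.maximum = maximum
        self.update_dt = update_interval
        self._last_update = 0.0

    def update(self, value):
        now = time.time()
        if now - self._last_update < self.update_dt:
            return  # too soon since the last redraw; skip this one
        self._last_update = now
        sys.stdout.write("\r%s: %d/%d" % (self.message, value, self.maximum))
        sys.stdout.flush()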
Code Example #4
    def fit_clicked(self):
        from synaptic_release import ReleaseModel

        self.fit_plot.clear()
        self.fit_plot.show()

        n_sets = len(self.event_sets)
        self.fit_plot.set_shape(n_sets, 1)
        l = self.fit_plot[0, 0].legend
        if l is not None:
            l.scene().removeItem(l)
            self.fit_plot[0, 0].legend = None
        self.fit_plot[0, 0].addLegend()

        for i in range(n_sets):
            self.fit_plot[i, 0].setXLink(self.fit_plot[0, 0])

        spike_sets = []
        for i, evset in enumerate(self.event_sets):
            evset = evset[2][:-1]  # select events, skip last row (average)
            x = np.array([ev['spike_time'] for ev in evset])
            y = np.array([ev['amp'] for ev in evset])
            x -= x[0]
            x *= 1000
            y /= y[0]
            spike_sets.append((x, y))

            self.fit_plot[i, 0].plot(x / 1000., y, pen=None, symbol='o')

        model = ReleaseModel()
        dynamics_types = ['Dep', 'Fac', 'UR', 'SMR', 'DSR']
        for k in dynamics_types:
            model.Dynamics[k] = 0

        fit_params = []
        with pg.ProgressDialog("Fitting release model..", 0,
                               len(dynamics_types)) as dlg:
            for k in dynamics_types:
                model.Dynamics[k] = 1
                fit_params.append(model.run_fit(spike_sets))
                dlg += 1
                if dlg.wasCanceled():
                    return

        max_color = len(fit_params) * 1.5

        for i, params in enumerate(fit_params):
            for j, spikes in enumerate(spike_sets):
                x, y = spikes
                t = np.linspace(0, x.max(), 1000)
                output = model.eval(x, params.values())
                y = output[:, 1]
                x = output[:, 0] / 1000.
                self.fit_plot[j, 0].plot(x,
                                         y,
                                         pen=(i, max_color),
                                         name=dynamics_types[i])
Code Example #5
def runManyStages(i):
    """Iterate over runStage() 3 times while incrementing a progress bar.
    """
    with pg.ProgressDialog("Running stage %s.." % i, maximum=3, nested=True, wait=0) as dlg:
        for j in range(1,4):
            runStage('%d.%d' % (i, j))
            dlg += 1
            if dlg.wasCanceled():
                print("Canceled stage %s" % i)
                break
Code Example #6
def runStage(i):
    """Waste time for 2 seconds while incrementing a progress bar.
    """
    with pg.ProgressDialog("Running stage %s.." % i, maximum=100, nested=True) as dlg:
        for j in range(100):
            time.sleep(0.02)
            dlg += 1
            if dlg.wasCanceled():
                print("Canceled stage %s" % i)
                break
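
Examples #5 and #6 show the idiom that recurs throughout this page: `pg.ProgressDialog` used as a context manager, advanced with `+=`, and polled with `wasCanceled()`. A minimal standalone sketch (the `time.sleep` is a stand-in for real work):

import time
import pyqtgraph as pg

app = pg.mkQApp()  # a Qt application must exist before the dialog can show

with pg.ProgressDialog("Working..", minimum=0, maximum=50) as dlg:
    for i in range(50):
        time.sleep(0.05)       # placeholder for real work
        dlg += 1               # advance the bar one step
        if dlg.wasCanceled():  # honor the Cancel button
            break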
Code Example #7
File: pair_analysis.py Project: shixnya/aisynphys
    def fit_responses(self, latency=None):
        if latency is None:
            latency_window = [0.5e-3, 10e-3]
        else:
            latency_window = [latency - 100e-6, latency + 100e-6]

        with pg.ProgressDialog("curve fitting..",
                               maximum=len(modes) * len(holdings)) as dlg:
            self.last_fit = {}
            for mode in modes:
                for holding in holdings:
                    self.fit_pass = False
                    sign = self.signs[mode][holding].get(
                        self.ctrl_panel.user_params['Synapse call'],
                        self.signs[mode][holding].get(
                            self.ctrl_panel.user_params['Polysynaptic call'],
                            0))

                    # ofp, x_offset, best_fit = fit_avg_response(self.traces, mode, holding, latency, sign)
                    prs = self.sorted_responses[mode, holding]['qc_pass']
                    if len(prs) == 0:
                        dlg += 1
                        continue

                    fit, avg = fit_avg_pulse_response(
                        prs, latency_window=latency_window, sign=sign)
                    fit_ts = avg.copy(data=fit.best_fit)
                    self.last_fit[mode, holding] = fit

                    self.initial_fit_parameters[mode][str(
                        holding)]['xoffset'] = latency
                    self.output_fit_parameters[mode][str(
                        holding)]['nrmse'] = fit.nrmse()
                    self.output_fit_parameters[mode][str(holding)].update(
                        fit.best_values)
                    self.fit_pass = fit.nrmse() < self.nrmse_thresh
                    self.ctrl_panel.output_params.child(
                        'Fit parameters',
                        str(holding) + ' ' + mode.upper(),
                        'Fit Pass').setValue(self.fit_pass)
                    if mode == 'vc':
                        self.vc_plot.plot_fit(fit_ts, holding, self.fit_pass)
                    elif mode == 'ic':
                        self.ic_plot.plot_fit(fit_ts, holding, self.fit_pass)
                    dlg += 1
                    if dlg.wasCanceled():
                        raise Exception("User canceled fit")
        self.fit_params['initial'] = self.initial_fit_parameters
        self.fit_params['fit'] = self.output_fit_parameters

        self.ctrl_panel.update_fit_params(self.fit_params['fit'])
        self.generate_warnings()
Code Example #8
    def scrapePeaks(self):
        """Some peak-finding function with output filtering based on SNR"""

        self.prepGuiParameters()
        self.pk_extracted_by_condi = {}

        for _condi in self.tracedata.keys():
            maxVal = len(self.tPeaks)

            ROI_df = self.tracedata[_condi]
            #print (ROI_df)

            peaksList = []
            progMsg = "Get {0} peaks, {1} set..".format(maxVal, _condi)
            with pg.ProgressDialog(progMsg, 0, maxVal) as dlg:
                dlg.setMinimumWidth(300)
                for t in self.tPeaks:
                    dlg += 1
                    idx = np.searchsorted(ROI_df.index, t)
                    # avoid falling off start or end of columns
                    e = max(idx - self.psr, 0)
                    # zero-based indexing, so add one.
                    l = min(idx + self.psr + 1, len(ROI_df.index))
                    print(t, e, idx, l, ROI_df.iloc[e:l])

                    p = ROI_df.iloc[e:l].max().to_frame().transpose()
                    peaksList.append(p)

                #stick rows together (all have same zero index...)
                peaksdf = pd.concat(peaksList)

                # Overwrite index with the original peak positions
                # (somewhat inexact because of the 'range')
                peaksdf.index = self.tPeaks
                self.pk_extracted_by_condi[_condi] = peaksdf

        # yes, output may be modified below
        self.peaksScraped = True
        self.acceptBtn.setEnabled(True)
        self.getRundownBtn.setEnabled(True)
        self.noiseRB.setEnabled(True)
        self.noiseSB.setEnabled(True)
        self.excludedListedByCondi = {}

        if self.ignore:

            # freshly excludedList peaks from traces with low SNR
            self.maskLowSNR()
            self.splitAllowedExcluded()
Code Example #9
    def download_and_cache(self, resolution=None):
        """Download atlas data, convert to intermediate format, and store in cache
        folder.
        """
        if resolution is None:
            dlg = AtlasResolutionDialog(self.available_resolutions,
                                        self.cached_resolutions.keys())
            dlg.exec_()
            resolution = dlg.selected_resolution()
            if resolution is None:
                raise Exception("No atlas resolution selected.")

        cache_path = self.cache_path(resolution)
        if not os.path.exists(cache_path):
            os.makedirs(cache_path)

        with pg.ProgressDialog("Preparing %dum CCF data" % resolution,
                               maximum=6,
                               nested=True) as dlg:
            image_url = self.image_url.format(resolution=resolution)
            image_file = os.path.join(cache_path, image_url.split('/')[-1])
            image_cache = os.path.join(cache_path, "image.ma")
            if not os.path.exists(image_file):
                download(image_url, image_file)
            dlg += 1

            label_url = self.label_url.format(resolution=resolution)
            label_file = os.path.join(cache_path, label_url.split('/')[-1])
            label_cache = os.path.join(cache_path, "label.ma")
            if not os.path.exists(label_file):
                download(label_url, label_file)
            dlg += 1

            onto_file = os.path.join(cache_path, 'ontology.json')
            if not os.path.exists(onto_file):
                download(self.ontology_url, onto_file)

            self.load_image_data(image_file)
            dlg += 1
            write_file(self.image, image_cache)
            dlg += 1

            self.load_label_data(label_file, onto_file)
            dlg += 1
            write_file(self.label, label_cache)
            dlg += 1

        self.cached_resolutions[resolution] = (image_cache, label_cache)
        return resolution
Code Example #10
File: parallelizer.py Project: qjones81/pyqtgraph
    def __init__(self,
                 tasks=None,
                 workers=None,
                 block=True,
                 progressDialog=None,
                 randomReseed=True,
                 **kwds):
        """
        ===============  ===================================================================
        Arguments:
        tasks            list of objects to be processed (Parallelize will determine how to 
                         distribute the tasks). If unspecified, then each worker will receive
                         a single task with a unique id number.
        workers          number of worker processes or None to use number of CPUs in the 
                         system
        progressDialog   optional dict of arguments for ProgressDialog
                         to update while tasks are processed
        randomReseed     If True, each forked process will reseed its random number generator
                         to ensure independent results. Works with the built-in random
                         and numpy.random.
        kwds             objects to be shared by proxy with child processes (they will 
                         appear as attributes of the tasker)
        ===============  ===================================================================
        """

        ## Generate progress dialog.
        ## Note that we want to avoid letting forked child processes play with progress dialogs..
        self.showProgress = False
        if progressDialog is not None:
            self.showProgress = True
            if isinstance(progressDialog, basestring):
                progressDialog = {'labelText': progressDialog}
            import pyqtgraph as pg
            self.progressDlg = pg.ProgressDialog(**progressDialog)

        if workers is None:
            workers = self.suggestedWorkerCount()
        if not hasattr(os, 'fork'):
            workers = 1
        self.workers = workers
        if tasks is None:
            tasks = range(workers)
        self.tasks = list(tasks)
        self.reseed = randomReseed
        self.kwds = kwds.copy()
        self.kwds['_taskStarted'] = self._taskStarted
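
A hedged usage sketch for this constructor, modeled on pyqtgraph's own parallelize example (see Code Example #20 below). Note the `isinstance` branch above: passing a plain string for `progressDialog` turns it into the dialog's `labelText`.

import pyqtgraph as pg
import pyqtgraph.multiprocess as mp

pg.mkQApp()
tasks = range(10)
results = [None] * len(tasks)
with mp.Parallelize(enumerate(tasks), results=results,
                    progressDialog='processing..') as tasker:
    for i, x in tasker:
        tasker.results[i] = x * x  # placeholder work; written back via the proxy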
Code Example #11
File: baselines.py Project: AGPlested/SAFT
def baselineIterator(data, lam, p, niter=20):
    """ iterate baseline subtraction over dictionary of dataframes of ROI traces """

    #is there a problem with running it twice? No, the problem was peaks were being chucked out

    bdata = {}
    for _set, df in data.items():
        print(
            "Auto baseline for {0} set. lambda: {1:.3f} and p: {2:.3f}".format(
                _set, lam, p))

        maxVal = len(df.columns)
        progMsg = "Auto baseline for {0} traces".format(maxVal)
        with pg.ProgressDialog(progMsg, 0, maxVal) as dlg:
            dlg.setMinimumWidth(300)
            for col in df:
                dlg += 1
                y = np.asarray(df[col])
                # subtract appropriate baseline from each column of df
                df[col] -= baseline_als(y, lam, p, niter=niter, quiet=True)
        bdata[_set] = df
    return bdata
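
`baseline_als` here is SAFT's asymmetric-least-squares baseline routine. For context, a minimal sketch of the standard Eilers & Boelens ALS algorithm it presumably implements (SAFT's actual version may differ, e.g. in its `quiet` flag):

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve

def als_baseline_sketch(y, lam, p, niter=10):
    """Asymmetric least squares: a smooth baseline hugging the lower envelope."""
    L = len(y)
    # second-difference operator, used as the smoothness penalty
    D = sparse.diags([1, -2, 1], [0, -1, -2], shape=(L, L - 2))
    w = np.ones(L)
    for _ in range(niter):
        W = sparse.spdiags(w, 0, L, L)
        Z = W + lam * D.dot(D.transpose())
        z = spsolve(Z, w * y)
        # points above the baseline (peaks) get weight p, points below get 1 - p
        w = p * (y > z) + (1 - p) * (y < z)
    return z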
Code Example #12
File: hdf5.py Project: titusjan/pyqtgraph
def createFile(finalSize=2000000000):
    """Create a large HDF5 data file for testing.
    Data consists of 1M random samples tiled through the end of the array.
    """

    chunk = np.random.normal(size=1000000).astype(np.float32)

    f = h5py.File('test.hdf5', 'w')
    f.create_dataset('data', data=chunk, chunks=True, maxshape=(None, ))
    data = f['data']

    nChunks = finalSize // (chunk.size * chunk.itemsize)
    with pg.ProgressDialog("Generating test.hdf5...", 0, nChunks) as dlg:
        for i in range(nChunks):
            newshape = [data.shape[0] + chunk.shape[0]]
            data.resize(newshape)
            data[-chunk.shape[0]:] = chunk
            dlg += 1
            if dlg.wasCanceled():
                f.close()
                os.remove('test.hdf5')
                sys.exit()
        dlg += 1
    f.close()
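
Once written, the file can be read back lazily; a short sketch, assuming the layout created above (a single resizable float32 dataset named 'data'):

import h5py

with h5py.File('test.hdf5', 'r') as f:
    data = f['data']              # lazy handle; nothing is read yet
    print(data.shape, data.dtype)
    first_chunk = data[:1000000]  # only this slice is pulled from disk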
Code Example #13
File: ProgressDialog.py Project: kuldeepaman/tf-pose
    """Iterate over runStage() 3 times while incrementing a progress bar.
    """
    with pg.ProgressDialog("Running stage %s.." % i,
                           maximum=3,
                           nested=True,
                           wait=0) as dlg:
        for j in range(1, 4):
            runStage('%d.%d' % (i, j))
            dlg += 1
            if dlg.wasCanceled():
                print("Canceled stage %s" % i)
                break


with pg.ProgressDialog("Doing a multi-stage process..",
                       maximum=5,
                       nested=True,
                       wait=0) as dlg1:
    for i in range(1, 6):
        if i == 3:
            # this stage will have 3 nested progress bars
            runManyStages(i)
        else:
            # this stage will have 2 nested progress bars
            runStage(i)

        dlg1 += 1
        if dlg1.wasCanceled():
            print("Canceled process")
            break
Code Example #14
File: node_hydrgrad.py Project: cdd1969/pygwa
    def on_calcAll_requested(self):
        '''
            Calculation of gradient for all timesteps has been requested
        '''
        try:
            info = self.ctrlWidget().selectedWellsInfo()
            if len(info.keys()) < 3:
                raise ValueError(
                    'Select at least 3 wells to calculate gradient')

            datetimeColName = self.ctrlWidget().comboBox_Datetime.currentText()
            if not datetimeColName:
                return

            # now generate long dataframe
            df = self.inputValues()[
                'data']  #pd.DataFrame in the input terminal `data`
            All_df = pd.DataFrame({
                datetimeColName:
                df[datetimeColName],
                'gradient':
                np.zeros(len(df.index)),
                'direction(degrees North)':
                np.zeros(len(df.index))
            })

            # then get the information
            dictForDf = {
                'well': [],
                'x': [],
                'y': []
            }  # dict used for convenient creation of the DataFrame required for the calculations
            for wellName, wellInfo in info.items():
                dictForDf['well'].append(wellName)
                dictForDf['x'].append(wellInfo['x'])
                dictForDf['y'].append(wellInfo['y'])

            with pg.ProgressDialog(
                    "Calculating gradient for All timesteps {0}".format(
                        len(All_df.index)), 0, len(All_df.index)) as dlg:
                for row_i in df.index:
                    row = df.loc[row_i]
                    z = np.zeros(len(dictForDf['well']))
                    for i, well_n in enumerate(dictForDf['well']):
                        z[i] = float(row[info[well_n]['z']])
                    x = np.array(dictForDf['x'])
                    y = np.array(dictForDf['y'])
                    _, gradient, angle = devlin2003(np.matrix([x, y, z]).T)
                    All_df.loc[row_i, 'gradient'] = gradient
                    All_df.loc[row_i,
                               'direction(degrees North)'] = angle2bearing(
                                   angle, origin='N')[0]
                    dlg += 1
                    del z

                    if dlg.wasCanceled():
                        del All_df
                        All_df = None
                        break
                dlg += 1
            self.setOutput(All=All_df)
            self.clearException()
        except:
            self.setOutput(All=None)
            self.ctrlWidget().clear(clearTable=False)
            self.setException(sys.exc_info())

            self.sigOutputChanged.emit(
                self)  ## triggers flowchart to propagate new data
Code Example #15
File: node_gradient.py Project: cdd1969/pygwa
    def process(self, coord, data):
        if data is not None:
            colname = [
                col for col in data.columns if isNumpyDatetime(data[col].dtype)
            ]
            self._ctrlWidget.param('Datetime').setLimits(colname)
            self.data = data
        else:
            self.data = None
            return dict(this=None, All=self.All_out)

        if coord is not None:
            colname = [
                col for col in coord.columns
                if isNumpyNumeric(coord[col].dtype)
            ]
            self._ctrlWidget.param('coords_grp', 'x').setLimits(colname)
            self._ctrlWidget.param('coords_grp', 'y').setLimits(colname)
            self.CW().disconnect_valueChanged2upd(self.CW().param(
                'coords_grp', 'x'))
            self.CW().disconnect_valueChanged2upd(self.CW().param(
                'coords_grp', 'y'))

            self.CW().param('coords_grp', 'x').setValue(colname[0])
            self.CW().param('coords_grp', 'y').setValue(colname[1])
            self.CW().connect_valueChanged2upd(self.CW().param(
                'coords_grp', 'x'))
            self.CW().connect_valueChanged2upd(self.CW().param(
                'coords_grp', 'y'))
        else:
            return dict(this=None, All=self.All_out)

        # now make sure all wells specified in the `coord` dataframe are found in `data`
        well_names = coord.index.values
        for well_n in well_names:
            if well_n not in data.columns:
                raise ValueError(
                    'Well named `{0}` not found in `data` but is declared in `coords`'
                    .format(well_n))

        kwargs = self.ctrlWidget().prepareInputArguments()

        # select the row with the user-specified datetime `timestep`
        row = data.loc[data[kwargs['datetime']] == kwargs['t']]
        if row.empty:
            raise IndexError(
                'Selected timestep `{0}` not found in `data`s column {1}. Select correct one'
                .format(kwargs['t'], kwargs['datetime']))

        # now prepare dataframe for devlin calculations
        df = coord.copy()
        df['z'] = np.zeros(len(df.index))
        for well_n in well_names:
            df.loc[well_n, 'z'] = float(row[well_n])
        gradient, direction = devlin2003pandas(df, kwargs['x'], kwargs['y'],
                                               'z')

        self.CW().param('grad').setValue(gradient)
        self.CW().param('angle').setValue(direction)

        # here we will generate large dataset of all timesteps
        if self.CW().CALCULATE_ALL:
            # now generate long dataframe
            All = pd.DataFrame({
                kwargs['datetime']:
                data[kwargs['datetime']],
                'gradient':
                np.zeros(len(data.index)),
                'direction(degrees North)':
                np.zeros(len(data.index))
            })
            self.All_out = All  # pointer
            with pg.ProgressDialog(
                    "Calculating gradient for All timesteps {0}".format(
                        len(All.index)), 0, len(All.index)) as dlg:
                for row_i in data.index:
                    row = data.loc[row_i]
                    z = np.zeros(len(coord.index))
                    for i, well_n in enumerate(well_names):
                        z[i] = float(row[well_n])
                    x = coord[kwargs['x']].values
                    y = coord[kwargs['y']].values
                    _, gradient, angle = devlin2003(np.matrix([x, y, z]).T)
                    All.loc[row_i, 'gradient'] = gradient
                    All.loc[row_i, 'direction(degrees North)'] = angle2bearing(
                        angle, origin='N')[0]
                    dlg += 1
                    del z

                    if dlg.wasCanceled():
                        del All
                        self.All_out = None
                        break
                        #return dict(df=df, All=self.All_out)
                dlg += 1

        return dict(this=df, All=self.All_out)
Code Example #16
File: visualSpice.py Project: playduck/visualSpice
    def run(self):
        progress = pg.ProgressDialog("simuliert...")
        evaluated = self.mainNodeScene.evaluateGraph()
        progress += 10

        fc = fl.Flowchart()
        # add nodes and build terminals
        for node in self.mainNodeScene.scene().nodes.values():
            fc.addNode(node.userData, node.userData.name())
            for socket in node.sockets:
                node.userData.addInput(socket + "/S/")
            for plug in node.plugs:
                node.userData.addOutput(plug + "/P/")
        progress += 10

        # build connections
        for connection in evaluated:
            fc.connectTerminals(
                connection[0][0].userData.terminals[connection[0][2] + "/P/"],
                connection[1][0].userData.terminals[connection[1][2] + "/S/"])
        progress += 10

        # # debug pg fc interface
        # dialog = QtWidgets.QDialog()
        # layout = QtWidgets.QVBoxLayout()
        # layout.addWidget(fc.widget())
        # dialog.setLayout(layout)
        # dialog.show()
        # dialog.exec_()

        # generate sim file
        shutil.copyfile(os.path.abspath(Config.template),
                        Config.TEMP_DIR + Config.template)
        # TODO Apply Settings

        # run fc
        try:
            fc.process()
            progress += 10
        except Exception as e:
            print("\n>>> Cannot process fc")
            print(e)
            print("\n")
        else:
            try:
                sim = Interface.Interface(Config.TEMP_DIR + Config.template)
                sim.runSim()
            except Exception as e:
                print("\n>>> Cannot execute sim")
                print(e)
                print("\n")
            else:
                progress += 10

                data = sim.readRaw()
                progress += 10

                for node in self.mainNodeScene.scene().nodes.values():
                    if isinstance(node.userData, NodeItem.PlotNode):
                        node.userData.setData(data)

                self.plotViewer.plt.vb.autoRange()
        finally:
            # remove node terminals, cannot use for loop, since terminals dict will be mutated in iterations
            for node in self.mainNodeScene.scene().nodes.values():
                while len(node.userData.terminals) > 1:
                    node.userData.removeTerminal(node.userData.terminals[next(
                        iter(node.userData.terminals))])

            # cleanup
            progress.setValue(100)
            fc.clear()
            fc.close()
            del fc
Code Example #17
File: poisson_score.py Project: tropp/ephysanalysis
                if bestval is None or diff < bestval:
                    bestval = diff
                    best = x
                    bestn = (fp + fn) / 2.
        return best, bestn

    algorithms = [
        ('Poisson Score', PoissonScore.score),
        ('Poisson Score + Amp', PoissonAmpScore.score),
        #('Poisson Multi', PoissonRepeatScore.score),
        #('Poisson Multi + Amp', PoissonRepeatAmpScore.score),
    ]
    app = pg.mkQApp()

    win = pg.GraphicsWindow(border=0.3)
    with pg.ProgressDialog('processing..', maximum=len(tests)) as dlg:
        for i in range(len(tests)):
            first = (i == 0)
            last = (i == len(tests) - 1)

            if first:
                evLabel = win.addLabel('Event amplitude',
                                       angle=-90,
                                       rowspan=len(tests))
            evPlt = win.addPlot()

            plots = []
            for title, fn in algorithms:
                if first:
                    label = win.addLabel(title, angle=-90, rowspan=len(tests))
                plt = win.addPlot()
Code Example #18
                    'xoffset': (50, 0, 100),
                    'yoffset': 0,
                    'amp': (0, -50, 50),
                    'sigma': (5, 1, 20)
                })
plt.plot(x, fit.best_fit, pen='r')

# Instead, brute-force search for the best fit over multiple ranges for xoffset and amp:
amp = [{'amp': (-10, -50, 0)}, {'amp': (10, 0, 50)}]
xoffset = [{'xoffset': (x + 5, x, x + 10)} for x in range(0, 100, 10)]

# Total number of fit attempts is len(amp) * len(xoffset) = 20
search = SearchFit(model, [amp, xoffset],
                   params={
                       'sigma': (5, 1, 20),
                       'yoffset': 0
                   },
                   x=x,
                   data=y)

# Optionally, let the user know how the fit is progressing:
with pg.ProgressDialog("Fitting...", maximum=len(search)) as dlg:
    for result in search.iter_fit():
        print("Init params this iteration:", result['params'])
        dlg += 1
        if dlg.wasCanceled():
            raise Exception("User canceled fit")

plt.plot(x, search.best_result.best_fit, pen='g')
print("Best fit parameters:", search.best_result.best_values)
Code Example #19
File: __init__.py Project: poirotdavid/OpenHandWrite
    def export(cls, file_path, project):
        """
        ReportExporter subclasses should not override this method.        

        export() is called by the MarkWrite application to create and
        save a report.

        Example usage:

            MyReportExporter.export(path_to_output_file, markwrite_app.project)

        :param file_path: Absolute file path to save the report to.
        :param project: The MarkWriteProject instance that will be used
                        for data and further calculations by the datarows
                        method.
        :return: None
        """
        cls.project = project
        try:

            import pyqtgraph
            with codecs.open(file_path, "w", "utf-8") as f:
                rp = cls.preamble()
                if len(rp) > 0:
                    # TODO: Should split into lines and prefix each line
                    # with a 'comment' character(s), like '#' is used in python
                    f.write(rp + cls.nl)
                if len(cls.columnnames()) > 0:
                    f.write(cls.sep.join(cls.columnnames()) + cls.nl)
                rowformatstr = cls.rowformat()
                ri = 0

                if project._mwapp:
                    with pyqtgraph.ProgressDialog(cls.progress_dialog_title,
                                                  0,
                                                  cls.datarowcount(),
                                                  cancelText=None) as dlg:
                        for row in cls.datarows():
                            row.extend([
                                cls.missingval,
                            ] * (cls.columncount() - len(row)))
                            f.write(rowformatstr.format(*row))

                            if ri % cls.progress_update_rate == 0:
                                dlg.setValue(ri)
                            if dlg.wasCanceled():
                                # TODO: Should the incomplete report file be deleted
                                #       if dialog is cancelled?
                                break
                            ri += 1
                else:
                    for row in cls.datarows():
                        row.extend([
                            cls.missingval,
                        ] * (cls.columncount() - len(row)))
                        f.write(rowformatstr.format(*row))
                        ri += 1
            return ri
        except:
            import traceback
            traceback.print_exc()
        finally:
            cls.project = None
        return 0
Code Example #20
File: parallelize.py Project: titusjan/pyqtgraph
##   - once without Parallelize
##   - once with Parallelize, but forced to use a single worker
##   - once with Parallelize automatically determining how many workers to use
##

tasks = range(10)
results = [None] * len(tasks)
results2 = results[:]
results3 = results[:]
size = 2000000

pg.mkQApp()

### Purely serial processing
start = time.time()
with pg.ProgressDialog('processing serially..', maximum=len(tasks)) as dlg:
    for i, x in enumerate(tasks):
        tot = 0
        for j in range(size):
            tot += j * x
        results[i] = tot
        dlg += 1
        if dlg.wasCanceled():
            raise Exception('processing canceled')
print("Serial time: %0.2f" % (time.time() - start))

### Use parallelize, but force a single worker
### (this simulates the behavior seen on windows, which lacks os.fork)
start = time.time()
with mp.Parallelize(
        enumerate(tasks),
Code Example #21
File: viewer.py Project: josemelchor/ccfviewer
def readNRRDLabels(nrrdFile=None, ontologyFile=None):
    """
    Download label files from:
      http://help.brain-map.org/display/mouseconnectivity/API#API-DownloadAtlas

    Download ontology files from:
      http://api.brain-map.org/api/v2/structure_graph_download/1.json

      see:
      http://help.brain-map.org/display/api/Downloading+an+Ontology%27s+Structure+Graph
      http://help.brain-map.org/display/api/Atlas+Drawings+and+Ontologies#AtlasDrawingsandOntologies-StructuresAndOntologies

    This method compresses the annotation data down to a 16-bit array by remapping
    the larger annotations to smaller, unused values.
    """
    global onto, ontology, data, mapping, inds, vxsize, info, ma

    import nrrd
    if nrrdFile is None:
        displayMessage('Select NRRD annotation file')
        nrrdFile = QtGui.QFileDialog.getOpenFileName(
            None, "Select NRRD annotation file")

    if ontologyFile is None:
        displayMessage('Select ontology file (json)')
        ontologyFile = QtGui.QFileDialog.getOpenFileName(
            None, "Select ontology file (json)")

    with pg.ProgressDialog("Loading annotation file...", 0, 5, wait=0) as dlg:
        print "Loading annotation file..."
        app.processEvents()
        # Read ontology and convert to flat table
        onto = json.load(open(ontologyFile, 'rb'))
        onto = parseOntology(onto['msg'][0])
        l1 = max([len(row[2]) for row in onto])
        l2 = max([len(row[3]) for row in onto])
        ontology = np.array(onto,
                            dtype=[('id', 'int32'), ('parent', 'int32'),
                                   ('name', 'S%d' % l1),
                                   ('acronym', 'S%d' % l2), ('color', 'S6')])

        if dlg.wasCanceled():
            return
        dlg += 1

        # read annotation data
        data, header = nrrd.read(nrrdFile)

        if dlg.wasCanceled():
            return
        dlg += 1

        # data must have axes (anterior, dorsal, right)
        # rearrange axes to fit -- CCF data comes in (posterior, inferior, right) order.
        data = data[::-1, ::-1, :]

        if dlg.wasCanceled():
            return
        dlg += 1

    # compress down to uint16
    print "Compressing.."
    u = np.unique(data)

    # decide how to remap label values too large for 16 bits onto unused 16-bit ids
    mask = u <= 2**16 - 1
    next_id = 2**16 - 1
    mapping = OrderedDict()
    inds = set()
    for i in u[mask]:
        mapping[i] = i
        inds.add(i)

    with pg.ProgressDialog("Remapping annotations to 16-bit...",
                           0, (~mask).sum(),
                           wait=0) as dlg:
        app.processEvents()
        for i in u[~mask]:
            while next_id in inds:
                next_id -= 1
            mapping[i] = next_id
            inds.add(next_id)
            data[data == i] = next_id
            ontology['id'][ontology['id'] == i] = next_id
            ontology['parent'][ontology['parent'] == i] = next_id
            if dlg.wasCanceled():
                return
            dlg += 1

    data = data.astype('uint16')
    mapping = np.array(list(mapping.items()))

    # voxel size in um
    vxsize = 1e-6 * float(header['space directions'][0][0])

    info = [{
        'name': 'anterior',
        'values': np.arange(data.shape[0]) * vxsize,
        'units': 'm'
    }, {
        'name': 'dorsal',
        'values': np.arange(data.shape[1]) * vxsize,
        'units': 'm'
    }, {
        'name': 'right',
        'values': np.arange(data.shape[2]) * vxsize,
        'units': 'm'
    }, {
        'vxsize': vxsize,
        'ai_ontology_map': mapping,
        'ontology': ontology
    }]
    ma = metaarray.MetaArray(data, info=info)
    return ma
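
The relabeling loop above rescans the whole volume once per oversized label (`data[data == i] = next_id`). A vectorized alternative sketch, assuming `mapping` is built from the sorted output of `np.unique` as in the code above, so the old ids are sorted ascending and cover every voxel value:

import numpy as np

def remap_labels(data, mapping):
    """Apply (old_id -> new_id) pairs to a label volume in one pass."""
    pairs = np.asarray(mapping)               # shape (N, 2), old ids sorted
    idx = np.searchsorted(pairs[:, 0], data)  # exact-match index per voxel
    return pairs[idx, 1].astype('uint16')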
Code Example #22
File: exp_fit_test.py Project: timjarsky/aisynphys

    for method in methods:
        dtype.extend(method.dtype)
        name = method.name
        dtype.extend([
            (name + '_true_err', float),
        ])
        for par_name in method.params:
            # field name is a best-guess reconstruction; the original line is truncated
            dtype.append((name + '_' + par_name, float))

    examples = np.empty(N, dtype=dtype)

    with pg.ProgressDialog("making some noise..", maximum=N) as dlg:
        for i in range(N):
            ex = examples[i]

            yoffset = np.random.uniform(-80e-3, -60e-3)
            amp = np.random.uniform(-100e-3, 100e-3)
            tau = np.random.uniform(5e-3, 500e-3)

            x = yoffset, amp, tau
            true_y = exp_fn(x, t)
            y = true_y + make_noise(t)

            ex['x'] = x
            ex['y'] = y
            ex['t'] = t
            ex['true_y'] = true_y