Example #1
def simulate_connection(fg_recs, bg_results, classifier, amp, rtime, n_trials=8):
    """Run repeated simulation trials adding a synthetic PSP to recorded background noise.
    """
    import pyqtgraph.multiprocess as mp
    result = {'results': [], 'rise_time': rtime, 'amp': amp}

    sim_results = [None] * n_trials
    with mp.Parallelize(range(n_trials), results=sim_results, workers=8) as tasker:
        for ii in tasker:
            tasker.results[ii] = simulate_response(fg_recs, bg_results, amp, rtime, seed=ii)

    for conn_result, traces in sim_results:
        result['results'].append(conn_result)
        result['traces'] = traces  # overwritten each trial; only the last trial's traces are kept

    pred = classifier.predict(result['results'])
    result['predictions'] = pred['prediction']
    result['confidence'] = pred['confidence']
    # print("\nrise time:", rtime, " amplitude:", amp)
    # print(pred)
        
    return result
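All of these examples share the same pyqtgraph.multiprocess.Parallelize pattern: iterate over the tasks through the tasker, and assign into a named result container that Parallelize forwards back to the parent process. A minimal, self-contained sketch of that pattern (the task and function names here are illustrative, not taken from the examples):

import pyqtgraph.multiprocess as mp

def slow_square(x):
    # stand-in for an expensive per-task computation
    return x * x

tasks = list(range(8))
results = [None] * len(tasks)
with mp.Parallelize(enumerate(tasks), results=results, workers=4) as tasker:
    for i, x in tasker:
        # this body runs in a worker process; item assignment on the
        # named container is forwarded back to the parent's list
        tasker.results[i] = slow_square(x)
print(results)  # [0, 1, 4, 9, 16, 25, 36, 49]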
Example #2
    def make_waveform(self):
        # Compute the stimulus by multiplying carriers with the (exponentialized)
        # envelope.
        if not self.calculated:
            self.calculate_params()
        self.vStim = np.zeros(self.vTime.shape[0])
        # this calculation can be easily parallelized, so we do it
        tasks = list(range(self.nLoopSteps))
        nworkers = mp.Parallelize.suggestedWorkerCount()
        results = [None] * len(tasks)
        with mp.Parallelize(enumerate(tasks),
                            workers=nworkers,
                            results=results) as tasker:
            for i, iStep in tasker:
                result = self.make_wave(iStep)
                tasker.results[i] = result
        for r in results:
            self.vStim = self.vStim + r

        # original non-parallel code, for reference
        # for iStep in range(0, self.nLoopSteps):
        #     w = (np.sin( 2.0*np.pi*self.vCarrierFreq[iStep] * self.vTime + self.vPhases[iStep]) *
        #             10.**( ((self.nAmp/2.0) *
        #             np.sin( 2.0*np.pi*self.vLogFreq[iStep]*self.vRD + self.vPhase) -
        #             self.nAmp/2.0) /20. ))
        #     self.vStim = self.vStim + w

        # Normalization
        self.vStim = self.nGain * self.vStim / np.max(np.fabs(self.vStim))
Example #3
 def set_sound_stim(self, stim, parallel=False):
     """Set a sound stimulus to generate spike trains for all (real) cells
     in this population.
     """
     real = self.real_cells()
     logging.info("Assigning spike trains to %d SGC cells..", len(real))
     if not parallel:
         for i, ind in enumerate(real):
             logging.info("Assigning spike train to SGC %d (%d/%d)", ind, i, len(real))
             cell = self.get_cell(ind)
             cell.set_sound_stim(stim, self.next_seed)
             self.next_seed += 1
             
     else:
         seeds = range(self.next_seed, self.next_seed + len(real))
         self.next_seed = seeds[-1] + 1
         tasks = list(zip(seeds, real))  # list() so len() works under Python 3
         trains = [None] * len(tasks)
         # generate spike trains in parallel
         with mp.Parallelize(enumerate(tasks), trains=trains, progressDialog='Generating SGC spike trains..') as tasker:
             for i, x in tasker:
                 seed, ind = x
                 cell = self.get_cell(ind)
                 train = cell.generate_spiketrain(stim, seed)
                 tasker.trains[i] = train
         # collected all trains; now assign to cells
         for i,ind in enumerate(real):
             cell = self.get_cell(ind)
             cell.set_spiketrain(trains[i])
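Both this example and Example #1 pass an explicit per-task seed rather than trusting worker random state: Parallelize reseeds each worker by default (randomReseed=True), but deriving the seed from the task keeps every train reproducible no matter which worker picks it up. A sketch of the idea, with generate_train as a hypothetical stand-in for cell.generate_spiketrain:

import numpy as np
import pyqtgraph.multiprocess as mp

def generate_train(seed):
    # hypothetical stand-in: draw reproducible spike times from the seed
    rng = np.random.RandomState(seed)
    return np.cumsum(rng.exponential(0.01, size=20))

next_seed = 42
tasks = list(enumerate(range(next_seed, next_seed + 4)))  # (index, seed) pairs
trains = [None] * len(tasks)
with mp.Parallelize(tasks, trains=trains) as tasker:
    for i, seed in tasker:
        tasker.trains[i] = generate_train(seed)
# trains[i] is now identical across runs and worker counts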
Example #4
    def runIV(self, parallelize):
        self.civ = {}
        self.iiv = []
        varsg = np.linspace(0.25, 2.0, int((2.0 - 0.25) / 0.25) + 1)  # covary Ih and gklt in constant ratio
        self.gklts = np.zeros(len(varsg))
        self.ghs = np.zeros(len(varsg))
        if not parallelize:
            for n in range(self.npost):
                self.civ[n] = []
    #            self.iiv[c] = []
            start = time.time()
            for inj in np.arange(-1.0, 1.51, 0.5):
                self.run(mode='IV', temp=34.0, dt=0.025, stimamp=10, iinj=[inj])
                print('ran for current = ', inj)
                for c in range(self.npost):
                    self.civ[c].append(self['v_post%02d' % c])
                    if c == 0:  # just the first
                        self.iiv.append(self['poststim%02d' %  c])
            print( "\nSerial time, %0.2f" % (time.time() - start))
            if runname is not None:
                f = open(runname, 'w')
                pickle.dump({'t': self['t'], 'v': self.civ, 'i': self.iiv}, f)
                f.close()
        else:
            # mp.parallelizer.multiprocessing.cpu_count()
            nworker = 16
            self.npost = len(varsg)
            tasks = range(self.npost)
            results = [None] * len(tasks)
            ivc = [None] * len(tasks)
            start = time.time()
            with mp.Parallelize(enumerate(tasks), results=results, workers=nworker) as tasker:
                for i, x in tasker:
                    post_cell = cells.Bushy.create(species=species)
                    refgbar_klt = post_cell.soma().klt.gbar
                    refgbar_ih = post_cell.soma().ihvcn.gbar
                    gklts = refgbar_klt * varsg[i]
                    ghs = refgbar_ih * varsg[i]
                    post_cell.soma().klt.gbar = gklts
                    post_cell.soma().ihvcn.gbar = ghs
                    post_cell.initial_mechanisms = None  # forget the mechanisms we set up initially
                    post_cell.save_all_mechs()  # and save new ones because we are explicitly varying them
                    ivc[i] = iv_curve.IVCurve()
                    ivc[i].run({'pulse': [(-1., 1.5, 0.25)]}, post_cell)
                    tasker.results[i] = {'v': ivc[i].voltage_traces, 'i': ivc[i].current_traces, 't': ivc[i].time_values, 'gklt': gklts, 'gh': ghs}
            print( "\nParallel time: %d workers,  %0.2f sec" % (nworker, time.time() - start))
            cell_info = {'varrange': varsg}
            print(cell_info)
            res = {'cells': cell_info, 'results': results}
            if runname is not None:
                with open(runname, 'wb') as f:  # binary mode for pickle
                    pickle.dump(res, f)
Example #5
    def run(self,
            temp=34.0,
            dt=0.025,
            seed=575982035,
            reps=10,
            stimulus='tone',
            simulator='cochlea'):
        assert stimulus in ['tone', 'SAM', 'clicks']  # cases available
        assert self.cell in ['bushy', 'tstellate', 'octopus', 'dstellate']
        self.nrep = reps
        self.stimulus = stimulus
        self.run_duration = 0.20  # in seconds
        self.pip_duration = 0.05  # in seconds
        self.pip_start = [0.1]  # in seconds
        self.Fs = 100e3  # in Hz
        self.f0 = 4000.  # stimulus in Hz
        self.cf = 4000.  # SGCs in Hz
        self.fMod = 100.  # mod freq, Hz
        self.dMod = 0.  # modulation depth, %
        self.dbspl = 50.
        self.simulator = simulator
        self.sr = 1  # set SR group
        if self.stimulus == 'SAM':
            self.stim = sound.SAMTone(rate=self.Fs,
                                      duration=self.run_duration,
                                      f0=self.f0,
                                      fmod=self.fMod,
                                      dmod=self.dMod,
                                      dbspl=self.dbspl,
                                      ramp_duration=2.5e-3,
                                      pip_duration=self.pip_duration,
                                      pip_start=self.pip_start)
        if self.stimulus == 'tone':
            self.f0 = 4000.
            self.cf = 4000.
            self.stim = sound.TonePip(rate=self.Fs,
                                      duration=self.run_duration,
                                      f0=self.f0,
                                      dbspl=self.dbspl,
                                      ramp_duration=2.5e-3,
                                      pip_duration=self.pip_duration,
                                      pip_start=self.pip_start)

        if self.stimulus == 'clicks':
            self.click_rate = 0.020  # click interval in seconds (20 ms)
            self.stim = sound.ClickTrain(
                rate=self.Fs,
                duration=self.run_duration,
                f0=self.f0,
                dbspl=self.dbspl,
                click_start=0.010,
                click_duration=100.e-6,
                click_interval=self.click_rate,
                nclicks=int((self.run_duration - 0.01) / self.click_rate),
                ramp_duration=2.5e-3)

        n_sgc = data.get('convergence',
                         species=species,
                         post_type=self.cell,
                         pre_type='sgc')[0]
        self.n_sgc = int(np.round(n_sgc))
        # for simple synapses, need this value:
        self.AMPA_gmax = data.get(
            'sgc_synapse',
            species=species,
            post_type=self.cell,
            field='AMPA_gmax')[0] / 1e3  # convert nS to uS for NEURON
        self.vms = [None for n in range(self.nrep)]
        self.synapses = [None for n in range(self.nrep)]
        self.xmtrs = [None for n in range(self.nrep)]
        self.pre_cells = [None for n in range(self.nrep)]
        self.time = [None for n in range(self.nrep)]
        info = {
            'n_sgc': self.n_sgc,
            'gmax': self.AMPA_gmax,
            'stim': self.stim,
            'simulator': self.simulator,
            'cf': self.cf,
            'sr': self.sr,
            'seed': seed,
            'run_duration': self.run_duration,
            'temp': temp,
            'dt': dt,
            'init': custom_init
        }
        if not self.parallelize:
            for nr in range(self.nrep):
                info['seed'] = seed + 3 * self.n_sgc * nr
                res = RunTrial(self.cell, info)
                # res contains: {'time': time, 'vm': Vm, 'xmtr': xmtr, 'synapses': synapses, 'pre_cells': pre_cells, 'post_cell': post_cell}
                self.pre_cells[nr] = res['pre_cells']
                self.time[nr] = res['time']
                self.xmtr = {k: v.to_python() for k, v in res['xmtr'].items()}
                self.vms[nr] = res['vm']
                self.synapses[nr] = res['synapses']
                self.xmtrs[nr] = self.xmtr

        if self.parallelize:
            ### Use parallelize with multiple workers: run one trial per task,
            ### mirroring the serial branch above.
            tasks = range(self.nrep)
            results = [None] * self.nrep
            start = time.time()
            with mp.Parallelize(enumerate(tasks), results=results) as tasker:
                for i, x in tasker:
                    trial_info = info.copy()
                    trial_info['seed'] = seed + 3 * self.n_sgc * i
                    tasker.results[i] = RunTrial(self.cell, trial_info)
            print("\nParallel time, %d workers: %0.2f" %
                  (mp.Parallelize.suggestedWorkerCount(), time.time() - start))
            for nr, res in enumerate(results):
                self.pre_cells[nr] = res['pre_cells']
                self.time[nr] = res['time']
                self.xmtrs[nr] = {k: v.to_python() for k, v in res['xmtr'].items()}
                self.vms[nr] = res['vm']
                self.synapses[nr] = res['synapses']
Example #6
    def runSound(self, parallelize=False):
        self.civ = {}
        self.iiv = []
        if not parallelize:
            pass

        if parallelize:
            nworker = 16
            varsg = np.linspace(0.25, 2.0, int((2.0 - 0.25) / 0.25) + 1)  # covary Ih and gklt in constant ratio
            self.npost = len(varsg)
            nrep = 25
            tasks = range(self.npost)
            results = [None] * len(tasks)
            ivc = [None] * len(tasks)
            gklts = np.zeros(len(varsg))
            ghs = np.zeros(len(varsg))
            start = time.time()
            seed = 0
            cf = 16000.
            f0 = 16000.
            rundur = 0.25  # seconds
            pipdur = 0.1  # seconds
            dbspl = 50.
            fmod = 40.
            dmod = 0.
            stimulus = 'tone'
            with mp.Parallelize(enumerate(tasks),
                                results=results,
                                workers=nworker) as tasker:
                for i, x in tasker:
                    post_cell = cells.Bushy.create(species=species)
                    h.celsius = 34
                    self.temp = h.celsius
                    refgbar_klt = post_cell.soma().klt.gbar
                    refgbar_ih = post_cell.soma().ihvcn.gbar
                    gklts[i] = refgbar_klt * varsg[i]
                    ghs[i] = refgbar_ih * varsg[i]
                    post_cell.soma().klt.gbar = gklts[i]
                    post_cell.soma().ihvcn.gbar = ghs[i]
                    post_cell.initial_mechanisms = None  # forget the mechanisms we set up initially
                    post_cell.save_all_mechs()  # and save new ones because we are explicitly varying them
                    self.make_stimulus(stimulus=stimulus,
                                       cf=cf,
                                       f0=f0,
                                       rundur=rundur,
                                       pipdur=pipdur,
                                       dbspl=dbspl,
                                       simulator=None,
                                       fmod=fmod,
                                       dmod=dmod)

                    pre_cells = []
                    synapses = []
                    for n in range(self.npre):
                        pre_cells.append(cells.DummySGC(cf=cf, sr=2))
                        synapses.append(pre_cells[n].connect(post_cell,
                                                             type=synapseType))
                    v_reps = []
                    i_reps = []
                    p_reps = []  # pre spike times on the 0th SGC
                    for j in range(nrep):
                        for prec in range(len(pre_cells)):
                            pre_cells[prec].set_sound_stim(self.stim,
                                                           seed=seed)
                            seed += 1
                            # for i in range(synapses[-1].terminal.n_rzones):
                            #     xmtr['xmtr%04d'%j] = h.Vector()
                            #     xmtr['xmtr%04d'%j].record(synapses[-1].terminal.relsite._ref_XMTR[i])
                            # j = j + 1
                            #synapses[-1].terminal.relsite.Dep_Flag = False  # no depression in these simulations
                        #
                        # Run simulation
                        #
                        post_cell.vm0 = None
                        post_cell.cell_initialize()
                        # set starting voltage...
                        post_cell.soma(0.5).v = post_cell.vm0
                        h.dt = 0.02
                        h.t = 0  # run a bit to find true stable rmp
                        h.tstop = 20.
                        h.batch_save()
                        h.batch_run(h.tstop, h.dt, 'v.dat')
                        self['t'] = h._ref_t
                        # set up recordings
                        self['v_post%02d' % j] = post_cell.soma(0.5)._ref_v
                        h.finitialize()  # init and instantiate recordings
                        print('running %d' % i)
                        h.t = 0.
                        h.tstop = rundur * 1000.  # rundur is in seconds.
                        post_cell.check_all_mechs()  # make sure no further changes were introduced before run
                        h.batch_run(h.tstop, h.dt, 'v.dat')
                        v_reps.append(self['v_post%02d' % j])
                        i_reps.append(0. * self['v_post%02d' % j])
                        p_reps.append(pre_cells[0]._stvec.to_python())
                    tasker.results[i] = {
                        'v': v_reps,
                        'i': i_reps,
                        't': self['t'],
                        'pre': pre_cells[0]._stvec.to_python()
                    }
            print("\nParallel time: %d workers,  %0.2f sec" %
                  (nworker, time.time() - start))
            cell_info = {'gklt': gklts, 'gh': ghs}
            stim_info = {
                'nreps': nrep,
                'cf': cf,
                'f0': f0,
                'rundur': rundur,
                'pipdur': pipdur,
                'dbspl': dbspl,
                'fmod': fmod,
                'dmod': dmod
            }
            res = {'cells': cell_info, 'stim': stim_info, 'results': results}
            if runname is not None:
                with open(runname, 'wb') as f:  # binary mode for pickle
                    pickle.dump(res, f)
Example #7
    def generateNormalizationTable(cls, nEvents=1000000):
        ## table looks like this:
        ##   (2 x M x N)
        ##   Axis 0:  (score, mapped)
        ##   Axis 1:  expected number of events  [1, 2, 4, 8, ...]
        ##   Axis 2:  score axis

        ## To map:
        ##    determine axis-1 index by expected number of events
        ##    look up axis-2 index from table[0, ind1]
        ##    look up mapped score at table[1, ind1, ind2]

        ## parameters determining sample space for normalization table
        rate = 1.0
        tVals = 2**np.arange(9)  ## set of tMax values
        nev = (nEvents / (rate * tVals)**0.5).astype(int)  # number of events to generate for each tMax value

        xSteps = 1000
        r = 10**(30. / xSteps)
        xVals = r**np.arange(xSteps)  ## log spacing from 1 to ~10**30 in 1000 steps
        tableShape = (2, len(tVals), len(xVals))

        path = os.path.dirname(__file__)
        cacheFile = os.path.join(
            path, 'test_data/%s_normTable_%s_float64.dat' %
            (cls.__name__, 'x'.join(map(str, tableShape))))

        if os.path.exists(cacheFile):
            with open(cacheFile, 'rb') as fh:
                norm = np.frombuffer(fh.read(), dtype=np.float64).reshape(tableShape)
        else:
            print(
                "Generating poisson score normalization table (will be cached here: %s)"
                % cacheFile)
            norm = np.empty(tableShape)
            counts = []
            with mp.Parallelize(counts=counts) as tasker:
                for task in tasker:
                    count = np.zeros(tableShape[1:], dtype=float)
                    for i, t in enumerate(tVals):
                        n = nev[i] / tasker.numWorkers()
                        for j in range(int(n)):
                            if j % 10000 == 0:
                                print("%d/%d  %d/%d" %
                                      (i, len(tVals), j, int(n)))
                                tasker.process()
                            ev = cls.generateRandom(rate=rate, tMax=t, reps=1)

                            score = cls.score(ev, rate, normalize=False)
                            ind = np.log(score) / np.log(r)
                            count[i, :int(ind) + 1] += 1
                    tasker.counts.append(count)

            count = sum(counts)
            count[count == 0] = 1
            norm[0] = xVals.reshape(1, len(xVals))
            norm[1] = nev.reshape(len(nev), 1) / count

            with open(cacheFile, 'wb') as fh:
                fh.write(norm.tobytes())  # tostring() is deprecated

        return norm
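The lookup recipe in the comments at the top of this example can be written out directly. A short sketch, assuming norm is the (2, M, N) table returned above and expected is the expected event count for a measurement (map_score is an illustrative helper, not part of the original class):

import numpy as np

def map_score(norm, tVals, rate, expected, score):
    # axis-1 index: nearest expected number of events (rate * tMax)
    ind1 = int(np.argmin(np.abs(rate * np.asarray(tVals) - expected)))
    # axis-2 index: position of the raw score along the stored score axis
    ind2 = min(int(np.searchsorted(norm[0, ind1], score)), norm.shape[2] - 1)
    # mapped (normalized) score
    return norm[1, ind1, ind2]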
Example #8
import time
import pyqtgraph as pg
import pyqtgraph.multiprocess as mp

## Setup for this fragment (the pyqtgraph 'parallelize' demo script):
## a simple benchmark task run serially, with one worker, and in parallel.
tasks = range(10)
results = [None] * len(tasks)
results2 = results[:]
results3 = results[:]
size = 2000000

pg.mkQApp()  # ProgressDialog requires a QApplication

### Purely serial processing
start = time.time()
with pg.ProgressDialog('processing serially..', maximum=len(tasks)) as dlg:
    for i, x in enumerate(tasks):
        tot = 0
        for j in range(size):
            tot += j * x
        results[i] = tot
        dlg += 1
        if dlg.wasCanceled():
            raise Exception('processing canceled')
print("Serial time: %0.2f" % (time.time() - start))

### Use parallelize, but force a single worker
### (this simulates the behavior seen on windows, which lacks os.fork)
start = time.time()
with mp.Parallelize(
        enumerate(tasks),
        results=results2,
        workers=1,
        progressDialog='processing serially (using Parallelizer)..') as tasker:
    for i, x in tasker:
        tot = 0
        for j in range(size):
            tot += j * x
        tasker.results[i] = tot
print("\nParallel time, 1 worker: %0.2f" % (time.time() - start))
print("Results match serial:  %s" % str(results2 == results))

### Use parallelize with multiple workers
start = time.time()
with mp.Parallelize(enumerate(tasks),
                    results=results3,
                    progressDialog='processing in parallel..') as tasker:
    for i, x in tasker:
        tot = 0
        for j in range(size):
            tot += j * x
        tasker.results[i] = tot
print("\nParallel time, %d workers: %0.2f" % (mp.Parallelize.suggestedWorkerCount(), time.time() - start))
print("Results match serial:      %s" % str(results3 == results))
Example #9
    def fit_response_trains(self):
        train_responses = self.train_responses
        pulse_offsets = self._pulse_offsets

        from collections import OrderedDict
        import pyqtgraph.multiprocess as mp

        tasks = train_responses.keys()
        results = OrderedDict([(task, None) for task in tasks])
        with mp.Parallelize(enumerate(tasks),
                            results=results,
                            progressDialog='Fitting PSP trains..') as tasker:
            for i, stim_params in tasker:
                grps = train_responses[stim_params]
                pulse_offset = pulse_offsets[stim_params]
                fits = []
                for j, grp in enumerate(grps):
                    avg = grp.bsub_mean()

                    base = np.median(avg.data[:int(10e-3 / avg.dt)])

                    # initial fit; rise_time, decay_tau, and amp_est are initial
                    # estimates defined elsewhere in the original class (not
                    # shown in this excerpt)
                    args = {
                        'yoffset': (base, 'fixed'),
                        'xoffset': (0, -1e-3, 1e-3),
                        'rise_time': (rise_time, rise_time * 0.5, rise_time * 2),
                        'decay_tau': (decay_tau, decay_tau * 0.5, decay_tau * 2),
                        'rise_power': (2, 'fixed'),
                    }

                    # group 0 covers the first 8 pulse offsets, group 1 the remainder
                    pulses = [pulse_offset[:8], pulse_offset[8:]][j]
                    for p, pt in enumerate(pulses):
                        args['xoffset%d' % p] = (pt - pulses[0] + self.pre_pad, 'fixed')
                        args['amp%d' % p] = (amp_est,) + tuple(sorted([0, amp_est * 10]))

                    fit_kws = {
                        'xtol': 1e-4,
                        'maxfev': 3000,
                        'nan_policy': 'omit'
                    }
                    model = PspTrain(len(pulses))
                    fit = model.fit(avg.data,
                                    x=avg.time_values,
                                    params=args,
                                    fit_kws=fit_kws,
                                    method='leastsq')

                    # Fit again with decay tau per event
                    # Slow, but might improve fit amplitudes
                    args = {
                        'yoffset': (base, 'fixed'),
                        'xoffset': (0, -1e-3, 1e-3),
                        'rise_time': (fit.best_values['rise_time'], rise_time * 0.5, rise_time * 2),
                        'decay_tau': (fit.best_values['decay_tau'], decay_tau * 0.5, decay_tau * 2),
                        'rise_power': (2, 'fixed'),
                    }

                    for p, pt in enumerate(pulses):
                        args['xoffset%d' % p] = (fit.best_values['xoffset%d' % p], 'fixed')
                        args['amp%d' % p] = (fit.best_values['amp%d' % p],) + tuple(sorted([0, amp_est * 10]))
                        args['decay_tau_factor%d' % p] = (1, 0.5, 2)

                    fit = model.fit(avg.data,
                                    x=avg.time_values,
                                    params=args,
                                    fit_kws=fit_kws,
                                    method='leastsq')

                    fits.append((fit.best_values, len(pulses)))

                tasker.results[stim_params] = fits

        self._train_fit_results = results
        return results
Example #10
    stimpar = {
        'dur': 0.2,  # value implied by the TonePip comment below
        'pip': 0.04,
        'start': [0.1],
        'baseline': [50, 100],
        'response': [100, 140]
    }
    tasks = []
    for f in fvals:
        for db in levels:
            for i in range(nreps):
                tasks.append((f, db, i))

    results = {}
    workers = 1 if not parallel else None
    tot_runs = len(fvals) * len(levels) * nreps
    with mp.Parallelize(enumerate(tasks),
                        results=results,
                        progressDialog='Running parallel simulation..',
                        workers=workers) as tasker:
        for i, task in tasker:
            f, db, iteration = task
            stim = sound.TonePip(
                rate=100e3,
                duration=stimpar['dur'],
                f0=f,
                dbspl=db,  # dur 0.2 s, pip_start 0.1 s, pip duration 0.04 s
                ramp_duration=2.5e-3,
                pip_duration=stimpar['pip'],
                pip_start=stimpar['start'])

            print("=== Start run %d/%d ===" % (i + 1, tot_runs))
            cachefile = os.path.join(
                cachepath, 'seed=%d_f0=%f_dbspl=%f_syntype=%s_iter=%d.pk' %
                (seed, f, db, syntype, iteration))  # seed, syntype, cachepath come from the surrounding script
Example #11
            # (excerpt begins inside a loop that builds `examples`, a list of
            # synthetic exponential-decay traces; exp_fn and make_noise are
            # defined earlier in the script)
            true_y = exp_fn(x, t)
            y = true_y + make_noise(t)

            ex['x'] = x
            ex['y'] = y
            ex['t'] = t
            ex['true_y'] = true_y
            ex['yoffset'] = x[0]
            ex['amp'] = x[1]
            ex['tau'] = x[2]

            dlg += 1


    results = []
    with mp.Parallelize(range(N), results=results, progressDialog="fitting, don't you think?", workers=1) as tasker:
        for i in tasker:
            ex = examples[i]
            y = ex['y']
            t = ex['t']

            fit = fit_exp(t, y)

            # with workers=1 the appends arrive in task order, so results[i]
            # matches examples[i]
            tasker.results.append((fit.x, fit.fun, fit.success, fit.nfev))


    with pg.ProgressDialog("quantifying life mistakes..", maximum=N) as dlg:
        for i,result in enumerate(results):
            fit_x, fit_err, fit_success, fit_nfev = result
            ex = examples[i]