Example #1
    def tempo_search(db, Key, tempo):
        """
        ::

            Static tempo-invariant search
            Returns search results for query resampled over a range of tempos.
        """
        if not db.configCheck():
            print("Failed configCheck in query spec.")
            print(db.configQuery)
            return None
        prop = 1.0 / tempo  # the proportion of original samples required for new tempo
        qconf = db.configQuery.copy()
        X = db.retrieve_datum(Key)
        P = db.retrieve_datum(Key, powers=True)
        X_m = pylab.mat(X.mean(0))
        X_resamp = pylab.array(adb.resample_vector(X - pylab.mat(pylab.ones(X.shape[0])).T * X_m, prop))
        X_resamp += pylab.mat(pylab.ones(X_resamp.shape[0])).T * X_m
        P_resamp = pylab.array(adb.resample_vector(P, prop))
        seqStart = int(pylab.around(qconf["seqStart"] * prop))
        qconf["seqStart"] = seqStart
        seqLength = int(pylab.around(qconf["seqLength"] * prop))
        qconf["seqLength"] = seqLength
        tmpconf = db.configQuery
        db.configQuery = qconf
        res = db.query_data(featData=X_resamp, powerData=P_resamp)
        res_resorted = adb.sort_search_result(res.rawData)
        db.configQuery = tmpconf
        return res_resorted
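# A quick check of the resampling arithmetic used above (illustrative numbers):
# at tempo 1.25 the proportion is 1/1.25 = 0.8, so a 100-frame query is
# resampled to 80 frames, and seqStart/seqLength shrink by the same factor.
tempo = 1.25
prop = 1.0 / tempo
print(int(round(100 * prop)))  # -> 80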
Example #2
def fit_disaggregation_model(model):
    """Disaggregation approach"""
    ## Spline model to represent age-specific rate
    model.vars += dismod3.age_pattern.spline(name='dis', ages=model.ages,
                                             knots=knots,
                                             smoothing=pl.inf,
                                             interpolation_method='linear')

    ## Disaggregate input data
    a = []
    p = []
    n = []
    for i in model.input_data.index:
        a_s, a_e = model.input_data.ix[i, 'age_start'], model.input_data.ix[i, 'age_end']
        a += range(a_s, a_e)
        p += [model.input_data.ix[i, 'value']] * (a_e - a_s)
        n += [float(model.input_data.ix[i, 'effective_sample_size']) / (a_e - a_s)] * (a_e - a_s)
    a = pl.array(a)
    p = pl.array(p)
    n = pl.array(n)

    model.vars['pi'] = mc.Lambda('pi_dis', lambda mu=model.vars['mu_age'], a=a: mu[a])
    delta = mc.Uniform('delta_mid', 0., 1000., value=10.)
    model.vars += {'delta': delta}

    ## Negative binomial rate model
    model.vars += dismod3.rate_model.neg_binom(name='dis',
                                               pi=model.vars['pi'],
                                               delta=model.vars['delta'],
                                               p=p,  # TODO: change this parameter name to "r" to match the book chapter
                                               n=n)

    fit_model(model)
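# Worked micro-example of the disaggregation step above (hypothetical row):
# an observation covering age_start=10 to age_end=13 with value=0.2 and
# effective_sample_size=300 is spread over single years of age, and the
# sample size is split evenly across them.
a_s, a_e, value, ess = 10, 13, 0.2, 300.0
print(list(range(a_s, a_e)))               # ages  -> [10, 11, 12]
print([value] * (a_e - a_s))               # rates -> [0.2, 0.2, 0.2]
print([ess / (a_e - a_s)] * (a_e - a_s))   # n     -> [100.0, 100.0, 100.0]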
Example #3
def fit_disaggregation_model(model):
    """Disaggregation approach"""
    ## Spline model to represent age-specific rate
    model.vars += dismod3.age_pattern.spline(
        name="dis", ages=model.ages, knots=knots, smoothing=pl.inf, interpolation_method="linear"
    )

    ## Disaggregate input data
    a = []
    p = []
    n = []
    for i in model.input_data.index:
        a_s, a_e = model.input_data.ix[i, "age_start"], model.input_data.ix[i, "age_end"]
        a += range(a_s, a_e)
        p += [model.input_data.ix[i, "value"]] * (a_e - a_s)
        n += [float(model.input_data.ix[i, "effective_sample_size"]) / (a_e - a_s)] * (a_e - a_s)
    a = pl.array(a)
    p = pl.array(p)
    n = pl.array(n)

    model.vars["pi"] = mc.Lambda("pi_dis", lambda mu=model.vars["mu_age"], a=a: mu[a])
    model.vars["delta"] = mc.Uniform("delta_dis", 0.0, 1000.0, value=10.0)

    ## Negative binomial rate model
    model.vars += dismod3.rate_model.neg_binom(
        name="dis",
        pi=model.vars["pi"],
        delta=model.vars["delta"],
        p=p,  # TODO: change this parameter name to "r" to match the book chapter
        n=n,
    )

    fit_model(model)
Example #4
def fitData2(fileName):
    '''
    Models predictions using Terman's law model (cubic fit) and
    Hooke's law (linear fit).

    Hooke's law holds up to the point where the spring reaches its
    elastic limit - where it stops behaving as a spring and instead acts
    like a rope (it doesn't get longer as more weight is hung on it).
    '''
    xVals, yVals = getData(fileName)
    extX = pylab.array(xVals + [1.05, 1.1, 1.15, 1.2, 1.25])
    xVals = pylab.array(xVals)
    yVals = pylab.array(yVals)
    xVals = xVals*9.81  # convert mass to force (F = mg)
    extX = extX*9.81    # convert mass to force (F = mg)
    pylab.plot(xVals, yVals, 'bo', label = 'Measured displacements')
    pylab.title('Measured Displacement of Spring')
    pylab.xlabel('|Force| (Newtons)')
    pylab.ylabel('Distance (meters)')
    a,b = pylab.polyfit(xVals, yVals, 1)
    estYVals = a*extX + b
    pylab.plot(extX, estYVals, label = 'Linear fit')
    a,b,c,d = pylab.polyfit(xVals, yVals, 3)
    estYVals = a*(extX**3) + b*extX**2 + c*extX + d
    pylab.plot(extX, estYVals, label = 'Cubic fit')
    pylab.legend(loc = 'best')
Example #5
    def t(self, k, cosTheta, pk, c):
        """
        Raw trispectrum

        Not recently tested
        """
        pk = c.pkInterp(k)  # note: the pk argument is immediately recomputed from the interpolator
        f2term = (
            self.tf21(0, 1, 2, k, cosTheta, pk, c)
            + self.tf21(1, 2, 0, k, cosTheta, pk, c)
            + self.tf21(2, 0, 1, k, cosTheta, pk, c)
            + self.tf21(1, 2, 3, k, cosTheta, pk, c)
            + self.tf21(2, 3, 1, k, cosTheta, pk, c)
            + self.tf21(3, 1, 2, k, cosTheta, pk, c)
            + self.tf21(2, 3, 0, k, cosTheta, pk, c)
            + self.tf21(3, 0, 2, k, cosTheta, pk, c)
            + self.tf21(0, 2, 3, k, cosTheta, pk, c)
            + self.tf21(3, 0, 1, k, cosTheta, pk, c)
            + self.tf21(0, 1, 3, k, cosTheta, pk, c)
            + self.tf21(1, 3, 0, k, cosTheta, pk, c)
        ) * 4.0

        f3term = (
            self.tf31(M.array([0, 1, 2]), k, cosTheta, pk)
            + self.tf31(M.array([1, 2, 3]), k, cosTheta, pk)
            + self.tf31(M.array([2, 3, 1]), k, cosTheta, pk)
            + self.tf31(M.array([3, 1, 2]), k, cosTheta, pk)
        ) * 6.0

        # print(cosTheta, f2term, f3term, f2term + f3term)
        return f2term + f3term
Example #6
def plot_elecs_and_neurons(neuron_dict, ext_sim_dict, neural_sim_dict):
    pl.close('all')
    fig_all = pl.figure(figsize=[15,15])
    ax_all = fig_all.add_axes([0.1, 0.1, 0.8, 0.8], frameon=False)
    for elec in range(len(ext_sim_dict['elec_z'])):
        ax_all.plot(ext_sim_dict['elec_z'][elec], ext_sim_dict['elec_y'][elec], color='b',\
                marker='$E%i$'%elec, markersize=20 )    
    legends = []
    for i, neur in enumerate(neuron_dict):
        folder = os.path.join(neural_sim_dict['output_folder'], neuron_dict[neur]['name'])
        coor = np.load(os.path.join(folder,'coor.npy'))
        x,y,z = coor
        n_compartments = len(x)
        fig = pl.figure(figsize=[10, 10])
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], frameon=False)
        # Plot the electrodes
        for elec in range(len(ext_sim_dict['elec_z'])):
            ax.plot(ext_sim_dict['elec_z'][elec], ext_sim_dict['elec_y'][elec], color='b',\
                   marker='$%i$'%elec, markersize=20 )
        # Plot the neuron
        xmid, ymid, zmid = np.load(folder + '/coor.npy')
        xstart, ystart,zstart = np.load(folder + '/coor_start.npy')
        xend, yend, zend = np.load(folder + '/coor_end.npy')
        diam = np.load(folder + '/diam.npy')
        length = np.load(folder + '/length.npy')
        n_compartments = len(diam)
        for comp in range(n_compartments):
            if comp == 0:
                xcoords = pl.array([xmid[comp]])
                ycoords = pl.array([ymid[comp]])
                zcoords = pl.array([zmid[comp]])
                diams = pl.array([diam[comp]])    
            else:
                if zmid[comp] < 0.400 and zmid[comp] > -.400:  
                    xcoords = pl.r_[xcoords, pl.linspace(xstart[comp],
                                                         xend[comp], length[comp]*3*1000)]   
                    ycoords = pl.r_[ycoords, pl.linspace(ystart[comp],
                                                         yend[comp], length[comp]*3*1000)]   
                    zcoords = pl.r_[zcoords, pl.linspace(zstart[comp],
                                                         zend[comp], length[comp]*3*1000)]   
                    diams = pl.r_[diams, pl.linspace(diam[comp], diam[comp],
                                                length[comp]*3*1000)]
        argsort = pl.argsort(-xcoords)
        ax.scatter(zcoords[argsort], ycoords[argsort], s=20*(diams[argsort]*1000)**2,
                       c=xcoords[argsort], edgecolors='none', cmap='gray')
        ax_all.plot(zmid[0], ymid[0], marker='$%i$'%i, markersize=20, label='%i: %s' %(i, neur))
        #legends.append('%i: %s' %(i, neur))
        ax.axis(ext_sim_dict['plot_range'])
        ax.axis('equal')
        ax.axis(ext_sim_dict['plot_range'])
        ax.set_xlabel('z [mm]')
        ax.set_ylabel('y [mm]')
        fig.savefig(os.path.join(neural_sim_dict['output_folder'],\
                  'neuron_figs', '%s.png' % neur))
    ax_all.axis('equal')
    ax_all.axis(ext_sim_dict['plot_range'])
    ax_all.set_xlabel('z [mm]')
    ax_all.set_ylabel('y [mm]')
    ax_all.legend()
    fig_all.savefig(os.path.join(neural_sim_dict['output_folder'], 'fig.png'))
Example #7
    def getCloneReplicates(self, clone, source, condition, applyFilter=False):
        '''Retrieve all growth curves for a clone+source+condition'''
        # Check if any other replicates should be returned
        # retArray is an (n_replicates x N) 2-D numpy array
        retArray = py.array([])
        first = True
        for i in range(1, self.numReplicates[clone] + 1):
            # Get replicate
            filterMe = self.dataHash[clone][i][source][condition]['filter']
            currCurve = self.dataHash[clone][i][source][condition]['od']

            # Check if filter is enabled and curve should be filtered
            if applyFilter and filterMe:
                continue

            # Create multidimensional array if first
            elif first:
                retArray = py.array([currCurve])
                first = False

            # Append to multidimensional array if not first
            else:
                retArray = py.concatenate((retArray,
                                           py.array([currCurve])))

        return retArray
Example #8
def sub_mov_ave(im,L):
    # function takes trailed spectra and subtracts moving average of length L
    def movingaverage(x,L):
        ma = pl.zeros(len(x),dtype='float64')
        # must take the lead-up zone into account (prob slow)
        for i in range(0,L):
            ma[i] = pl.average(x[0:i+1])

        for i in range(L,len(x)):
            #print i
            ma[i] = ma[i-1] + 1.0/L*(x[i]-x[i-L])

        return ma

    def medfilt(x,L):
        ma = sig.medfilt(x,L)
        return ma

    im_new = pl.array(im).copy()
    im_new[:] = 0.0
    im=pl.array(im)
    s = im.shape
    print(s)

    for i in range(s[1]):
        im_new[:,i] = movingaverage(im[:,i],L)

    return im-im_new
Example #9
def fitData3(fileName):

    # xVals is type 'numpy.ndarray'
    # xVals[0] will return the 1st item in array
    xVals, yVals = getData(fileName)
    xVals = pylab.array(xVals[:-6])
    yVals = pylab.array(yVals[:-6])
    xVals = xVals*9.81  # convert mass to force (F = mg)

    observed_data_variance = calcVariance(xVals)

    # need to grab the Y values from somewhere ??? maybe estYVals
    # to compare with observed data Y values
    # to calculate the variance of errors
    # errors_variance = calcVariance(xVals)

    coefficient_determination = 0


    pylab.plot(xVals, yVals, 'bo', label = 'Measured points')
    pylab.title('Measured Displacement of Spring')
    pylab.xlabel('Force (Newtons)')
    pylab.ylabel('Distance (meters)')
    a,b = pylab.polyfit(xVals, yVals, 1)  # fit y = ax + b

    # use line equation to graph predicted values
    estYVals = a*xVals + b
    k = 1/a
    pylab.plot(xVals, estYVals, label = 'Linear fit, k = '
               + str(round(k, 5)))
    pylab.legend(loc = 'best')
Example #10
def ratio_change(mag, t_inlet_close=None):
    eq_m = -0.0001
    rise_rate = 0.0125
    ts = linspace(0, 400, 500)
    f = F(ts, mag=mag)
#     g = eq_m * f
    g = G(ts, f, rate=eq_m)
    b = rise_rate * ts

    fg = f + g + b
    if t_inlet_close is None:
        rf, t_inlet_close, vi_F = calc_optimal_eqtime(ts, fg)

    bs = []
    ts = []
    for ri in range(-5, 5):
        ti = t_inlet_close + ri
        fi = F(ti, mag=mag)
        gi = G(ti, fi, rate=eq_m)
        vi = fi + gi + rise_rate * ti
        b = vi - 2 * eq_m * ti
        bs.append(b)
        ts.append(ti)

    ts, bs = array(ts), array(bs)
    return t_inlet_close, ts, bs
Example #11
def main():
    mu = pl.array([[0], [12], [24], [36]])
    Sigma = pl.array([[3.01602775,  1.02746769, -3.60224613, -2.08792829],
                      [1.02746769,  5.65146472, -3.98616664,  0.48723704],
                      [-3.60224613, -3.98616664, 13.04508284, -1.59255406],
                      [-2.08792829,  0.48723704, -1.59255406,  8.28742469]])

    # The data matrix is created for above mu and Sigma.
    d, U = pl.eig(Sigma)
    L = pl.diagflat(d)
    A = pl.dot(U, pl.sqrt(L))
    X = pl.randn(4, 1000)

    # Y is the data matrix of random samples.
    Y = pl.dot(A, X) + pl.tile(mu, 1000)
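    # Why this works: with X ~ N(0, I) and A = U*sqrt(L), the samples satisfy
    # cov(Y) = A A^T = U L U^T = Sigma (U is orthogonal since Sigma is
    # symmetric), so Y ~ N(mu, Sigma).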

    pl.figure(1)
    pl.clf()
    pl.plot(X[0], Y[1], '+', color='#0000FF', label='i=0,j=1')
    pl.plot(X[0], Y[2], '+', color='#FF0000', label='i=0,j=2')
    pl.plot(X[0], Y[3], '+', color='#00FF00', label='i=0,j=3')
    pl.plot(X[1], Y[0], 'x', color='#FFFF00', label='i=1,j=0')
    pl.plot(X[1], Y[2], 'x', color='#00FFFF', label='i=1,j=2')
    pl.plot(X[1], Y[3], 'x', color='#444444', label='i=1,j=3')
    pl.plot(X[2], Y[0], '.', color='#774411', label='i=2,j=0')
    pl.plot(X[2], Y[1], '.', color='#222222', label='i=2,j=1')
    pl.plot(X[2], Y[3], '.', color='#AAAAAA', label='i=2,j=3')
    pl.plot(X[3], Y[0], '+', color='#FFAA22', label='i=3,j=0')
    pl.plot(X[3], Y[1], '+', color='#22AAFF', label='i=3,j=1')
    pl.plot(X[3], Y[2], '+', color='#FFDD00', label='i=3,j=2')
    pl.legend()
    pl.savefig('fig21.png')
Example #12
File: misc.py Project: MMaus/mutils
def getApices(y):
    """ 
    returns the time (in frames) and position of initial and final apex
    height from a given trajectory y, which are obtained by fitting a cubic
    spline 
    
    ==========
    parameter:
    ==========
    y : *array* (1D)
        the trajectory. Should ideally start ~1 frame before an apex and end ~1
        frame behind an apex

    ========
    returns:
    ========

    [x0, xF], [y0, yF] : location (in frames) and value of the first and final
    apices

    """
    # the math behind here is: fitting a 2nd order polynomial and finding 
    # the root of its derivative. Here, only the results are applied, that's
    # why it appears like "magic" numbers
    c = dot(array([[.5, -1, .5], [-1.5, 2., -.5], [1., 0., 0.]]), y[:3])
    x0 = -1. * c[1] / (2. * c[0])
    y0 = polyval(c, x0)

    c = dot(array([[.5, -1, .5], [-1.5, 2., -.5], [1., 0., 0.]]), y[-3:])
    xF = -1. * c[1] / (2. * c[0])
    yF = polyval(c, xF)
    xF += len(y) - 3

    return [x0, xF], [y0, yF]
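# Where the "magic numbers" above come from (a verification sketch): the 3x3
# matrix is the inverse of the Vandermonde matrix of a quadratic
# c[0]*x**2 + c[1]*x + c[2] sampled at x = 0, 1, 2, and the apex is the root
# of the derivative, x = -c[1] / (2*c[0]).
import numpy as np
V = np.array([[0., 0., 1.], [1., 1., 1.], [4., 2., 1.]])  # rows are [x**2, x, 1]
print(np.linalg.inv(V))  # -> [[0.5, -1., 0.5], [-1.5, 2., -0.5], [1., 0., 0.]]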
Example #13
 def read_doscar(self, fname="DOSCAR"):
     """Read a VASP DOSCAR file."""
     f = open(fname)
     natoms = int(f.readline().split()[0])
     [f.readline() for n in range(4)]  # Skip next 4 lines.
     dos = []
     for na in range(natoms + 1):
         try:
             line = f.readline()
             if line == "":
                 raise Exception
         except Exception as e:
             errstr = (
                 "Failed reading "
                 + str(na)
                 + ":th DOS block, probably "
                 + "this DOSCAR is from some old version of VASP that "
                 + "doesn't "
                 + "first produce a block with integrated DOS. Inserting "
                 + "empty 0:th block."
             )
             sys.stderr.write(errstr)
             dos.insert(0, pl.zeros((ndos, dos[1].shape[1])))
             continue
         try:
             ndos = int(line.split()[2])
         except:
             print "Error, line is: " + line + "ENDLINE"
         line = f.readline().split()
         cdos = pl.zeros((ndos, len(line)))
         cdos[0] = pl.array(line)
         for nd in range(1, ndos):
             line = f.readline().split()
             cdos[nd] = pl.array(line)
         dos.append(cdos)
Example #14
        def residuals(params,x,y,z):
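            # Residuals for fitting an axis-aligned ellipsoid: after shifting
            # each point by (xo, yo, zo) and scaling by (xs, ys, zs), points on
            # the ellipsoid should lie on the unit sphere, so each residual is
            # the scaled point's norm minus 1.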
            xo = params[0]
            xs = params[1]
            yo = params[2]
            ys = params[3]
            zo = params[4]
            zs = params[5]

            xc = empty(shape(x))
            for i in range(len(x)):
                xc[i] = (x[i] - xo) * xs

            yc = empty(shape(y))
            for i in range(len(y)):
                yc[i] = (y[i] - yo) * ys

            zc = empty(shape(z))
            for i in range(len(z)):
                zc[i] = (z[i] - zo) * zs

            res = []
            for i in range(len(xc)):
                norm = l2norm(array([xc[i],yc[i],zc[i]])) - 1.0
                res.append(norm)

            return array(res)
Example #15
def compare_models(db, stoch="itn coverage", stat_func=None, plot_type="", **kwargs):
    if stat_func is None:
        stat_func = lambda x: x

    X = {}
    for k in sorted(db.keys()):
        c = k.split("_")[2]
        X[c] = []

    for k in sorted(db.keys()):
        c = k.split("_")[2]
        X[c].append([stat_func(x_ki) for x_ki in getattr(db[k], stoch).gettrace()])

    x = pl.array([pl.mean(xc[0]) for xc in X.values()])
    xerr = pl.array([pl.std(xc[0]) for xc in X.values()])
    y = pl.array([pl.mean(xc[1]) for xc in X.values()])
    yerr = pl.array([pl.std(xc[1]) for xc in X.values()])

    if plot_type == "scatter":
        default_args = {"fmt": "o", "ms": 10}
        default_args.update(kwargs)
        for c in X.keys():
            pl.text(pl.mean(X[c][0]), pl.mean(X[c][1]), " %s" % c, fontsize=8, alpha=0.4, zorder=-1)
        pl.errorbar(x, y, xerr=xerr, yerr=yerr, **default_args)
        pl.xlabel("First Model")
        pl.ylabel("Second Model")
        pl.plot([0, 1], [0, 1], alpha=0.5, linestyle="--", color="k", linewidth=2)

    elif plot_type == "rel_diff":
        d1 = sorted(100 * (x - y) / x)
        d2 = sorted(100 * (xerr - yerr) / xerr)
        pl.subplot(2, 1, 1)
        pl.title("Percent Model 2 deviates from Model 1")

        pl.plot(d1, "o")
        pl.xlabel("Countries sorted by deviation in mean")
        pl.ylabel("deviation in mean (%)")

        pl.subplot(2, 1, 2)
        pl.plot(d2, "o")
        pl.xlabel("Countries sorted by deviation in std err")
        pl.ylabel("deviation in std err (%)")
    elif plot_type == "abs_diff":
        d1 = sorted(x - y)
        d2 = sorted(xerr - yerr)
        pl.subplot(2, 1, 1)
        pl.title("Percent Model 2 deviates from Model 1")

        pl.plot(d1, "o")
        pl.xlabel("Countries sorted by deviation in mean")
        pl.ylabel("deviation in mean")

        pl.subplot(2, 1, 2)
        pl.plot(d2, "o")
        pl.xlabel("Countries sorted by deviation in std err")
        pl.ylabel("deviation in std err")
    else:
        assert 0, "plot_type must be abs_diff, rel_diff, or scatter"

    return pl.array([x, y, xerr, yerr])
Example #16
def calc(filename):
    # p0 is assumed to be a module-level two-element initial-guess list
    p0[0] = 0.0
    p0[1] = 0.0
    x = []
    ymeas = []
    yerr = []
    fin = open(filename, 'r')
    for line in fin:
        linedata = line.split()
        x.append(float(linedata[0]))
        ymeas.append(float(linedata[1]))
        yerr.append(float(linedata[1]))  # note: reuses the measurement column as its error
    xarray = pylab.array(x)
    yarray = pylab.array(ymeas)
    yerarray = pylab.array(yerr)
    param_output = scipy.optimize.leastsq(residue, p0, args=(yarray, xarray, yerarray), full_output=True)
    a = param_output[0][0]
    b = param_output[0][1]

    H = np.linspace(0, 150000, 1000)
    ans = b*H**1 + a*H**0

    print(filename)
    print(a, b)
    plt.plot(H, ans)
    plt.scatter(xarray, yarray)
Example #17
def normal(name, pi, sigma, p, s):
    """ Generate PyMC objects for a normal model

    :Parameters:
      - `name` : str
      - `pi` : pymc.Node, expected values of rates
      - `sigma` : pymc.Node, dispersion parameters of rates
      - `p` : array, observed values of rates
      - `s` : array, standard error of rates

    :Results:
      - Returns dict of PyMC objects, including 'p_obs' (the observed stochastic likelihood) and 'p_pred' (the data prediction stochastic)

    """
    p = pl.array(p)
    s = pl.array(s)

    assert pl.all(s >= 0), "standard error must be non-negative"

    i_inf = pl.isinf(s)

    @mc.observed(name="p_obs_%s" % name)
    def p_obs(value=p, pi=pi, sigma=sigma, s=s):
        return mc.normal_like(value, pi, 1.0 / (sigma ** 2.0 + s ** 2.0))

    s_noninf = s.copy()
    s_noninf[i_inf] = 0.0

    @mc.deterministic(name="p_pred_%s" % name)
    def p_pred(pi=pi, sigma=sigma, s=s_noninf):
        return mc.rnormal(pi, 1.0 / (sigma ** 2.0 + s ** 2.0))

    return dict(p_obs=p_obs, p_pred=p_pred)
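# Usage sketch (hypothetical priors and data; pymc2-style, matching the API
# above). Note how sigma (model dispersion) and s (sampling error) combine in
# quadrature: precision = 1 / (sigma**2 + s**2).
import pymc as mc
example_vars = normal('example',
                      pi=mc.Uniform('pi_example', 0.0, 1.0),
                      sigma=mc.Uniform('sigma_example', 0.0, 10.0),
                      p=[0.20, 0.25, 0.30],  # observed rates
                      s=[0.01, 0.02, 0.03])  # their standard errors
mc.MCMC(example_vars).sample(2000)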
Example #18
    def _make_log_freq_map(self):
        """
        ::

            For the given ncoef (bands-per-octave) and nfft, calculate the center frequencies
            and bandwidths of linear and log-scaled frequency axes for a constant-Q transform.
        """
        fp = self.feature_params
        bpo = float(self.nbpo) # Bands per octave
        self._fftN = float(self.nfft)
        hi_edge = float( self.hi )
        lo_edge = float( self.lo )
        f_ratio = 2.0**( 1.0 / bpo ) # Constant-Q bandwidth
        self._cqtN = float( P.floor(P.log(hi_edge/lo_edge)/P.log(f_ratio)) )
        self._dctN = self._cqtN
        self._outN = float(self.nfft/2+1)
        if self._cqtN < 1: print("warning: cqtN not positive definite")
        mxnorm = P.empty(self._cqtN) # Normalization coefficients        
        fftfrqs = self._fftfrqs #P.array([i * self.sample_rate / float(self._fftN) for i in P.arange(self._outN)])
        logfrqs=P.array([lo_edge * P.exp(P.log(2.0)*i/bpo) for i in P.arange(self._cqtN)])
        logfbws=P.array([max(logfrqs[i] * (f_ratio - 1.0), self.sample_rate / float(self._fftN)) 
                         for i in P.arange(self._cqtN)])
        #self._fftfrqs = fftfrqs
        self._logfrqs = logfrqs
        self._logfbws = logfbws
        self._make_cqt()
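# Quick numeric check of the band-count formula above (illustrative values):
# 12 bands per octave between 60 Hz and 8000 Hz covers about 7.06 octaves,
# giving floor(7.06 * 12) = 84 constant-Q bands.
import numpy as np
bpo = 12.0
f_ratio = 2.0 ** (1.0 / bpo)  # frequency ratio between adjacent bands
print(int(np.floor(np.log(8000.0 / 60.0) / np.log(f_ratio))))  # -> 84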
Example #19
def plot_convergence_metrics(convergence_metrics, title_append='',
        x_is_iters=False, save_filename=None):
    x_variable = None
    x_label = None
    if x_is_iters:
        x_variable = pylab.array(convergence_metrics['iter_idx_list']).T
        x_label = 'iter idx'
    else:
        x_variable = pylab.array(convergence_metrics['elapsed_seconds_list']).T
        x_variable = x_variable.cumsum(axis=0)
        x_label = 'cumulative time (seconds)'
    ari_arr = pylab.array(convergence_metrics['column_ari_list']).T
    mean_test_ll_arr = pylab.array(convergence_metrics['mean_test_ll_list']).T
    #
    fh = pylab.figure()
    pylab.subplot(211)
    pylab.title('convergence diagnostics: %s' % title_append)
    pylab.plot(x_variable, ari_arr)
    pylab.xlabel(x_label)
    pylab.ylabel('column ARI')
    pylab.subplot(212)
    pylab.plot(x_variable, mean_test_ll_arr)
    pylab.xlabel(x_label)
    pylab.ylabel('mean test log likelihood')
    #
    if save_filename is not None:
        pylab.savefig(save_filename)
    return fh
Example #20
def findOrder(xVals, yVals, accuracy = 1.0e-1):
    X = pylab.array(xVals)
    Y = pylab.array(yVals)
    find = True
    n = 0
    while find:
        #n += 1
        Z = pylab.polyfit(xVals, yVals, n, None, True)
        '''
        est = 0.0
        estVal = []
        for x in xVals:
            for i in range(len(Z[0])):
                est += Z[0][i] * (x**(len(Z[0])-i-1))
            estVal.append(est)
        error = 0.0
        for j in range(len(yVals)):
            error += (estVal[j] - yVals[j]) ** 2
        if error <= accuracy:
            find = False
            print 'order ', n, 'is good enough'
        '''               
        if Z[1] <= accuracy:
            find = False
            print('order', n, 'is good enough')
        n += 1
    print(Z[0])
    return Z[0]
Example #21
def loadData(filename):
    """
    Read the contents of the given file.  Assumes the file contents contain
    data in the form of comma-separated x,y pairs.

    Parameters:
    filename - the name of the data file as a string

    Returns:
    (x, y) - a tuple containing a Pylab array of x values and
             a Pylab array of y values
    """
    # Open the file and set up the result lists
    f = open(filename, 'r')
    x = []
    y = []

    # Iterate through each line of the file, casting both values in the x,y
    # tuples as ints and storing them in their respective lists
    for line in f:
        splitLine = line.strip().split(',')
        x.append(int(splitLine[0]))
        y.append(int(splitLine[1]))

    # Close the file and return the tuple of pylab arrays
    f.close()
    return (pylab.array(x), pylab.array(y))
Example #22
def meanVar(data,outMean=None):
    """
    each data[i] is a vector; return the average vector and the variance
    vector

    each vector is assumed to be the same length, of course

    if a mean is available from outside, set outMean, which is a vector
    of len(data[i]); the mean returned is then a residual
    """

    mean = []
    var = []
    n = len(data)
    print(n, len(data[0]))
    try:
        for i in range(n):
            data[i] -= outMean
    except TypeError:
        pass
    for i in range(len(data[0])):
        m = 0.0
        v = 0.0
        for j in range(n):
            m += data[j][i]
            v += data[j][i]*data[j][i]
        if outMean is None:
            v -= m*m/float(n)
        m /= float(n)
        v /= (n-1)
        mean.append(m)
        var.append(v)
    return (M.array(mean),M.array(var))
Example #23
def anim_update(i):
    """
    i: frame num
    """

    ## equivalent time = (i/tot)*(runtime/fct)
    t = (float(i)/float(tot_frames))*(anim_run_time/float(fct))
    # lnx, lny = line.get_data()
    # pos_x, pos_y = P.get_position(t)
    # head.set_data([pos_x], [pos_y])
    # lnx = pylab.array([k for k in lnx] + [pos_x])
    # lny = pylab.array([k for k in lny] + [pos_y])
    # line.set_data(lnx, lny)

    # use "j" here so the frame counter "i" is not clobbered
    for j in range(len(Projs)):
        pos_x, pos_y = Projs[j].get_position(t)

        p_heads[j].set_data([pos_x], [pos_y])

        lnx, lny = p_lines[j].get_data()
        lnx = pylab.array([k for k in lnx] + [pos_x])
        lny = pylab.array([k for k in lny] + [pos_y])
        p_lines[j].set_data(lnx, lny)

    return p_lines + p_heads
Example #24
def main():
    f = open("final_position.txt","r")
    data = pl.genfromtxt(f,comments = "L")
    
    # need to keep only every fourth row
    x = pl.array([])
    y = pl.array([])
    for i,j in enumerate(data[:-7,2]):
        if i%4 == 0:
            x = pl.append(x,data[i,4])
            y = pl.append(y,j)
    
    print(x)
    print(y)
    fit = np.polyfit(x,y,2)

    print(fit)
    
    #fited = fit[0]+fit[1]*x + fit[2]*x**2
    fited = np.poly1d(fit)
    print(fited)

    #pl.plot(pl.append(x,[.262,.264,.266]),fited(pl.append(x,[.262,.264,.266])),color="black")
    pl.scatter(x,y,color = "black")
    pl.xlabel("$A$",fontsize="30")
    pl.ylabel("$x$",fontsize="30")
    pl.savefig("fin_pts.png",transparent=True,dpi=300)
    
    os.system("open fin_pts.png")
Example #25
def rotate_molecule(coords, rotp = m.array((0.,0.,0.)), phi = 0., \
        theta = 0., psi = 0.):
    """Rotate a molecule via Euler angles.

    See http://mathworld.wolfram.com/EulerAngles.html for definition.
    Input arguments:
    coords: Atom coordinates, as Nx3 2d pylab array.
    rotp: The point to rotate about, as a 1d 3-element pylab array
    phi: The 1st rotation angle around z axis.
    theta: Rotation around x axis.
    psi: 2nd rotation around z axis.

    """
# First move the molecule to the origin
# In contrast to MATLAB, numpy broadcasts the smaller array to the larger
# row-wise, so there is no need to play with the Kronecker product.
    rcoords = coords - rotp
# First Euler rotation about z in matrix form
    D = m.array(((m.cos(phi), m.sin(phi), 0.), (-m.sin(phi), m.cos(phi), 0.), \
            (0., 0., 1.)))
# Second Euler rotation about x:
    C = m.array(((1., 0., 0.), (0., m.cos(theta), m.sin(theta)), \
            (0., -m.sin(theta), m.cos(theta))))
# Third Euler rotation, 2nd rotation about z:
    B = m.array(((m.cos(psi), m.sin(psi), 0.), (-m.sin(psi), m.cos(psi), 0.), \
            (0., 0., 1.)))
# Total Euler rotation
    A = m.dot(B, m.dot(C, D))
# Do the rotation
    rcoords = m.dot(A, m.transpose(rcoords))
# Move back to the rotation point
    return m.transpose(rcoords) + rotp
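# Usage sketch (assuming "m" is the pylab module, as in the function above):
# a 90-degree phi rotation about z maps (1, 0, 0) to (0, -1, 0) in this
# axis-rotation convention.
import pylab as m
print(rotate_molecule(m.array([[1., 0., 0.]]), phi=m.pi / 2.0))
# -> approximately [[0., -1., 0.]]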
Example #26
def tryFits1(fName):
    '''
    Calculate the coefficient of determination (R**2) to determine how
    well the model fits the data and, hence, how well it could make predictions.
    '''
    distances, heights = getTrajectoryData(fName)
    distances = pylab.array(distances)*36 # convert yards to inches
    totHeights = pylab.array([0]*len(distances))
    for h in heights:
        totHeights = totHeights + pylab.array(h)
    pylab.title('Trajectory of Projectile (Mean of 4 Trials)')
    pylab.xlabel('Inches from Launch Point')
    pylab.ylabel('Inches Above Launch Point')
    meanHeights = totHeights/float(len(heights))
    pylab.plot(distances, meanHeights, 'bo')
    a,b = pylab.polyfit(distances, meanHeights, 1)
    altitudes = a*distances + b
    pylab.plot(distances, altitudes, 'r',
               label = 'Linear Fit' + ', R2 = '
               + str(round(rSquare(meanHeights, altitudes), 4)))
    a,b,c = pylab.polyfit(distances, meanHeights, 2)
    altitudes = a*(distances**2) + b*distances + c
    pylab.plot(distances, altitudes, 'g',
               label = 'Quadratic Fit' + ', R2 = '
               + str(round(rSquare(meanHeights, altitudes), 4)))
    pylab.legend()
Example #27
def fitData(fileName):
    '''
    Uses pylab's polyfit to find the line of best fit for the data from a
    Hooke's law experiment.

    Hooke's law is represented by the equation y = ax + b, where

    y - measured distance
    x - force
    '''
    xVals, yVals = getData(fileName)
    xVals = pylab.array(xVals)
    yVals = pylab.array(yVals)
    xVals = xVals*9.81  # convert mass to force (F = mg)
    pylab.plot(xVals, yVals, 'bo', label = 'Measured points')
    pylab.title('Measured Displacement of Spring')
    pylab.xlabel('Force (Newtons)')
    pylab.ylabel('Distance (meters)')
    a,b = pylab.polyfit(xVals, yVals, 1)  # fit y = ax + b
    # use line equation to graph predicted values
    estYVals = a*xVals + b
    k = 1/a
    pylab.plot(xVals, estYVals, label = 'Linear fit, k = '
               + str(round(k, 5)))
    pylab.legend(loc = 'best')
Example #28
def rollingMeanScale(series, period, plotAxis=False):
    svr_rbf = SVR(kernel='rbf', C=1e4, gamma=.01, epsilon=.01)
    # Fit model to the data series
    tS= numpy.array([series.index]).T
    y_rbf = svr_rbf.fit(tS, list(series))
    # Up-sample to get rid of bias
    fFit = arange(series.index[0],series.index[-1]+.1,.25)
    trend = y_rbf.predict(numpy.array([fFit]).T)
    
    # Take rolling mean over a 1-day window
    shift = int(round(period/.5))
    rMean = pandas.rolling_mean(trend, shift*2)  # old pandas rolling-mean API (pre-0.18)
    rMean = numpy.roll(rMean, -shift)
    rMean[:shift]=rMean[shift]
    rMean[-(shift+1):]=rMean[-(shift+1)]
    rMean = pandas.Series(rMean, index=fFit)
    
    # Adjust the data series by subtracting out the trend
    series = series - array(rMean[array(series.index, dtype=float)])
    series = scaleMe(series)-.5
    
    if plotAxis:
        plotAxis.plot(fFit, trend, label='Series Trend')
        plotAxis.plot(fFit, rMean, label='Rolling Mean')
        plotAxis.set_title('Detrend the Data')
        plotAxis.legend(loc='lower left')

    return series
Example #29
def tryFits(fName):
    '''
    A linear fit does not fit the data; it is not a logical assumption that
    the arrow flies in a straight line to the target.

    A quadratic fit mirrors a parabolic pathway.
    '''
    distances, heights = getTrajectoryData(fName)
    distances = pylab.array(distances)*36 # Convert yards to inches
    totHeights = pylab.array([0]*len(distances))
    for h in heights:
        totHeights = totHeights + pylab.array(h) # Get one avg measurement of height
    pylab.title('Trajectory of Projectile (Mean of 4 Trials)')
    pylab.xlabel('Inches from Launch Point')
    pylab.ylabel('Inches Above Launch Point')
    meanHeights = totHeights/float(len(heights))
    pylab.plot(distances, meanHeights, 'bo')
    a,b = pylab.polyfit(distances, meanHeights, 1)
    altitudes = a*distances + b
    pylab.plot(distances, altitudes, 'r',
               label = 'Linear Fit')
    a,b,c = pylab.polyfit(distances, meanHeights, 2)
    altitudes = a*(distances**2) + b*distances + c 
    pylab.plot(distances, altitudes, 'g',
               label = 'Quadratic Fit')
    pylab.legend()
Example #30
def createExpData(f,xVals):
    '''Assumes f is an exponential function of a single argument and xVals is
       an array whose values can be passed as arguments to f.
       Returns an array of xVals and an array of the results of applying f to
       each element of xVals.'''
    yVals=[]
    for i in range(len(xVals)):
        yVals.append(f(xVals[i]))
    return pylab.array(xVals),pylab.array(yVals)
Example #31
filename_parts = [
    save_filename_prefix,
    str(run_key), 'timeseries.png'
]
timeseries_save_filename = filter_join(filename_parts, '_')
filename_parts = [
    save_filename_prefix,
    str(run_key), 'test_ll_hist.png'
]
test_ll_hist_save_filename = filter_join(filename_parts, '_')
filename_parts = [
    save_filename_prefix,
    str(run_key), 'runtime_hist.png'
]
runtime_hist_save_filename = filter_join(filename_parts, '_')
#
pylab.figure()
test_lls = pylab.array(convergence_metrics['mean_test_ll_list'])
final_test_lls = test_lls[:, -1]
pylab.hist(final_test_lls, n_bins, cumulative=cumulative)
pylab.savefig(test_ll_hist_save_filename)
#
pylab.figure()
final_times = pylab.array(
    convergence_metrics['elapsed_seconds_list']).T
final_times = final_times.cumsum(axis=0)
final_times = final_times[-1, :]
pylab.hist(final_times, n_bins, cumulative=cumulative)
pylab.savefig(runtime_hist_save_filename)
fh = plot_convergence_metrics(convergence_metrics,
                              title_append=str(run_key),
                              x_is_iters=x_is_iters,
                              save_filename=timeseries_save_filename)
Example #32
def validate_ai_re(N=500,
                   delta_true=.15,
                   sigma_true=[.1, .1, .1, .1, .1],
                   pi_true=quadratic,
                   smoothness='Moderately',
                   heterogeneity='Slightly'):
    ## generate simulated data
    a = pl.arange(0, 101, 1)
    pi_age_true = pi_true(a)

    import dismod3
    import simplejson as json
    model = data.ModelData.from_gbd_jsons(
        json.loads(dismod3.disease_json.DiseaseJson().to_json()))
    gbd_hierarchy = model.hierarchy

    model = data_simulation.simple_model(N)
    model.hierarchy = gbd_hierarchy

    model.parameters['p']['parameter_age_mesh'] = range(0, 101, 10)
    model.parameters['p']['smoothness'] = dict(amount=smoothness)
    model.parameters['p']['heterogeneity'] = heterogeneity

    age_start = pl.array(mc.runiform(0, 100, size=N), dtype=int)
    age_end = pl.array(mc.runiform(age_start, 100, size=N), dtype=int)

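    # Average the true rate over each (age_start, age_end] interval with
    # cumulative sums: mean = (S[age_end] - S[age_start]) / (W[age_end] - W[age_start]).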
    age_weights = pl.ones_like(a)
    sum_pi_wt = pl.cumsum(pi_age_true * age_weights)
    sum_wt = pl.cumsum(age_weights * 1.)
    p = (sum_pi_wt[age_end] - sum_pi_wt[age_start]) / (sum_wt[age_end] -
                                                       sum_wt[age_start])

    # correct cases where age_start == age_end
    i = age_start == age_end
    if pl.any(i):
        p[i] = pi_age_true[age_start[i]]

    model.input_data['age_start'] = age_start
    model.input_data['age_end'] = age_end
    model.input_data['effective_sample_size'] = mc.runiform(100, 10000, size=N)

    from validate_covariates import alpha_true_sim
    area_list = pl.array([
        'all', 'super-region_3', 'north_africa_middle_east', 'EGY', 'KWT',
        'IRN', 'IRQ', 'JOR', 'SYR'
    ])
    alpha = alpha_true_sim(model, area_list, sigma_true)
    print(alpha)

    model.input_data['true'] = pl.nan

    model.input_data['area'] = area_list[mc.rcategorical(
        pl.ones(len(area_list)) / float(len(area_list)), N)]

    for i, a in model.input_data['area'].items():
        model.input_data['true'][i] = p[i] * pl.exp(
            pl.sum([
                alpha[n] for n in nx.shortest_path(model.hierarchy, 'all', a)
                if n in alpha
            ]))
    p = model.input_data['true']

    n = model.input_data['effective_sample_size']
    model.input_data['value'] = mc.rnegative_binomial(n * p,
                                                      delta_true * n * p) / n

    ## Then fit the model and compare the estimates to the truth
    model.vars = {}
    model.vars['p'] = data_model.data_model('p', model, 'p',
                                            'north_africa_middle_east',
                                            'total', 'all', None, None, None)
    #model.map, model.mcmc = fit_model.fit_data_model(model.vars['p'], iter=1005, burn=500, thin=5, tune_interval=100)
    model.map, model.mcmc = fit_model.fit_data_model(model.vars['p'],
                                                     iter=10000,
                                                     burn=5000,
                                                     thin=25,
                                                     tune_interval=100)

    graphics.plot_one_ppc(model.vars['p'], 'p')
    graphics.plot_convergence_diag(model.vars)
    graphics.plot_one_type(model, model.vars['p'], {}, 'p')
    pl.plot(range(101), pi_age_true, 'r:', label='Truth')
    pl.legend(fancybox=True, shadow=True, loc='upper left')

    pl.show()

    model.input_data['mu_pred'] = model.vars['p']['p_pred'].stats()['mean']
    model.input_data['sigma_pred'] = model.vars['p']['p_pred'].stats(
    )['standard deviation']
    data_simulation.add_quality_metrics(model.input_data)

    model.delta = pandas.DataFrame(dict(true=[delta_true]))
    model.delta['mu_pred'] = pl.exp(model.vars['p']['eta'].trace()).mean()
    model.delta['sigma_pred'] = pl.exp(model.vars['p']['eta'].trace()).std()
    data_simulation.add_quality_metrics(model.delta)

    model.alpha = pandas.DataFrame(
        index=[n for n in nx.traversal.dfs_preorder_nodes(model.hierarchy)])
    model.alpha['true'] = pandas.Series(dict(alpha))
    model.alpha['mu_pred'] = pandas.Series(
        [n.stats()['mean'] for n in model.vars['p']['alpha']],
        index=model.vars['p']['U'].columns)
    model.alpha['sigma_pred'] = pandas.Series(
        [n.stats()['standard deviation'] for n in model.vars['p']['alpha']],
        index=model.vars['p']['U'].columns)
    model.alpha = model.alpha.dropna()
    data_simulation.add_quality_metrics(model.alpha)

    model.sigma = pandas.DataFrame(dict(true=sigma_true))
    model.sigma['mu_pred'] = [
        n.stats()['mean'] for n in model.vars['p']['sigma_alpha']
    ]
    model.sigma['sigma_pred'] = [
        n.stats()['standard deviation'] for n in model.vars['p']['sigma_alpha']
    ]
    data_simulation.add_quality_metrics(model.sigma)

    print('delta')
    print(model.delta)

    print('\ndata prediction bias: %.5f, MARE: %.3f, coverage: %.2f' % (
        model.input_data['abs_err'].mean(),
        pl.median(pl.absolute(model.input_data['rel_err'].dropna())),
        model.input_data['covered?'].mean()))

    model.mu = pandas.DataFrame(
        dict(true=pi_age_true,
             mu_pred=model.vars['p']['mu_age'].stats()['mean'],
             sigma_pred=model.vars['p']['mu_age'].stats()
             ['standard deviation']))
    data_simulation.add_quality_metrics(model.mu)

    data_simulation.initialize_results(model)
    data_simulation.add_to_results(model, 'delta')
    data_simulation.add_to_results(model, 'mu')
    data_simulation.add_to_results(model, 'input_data')
    data_simulation.add_to_results(model, 'alpha')
    data_simulation.add_to_results(model, 'sigma')
    data_simulation.finalize_results(model)

    print(model.results)

    return model
Example #33
import sys

import pylab as plt  # assumption: "plt" is the pylab module, since plt.array is used below

n = []
iterations = []
time_span_eig_sym = []
time_span_ours = []

filename = sys.argv[1]  # filename from the command line, so different files can be analyzed

with open(filename) as infile:
    infile.readline()  #discard 1st line
    lines = infile.readlines()  #read whole file
    for line in lines:
        words = line.split(", ")  #split line per each ", "
        n.append(float(words[0]))  #convert and append 1st element
        iterations.append(float(words[1]))  #convert and append 2nd element
        time_span_eig_sym.append(float(words[2]))
        time_span_ours.append(float(words[3]))

#convert lists to arrays for ease-of-use
n = plt.array(n)
iterations = plt.array(iterations)
time_span_eig_sym = plt.array(time_span_eig_sym)
time_span_ours = plt.array(time_span_ours)

plt.figure(figsize=(12, 9))  #change figuresize for better quality image
plt.plot(n, iterations, label="iterations")  #plot of iterations as function of dimension
plt.grid()  #add grid to plot
plt.title("Plot of iterations from " + filename)  #add title
plt.xlabel("n")  #add x-label
plt.ylabel("Iterations")  #add y-label
plt.legend()  #enable labels on plot
plt.savefig("../iterations-" + filename[:-4] + ".png")  #saves plot as image
plt.show()  #show plot

plt.figure(figsize=(12, 9))  #change figuresize for better quality image
Example #34
#udata = ca.DM([-0.00750703, 0.00913669, 0.0220866, 0.0296195, 0.0460261, 0.0304416, 0.0499982, 0.033759, 0.0211894, 0.0128117, -0.00203445, -0.0169411, -0.00241727, 0.00882324, 0.0138102, 0.0258826, 0.0395631, 0.0247059, 0.0131711, 0.00709643, -0.00437765, -0.0186208, -0.00372299, 0.00777423, 0.0138853, 0.025267, 0.0395367, 0.0246305, 0.0131314, 0.00700164, -0.00436575, -0.0186428, -0.035017, -0.0205003, -0.00941461, -0.00464579, 0.00740074, 0.0209836, 0.00612024, -0.00542827, -0.00921961, -0.0237426, -0.00779888, -0.0276405, -0.0112354, 0.00134591, 0.0102192, -0.00547353, 0.0140357, -0.00255183, -0.0152869, -0.024784, -0.0386468, -0.02351, -0.0118429, -0.00501222, 0.00593656, 0.0205816, 0.00564462, -0.00583115, -0.00961946, -0.0240569, -0.00810298, 0.00394063, 0.0130636, 0.0223687, 0.0382465, 0.0231859, 0.0117751, 0.0054356, -0.00559484, -0.0200046, -0.0362104, -0.0217006, -0.0106011, -0.00582847, 0.00623502, 0.0198186, 0.00495697, -0.0065901, -0.0126904, -0.0241578, -0.0384144, -0.0235163, -0.0120208, -0.00590995, 0.00337668, 0.0204126, 0.00518239, -0.00620927, -0.0129587, -0.0217397, -0.0387916, -0.0235371, -0.0438555, -0.0276591, -0.0467201, -0.029174, -0.0472253, -0.0290896])

# udata weighted, tf = 5.0, sigma_u = 0.005, p_L = [4.14054, 3.96515, 1.64997] as initial params
#udata = ca.DM([-0.00478741, 0.0113049, 0.0239446, 0.0309362, 0.0475909, 0.0322277, 0.0499981, 0.033987, 0.0212807, 0.0127835, -0.00260174, -0.0174883, -0.00320861, 0.00796152, 0.0132514, 0.025497, 0.0392554, 0.0246066, 0.0131363, 0.00668194, -0.00492859, -0.0192749, -0.00458539, 0.00684527, 0.0133319, 0.0248526, 0.0392217, 0.024524, 0.0130912, 0.00658653, -0.00492167, -0.0192967, -0.0356644, -0.0213522, -0.0103167, -0.00513811, 0.00700795, 0.0207247, 0.00606411, -0.00541673, -0.00946968, -0.0242207, -0.00850737, -0.0281794, -0.0118957, 0.000675527, 0.00985312, -0.0056186, 0.0137282, -0.00273397, -0.0154534, -0.0228741, -0.0398174, -0.0245545, -0.0130772, -0.00526507, 0.00508129, 0.0201622, 0.00537374, -0.00603188, -0.0101182, -0.0247716, -0.00904385, 0.00290652, 0.012318, 0.0208662, 0.0380663, 0.0230788, 0.0117833, 0.00482458, -0.0060484, -0.0206898, -0.0367453, -0.0224467, -0.0113863, -0.00620238, 0.00597602, 0.019693, 0.0050357, -0.00644302, -0.0129266, -0.0245217, -0.038884, -0.0241934, -0.0127642, -0.0062763, 0.00316272, 0.0202251, 0.00521663, -0.00610962, -0.0131903, -0.022163, -0.0392395, -0.0242091, -0.0143356, -0.00531497, -0.0218453, -0.0030035, -0.0196556, -0.000967417])


simulation_true_parameters = cp.sim.Simulation( \
    system = system, pdata = p_true)

# simulation_true_parameters.run_system_simulation( \
#     x0 = x0, time_points = time_points, udata = udata)

# ydata = simulation_true_parameters.simulation_results.T

sigma_u = 0.005
sigma_y = pl.array([0.01, 0.01, 0.01, 0.01])

repetitions = 100

p_test = []

for k in range(repetitions):

    udata_noise = udata + sigma_u * pl.randn(*udata.shape)

    simulation_true_parameters.run_system_simulation( \
        x0 = x0, time_points = time_points, udata = udata_noise)

    ydata = simulation_true_parameters.simulation_results.T

    ydata_noise = ydata + sigma_y * pl.randn(*ydata.shape)
Example #35
import pylab as pl

# Disclosure for the sake of the current git repo: 
# This code is borrowed from a kind-hearted soul on the internet, whose identity I have lost in the annals of time.

# Known values # Taken from URL: (https://nierautomata.wiki.fextralife.com/Shock+Wave)
factor0 = 1.
factor1 = 1.5
factor2 = 2.
factor3 = 2.5
factor4 = 3.75
factor5 = 5.
factor6 = 6.75
factor8 = 12.75
fs_known = pl.array([factor0, factor1, factor2, factor3, factor4, factor5, factor6, factor8])

# We know the +8 chip is also the damage limit, as it is called 'Very powerful shockwave';
# if you try to stack several of these chips on top of each other,
# then you will be notified when you exceed this limit.

# Note how there are linear relationships between the values for ranks (0,1,2,3)[*],
# and then a slightly increased linear relationship for ranks (3,4,5)[**],
# and then it is clear that the slope for ranks (5,6) is further increased.

# Hazarding a guess for the values on the missing chip:
# It wouldn't be far-fetched to assume that all three of the ranks (5,6,7)
# could have the same linear relationship (form a straight line),
factor7lin = factor6 + (factor6 - factor5) #=> 8.5
# which is a simple model to make from a gamedev point of view.
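# A quick numeric check of the slope pattern described above (the rank-8 chip
# spans two ranks, so its segment slope is averaged over ranks 6-8):
ranks_known = pl.array([0., 1., 2., 3., 4., 5., 6., 8.])
slopes = pl.diff(fs_known) / pl.diff(ranks_known)
print(slopes)  # -> [0.5, 0.5, 0.5, 1.25, 1.25, 1.75, 3.0]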

# Then again, to reward players to upgrade to rank 7, they may also have made the upgrade from 6 to 7
Example #36
DGEtableI = pd.concat([IndexDGE, DGEtable],axis=1)

procisof=0

for element in hg19tableI.Index:
    procisof+=1
    
    TexonStartsList=""
    TexonEndsList=""
    TexonLengthList=""
    TintronLengthList=""
    readStartList=""
    readEndList=""
    readItemList=""

    strand=array(hg19tableI.strand[hg19tableI.Index==element], dtype=str)[0]
    exonCount=int(hg19tableI.exonCount[hg19tableI.Index==element])
    symbol=array(hg19tableI.name2[hg19tableI.Index==element], dtype=str)[0]
    
    if strand == "+":
        txStart=int(hg19tableI.txStart[hg19tableI.Index==element])-1
        hg19tableI.cdsStart[hg19tableI.Index==element]=int(hg19tableI.cdsStart[hg19tableI.Index==element])-txStart
        hg19tableI.cdsEnd[hg19tableI.Index==element]=int(hg19tableI.cdsEnd[hg19tableI.Index==element])-txStart
        exonStarts=hg19tableI.exonStarts[hg19tableI.Index==element]
        exonStartsList=exonStarts.str.split(',', expand=True)
        exonEnds=hg19tableI.exonEnds[hg19tableI.Index==element]
        exonEndsList=exonEnds.str.split(',', expand=True)
        hg19tableI.txEnd[hg19tableI.Index==element]=int(hg19tableI.txEnd[hg19tableI.Index==element])-txStart
        
        for i in range(exonCount):
            TexonStart=int(exonStartsList[i])-txStart
Example #37
    def swi_graphpng(self,
                     dir,
                     name,
                     bcount=100,
                     conf=0.95,
                     x=None,
                     y=None,
                     m=None,
                     dpi=80,
                     width=8,
                     height=6,
                     compare='',
                     sortby='',
                     xvals='',
                     yvals='',
                     contour_n=20,
                     contour_min='',
                     contour_max='',
                     contour_lines=10,
                     contour_fmt='',
                     xlabel='',
                     ylabel='',
                     xtickrotation='',
                     b_left='',
                     b_bot='',
                     b_top='',
                     b_right=''):
        pylab.figure(figsize=(int(width), int(height)))
        try:
            b_left = float(b_left)
        except:
            b_left = 0.1
        try:
            b_right = float(b_right)
        except:
            b_right = 0.1
        try:
            b_top = float(b_top)
        except:
            b_top = 0.1
        try:
            b_bot = float(b_bot)
        except:
            b_bot = 0.1
        pylab.axes(
            (b_left, b_bot, 1.0 - b_left - b_right, 1.0 - b_top - b_bot))
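        # The axes rectangle is (left, bottom, width, height) in figure
        # fraction, so the b_* values act as margins on each side.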
        try:
            xtickrotation = float(xtickrotation)
        except:
            xtickrotation = 0

        if len(compare) > 0:
            if '/' not in compare:
                c_dir, c_name = compare, 'default'
            else:
                c_dir, c_name = compare.split('/', 1)
            c_names, c_sample, c_ci = extract_individual_data(
                c_dir, c_name, int(bcount), float(conf), m)
        else:
            c_names = []
        xvals = xvals.split(';')
        yvals = yvals.split(';')
        if type(m) is str: m = [m]

        if x is None and y is None:
            names, sample, ci = extract_individual_data(
                dir, name, int(bcount), float(conf), m)

            if len(c_names) > 0 and sortby != '':
                reverse_sort = False  # avoid shadowing the builtin "reversed"
                if sortby.endswith('_r'):
                    reverse_sort = True
                    sortby = sortby[:-2]
                data = compare_stats(sortby, names, sample, ci, c_names,
                                     c_sample, c_ci)
                data.sort()
                if reverse_sort: data.reverse()
                index = [names.index(x[1]) for x in data]
                names = [names[i] for i in index]

                sample = pylab.array([sample[i] for i in index])
                ci = pylab.array([ci[i] for i in index])
                c_sample = pylab.array([c_sample[i] for i in index])
                c_ci = pylab.array([c_ci[i] for i in index])

            xvalpts = pylab.array(range(len(names)))

            if len(c_names) > 0:
                capsize = 4
                c_yerr = pylab.array(
                    [c_sample - c_ci[:, 1], c_ci[:, 0] - c_sample])
                compare = True
                barwidth = 0.4
                pylab.bar(xvalpts + 0.2,
                          c_sample,
                          align='center',
                          color='#CCCCCC',
                          width=barwidth)
                pylab.errorbar(xvalpts + 0.2,
                               c_sample,
                               yerr=c_yerr,
                               ecolor='k',
                               capsize=capsize,
                               linewidth=0,
                               elinewidth=1)
                xvalpts = xvalpts - 0.2
            else:
                capsize = 5
                compare = False
                barwidth = 0.8

            pylab.bar(xvalpts,
                      sample,
                      align='center',
                      color='#EEEEEE',
                      width=barwidth)
            yerr = pylab.array([sample - ci[:, 1], ci[:, 0] - sample])
            pylab.errorbar(xvalpts,
                           sample,
                           yerr=yerr,
                           ecolor='k',
                           capsize=capsize,
                           linewidth=0,
                           elinewidth=1)

            pylab.xticks(range(len(names)), names, rotation=xtickrotation)
            pylab.xlim(-1, len(names))
        elif x is not None and y is None:
            setting = parse_setting_name(name)
            xvalpts = pylab.array(range(len(xvals)))

            if False and len(compare) > 0:
                pass
            else:
                for measure in m:
                    v = []
                    vlow = []
                    vhigh = []
                    for xval in xvals:
                        setting2 = dict(setting)
                        setting2[x] = xval
                        name2 = make_setting_name(dir, setting2)
                        names, sample, ci = extract_individual_data(
                            dir, name2, int(bcount), float(conf), measure)
                        v.append(sample[0])
                        vlow.append(sample[0] - ci[0][1])
                        vhigh.append(ci[0][0] - sample[0])
                    pylab.plot(xvalpts, v, label=measure)
                    pylab.errorbar(xvalpts,
                                   v,
                                   yerr=[vlow, vhigh],
                                   ecolor='k',
                                   capsize=3,
                                   linewidth=0,
                                   elinewidth=1)
                pylab.legend()

            pylab.xticks(xvalpts, xvals)
            if xlabel == '':
                pylab.xlabel(x)
            else:
                pylab.xlabel(xlabel)
            if ylabel != '':
                pylab.ylabel(ylabel)
        elif x is not None and y is not None:
            setting = parse_setting_name(name)
            xvalpts = pylab.array(range(len(xvals)))
            yvalpts = pylab.array(range(len(yvals)))
            contour_n = int(contour_n)
            contour_lines = int(contour_lines)
            if len(contour_max) == 0: contour_max = None
            else: contour_max = float(contour_max)
            if len(contour_min) == 0: contour_min = None
            else: contour_min = float(contour_min)
            if contour_fmt == '': contour_fmt = '%1.3f'

            data = []
            for yval in yvals:
                setting2 = dict(setting)
                setting2[y] = yval
                row = []
                for xval in xvals:
                    setting2[x] = xval
                    name2 = make_setting_name(dir, setting2)
                    names, sample, ci = extract_individual_data(
                        dir, name2, int(bcount), float(conf), m)

                    if len(compare) > 0:
                        d = compare_combine(sortby, names, sample, ci, c_names,
                                            c_sample, c_ci)
                    else:
                        d = sample[0]

                    row.append(d)
                data.append(row)

            if contour_min is None or contour_max is None:
                pylab.contourf(xvalpts,
                               yvalpts,
                               data,
                               contour_n,
                               antialiased=True,
                               extend='both')
                cs = pylab.contour(xvalpts,
                                   yvalpts,
                                   data,
                                   contour_lines,
                                   colors='k',
                                   linewidths=1)
            else:
                clevels = pylab.array(range(contour_n)) * (
                    contour_max - contour_min) / (contour_n - 1) + contour_min
                pylab.contourf(xvalpts,
                               yvalpts,
                               data,
                               list(clevels),
                               antialiased=True,
                               extend='both')
                clevels = pylab.array(
                    range(contour_lines)) * (contour_max - contour_min) / (
                        contour_lines - 1) + contour_min
                cs = pylab.contour(xvalpts,
                                   yvalpts,
                                   data,
                                   list(clevels),
                                   colors='k',
                                   linewidths=1)
            pylab.clabel(cs, fmt=contour_fmt)

            pylab.xticks(xvalpts, xvals, rotation=xtickrotation)
            if xlabel == '':
                pylab.xlabel(x)
            else:
                pylab.xlabel(xlabel)
            pylab.yticks(yvalpts, yvals)
            if ylabel == '':
                pylab.ylabel(y)
            else:
                pylab.ylabel(ylabel)

        import io
        img = io.BytesIO()
        if type(dpi) is list: dpi = dpi[-1]
        pylab.savefig(img, dpi=int(dpi), format='png')
        return 'image/png', img.getvalue()
Example #38
    def run(self,
            seed_infections=1,
            verbose=None,
            calc_likelihood=False,
            do_plot=False,
            **kwargs):
        ''' Run the simulation '''

        T = sc.tic()

        # Reset settings and results
        if verbose is None:
            verbose = self['verbose']
        self.init_results()
        self.init_people(
            seed_infections=seed_infections)  # Actually create the people
        daily_tests = self.data[
            'new_tests']  # Number of tests each day, from the data
        evacuated = self.data['evacuated']  # Number of people evacuated

        # Main simulation loop
        for t in range(self.npts):

            # Print progress
            if verbose >= 1:
                string = f'  Running day {t:0.0f} of {self.pars["n_days"]}...'
                if verbose >= 2:
                    sc.heading(string)
                else:
                    print(string)

            self.results['t'][t] = t
            test_probs = {}  # Store the probability of each person getting tested

            # Update each person
            for person in self.people.values():

                # Count susceptibles
                if person.susceptible:
                    self.results['n_susceptible'][t] += 1

                # Handle testing probability
                if person.infectious:
                    test_probs[person.uid] = self[
                        'symptomatic']  # They're infectious: high probability of testing
                else:
                    test_probs[person.uid] = 1.0

                # If exposed, check if the person becomes infectious
                if person.exposed:
                    self.results['n_exposed'][t] += 1
                    if not person.infectious and t >= person.date_infectious:  # It's the day they become infectious
                        person.infectious = True
                        if verbose >= 2:
                            print(
                                f'      Person {person.uid} became infectious!'
                            )

                # If infectious, check if anyone gets infected
                if person.infectious:
                    # First, check for recovery
                    if person.date_recovered and t >= person.date_recovered:  # It's the day they recover
                        person.exposed = False
                        person.infectious = False
                        person.recovered = True
                        self.results['recoveries'][t] += 1
                    else:
                        self.results['n_infectious'][
                            t] += 1  # Count this person as infectious
                        n_contacts = cov_ut.pt(
                            person.contacts
                        )  # Draw the number of Poisson contacts for this person
                        contact_inds = cov_ut.choose_people(
                            max_ind=len(self.people),
                            n=n_contacts)  # Choose people at random
                        for contact_ind in contact_inds:
                            exposure = cov_ut.bt(
                                self['r_contact']
                            )  # Check for exposure per person
                            if exposure:
                                target_person = self.people[contact_ind]
                                if target_person.susceptible:  # Skip people who are not susceptible
                                    self.results['infections'][t] += 1
                                    target_person.susceptible = False
                                    target_person.exposed = True
                                    target_person.date_exposed = t
                                    incub_dist = round(
                                        pl.normal(person.pars['incub'],
                                                  person.pars['incub_std']))
                                    dur_dist = round(
                                        pl.normal(person.pars['dur'],
                                                  person.pars['dur_std']))
                                    target_person.date_infectious = t + incub_dist
                                    target_person.date_recovered = target_person.date_infectious + dur_dist
                                    if verbose >= 2:
                                        print(
                                            f'        Person {person.uid} infected person {target_person.uid}!'
                                        )

                # Count people who recovered
                if person.recovered:
                    self.results['n_recovered'][t] += 1

            # Implement testing -- this is outside of the loop over people, but inside the loop over time
            if t < len(
                    daily_tests
            ):  # Don't know how long the data is, ensure we don't go past the end
                n_tests = daily_tests.iloc[t]  # Number of tests for this day
                if n_tests and not pl.isnan(
                        n_tests):  # There are tests this day
                    self.results['tests'][
                        t] = n_tests  # Store the number of tests
                    test_probs = pl.array(list(test_probs.values()))
                    test_probs /= test_probs.sum()
                    test_inds = cov_ut.choose_people_weighted(probs=test_probs,
                                                              n=n_tests)
                    uids_to_pop = []
                    for test_ind in test_inds:
                        tested_person = self.people[test_ind]
                        if tested_person.infectious and cov_ut.bt(
                                self['sensitivity']
                        ):  # Person was tested and is true-positive
                            self.results['diagnoses'][t] += 1
                            tested_person.diagnosed = True
                            if self['evac_positives']:
                                uids_to_pop.append(tested_person.uid)
                            if verbose >= 2:
                                print(
                                    f'          Person {tested_person.uid} was diagnosed!'
                                )
                    for uid in uids_to_pop:  # Remove people from the ship once they're diagnosed
                        self.off_ship[uid] = self.people.pop(uid)

            # Implement quarantine
            if t == self['quarantine']:
                if verbose >= 1:
                    print(f'Implementing quarantine on day {t}...')
                for person in self.people.values():
                    if 'quarantine_eff' in self.pars.keys():
                        quarantine_eff = self['quarantine_eff']  # Both
                    else:
                        if person.crew:
                            quarantine_eff = self['quarantine_eff_c']  # Crew
                        else:
                            quarantine_eff = self['quarantine_eff_g']  # Guests
                    person.contacts *= quarantine_eff

            # Implement testing change
            if t == self['testing_change']:
                if verbose >= 1:
                    print(f'Implementing testing change on day {t}...')
                self['symptomatic'] *= self[
                    'testing_symptoms']  # Reduce the proportion of symptomatic testing

            # Implement evacuations
            if t < len(evacuated):
                n_evacuated = evacuated.iloc[
                    t]  # Number of evacuees for this day
                if n_evacuated and not pl.isnan(
                        n_evacuated
                ):  # There are evacuees this day # TODO -- refactor with n_tests
                    if verbose >= 1:
                        print(f'Implementing evacuation on day {t}')
                    evac_inds = cov_ut.choose_people(max_ind=len(self.people),
                                                     n=n_evacuated)
                    uids_to_pop = []
                    for evac_ind in evac_inds:
                        evac_person = self.people[evac_ind]
                        if evac_person.infectious and cov_ut.bt(
                                self['sensitivity']):
                            self.results['evac_diagnoses'][t] += 1
                        uids_to_pop.append(evac_person.uid)
                    for uid in uids_to_pop:  # Remove people from the ship once they're evacuated
                        self.off_ship[uid] = self.people.pop(uid)

        # Compute cumulative results
        self.results['cum_exposed'] = pl.cumsum(self.results['infections'])
        self.results['cum_tested'] = pl.cumsum(self.results['tests'])
        self.results['cum_diagnosed'] = pl.cumsum(self.results['diagnoses'])

        # Compute likelihood
        if calc_likelihood:
            self.likelihood()

        # Tidy up
        self.results['ready'] = True
        elapsed = sc.toc(T, output=True)
        if verbose >= 1:
            print(f'\nRun finished after {elapsed:0.1f} s.\n')
            summary = self.summary_stats()
            print(f"""Summary:
     {summary['n_susceptible']:5.0f} susceptible
     {summary['n_exposed']:5.0f} exposed
     {summary['n_infectious']:5.0f} infectious
               """)

        if do_plot:
            self.plot(**kwargs)

        return self.results
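The cov_ut helpers used throughout run() are not shown; plausible minimal versions (assumptions -- the real utility module may differ) are:

import numpy as np


def pt(rate):
    # "Poisson trial": draw a contact count with the given mean (assumed behavior)
    return np.random.poisson(rate)


def bt(prob):
    # Bernoulli trial: True with probability prob (assumed behavior)
    return np.random.random() < prob


def choose_people(max_ind, n):
    # n distinct indices chosen uniformly at random (assumed behavior)
    return np.random.permutation(max_ind)[:int(n)]


def choose_people_weighted(probs, n):
    # n distinct indices drawn without replacement with the given probabilities (assumed behavior)
    return np.random.choice(len(probs), size=int(n), p=probs, replace=False)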
Example #39
0
import matplotlib.pylab as plt  # assumed: the pylab namespace (numpy + pyplot); irr/npv require numpy < 1.20

plt.plot(cost_of_capital_rate,
         proj_B_npv,
         '#f4ab84',
         label='Project B',
         linewidth=2.0)

plt.ylim(0, 2000)
plt.yticks(plt.arange(0, 2000 + 1, 400))
plt.xticks(plt.arange(0, 0.24 + 0.01, 0.02))

#plt.legend()
plt.legend(loc='upper right')

plt.title('Project A/B NPV Analysis')

a = plt.array(proj_A)
#array([-1500,   450,   450,   450,   450,   450,   450])

b = plt.array(proj_B)
#array([-1200,     0,     0,     0,     0,     0,  3000])
intersection = a - b
#array([ -300,   450,   450,   450,   450,   450, -2550])
intersection_irr = plt.irr(intersection)
#0.11005666742719433
intersection_npv = plt.npv(intersection_irr, a)

plt.annotate("intersection = %.4f" % intersection_irr,
             xy=(intersection_irr, intersection_npv),
             xytext=(intersection_irr + 0.02, intersection_npv + 100),
             arrowprops=dict(facecolor='black', shrink=0.05))
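plt.irr and plt.npv above come through the pylab namespace from old numpy; the financial functions were removed in numpy 1.20, so on current installs the equivalent (a sketch, assuming the numpy-financial package) is:

import numpy_financial as npf  # pip install numpy-financial

intersection = [-300, 450, 450, 450, 450, 450, -2550]
irr = npf.irr(intersection)  # ~0.1101, matching the value noted above
npv = npf.npv(irr, [-1500, 450, 450, 450, 450, 450, 450])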
Example #40
0
from xml.etree import ElementTree as etree
from xml.etree.ElementTree import Element, SubElement, ElementTree
from xml.dom import minidom

# imports assumed by the fragment below
import tkinter as tk
from tkinter import Frame
import pylab
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

MaxYaxisValue = 450

windows = tk.Tk()
windows.minsize(500, 300)
IOTFrame = Frame(windows)
IOTFrame.pack()

IPEntry = tk.Entry(IOTFrame)
IPEntry.insert(0, '192.168.22.57')

xAchse = pylab.arange(0, 100, 1)
yAchse = pylab.array([0] * 100)

fig, ax = plt.subplots()
ax.grid(True)
ax.set_title("ADRC")
ax.set_xlabel("Time")
ax.set_ylabel("Temperature/PWM")
ax.axis([0, 1000, 0, MaxYaxisValue])

line1 = ax.plot(xAchse, yAchse, "-r", label="Temperature")
#line2=ax.plot(xAchse,yAchse, "-b", label="Watt")
line3 = ax.plot(xAchse, yAchse, "-y", label="PWM")
ax.legend(loc='upper left')

canvas = FigureCanvasTkAgg(fig, master=windows)
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
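The fragment above only draws a static canvas; a plausible continuation (an assumption, not part of the original) appends new readings and redraws on a Tk timer:

values = [0.0] * 100


def update_plot():
    values.append(0.0)  # placeholder reading; real code would poll the device at IPEntry's address
    del values[0]
    line1[0].set_ydata(values)  # ax.plot returns a list of Line2D objects
    canvas.draw()
    windows.after(100, update_plot)  # redraw every 100 ms


windows.after(100, update_plot)
windows.mainloop()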
Example #41
0
import pylab

GM = 4 * pylab.pi**2  # au^3/yr^2 (assumed; the standard value for au/yr units)


def dXdt(t, X):
    # reconstructed head: the fragment began mid-function.
    # X = [x, vx, y, vy], matching the state vector built below.
    x, vx, y, vy = X
    r = pylab.sqrt(x**2 + y**2)
    ax = -GM * x / r**3
    ay = -GM * y / r**3
    return pylab.array([vx, ax, vy, ay])


x0 = float(input('initial x position (au) -> '))
y0 = float(input('initial y position (au) -> '))
vx0 = float(input('initial x velocity (au/yr) -> '))
vy0 = float(input('initial y velocity (au/yr) -> '))
dt = float(input('time step (yr) -> '))
tmax = float(input('time to end of simulation (yr) -> '))
nsteps = int(tmax / dt)
x = [0.0] * nsteps
y = [0.0] * nsteps

# integrate Newton's equations of motion using rk4;
# X is a vector that contains the positions and velocities being integrated
X = pylab.array([x0, vx0, y0, vy0])
for i in range(nsteps):
    x[i] = X[0]
    y[i] = X[2]
    # update the vector X to the next time step
    X = rk4(i * dt, X, dXdt, dt)
pylab.figure(figsize=(6, 6))
pylab.plot(x, y, 'o-')
pylab.xlabel('x (au)')
pylab.ylabel('y (au)')
minmax = 1.1 * max(abs(min(x + y)), abs(max(x + y)))
pylab.axis([-minmax, minmax, -minmax, minmax], aspect='equal')
pylab.grid()
pylab.show()
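The rk4 integrator is not shown; a standard fourth-order Runge-Kutta step matching the call rk4(i * dt, X, dXdt, dt) used above is:

def rk4(t, X, f, dt):
    # classic RK4: four slope estimates, weighted 1-2-2-1
    k1 = f(t, X)
    k2 = f(t + dt / 2, X + dt / 2 * k1)
    k3 = f(t + dt / 2, X + dt / 2 * k2)
    k4 = f(t + dt, X + dt * k3)
    return X + dt / 6 * (k1 + 2 * k2 + 2 * k3 + k4)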
Example #42
0
def calcAverage(fld):
    # equal weights over the last axis; note the weights are not normalized,
    # so this returns a weighted *sum* of the four slices, not their mean
    wt = pylab.array([1.0, 1.0, 1.0, 1.0])
    return (fld[:, :, 0:4] * wt).sum(axis=-1)
Example #43
0
#!/usr/bin/python

from __future__ import division
import matplotlib, sys

if len(sys.argv) < 2 or sys.argv[1] != "show":
  matplotlib.use('Agg')
import pylab, numpy, random
from pylab import *
#import Scientific.Functions.LeastSquares as ls

from scipy.optimize import leastsq

ff = pylab.array([.05, .1, .15, .2, .25, .3, .35, .4, .45, .5])
slope = pylab.zeros(len(ff))
hsigma = pylab.zeros(len(ff))

def read_ghs(base, ff):
    mcdatafilename = "%s-0.%02d.dat" % (base, 100*ff)
    mcdata = numpy.loadtxt(mcdatafilename)
    print('Using', mcdatafilename, 'for filling fraction', ff)
    r_mc = mcdata[:, 0]
    n_mc = mcdata[:, 1]
    ghs = n_mc/ff
    return r_mc, ghs

for i in range(len(ff)):
  print('working with', i)
  r, ghs = read_ghs('figs/gr', ff[i])
  hsigma[i] = (1-ff[i]/2)/(1-ff[i])**3 - 1
  skipnum = 3
Example #44
0
    def transition_to_buffered(self, device_name, h5file, initial_values,
                               fresh):
        # Store the initial values in case we have to abort and restore them:
        # TODO: Coerce/quantise these correctly before returning them
        self.initial_values = initial_values

        with h5py.File(h5file, 'r') as hdf5_file:
            group = hdf5_file['devices/'][device_name]
            device_properties = labscript_utils.properties.get(
                hdf5_file, device_name, 'device_properties')
            connection_table_properties = labscript_utils.properties.get(
                hdf5_file, device_name, 'connection_table_properties')
            clock_terminal = connection_table_properties['clock_terminal']
            h5_data = group.get('ANALOG_OUTS')
            if h5_data:
                self.buffered_using_analog = True
                ao_channels = device_properties['analog_out_channels']
                # We use all but the last sample (which is identical to the
                # second last sample) in order to ensure there is one more
                # clock tick than there are samples. The 6713 requires this
                # to determine that the task has completed.
                ao_data = pylab.array(h5_data, dtype=numpy.float64)[:-1, :]
            else:
                self.buffered_using_analog = False

            h5_data = group.get('DIGITAL_OUTS')
            if h5_data:
                self.buffered_using_digital = True
                do_channels = device_properties['digital_lines']
                do_bitfield = numpy.array(h5_data, dtype=numpy.uint32)
            else:
                self.buffered_using_digital = False

            final_values = {}
            # We must do digital first, to make sure the manual mode task is
            # stopped (or reprogrammed) by the time we set up the AO task;
            # this is because the clock_terminal PFI must be freed!
            if self.buffered_using_digital:
                # Expand each bitfield int into self.num_DO
                # (8) individual ones and zeros:
                do_write_data = numpy.zeros(
                    (do_bitfield.shape[0], self.num_DO), dtype=numpy.uint8)
                for i in range(self.num_DO):
                    do_write_data[:, i] = (do_bitfield & (1 << i)) >> i

                self.do_task.StopTask()
                self.do_task.ClearTask()
                self.do_task = Task()
                self.do_read = int32()

                self.do_task.CreateDOChan(do_channels, "",
                                          DAQmx_Val_ChanPerLine)
                self.do_task.CfgSampClkTiming(clock_terminal, 1000000,
                                              DAQmx_Val_Rising,
                                              DAQmx_Val_FiniteSamps,
                                              do_bitfield.shape[0])
                self.do_task.WriteDigitalLines(do_bitfield.shape[0], False,
                                               10.0,
                                               DAQmx_Val_GroupByScanNumber,
                                               do_write_data, self.do_read,
                                               None)
                self.do_task.StartTask()

                for i in range(self.num_DO):
                    final_values['port0/line%d' % i] = do_write_data[-1, i]
            else:
                # We still have to stop the task to make the
                # clock flag available for buffered analog output, or the wait monitor:
                self.do_task.StopTask()
                self.do_task.ClearTask()

            if self.buffered_using_analog:
                self.ao_task.StopTask()
                self.ao_task.ClearTask()
                self.ao_task = Task()
                ao_read = int32()

                self.ao_task.CreateAOVoltageChan(ao_channels, "", -10.0, 10.0,
                                                 DAQmx_Val_Volts, None)
                self.ao_task.CfgSampClkTiming(clock_terminal, 1000000,
                                              DAQmx_Val_Rising,
                                              DAQmx_Val_FiniteSamps,
                                              ao_data.shape[0])
                self.ao_task.WriteAnalogF64(ao_data.shape[0], False, 10.0,
                                            DAQmx_Val_GroupByScanNumber,
                                            ao_data, ao_read, None)
                self.ao_task.StartTask()

                # Final values here are a dictionary of values, keyed by channel:
                channel_list = [
                    channel.split('/')[1]
                    for channel in ao_channels.split(', ')
                ]
                final_values = {
                    channel: value
                    for channel, value in zip(channel_list, ao_data[-1, :])
                }

            else:
                # we should probably still stop the task (this makes it easier to set up the task later)
                self.ao_task.StopTask()
                self.ao_task.ClearTask()

        return final_values
Example #45
0
import pylab as pl


def Rx(a):
    s, c = pl.sin(a), pl.cos(a)
    return pl.array([[1, 0, 0, 0], [0, c, -s, 0], [0, s, c, 0], [0, 0, 0, 1]])
Example #46
0
        for t in range(20):
            sample = flipCoin(sampleSize)
            sampleMeans.append(getMeanAndStd(sample)[0])
        new_mean = 0
        for i in sampleMeans:
            new_mean += i
        new_mean = new_mean / 20
        new_std = 0
        for i in sampleMeans:
            new_std += (i - new_mean)**2 / 20
        new_std = new_std**0.5
        meanOfMeans.append(new_mean)
        stdOfMeans.append(new_std)
        ## WHAT TO DO WITH THE SAMPLE MEANS?

clt()
pylab.figure(1)
pylab.errorbar(sampleSizes, meanOfMeans,
               yerr = 1.96*pylab.array(stdOfMeans),
               label = 'Est. mean and 95% confidence interval')
pylab.xlim(0, max(sampleSizes) + 50)
pylab.axhline(0.65, linestyle = '--',
              label = 'True probability of Heads')
pylab.title('Estimates of Probability of Heads')
pylab.xlabel('Sample Size')
pylab.ylabel('Fraction of Heads')
pylab.legend(loc = 'best')
pylab.show()
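flipCoin and getMeanAndStd come from the surrounding course scaffold; minimal versions consistent with how they are called above (assumptions, not the original helpers) are:

import random


def flipCoin(numFlips):
    # 1 = heads with probability 0.65 (the true value marked on the plot), 0 = tails
    return [1 if random.random() < 0.65 else 0 for _ in range(numFlips)]


def getMeanAndStd(X):
    mean = sum(X) / float(len(X))
    std = (sum((x - mean)**2 for x in X) / len(X))**0.5
    return mean, std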


Example #47
0
import pylab as pl


def Ry(a):
    s, c = pl.sin(a), pl.cos(a)
    return pl.array([[c, 0, s, 0], [0, 1, 0, 0], [-s, 0, c, 0], [0, 0, 0, 1]])
Example #48
0
    myStim.setOri(1, '+')
    myStim.draw()
    if event.getKeys():
        print('stopped early')
        break
    win.logOnFlip(msg='frame=%i' % frameN, level=logging.EXP)
    fliptimes[frameN] = win.flip()

if disable_gc:
    gc.enable()
core.rush(False)

win.close()

# calculate some values
intervalsMS = pylab.array(win.frameIntervals) * 1000
m = pylab.mean(intervalsMS)
sd = pylab.std(intervalsMS)
# se=sd/pylab.sqrt(len(intervalsMS)) # for CI of the mean

nTotal = len(intervalsMS)
nDropped = sum(intervalsMS > (1.5 * m))
ifis = (fliptimes[1:] - fliptimes[:-1]) * 1000

# plot the frameintervals
pylab.figure(figsize=[12, 8])

pylab.subplot2grid((2, 2), (0, 0), colspan=2)
pylab.plot(intervalsMS, '-')
pylab.ylabel('t (ms)')
pylab.xlabel('frame N')
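The figure above reserves a 2x2 grid with the interval trace across the top row; a plausible continuation (an assumption, echoing PsychoPy's timeByFrames demo) adds a histogram panel and a dropped-frame summary:

pylab.subplot2grid((2, 2), (1, 0))
pylab.hist(intervalsMS, 50, histtype='stepfilled')
pylab.xlabel('t (ms)')
pylab.ylabel('n frames')
pylab.title('Mean=%.1fms, s.d.=%.2f, %i dropped (>1.5x mean)' % (m, sd, nDropped))
pylab.show()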
Example #49
0
def main():
    #set up file names
    DIR = sys.argv[1]
    velo_file = None
    for fn in os.listdir(DIR):
        if fn.startswith("VELODYNE_log"):
            velo_file = fn
            break
    data_file = os.path.join(DIR, velo_file)
    #cali_file="./calibration.xml"
    cali_file = sys.argv[2]
    state_file = os.path.join(DIR, "highrate_state.mat")
    agg_raw_file = os.path.join(DIR, "VELODYNE_agg_raw_road_use_midstate.dat")

    for fn in (data_file, cali_file, state_file):
        if not os.path.exists(fn):
            print fn, " doesn't exist"
            sys.exit(-1)


######################################################################################
    lidar = Lidar_Reader(data_file, cali_file)

    state_reader = StateReader(state_file)
    fw = open(agg_raw_file, 'w')

    #find start point, state usually starts later than lidar
    startTime = state_reader.getStartTime()
    for p, tm in lidar.getPacket():
        if tm > startTime: break
    last_tm = tm
    print 'lidar starts from ', tm, 'state starts from ', startTime

    ##############################################################################################
    t = time.time()
    scanInFrame = []
    cnt = 0  #count number of frame
    lastRotDeg = -1
    for p, tm in lidar.getPacket():
        if tm < LIDAR_START: continue
        scans = p.getScans(last_tm, tm)

        last_tm = tm
        for scan in scans:
            #rotate 360 degree, aggregate to one frame and dump in file
            if lastRotDeg >= scan.rotPosDeg:
                cnt += 1
                print 'frame %d, contains %d scans from %.2f to %.2f' % (
                    cnt, len(scanInFrame), scanInFrame[0].timestamp,
                    scanInFrame[-1].timestamp)
                print 'angular resolution is %.2f' % (360.0 / len(scanInFrame))
                print lastRotDeg, scan.rotPosDeg
                #plotScanXYZ(scanInFrame)

                #make a lidar_frame
                frame = LidarFrame(
                    scanInFrame,
                    pl.array([
                        state_reader.findState(scanElem)
                        for scanElem in scanInFrame
                    ]))
                #frame.plotXYZ();

                fw.write(frame.toLogStr())
                fw.flush()
                #virtualTable.fillTable(frame.dataPoints)

                scanInFrame = []

            lastRotDeg = scan.rotPosDeg
            #calibrate scan and put into frame
            lidar.corrMeasurement(scan)
            scanInFrame.append(scan)

        if tm > LIDAR_END: break

    diff = time.time() - t
    print 'lidar frame rate %.2f' % (cnt / (tm - startTime))
    print 'process %d frames' % (cnt)
    print 'costs %.2f seconds' % (diff)
    print 'FPS: %.1f' % (cnt / diff)

    fw.close()
Example #50
0
import pylab as pl


def Rz(a):
    s, c = pl.sin(a), pl.cos(a)
    return pl.array([[c, -s, 0, 0], [s, c, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
Example #51
0
    def evaluate(self, x, derivative=0, smooth=0, simple='auto'):
        """
        smooth=0      is how much to smooth the spline data
        simple='auto' is whether we should just use straight interpolation
                      you may want smooth > 0 for this, when derivative=1
        """
        if simple == 'auto': simple = self.simple

        # make it into an array if it isn't one, and remember that we did
        is_array = True
        if not isinstance(x, _pylab.ndarray):
            x = _pylab.array([x])
            is_array = False

        if simple:
            # loop over all supplied x data, and come up with a y for each
            y = []
            for n in range(0, len(x)):
                # get a window of data around x
                if smooth:
                    [xtemp, ytemp,
                     etemp] = _fun.trim_data(self.xdata, self.ydata, None,
                                             [x[n] - smooth, x[n] + smooth])
                else:
                    i1 = _fun.index_nearest(x[n], self.xdata)

                    # if the nearest data point is lower than x, use the next point to interpolate
                    if self.xdata[i1] <= x[n] or i1 <= 0: i2 = i1 + 1
                    else: i2 = i1 - 1

                    # if we're at the max, extrapolate
                    if i2 >= len(self.xdata):
                        print(x[n], "is out of range. extrapolating")
                        i2 = i1 - 1

                    x1 = self.xdata[i1]
                    y1 = self.ydata[i1]
                    x2 = self.xdata[i2]
                    y2 = self.ydata[i2]
                    slope = (y2 - y1) / (x2 - x1)

                    xtemp = _numpy.array([x[n]])
                    ytemp = _numpy.array([y1 + (x[n] - x1) * slope])

                # calculate the slope based on xtemp and ytemp (if smoothing)
                # or just use the raw slope if smoothing=0
                if derivative == 1:
                    if smooth:
                        y.append(
                            (_numpy.average(xtemp * ytemp) -
                             _numpy.average(xtemp) * _numpy.average(ytemp)) /
                            (_numpy.average(xtemp * xtemp) -
                             _numpy.average(xtemp)**2))
                    else:
                        y.append(slope)

                # otherwise just average (even with one element)
                elif derivative == 0:
                    y.append(_numpy.average(ytemp))

            if is_array: return _numpy.array(y)
            else: return y[0]

        if smooth:

            y = []
            for n in range(0, len(x)):
                # take 20 data points from x+/-smooth
                xlow = max(self.xmin, x[n] - smooth)
                xhi = min(self.xmax, x[n] + smooth)
                xdata = _pylab.linspace(xlow, xhi, 20)
                ydata = _interpolate.splev(xdata, self.pfit, derivative)
                y.append(_numpy.average(ydata))

            if is_array: return _numpy.array(y)
            else: return y[0]

        else:
            return _interpolate.splev(x, self.pfit, derivative)
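A hedged usage sketch (assuming s is an instance of the enclosing class, with xdata, ydata, and the spline fit pfit populated):

y_at = s.evaluate(1.5)  # plain interpolation at one point
dy = s.evaluate(1.5, derivative=1, smooth=0.2)  # slope from a local linear fit
ys = s.evaluate(_pylab.linspace(0.0, 3.0, 50))  # array in, array out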
Example #52
0
    # evaluate_models_on_training(nationalYearsArray, avgNYCTemps, models,
    #                             'NYC', 'Average Yearly Temps', False)

    # tempsYears = list(TESTING_INTERVAL)
    # nationalYearsArray = np.array(tempsYears)
    # avgNYCtempsTesting = gen_cities_avg(climatedb, ['NEW YORK'],
    #                                     TESTING_INTERVAL)

    # evaluate_models_on_testing(nationalYearsArray, avgNYCtempsTesting,
    #                            models, 'NYC', 'Average Yearly Temps',
    #                            False)
    # TODO: replace this line with your code

    # Part E
    # TODO: replace this line with your code
    climate = Climate('data.csv')
    years = pylab.array(TESTING_INTERVAL)
    tempsSD = gen_std_devs(climate, CITIES, years)

    window_length = 5
    movingAverageTempTesting = moving_average(tempsSD,
                                              window_length)

    modelsDegrees = [1, 2, 3]
    models = generate_models(years, movingAverageTempTesting,
                             modelsDegrees)

    evaluate_models_on_training(years, movingAverageTempTesting, models,
                                'National', 'Temp Std Dev', True)
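gen_std_devs and moving_average belong to the course scaffold; a moving_average consistent with how it is called above (an assumption) is:

import pylab


def moving_average(y, window_length):
    # average each point with the window_length - 1 points preceding it (assumed semantics)
    result = []
    for i in range(len(y)):
        window = y[max(0, i - window_length + 1):i + 1]
        result.append(sum(window) / float(len(window)))
    return pylab.array(result)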
Example #53
0
def plot_topo_file(topoplotdata):
    """
    Read in a topo or bathy file and produce a pcolor map.
    """

    import os
    import pylab
    import numpy.ma as ma
    from matplotlib.colors import Normalize
    from clawpack.clawutil.data import ClawData
    # colormaps and topotools are assumed to be imported at module level

    fname = topoplotdata.fname
    topotype = topoplotdata.topotype
    if topoplotdata.climits:
        # deprecated option
        cmin = topoplotdata.climits[0]
        cmax = topoplotdata.climits[1]
    else:
        cmin = topoplotdata.cmin
        cmax = topoplotdata.cmax
    figno = topoplotdata.figno
    addcolorbar = topoplotdata.addcolorbar
    addcontour = topoplotdata.addcontour
    contour_levels = topoplotdata.contour_levels
    xlimits = topoplotdata.xlimits
    ylimits = topoplotdata.ylimits
    coarsen = topoplotdata.coarsen
    imshow = topoplotdata.imshow
    gridedges_show = topoplotdata.gridedges_show
    neg_cmap = topoplotdata.neg_cmap
    pos_cmap = topoplotdata.pos_cmap
    cmap = topoplotdata.cmap
    print_fname = topoplotdata.print_fname

    if neg_cmap is None:
        neg_cmap = colormaps.make_colormap({
            cmin: [0.3, 0.2, 0.1],
            0: [0.95, 0.9, 0.7]
        })
    if pos_cmap is None:
        pos_cmap = colormaps.make_colormap({
            0: [.5, .7, 0],
            cmax: [.2, .5, .2]
        })
    if cmap is None:
        cmap = colormaps.make_colormap({
            -1: [0.3, 0.2, 0.1],
            -0.00001: [0.95, 0.9, 0.7],
            0.00001: [.5, .7, 0],
            1: [.2, .5, .2]
        })
        #cmap = colormaps.make_colormap({-1:[0,0,1],0:[1,1,1],1:[1,0,0]})

    if abs(topotype) == 1:

        X, Y, topo = topotools.topofile2griddata(fname, topotype)
        topo = pylab.flipud(topo)
        Y = pylab.flipud(Y)
        x = X[0, :]
        y = Y[:, 0]
        xllcorner = x[0]
        yllcorner = y[0]
        cellsize = x[1] - x[0]

    elif abs(topotype) == 3:

        file = open(fname, 'r')
        lines = file.readlines()
        ncols = int(lines[0].split()[0])
        nrows = int(lines[1].split()[0])
        xllcorner = float(lines[2].split()[0])
        yllcorner = float(lines[3].split()[0])
        cellsize = float(lines[4].split()[0])
        NODATA_value = int(lines[5].split()[0])

        print "Loading file ", fname
        print "   nrows = %i, ncols = %i" % (nrows, ncols)
        topo = pylab.loadtxt(fname, skiprows=6, dtype=float)
        print "   Done loading"

        if 0:
            topo = []
            for i in range(nrows):
                topo.append(pylab.array(lines[6 + i], ))
            print '+++ topo = ', topo
            topo = pylab.array(topo)

        topo = pylab.flipud(topo)

        x = pylab.linspace(xllcorner, xllcorner + ncols * cellsize, ncols)
        y = pylab.linspace(yllcorner, yllcorner + nrows * cellsize, nrows)
        print "Shape of x, y, topo: ", x.shape, y.shape, topo.shape

    else:
        raise Exception("*** Only topotypes 1 and 3 supported so far")

    if coarsen > 1:
        topo = topo[slice(0, nrows, coarsen), slice(0, ncols, coarsen)]
        x = x[slice(0, ncols, coarsen)]
        y = y[slice(0, nrows, coarsen)]
        print "Shapes after coarsening: ", x.shape, y.shape, topo.shape

    if topotype < 0:
        topo = -topo

    if figno:
        pylab.figure(figno)

    if topoplotdata.imshow:
        color_norm = Normalize(cmin, cmax, clip=True)
        xylimits = (x[0], x[-1], y[0], y[-1])
        #pylab.imshow(pylab.flipud(topo.T), extent=xylimits, \
        pylab.imshow(pylab.flipud(topo), extent=xylimits, \
                cmap=cmap, interpolation='nearest', \
                norm=color_norm)
        #pylab.clim([cmin,cmax])
        if addcolorbar:
            pylab.colorbar()
    else:
        neg_topo = ma.masked_where(topo > 0., topo)
        all_masked = (ma.count(neg_topo) == 0)
        if not all_masked:
            pylab.pcolormesh(x, y, neg_topo, cmap=neg_cmap)
            pylab.clim([cmin, 0])
            if addcolorbar:
                pylab.colorbar()

        pos_topo = ma.masked_where(topo < 0., topo)
        all_masked = (ma.count(pos_topo) == 0)
        if not all_masked:
            pylab.pcolormesh(x, y, pos_topo, cmap=pos_cmap)
            pylab.clim([0, cmax])
    if addcolorbar:
        pylab.colorbar()

    pylab.axis('scaled')

    if addcontour:
        pylab.contour(x, y, topo, levels=contour_levels, colors='k')

    patchedges_show = True  # hard-coded; note that gridedges_show read above is unused
    if patchedges_show:
        pylab.plot([x[0], x[-1]], [y[0], y[0]], 'k')
        pylab.plot([x[0], x[-1]], [y[-1], y[-1]], 'k')
        pylab.plot([x[0], x[0]], [y[0], y[-1]], 'k')
        pylab.plot([x[-1], x[-1]], [y[0], y[-1]], 'k')

    if print_fname:
        fname2 = os.path.splitext(fname)[0]
        pylab.text(xllcorner + cellsize,
                   yllcorner + cellsize,
                   fname2,
                   color='m')

    topodata = ClawData()
    topodata.x = x
    topodata.y = y
    topodata.topo = topo

    return topodata
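The two-colormap branch above relies on masked arrays so pcolormesh skips land cells when drawing water and vice versa; a tiny standalone demonstration:

import numpy as np
import numpy.ma as ma

topo = np.array([[-5.0, -1.0], [2.0, 7.0]])
neg_topo = ma.masked_where(topo > 0.0, topo)  # only below-sea-level cells unmasked
pos_topo = ma.masked_where(topo < 0.0, topo)  # only land cells unmasked
print(ma.count(neg_topo), ma.count(pos_topo))  # 2 2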
Example #54
0
# imports and CGI form assumed by this fragment; func (the ODE right-hand side)
# is defined elsewhere in the original script
import cgi
import scipy.integrate
import pylab
from numpy import arange, array

form = cgi.FieldStorage()
k = float(form.getvalue('stiff'))  # stiffness
m = float(form.getvalue('mass'))  # mass
c = float(form.getvalue('dc'))  # damping coefficient
#k = 19000#float(input("Enter Stiffness:"))
#m = 500#float(input("Enter Mass:"))
#if ch == 2:
   # c =900# float(input("Enter Damp'g Coeff.:"))
x0 = 0.0  # float(input("Enter Initial Displacement:"))
v0 = 20.0  # float(input("Enter Initial Velocity:"))
state0 = [x0, v0]  # initial conditions [x0 , v0]  [m, m/sec]
ti = 0.0  # initial time
tf = 4.0  # final time
step = 0.0001  # step
t = arange(ti, tf, step)
state = scipy.integrate.odeint(func, state0, t)
x = array(state[:, [0]])
xd = array(state[:, [1]])
# Plotting displacement and velocity
pylab.ion()
pylab.rcParams['figure.figsize'] = (20, 12)
pylab.rcParams['font.size'] = 18
#b=0.0

fig, ax1 = pylab.subplots()
ax2 = ax1.twinx()
ax2.plot(t, xd, 'g--', label=r'$\dot{x} (m/sec)$', linewidth=2.0)
ax1.plot(t, x * 1e3, 'b', label=r'$x (mm)$', linewidth=2.0)
ax2.legend(loc='lower right')
ax1.legend()
ax1.set_xlabel('time , sec')
Example #55
0
def test_runtimes(num_iters=10, min_points=MIN_POINTS, max_points=MAX_POINTS):
    for i in xrange(num_iters):
        print "iteration", i
        for num_points in xrange(min_points, max_points + 1):
            print "num_points", num_points
            G = random_point_graph(num_points=num_points)

            sat_tree = satellite_tree(G)

            sort_neighbors(G)

            mcosts = defaultdict(list)
            scosts = defaultdict(list)
            costs = defaultdict(list)
            times = defaultdict(float)

            delta = 0.01
            alphas = pylab.arange(delta, 1, delta)
            algorithms = [
                pareto_steiner_space, pareto_steiner_space2,
                pareto_steiner_fast, pareto_steiner_old
            ]
            names = [
                'space efficient', 'medium space efficient', 'fast',
                'unoptimized'
            ]
            algorithms += [pareto_prim, pareto_khuller]
            names += ['prim', 'khuller']
            for alpha in alphas:
                print "alpha", alpha
                for algorithm, name in zip(algorithms, names):
                    mcost, scost, runtime = time_function(G, alpha, algorithm)
                    cost = pareto_cost(mcost=mcost, scost=scost, alpha=alpha)
                    mcosts[name].append(mcost)
                    scosts[name].append(scost)
                    costs[name].append(cost)
                    times[name] += runtime

            if num_points <= 50 and GENETIC:
                algorithms.append(pareto_genetic)
                names.append('genetic')
                genetic_start = time()
                genetic_trees = pareto_genetic(G)
                genetic_end = time()

                genetic_runtime = (genetic_end - genetic_start) / 60.0
                times['genetic'] = genetic_runtime

                for mcost, scost, T in genetic_trees:
                    mcosts['genetic'].append(mcost)
                    scosts['genetic'].append(scost)

            pylab.figure()
            for name in names:
                mcost = mcosts[name]
                scost = scosts[name]
                pylab.scatter(mcost, scost, label=name)
                pylab.plot(mcost, scost)

            pylab.legend()
            pylab.xlabel('spanning tree cost')
            pylab.ylabel('satellite cost')

            pylab.savefig('test_runtimes/pareto_front%d.pdf' % num_points)

            pylab.close()

            header_line = None
            if not os.path.exists('test_runtimes.csv'):
                header_line = [
                    'algorithm', 'points', 'runtime', 'comparisons',
                    'dominated', 'cost ratio'
                ]
                header_line = ', '.join(header_line)

            mcosts1, scosts1, costs1 = mcosts[baseline], scosts[
                baseline], pylab.array(costs[baseline])
            with open('test_runtimes.csv', 'a') as outfile:
                if header_line is not None:
                    outfile.write('%s\n' % header_line)
                for name in names:
                    write_items = [name, num_points, times[name]]
                    if name in mcosts:
                        assert name in scosts
                        mcosts2, scosts2 = mcosts[name], scosts[name]
                        comparisons, dominated = prop_dominated(mcosts1, scosts1,\
                                                                mcosts2, scosts2)
                        write_items += [comparisons, dominated]
                    else:
                        write_items += ['', '']

                    if name in costs:
                        costs2 = pylab.array(costs[name])
                        cost_ratio = pylab.mean(costs2 / costs1)
                        write_items.append(cost_ratio)
                    else:
                        write_items.append('')

                    write_items = map(str, write_items)
                    write_items = ', '.join(write_items)
                    outfile.write('%s\n' % write_items)
Example #56
0
    #print line
    charges.append(float(line[13:22]))
    ionicradii.append(float(line[31:40]))
    for line in bfiter:
        if not line.strip():
            break
        if not line.strip()[0].isdigit():
            break
        charges.append(float(line[13:22]))
        ionicradii.append(float(line[31:40]))

#print charges
print charges, numato
try:
    charge = [charges[i] for i in numato]
except IndexError:
    # fall back to 1-based atom numbering
    charge = [charges[i - 1] for i in numato]

data = pl.array([numato, Z, x, y, z, dist, charge])
#for dat in data.T:
#    print dat
pl.hist(data[5], bins = 200)
pl.xlabel("distance / $\\AA$")
#pl.ylabel("atom count")

pl.plot(data[5], data[6], "xk", label="charge")
pl.plot(data[5], data[6].cumsum(), ".-r", label="accumulated charge")
pl.plot(data[5], pl.ones(len(data[0])).cumsum(), "g", label="no. atoms")
pl.legend(loc=0)
pl.show()
Example #57
0
    return class_quant, h


#it can easily be done like this
#sturges_law = np.histogram(ss_ab, bins='sturges', density=True)

cl_ss_ab = create_class(ss_ab, ab_amount)
cl_ss_no = create_class(ss_no, no_amount)
cl_gs_ab = create_class(gs_ab, ab_amount)
cl_gs_no = create_class(gs_no, no_amount)
cl_age_ab = create_class(age_ab, ab_amount)
cl_age_no = create_class(age_no, no_amount)

#classes amplitude
cl_ss_ab_h = pl.array(cl_ss_ab[1])
cl_ss_no_h = pl.array(cl_ss_no[1])
cl_gs_ab_h = pl.array(cl_gs_ab[1])
cl_gs_no_h = pl.array(cl_gs_no[1])
cl_age_ab_h = pl.array(cl_age_ab[1])
cl_age_no_h = pl.array(cl_age_no[1])

#classes quantity

#creating an histogram model
st_ss_ab = np.histogram(ss_ab,
                        bins=pl.arange(min(ss_ab),
                                       max(ss_ab) + cl_ss_ab_h, cl_ss_ab_h),
                        density=True)
st_ss_no = np.histogram(ss_no,
                        bins=pl.arange(min(ss_no),
                                       max(ss_no) + cl_ss_no_h, cl_ss_no_h),
                        density=True)
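create_class is not shown; given the sturges_law comment above, a plausible reconstruction (an assumption) computes the class count with Sturges' rule and the class amplitude from the data range:

import math


def create_class(data, n):
    # Sturges' rule: k = 1 + log2(n) classes; amplitude h = range / k (assumed)
    class_quant = math.ceil(1 + math.log2(n))
    h = (max(data) - min(data)) / class_quant
    return class_quant, h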
Example #58
0
import pylab as pl
import matplotlib as m

frs = pl.array([2**(x / 12) for x in range(0, 88, 1)])  # 88 piano-key frequency ratios relative to A0 = 27.5 Hz


def _12TET(fs):
    return 12 * pl.log2(fs / 27.5)


pl.figure(figsize=(8, 3))
pl.plot(frs * 27.5, _12TET(frs * 27.5), 'k.')
pl.vlines(3520, 0, 90)
pl.vlines(1760, 0, 90)
pl.vlines(880, 0, 90)
pl.vlines(440, 0, 90)
pl.vlines(220, 0, 90)
pl.vlines(110, 0, 90)
pl.vlines(55, 0, 90)
#pl.xlim(20,3500)
pl.yticks([x * 12 for x in range(1, 8)])
pl.xticks([220 * 2**(x) for x in range(0, 5)])
pl.xlabel('frequency (Hz,A4=440)')
pl.ylabel('12TET pitch')
pl.tight_layout()
pl.show()
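A quick sanity check of the mapping: A4 sits 48 semitones above A0, and the formula inverts cleanly:

print(_12TET(440.0))  # 48.0
print(27.5 * 2**(48 / 12))  # 440.0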
Example #59
0
def get_processed_subtraces_for_cell(
        cell_name,
        windows=[1000.0, 1000.0, 1000.0],
        gaps=[0.0, 200.0, 200.0],
        padding=0.0,
        crit_freq=100.0,
        filt_kind='low',
        replace_spikes=True,
        remove_sine_waves=True,
        master_folder_path='E:\\correlated_variability'):
    '''
    For a given cell (indicated by cell_name), get an array containing
    subtraces of interest from all trials.  A subtrace starts (windows[0] + 
    gaps[0] + padding) ms before stim onset, and lasts for (sum(windows) + 
    sum(gaps) + 2*padding) ms.  Before pulling out the subtrace, filter and 
    possibly remove spikes from the full trace.  Remove sine waves from the 
    subtraces if desired.

    Parameters
    ----------
    cell_name : string
        name of cell of interest (e.g., '070314_c2')
    windows : list of floats
        widths of ongoing, transient, and steady-state windows (ms)
    gaps : list of floats
        sizes of gaps between stim onset and end of ongoing, stim onset and
        beginning of transient, end of transient and beginning of 
        steady-state (ms)
    padding: float
        size of window (ms) to be added to the beginning and end of each 
        subtrace (only nonzero when doing wavelet filtering)
    crit_freq: float, or tuple of floats
        critical frequency for broad-band filtering of traces (e.g., 100.0)
    filt_kind: string
        type of filter to be used for traces (e.g., 'low' for lowpass)
    replace_spikes: bool
        if True, detect spikes in membrane potential recordings, and replace
        via interpolation before filtering
    remove_sine_waves: bool
        if True, use sine-wave-detection algorithm to remove 60 Hz line noise
        from membrane potential recordings after removing spikes and filtering
    master_folder_path: string
        full path of directory containing data, code, figures, etc.

    Returns
    -------
    subtraces: numpy array
        array in which entries are subtraces extracted from each stimulus
        presentation (w/subtrace defined by stim onset, epochs, windows, gaps)
    
    '''

    raw_traces_path = master_folder_path + '\\downsampled_traces\\'
    if not os.path.isdir(raw_traces_path):
        os.mkdir(raw_traces_path)
    raw_traces_path += '%s\\' % cell_name
    subtraces = []

    print 'processing traces for %s' % cell_name
    for trial in numpy.sort(os.listdir(raw_traces_path)):
        trial_path = raw_traces_path + trial
        results_dict = pickle.load(open(trial_path, 'rb'))

        trace = results_dict['voltage_traces_mV']
        samp_freq = results_dict['samp_freq']
        stim_onsets = results_dict['stim_onsets_ms']

        if replace_spikes == True:
            trace = replace_all_spikes(trace, samp_freq=samp_freq)
        if filt_kind != 'unfiltered':
            trace = butter(trace,
                           sampling_freq=samp_freq,
                           critical_freq=crit_freq,
                           kind=filt_kind,
                           order=3)

        for stim_onset in stim_onsets:
            start = int(int(stim_onset) - gaps[0] - windows[0] - padding)
            stop = int(
                int(stim_onset) + sum(gaps[1:]) + sum(windows[1:]) + padding)
            if start >= 0:
                sub_trace = trace[start:stop]
                if remove_sine_waves:
                    #sub_trace = remove_sines(sub_trace, sine_freqs = [60])
                    sub_trace = remove_sines(sub_trace,
                                             sine_freqs=[60, 120, 180])
                subtraces.append(sub_trace)

    subtraces = pylab.array(subtraces)
    return subtraces
Example #60
0
#!/usr/bin/env python
from __future__ import print_function
import pylab as pl
import scipy.optimize
from scipy.stats import chi2

for fa_rate in 1.0 / pl.array([1e1, 1e2, 1e4, 1e6, 1e9]):
    print(fa_rate)
    for df in range(1,7):
        f_eq = lambda x: ((1 - fa_rate) - chi2.cdf(x, df))**2
        res = scipy.optimize.minimize(f_eq, df)
        assert res['success']
        print('\t', res.x[0])
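Minimizing the squared difference recovers the inverse CDF numerically; chi2 exposes it directly as the percent-point function, so an equivalent sketch is:

for fa_rate in 1.0 / pl.array([1e1, 1e2, 1e4, 1e6, 1e9]):
    for df in range(1, 7):
        # threshold x with P(X > x) = fa_rate for a chi-square with df degrees of freedom
        print(fa_rate, df, chi2.ppf(1.0 - fa_rate, df))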