Example 1
def scalar_validation_statistics(results, groups):
    """ plot absolute z transformation of p_theta values,
    grouped by dictionary groups

    Parameters
    ----------
    results : pandas.DataFrame with rows called 'z' and 'p'
    groups : list of lists of columns of results
    """
    pl.figure()

    width = max(pl.absolute(results.ix['z']))
    for row, (g_name, g) in enumerate(reversed(groups)):
        z = pl.absolute(results.ix['z', g].__array__())
        pl.plot([pl.mean(z)], [row], 'o', color='k', mec='k', mew=1)
        pl.plot(z, [row] * len(z), 'o', color='none', mec='k', mew=1)

        msg = 'p: %s' % ', '.join(
            ['%.3f' % p for p in sorted(results.ix['p', g] * len(g))])
        #msg += 'MAE: %s' % str(
        pl.text(1.1 * width, row, msg, va='center', fontsize='small')

    pl.yticks(range(len(groups)),
              ['%s %d' % (g_name, len(g)) for (g_name, g) in reversed(groups)],
              fontsize='large')
    pl.axis([-.05 * width, width * 1.05, -.5, len(groups) - .5])
    pl.xlabel(r'Absolute $z$-score of $p_\theta$ values', fontsize='large')

    pl.subplots_adjust(right=.5)
Example 2
  def update_design(self):
    ax = self.ax
    ax.cla()
    ax2 = self.ax2
    ax2.cla()

    wp = self.wp
    ws = self.ws
    gpass = self.gpass
    gstop = self.gstop
    b, a = ss.iirdesign(wp, ws, gpass, gstop, ftype=self.ftype, output='ba')
    self.a = a
    self.b = b
    #b = [1,2]; a = [1,2]
    #Print this on command line so we can use it in our programs
    print('b = ', pylab.array_repr(b))
    print('a = ', pylab.array_repr(a))

    my_w = pylab.logspace(pylab.log10(.1*self.ws[0]), 0.0, num=512)
    #import pdb;pdb.set_trace()
    w, h = freqz(b, a, worN=my_w*pylab.pi)
    gp = 10**(-gpass/20.)#Go from db to regular
    gs = 10**(-gstop/20.)
    self.design_line, = ax.plot([.1*self.ws[0], self.ws[0], wp[0], wp[1], ws[1], 1.0], [gs, gs, gp, gp, gs, gs], 'ko:', lw=2, picker=5)
    ax.semilogx(w/pylab.pi, pylab.absolute(h),lw=2)
    ax.text(.5,1.0, '{:d}/{:d}'.format(len(b), len(a)))
    pylab.setp(ax, 'xlim', [.1*self.ws[0], 1.2], 'ylim', [-.1, max(1.1,1.1*pylab.absolute(h).max())], 'xticklabels', [])

    ax2.semilogx(w/pylab.pi, pylab.unwrap(pylab.angle(h)),lw=2)
    pylab.setp(ax2, 'xlim', [.1*self.ws[0], 1.2])
    ax2.set_xlabel('Normalized frequency')

    pylab.draw()
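
For reference, a minimal, self-contained sketch of the same design-and-inspect pattern using scipy.signal directly (all parameter values here are assumed for illustration; the GUI state of the class above is replaced by plain variables):

import numpy as np
import scipy.signal as ss
import matplotlib.pyplot as plt

# design an elliptic low-pass filter and inspect its magnitude response
wp, ws = 0.2, 0.3            # passband/stopband edges, normalized to Nyquist (assumed)
gpass, gstop = 1.0, 40.0     # max passband loss / min stopband attenuation in dB (assumed)
b, a = ss.iirdesign(wp, ws, gpass, gstop, ftype='ellip', output='ba')

w, h = ss.freqz(b, a, worN=512)      # w runs from 0 to pi rad/sample
plt.semilogx(w / np.pi, np.abs(h), lw=2)
plt.xlabel('Normalized frequency')
plt.ylabel('|H|')
plt.show()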
Example 3
def plot_thresholds(rawdata, scan_values, plane='horizontal',
                    xlabel='turns', ylabel='intensity [particles]', zlabel='normalized emittance',
                    xlimits=(0., 8192), ylimits=(0., 7.1e11), zlimits=(0., 10.)):

    # Prepare input data.
    # x axis
    t = rawdata[0,:,:]
    turns = plt.ones(t.shape).T * plt.arange(len(t))
    turns = turns.T

    # z axis
    epsn_abs = {}
    epsn_abs['horizontal'] = plt.absolute(rawdata[11,:,:])
    epsn_abs['vertical']   = plt.absolute(rawdata[12,:,:])

    # Prepare plot environment.
    ax11, ax13 = _create_axes(xlabel, ylabel, zlabel, xlimits, ylimits, zlimits)
    cmap = plt.cm.get_cmap('jet', 2)
    ax11.patch.set_facecolor(cmap(range(2))[-1])
    cmap = plt.cm.get_cmap('jet')

    x, y = plt.meshgrid(turns[:,0], scan_values)
    z = epsn_abs[plane]

    threshold_plot = ax11.contourf(x, y, z.T, levels=plt.linspace(zlimits[0], zlimits[1], 201),
                                   vmin=zlimits[0], vmax=zlimits[1], cmap=cmap)
    cb = plt.colorbar(threshold_plot, ax13, orientation='vertical')
    cb.set_label(zlabel)

    plt.tight_layout()
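
A self-contained sketch of the contourf pattern used above, with synthetic stand-ins for rawdata and scan_values (all values assumed):

import numpy as np
import matplotlib.pyplot as plt

# synthetic stand-ins for turns, scan_values and epsn_abs[plane]
turns = np.arange(100)
scan_values = np.linspace(0., 7e11, 20)
x, y = np.meshgrid(turns, scan_values)
z = np.abs(np.sin(x / 30.)) * y / 7e11   # assumed emittance surface

fig, ax = plt.subplots()
cf = ax.contourf(x, y, z, levels=np.linspace(0., 1., 201), cmap='jet')
fig.colorbar(cf, ax=ax, orientation='vertical', label='normalized emittance')
ax.set_xlabel('turns')
ax.set_ylabel('intensity [particles]')
plt.show()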
Example 4
    def rank_by_distance_bhatt(self, qkeys, ikeys, rkeys, dists):
        """
        ::

            Reduce timbre-channel distances to ranks list by ground-truth key indices
            Bhattacharyya distance on timbre-channel probabilities and Kullback distances
        """
        # timbre-channel search using pre-computed distances
        ranks_list = []
        t_keys, t_lens = self.get_adb_lists(0) 
        rdists=pylab.ones(len(t_keys))*float('inf')
        qk = self._get_probs_tc(qkeys)
        for i in range(len(ikeys[0])): # number of include keys
            ikey=[]
            dk = pylab.zeros(self.timbre_channels)
            for t_chan in range(self.timbre_channels): # timbre channels
                ikey.append(ikeys[t_chan][i])
                try: 
                    # find dist of key i for query
                    i_idx = rkeys[t_chan].index( ikey[t_chan] ) # dataset include-key match
                    # the reduced distance function in include_keys order
                    # distance is Bhattacharyya distance on probs and dists
                    dk[t_chan] = dists[t_chan][i_idx]
                except:
                    print("Key not found in result list: ", ikey, "for query:", qkeys[t_chan])
                    raise error.BregmanError()
            rk = self._get_probs_tc(ikey)
            a_idx = t_keys.index( ikey[0] ) # audiodb include-key index
            rdists[a_idx] = distance.bhatt(pylab.sqrt(pylab.absolute(dk)), pylab.sqrt(pylab.absolute(qk*rk)))
        #search for the index of the relevant keys
        rdists = pylab.absolute(rdists)
        sort_idx = pylab.argsort(rdists)   # Sort fields into database order
        for r in self.ground_truth: # relevant keys
            ranks_list.append(pylab.where(sort_idx==r)[0][0]) # Rank of the relevant key
        return ranks_list, rdists
Example 5
def add_to_results(model, name):
    df = getattr(model, name)
    model.results['param'].append(name)
    model.results['bias'].append(df['abs_err'].mean())
    model.results['mae'].append((pl.median(pl.absolute(df['abs_err'].dropna()))))
    model.results['mare'].append(pl.median(pl.absolute(df['rel_err'].dropna())))
    model.results['pc'].append(df['covered?'].mean())
Example 6
    def rank_by_distance_bhatt(self, qkeys, ikeys, rkeys, dists):
        """
        ::

            Reduce timbre-channel distances to ranks list by ground-truth key indices
            Bhattacharyya distance on timbre-channel probabilities and Kullback distances
        """
        # timbre-channel search using pre-computed distances
        ranks_list = []
        t_keys, t_lens = self.get_adb_lists(0) 
        rdists=pylab.ones(len(t_keys))*float('inf')
        qk = self._get_probs_tc(qkeys)
        for i in range(len(ikeys[0])): # number of include keys
            ikey=[]
            dk = pylab.zeros(self.timbre_channels)
            for t_chan in range(self.timbre_channels): # timbre channels
                ikey.append(ikeys[t_chan][i])
                try: 
                    # find dist of key i for query
                    i_idx = rkeys[t_chan].index( ikey[t_chan] ) # dataset include-key match
                    # the reduced distance function in include_keys order
                    # distance is Bhattacharyya distance on probs and dists
                    dk[t_chan] = dists[t_chan][i_idx]
                except:
                    print "Key not found in result list: ", ikey, "for query:", qkeys[t_chan]
                    raise error.BregmanError()
            rk = self._get_probs_tc(ikey)
            a_idx = t_keys.index( ikey[0] ) # audiodb include-key index
            rdists[a_idx] = distance.bhatt(pylab.sqrt(pylab.absolute(dk)), pylab.sqrt(pylab.absolute(qk*rk)))
        #search for the index of the relevant keys
        rdists = pylab.absolute(rdists)
        sort_idx = pylab.argsort(rdists)   # Sort fields into database order
        for r in self.ground_truth: # relevant keys
            ranks_list.append(pylab.where(sort_idx==r)[0][0]) # Rank of the relevant key
        return ranks_list, rdists
Example 8
def boxForcing(bMin, bMax, radius, c,
               angle):  #confines the particles inside the sample

    x = pl.uniform(bMin, bMax)
    y = pl.uniform(bMin, bMax)

    if x < (bMin + c * radius):
        x = x + (radius) * (1 + c * pl.absolute(pl.sin(pl.radians(angle))))
        if x > (bMax - c * radius):
            x = bMin + c * radius
    if x > (bMax - c * radius):
        x = x - (radius) * (1 + c * pl.absolute(pl.sin(pl.radians(angle))))

        if x < (bMin + c * radius):
            x = bMax - c * radius

    if y > (bMax - c * radius):
        y = y - (radius) * (1 + c * pl.absolute(pl.cos(pl.radians(angle))))
        if y < (bMin + c * radius):
            y = bMax - c * radius

    if y < (bMin + c * radius):
        y = y + (radius) * (1 + c * pl.absolute(pl.cos(pl.radians(angle))))
        if y > (bMax - c * radius):
            y = bMin + c * radius

    center = []
    center.append([x, y])

    return center
Example 9
def fwhm(x, y):
    hm = pl.amax(y / 2.0)
    y_diff = pl.absolute(y - hm)
    y_diff_sorted = pl.sort(y_diff)
    i1 = pl.where(y_diff == y_diff_sorted[0])
    i2 = pl.where(y_diff == y_diff_sorted[1])
    fwhm = pl.absolute(x[i1] - x[i2])
    return hm, fwhm
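
Note that picking the two samples whose values are nearest the half maximum can return two points on the same flank of a finely sampled peak. A sketch of a more robust variant, interpolating the half-maximum crossings instead (assuming a unimodal profile):

import numpy as np

def fwhm_interp(x, y):
    """Estimate FWHM by linearly interpolating the half-maximum crossings."""
    hm = y.max() / 2.0
    above = y >= hm
    rising = np.where(~above[:-1] & above[1:])[0]    # below -> above
    falling = np.where(above[:-1] & ~above[1:])[0]   # above -> below
    i, j = rising[0], falling[-1]
    x_left = np.interp(hm, [y[i], y[i + 1]], [x[i], x[i + 1]])
    x_right = np.interp(hm, [y[j + 1], y[j]], [x[j + 1], x[j]])
    return x_right - x_left

# sanity check on a Gaussian, whose FWHM is 2*sqrt(2*ln 2)*sigma
x = np.linspace(-5, 5, 2001)
y = np.exp(-x**2 / 2.0)
print(fwhm_interp(x, y), 2 * np.sqrt(2 * np.log(2)))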
Example 10
def add_to_results(model, name):
    df = getattr(model, name)
    model.results['param'].append(name)
    model.results['bias'].append(df['abs_err'].mean())
    model.results['mae'].append(
        (pl.median(pl.absolute(df['abs_err'].dropna()))))
    model.results['mare'].append(pl.median(pl.absolute(
        df['rel_err'].dropna())))
    model.results['pc'].append(df['covered?'].mean())
Example 11
def MaxV(n, m, v, w, chi, phi, a):
    """Returns velocity at which the difference between right-handed and left-handed photons is maximal."""
    D = lambda V: -py.absolute(PlusMinDiff(n, m, V, w, chi, 0, a) / V)
    F = lambda V: -py.absolute(PlusMinDiff(n, m, V, w, chi, py.pi, a) / V)
    optD = optimize.minimize_scalar(D, method='bounded', bounds=(1 / n, 1))
    optF = optimize.minimize_scalar(F, method='bounded', bounds=(1 / n, 1))
    if optD.fun < optF.fun:
        return float(optD.x)
    else:
        return float(optF.x)
Example 12
def fwhm_2gauss(x, y, dx=0.001):
	'''
	Finds the FWHM for the profile y(x), with accuracy dx=0.001
	Uses a 2-Gauss 1D fit.
	'''
    popt, pcov = curve_fit(gauss2, x, y)
    xx = pl.arange(pl.amin(x), pl.amax(x) + dx, dx)
    ym = gauss2(xx, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5])
    hm = pl.amax(ym / 2.0)
    y_diff = pl.absolute(ym - hm)
    y_diff_sorted = pl.sort(y_diff)
    i1 = pl.where(y_diff == y_diff_sorted[0])
    i2 = pl.where(y_diff == y_diff_sorted[1])
    fwhm = pl.absolute(xx[i1] - xx[i2])
    return hm, fwhm, xx, ym
Example 13
    def fresnelSingleTransformVW(self,d) :
        # compute new window
        x2 = self.nx*pl.absolute(d)*self.wl/(self.endx-self.startx)
        y2 = self.ny*pl.absolute(d)*self.wl/(self.endy-self.starty)

        # create new intensity object
        i2 = Intensity2D(self.nx,-x2/2,x2/2,
                         self.ny,-y2/2,y2/2,
                         self.wl)

        # compute intensity
        u1p   = self.i*pl.exp(-1j*pl.pi/(d*self.wl)*(self.xgrid**2+self.ygrid**2))
        ftu1p = pl.fftshift(pl.fft2(pl.fftshift(u1p)))
        i2.i  = ftu1p*1j/(d*i2.wl)*pl.exp(-1j*pl.pi/(d*i2.wl)*(i2.xgrid**2+i2.ygrid**2))
        return i2
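
The new-window computation above is the usual sample scaling of the single-Fourier-transform Fresnel propagator, x2 = nx * |d| * wl / (endx - startx). A quick numeric illustration (all values assumed):

nx, wl = 1024, 633e-9    # samples and wavelength (m), assumed
window, d = 5e-3, 0.1    # input window width (m) and propagation distance (m), assumed
x2 = nx * abs(d) * wl / window
print(x2)                # ~0.013 m: the output window width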
Example 14
def LoadEnvCond(arg, dirname, files):
    for file in files:
        Grand_mean = p.nan
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'GeneralData.dat'):
            data = p.genfromtxt(filepath)
            if data[-1, 4] != 0.0:
                grand_mean = data[:, 1].mean()
                frame_scaling = 1.0 / (data[1, 0] - data[0, 0])
                filepath_turb = os.path.join(dirname, 'ModelParams.dat')
                l = re.split(" ", ln.getline(filepath_turb, 6))
                turb_param = float(l[6])
                l = re.split(" ", ln.getline(filepath_turb, 29))
                rand_death_factor = float(l[6])
                frame_half_size = int(p.rint((1.0 / (data[:, 11] \
                    / data[:, 4]).mean() ) * 0.5 * frame_scaling))
                mean_turbul = p.zeros((data.shape[0] - 2 * frame_half_size, 2))
                mean_turbul[:,0] = data[frame_half_size : \
                    - frame_half_size][:, 0].copy()
                k = frame_half_size
                for j in range(mean_turbul.shape[0]):
                    mean_turbul[j, 1] = p.absolute(data[k \
                        - frame_half_size : k + frame_half_size, 1].mean() \
                        - grand_mean)
                    k = k + 1
                Grand_mean = mean_turbul[:, 1].mean()
                arg.append((Grand_mean, turb_param, rand_death_factor))
            else:
                break
Example 15
def merge_data_csvs(id):

    df = pandas.DataFrame()

    dir = dismod3.settings.JOB_WORKING_DIR % id
    #print dir
    for f in sorted(glob.glob('%s/posterior/data-*.csv' % dir)):
        #print 'merging %s' % f
        df2 = pandas.read_csv(f, index_col=None)
        df2.index = df2['index']
        df = df.drop(set(df.index) & set(df2.index)).append(df2)

    df['residual'] = df['value'] - df['mu_pred']
    df['scaled_residual'] = df['residual'] / pl.sqrt(
        df['value'] * (1 - df['value']) / df['effective_sample_size'])
    #df['scaled_residual'] = df['residual'] * pl.sqrt(df['effective_sample_size'])  # including
    df['abs_scaled_residual'] = pl.absolute(df['scaled_residual'])

    d = .005  # TODO: save delta in these files, use negative binomial to calc logp
    df['logp'] = [
        mc.negative_binomial_like(x * n, (p + 1e-3) * n,
                                  d * (p + 1e-3) * n) for x, p, n in
        zip(df['value'], df['mu_pred'], df['effective_sample_size'])
    ]
    df['logp'][df['data_type'] == 'rr'] = df['scaled_residual'][df['data_type']
                                                                == 'rr']

    df = df.sort('logp')

    #print df.filter('data_type area age_start age_end year_start sex effective_sample_size value residual logp'.split())[:25]
    return df
Example 16
    def fresnelSingleTransformVW(self, d):
        # compute new window
        x2 = self.nx * pl.absolute(d) * self.wl / (self.endx - self.startx)
        y2 = self.ny * pl.absolute(d) * self.wl / (self.endy - self.starty)

        # create new intensity object
        i2 = Intensity2D(self.nx, -x2 / 2, x2 / 2, self.ny, -y2 / 2, y2 / 2,
                         self.wl)

        # compute intensity
        u1p = self.i * pl.exp(-1j * pl.pi / (d * self.wl) *
                              (self.xgrid**2 + self.ygrid**2))
        ftu1p = pl.fftshift(pl.fft2(pl.fftshift(u1p)))
        i2.i = ftu1p * 1j / (d * i2.wl) * pl.exp(-1j * pl.pi / (d * i2.wl) *
                                                 (i2.xgrid**2 + i2.ygrid**2))
        return i2
Example 17
    def rank_by_distance_avg(self, qkeys, ikeys, rkeys, dists):
        """
        ::

            Reduce timbre-channel distances to ranks list by ground-truth key indices
            Kullback distances
        """
        # timbre-channel search using pre-computed distances
        ranks_list = []
        t_keys, t_lens = self.get_adb_lists(0) 
        rdists=pylab.ones(len(t_keys))*float('inf')
        for t_chan in range(self.timbre_channels): # timbre channels
            t_keys, t_lens = self.get_adb_lists(t_chan) 
            for i, ikey in enumerate(ikeys[t_chan]): # include keys, results
                try: 
                    # find dist of key i for query
                    i_idx = rkeys[t_chan].index( ikey ) # lower_bounded include-key index
                    a_idx = t_keys.index( ikey ) # audiodb include-key index
                    # the reduced distance function in include_keys order
                    # distance is the sum for now
                    if t_chan:
                        rdists[a_idx] += dists[t_chan][i_idx]
                    else:
                        rdists[a_idx] = dists[t_chan][i_idx]
                except:
                    print("Key not found in result list: ", ikey, "for query:", qkeys[t_chan])
                    raise error.BregmanError()
        #search for the index of the relevant keys
        rdists = pylab.absolute(rdists)
        sort_idx = pylab.argsort(rdists)   # Sort fields into database order
        for r in self.ground_truth: # relevant keys
            ranks_list.append(pylab.where(sort_idx==r)[0][0]) # Rank of the relevant key
        return ranks_list, rdists
Example 18
 def deriv_sign_rate(f=rate,
                     age_indices=age_indices,
                     tau=1.e14,
                     deriv=deriv,
                     sign=sign):
     df = pl.diff(f[age_indices], deriv)
     return mc.normal_like(pl.absolute(df) * (sign * df < 0), 0., tau)
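
The potential above contributes a penalty only where the derivative has the wrong sign. A quick look at the masked term itself, without the pymc normal_like wrapper (values assumed):

import numpy as np

# f increases except for one dip; with sign=+1 only that dip is penalized
f = np.array([0.1, 0.3, 0.25, 0.4])
df = np.diff(f)
sign = 1
penalized = np.abs(df) * (sign * df < 0)
print(penalized)   # [0.   0.05 0.  ] -- only the 0.3 -> 0.25 step contributes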
Example 19
    def rank_by_distance_avg(self, qkeys, ikeys, rkeys, dists):
        """
        ::

            Reduce timbre-channel distances to ranks list by ground-truth key indices
            Kullback distances
        """
        # timbre-channel search using pre-computed distances
        ranks_list = []
        t_keys, t_lens = self.get_adb_lists(0) 
        rdists=pylab.ones(len(t_keys))*float('inf')
        for t_chan in range(self.timbre_channels): # timbre channels
            t_keys, t_lens = self.get_adb_lists(t_chan) 
            for i, ikey in enumerate(ikeys[t_chan]): # include keys, results
                try: 
                    # find dist of key i for query
                    i_idx = rkeys[t_chan].index( ikey ) # lower_bounded include-key index
                    a_idx = t_keys.index( ikey ) # audiodb include-key index
                    # the reduced distance function in include_keys order
                    # distance is the sum for now
                    if t_chan:
                        rdists[a_idx] += dists[t_chan][i_idx]
                    else:
                        rdists[a_idx] = dists[t_chan][i_idx]
                except:
                    print "Key not found in result list: ", ikey, "for query:", qkeys[t_chan]
                    raise error.BregmanError()
        #search for the index of the relevant keys
        rdists = pylab.absolute(rdists)
        sort_idx = pylab.argsort(rdists)   # Sort fields into database order
        for r in self.ground_truth: # relevant keys
            ranks_list.append(pylab.where(sort_idx==r)[0][0]) # Rank of the relevant key
        return ranks_list, rdists
Example 20
def determineCuts(spec2d, dCutFactor = 4.0, dAddPix = 10, bPlot = True) :
    
    vproj = spec2d.sum(1)
    dvproj = vproj-pylab.roll(vproj,1)#derivitive
    
    index  = pylab.arange(0,len(vproj),1)
    index1 = index[dvproj > dvproj.max()/dCutFactor]
    start  = index1[0]
    end    = index1[-1]
    
    startWide = start-dAddPix
    endWide   = end+dAddPix 
    
    if bPlot :
        pylab.figure(12)
        pylab.clf()

        pylab.subplot(2,2,1)
        pylab.plot(vproj)

        pylab.subplot(2,2,2)
        pylab.plot(pylab.absolute(dvproj))
        pylab.axhline(dvproj.max()/dCutFactor)

        pylab.subplot(2,2,3)
        pylab.plot(vproj)
        pylab.axvline(start)
        pylab.axvline(end)

        pylab.axvline(startWide)
        pylab.axvline(endWide)

    return [startWide, endWide]
Example 21
def merge_data_csvs(id):

    df = pandas.DataFrame()

    dir = dismod3.settings.JOB_WORKING_DIR % id
    #print dir
    for f in sorted(glob.glob('%s/posterior/data-*.csv'%dir)):
        #print 'merging %s' % f
        df2 = pandas.read_csv(f, index_col=None)
        df2.index = df2['index']
        df = df.drop(set(df.index)&set(df2.index)).append(df2)

    df['residual'] = df['value'] - df['mu_pred']
    df['scaled_residual'] = df['residual'] / pl.sqrt(df['value'] * (1 - df['value']) / df['effective_sample_size'])
    #df['scaled_residual'] = df['residual'] * pl.sqrt(df['effective_sample_size'])  # including 
    df['abs_scaled_residual'] = pl.absolute(df['scaled_residual'])

    d = .005 # TODO: save delta in these files, use negative binomial to calc logp
    df['logp'] = [mc.negative_binomial_like(x*n, (p+1e-3)*n, d*(p+1e-3)*n) for x,p,n in zip(df['value'], df['mu_pred'], df['effective_sample_size'])]
    df['logp'][df['data_type'] == 'rr'] = df['scaled_residual'][df['data_type'] == 'rr']

    df = df.sort('logp')

    #print df.filter('data_type area age_start age_end year_start sex effective_sample_size value residual logp'.split())[:25]
    return df
Example 22
def calcAUC(data, y0, lag, mgr, asym, time):
    """
    Calculate the area under the curve of the logistic function
    using its integrated formula
    [ A( [A-y0] log[ exp( [4m(l-t)/A]+2 )+1 ]) / 4m ] + At
    """

    # First check that max growth rate is not zero
    # If so, calculate using the data instead of the equation
    if mgr == 0:
        auc = calcAUCData(data, time)
    else:
        timeS = time[0]
        timeE = time[-1]
        t1 = asym - y0
        #try:
        t2_s = py.log(py.exp((4 * mgr * (lag - timeS) / asym) + 2) + 1)
        t2_e = py.log(py.exp((4 * mgr * (lag - timeE) / asym) + 2) + 1)
        #except RuntimeWarning as rw:
            # Exponent is too large, setting to 10^3
        #    newexp = 1000
        #    t2_s = py.log(newexp + 1)
        #    t2_e = py.log(newexp + 1)
        t3 = 4 * mgr
        t4_s = asym * timeS
        t4_e = asym * timeE

        start = (asym * (t1 * t2_s) / t3) + t4_s
        end = (asym * (t1 * t2_e) / t3) + t4_e
        auc = end - start

    if py.absolute(auc) == float('Inf'):
        x = py.diff(time)
        auc = py.sum(x * data[1:])
    return auc
Example 23
 def unimodal_rate(f=rate, age_indices=age_indices, tau=1.e5):
     df = pl.diff(f[age_indices])
     sign_changes = pl.find((df[:-1] > NEARLY_ZERO) & (df[1:] < -NEARLY_ZERO))
     sign = pl.ones(len(age_indices)-2)
     if len(sign_changes) > 0:
         change_age = sign_changes[len(sign_changes) // 2]
         sign[change_age:] = -1.
     return -tau*pl.dot(pl.absolute(df[:-1]), (sign * df[:-1] < 0))
Example 24
def _calculate_spectra_sussix(sx, sy, Q_x, Q_y, Q_s, n_lines):

    n_turns, n_files = sx.shape

    # Allocate memory for output.        
    oxx, axx = plt.zeros((n_lines, n_files)), plt.zeros((n_lines, n_files))
    oyy, ayy = plt.zeros((n_lines, n_files)), plt.zeros((n_lines, n_files))

    # Initialise Sussix object.
    SX = PySussix.Sussix()
    
    x, xp, y, yp = sx.real, sx.imag, sy.real, sy.imag
    for file_i in range(n_files):
        SX.sussix_inp(nt1=1, nt2=n_turns, idam=2, ir=0, tunex=Q_x[file_i] % 1, tuney=Q_y[file_i] % 1)
        SX.sussix(x[:,file_i], xp[:,file_i], y[:,file_i], yp[:,file_i], sx[:,file_i], sx[:,file_i])

        # Amplitude normalisation
        SX.ax /= plt.amax(SX.ax)
        SX.ay /= plt.amax(SX.ay)

        # Tunes
        SX.ox = plt.absolute(SX.ox)
        SX.oy = plt.absolute(SX.oy)
        if file_i==0:
            tunexsx = SX.ox[plt.argmax(SX.ax)]
            tuneysx = SX.oy[plt.argmax(SX.ay)]
            print "\n*** Tunes from Sussix"
            print "    tunex", tunexsx, ", tuney", tuneysx, "\n"

        # Tune normalisation
        SX.ox = (SX.ox - (Q_x[file_i] % 1)) / Q_s[file_i]
        SX.oy = (SX.oy - (Q_y[file_i] % 1)) / Q_s[file_i]
    
        # Sort
        CX = plt.rec.fromarrays([SX.ox, SX.ax], names='ox, ax')
        CX.sort(order='ax')
        CY = plt.rec.fromarrays([SX.oy, SX.ay], names='oy, ay')
        CY.sort(order='ay')
        ox, ax, oy, ay = CX.ox, CX.ax, CY.oy, CY.ay
        oxx[:,file_i], axx[:,file_i], oyy[:,file_i], ayy[:,file_i] = ox, ax, oy, ay

    spectra = {}
    spectra['horizontal'] = (oxx, axx)
    spectra['vertical']   = (oyy, ayy)
        
    return spectra
Example 25
def store_results(dm, area, sex, year):
    types_to_plot = 'p i r rr'.split()

    graphics.plot_convergence_diag(dm.vars)
    pl.clf()
    for i, t in enumerate(types_to_plot):
        pl.subplot(len(types_to_plot), 1, i + 1)
        graphics.plot_data_bars(dm.model.get_data(t))
        pl.plot(range(101),
                dm.emp_priors[t, 'mu'],
                linestyle='dashed',
                color='grey',
                label='Emp. Prior',
                linewidth=3)
        pl.plot(range(101), dm.true[t], 'b-', label='Truth', linewidth=3)
        pl.plot(range(101),
                dm.posteriors[t].mean(0),
                'r-',
                label='Estimate',
                linewidth=3)

        pl.errorbar(range(101),
                    dm.posteriors[t].mean(0),
                    yerr=1.96 * dm.posteriors[t].std(0),
                    fmt='r-',
                    linewidth=1,
                    capsize=0)

        pl.ylabel(t)
        graphics.expand_axis()

    pl.legend(loc=(0., -.95), fancybox=True, shadow=True)
    pl.subplots_adjust(hspace=0, left=.1, right=.95, bottom=.2, top=.95)
    pl.xlabel('Age (Years)')
    pl.show()

    model = dm
    model.mu = pandas.DataFrame()
    for t in types_to_plot:
        model.mu = model.mu.append(pandas.DataFrame(
            dict(true=dm.true[t],
                 mu_pred=dm.posteriors[t].mean(0),
                 sigma_pred=dm.posteriors[t].std(0))),
                                   ignore_index=True)
    data_simulation.add_quality_metrics(model.mu)
    print('\nparam prediction bias: %.5f, MARE: %.3f, coverage: %.2f' % (
        model.mu['abs_err'].mean(),
        pl.median(pl.absolute(
            model.mu['rel_err'].dropna())), model.mu['covered?'].mean()))
    print()

    data_simulation.initialize_results(model)
    data_simulation.add_to_results(model, 'mu')
    data_simulation.finalize_results(model)

    print(model.results)

    return model
Example 26
def Concurrence(n, m, v, w, chi, phi, a):
    """Returns two times the absolute value of the determinant of the matrix of scattering amplitudes.
    
    Used as a measure of entanglement."""
    om = AmpOneMin(n, m, v, w, chi, phi, a)
    op = AmpOnePlus(n, m, v, w, chi, phi, a)
    tm = AmpTwoMin(n, m, v, w, chi, phi, a)
    tp = AmpTwoPlus(n, m, v, w, chi, phi, a)
    return 2 * py.absolute(op * tm - om * tp) / AmpTotSq(n, m, v, w)
Example 27
 def update(self, data):
   blue=pl.less(data,0.) # Fill in True where less than 0.0
   red=~blue # Reverse of the above
   #Blue
   self.image[...,2][blue]=pl.minimum(pl.absolute(pl.divide(data[blue],255.)),1.)
   #Red -- Max 40C, so we increase the intensity of the red color 6 times
   self.image[...,0][red]=pl.minimum(1.,pl.divide(pl.multiply(data[red],6.),255.))
   pl.imshow(self.image)
   pl.draw()
Example 28
def xyamb(xytab,qu,xyout=''):

    mytb=taskinit.tbtool()

    if not isinstance(qu,tuple):
        raise Exception('qu must be a tuple: (Q,U)')

    if xyout=='':
        xyout=xytab
    if xyout!=xytab:
        os.system('cp -r '+xytab+' '+xyout)

    QUexp=complex(qu[0],qu[1])
    print('Expected QU = ', qu)   # , '  (', pl.angle(QUexp)*180/pi, ')'

    mytb.open(xyout,nomodify=False)

    QU=mytb.getkeyword('QU')['QU']
    P=pl.sqrt(QU[0,:]**2+QU[1,:]**2)

    nspw=P.shape[0]
    for ispw in range(nspw):
        st=mytb.query('SPECTRAL_WINDOW_ID=='+str(ispw))
        if (st.nrows()>0):
            q=QU[0,ispw]
            u=QU[1,ispw]
            qufound=complex(q,u)
            c=st.getcol('CPARAM')
            fl=st.getcol('FLAG')
            xyph0=pl.angle(pl.mean(c[0,:,:][pl.logical_not(fl[0,:,:])]),True)
            print('Spw = '+str(ispw)+': Found QU = '+str(QU[:,ispw]))  # +'   ('+str(pl.angle(qufound)*180/pi)+')'
            #if ( (abs(q)>0.0 and abs(qu[0])>0.0 and (q/qu[0])<0.0) or
            #     (abs(u)>0.0 and abs(qu[1])>0.0 and (u/qu[1])<0.0) ):
            if ( pl.absolute(pl.angle(qufound/QUexp)*180/pi)>90.0 ):
                c[0,:,:]*=-1.0
                xyph1=pl.angle(pl.mean(c[0,:,:][pl.logical_not(fl[0,:,:])]),True)
                st.putcol('CPARAM',c)
                QU[:,ispw]*=-1
                print('   ...CONVERTING X-Y phase from '+str(xyph0)+' to '+str(xyph1)+' deg')
            else:
                print('      ...KEEPING X-Y phase '+str(xyph0)+' deg')
            st.close()
    QUr={}
    QUr['QU']=QU
    mytb.putkeyword('QU',QUr)
    mytb.close()
    QUm=pl.mean(QU[:,P>0],1)
    QUe=pl.std(QU[:,P>0],1)
    Pm=pl.sqrt(QUm[0]**2+QUm[1]**2)
    Xm=0.5*atan2(QUm[1],QUm[0])*180/pi

    print('Ambiguity resolved (spw mean): Q=', QUm[0], 'U=', QUm[1], '(rms=', QUe[0], QUe[1], ')', 'P=', Pm, 'X=', Xm)

    stokes=[1.0,QUm[0],QUm[1],0.0]
    print('Returning the following Stokes vector: '+str(stokes))
    
    return stokes
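
The sign-flip test above compares the found (Q,U) direction against the expected one and flips when they differ by more than 90 degrees. A small numeric check of just that test (values assumed):

import numpy as np

QUexp = complex(0.03, 0.04)           # expected (Q,U), assumed
qufound = complex(-0.028, -0.041)     # found (Q,U), roughly opposite direction
flip = abs(np.angle(qufound / QUexp, deg=True)) > 90.0
print(flip)   # True -> the X-Y phase (and CPARAM sign) would be flipped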
Example 29
 def _power(self):
     if not self._have_stft:
         if not self._stft():
             return False
     fp = self._check_feature_params()
     self.POWER=(pylab.absolute(self.STFT)**2).sum(0)
     self._have_power=True
     if fp['verbosity']:
         print "Extracted POWER"
     return True
Example 30
 def unimodal_rate(f=rate, age_indices=age_indices, tau=1.e5):
     df = pl.diff(f[age_indices])
     sign_changes = pl.find((df[:-1] > NEARLY_ZERO)
                            & (df[1:] < -NEARLY_ZERO))
     sign = pl.ones(len(age_indices) - 2)
     if len(sign_changes) > 0:
          change_age = sign_changes[len(sign_changes) // 2]
         sign[change_age:] = -1.
     return -tau * pl.dot(pl.absolute(df[:-1]),
                          (sign * df[:-1] < 0))
Example 31
def AmpOnePlusSq(n, m, v, w, chi, phi, a):
    """Returns squared amplitude of the ( 1 -> 1,+ ) process."""
    E = Gamma(v) * m
    p = Gamma(v) * m * v
    B = 2 * E + 2 * m - w
    dot = p * py.cos(a) * SinCh(n, m, v, w)
    cross = ACrossPlusThree(n, m, v, w, chi, phi, a)
    amp = dot * B + 1j * cross
    fac = 4 * E**2 * (E + m) * (E + m - w)
    return py.absolute(amp)**2 / fac
Example 32
 def _power(self):
     if not self._stft():
         return False
     fp = self._check_feature_params()
     self.POWER=(P.absolute(self.STFT)**2).sum(0)
     self._have_power=True
     if self.verbosity:
         print "Extracted POWER"
     self.X=self.POWER
     return True
Example 33
 def _power(self):
     if not self._stft():
         return False
     fp = self._check_feature_params()
     self.POWER = (P.absolute(self.STFT)**2).sum(0)
     self._have_power = True
     if self.verbosity:
         print("Extracted POWER")
     self.X = self.POWER
     return True
Example 34
def plot_risetimes(a, b, **kwargs):

    # plt.ion()
    # if kwargs is not None:
    #     for key, value in kwargs.iteritems():
    #         if key == 'file_list':
    #             file_list = value
    #         if key == 'scan_line':
    #             scan_line = value
    # varray = plt.array(get_value_from_cfg(file_list, scan_line))

    n_files = a.shape[-1]
    cmap = plt.get_cmap('jet')
    c = [cmap(i) for i in plt.linspace(0, 1, n_files)]

    fig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
    [ax.set_color_cycle(c) for ax in (ax1, ax2)]

    r = []
    for i in range(n_files):
        x, y = a[:,i], b[:,i]
        # xo, yo = x, y #, get_envelope(x, y)
        xo, yo = get_envelope(x, y)
        p = plt.polyfit(xo, np.log(yo), 1)

        # Right way to fit... a la Nicolas - the fit expert!
        l = ax1.plot(x, plt.log(plt.absolute(y)))
        lcolor = l[-1].get_color()
        ax1.plot(xo, plt.log(yo), color=lcolor, marker='o', mec=None)
        ax1.plot(x, p[1] + x * p[0], color=lcolor, ls='--', lw=3)

        l = ax2.plot(x, y)
        lcolor = l[-1].get_color()
        ax2.plot(xo, yo, 'o', color=lcolor)
        xi = plt.linspace(plt.amin(x), plt.amax(x))
        yi = plt.exp(p[1] + p[0] * xi)
        ax2.plot(xi, yi, color=lcolor, ls='--', lw=3)

        print(p[1], p[0], 1 / p[0])
        # plt.draw()
        # ax1.cla()
        # ax2.cla()

        r.append(1/p[0])

    ax2.set_ylim(0, 1000)
    plt.figure(2)
    plt.plot(r, lw=3, c='purple')
    # plt.gca().set_ylim(0, 10000)

    # ax3 = plt.subplot(111)
    # ax3.semilogy(x, y)
    # ax3.semilogy(xo, yo)

    return r
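
get_envelope is not defined on this page; a plausible sketch consistent with how it is used here (local maxima of the rectified signal as the decay envelope), together with the same polyfit-on-log fit:

import numpy as np
from scipy.signal import argrelextrema

def get_envelope(x, y):
    # keep the local maxima of the rectified signal as its decay envelope
    idx = argrelextrema(np.abs(y), np.greater)[0]
    return x[idx], np.abs(y)[idx]

# e.g. a damped oscillation: the envelope points should follow exp(-t/5)
t = np.linspace(0, 20, 2000)
y = np.exp(-t / 5) * np.sin(2 * np.pi * t)
xo, yo = get_envelope(t, y)
p = np.polyfit(xo, np.log(yo), 1)
print(1 / p[0])   # ~ -5, i.e. a decay time constant of 5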
Example 35
 def _corrIntensity(self, laserNum, intensity, dist):
     #iScale = self.calib.maxIntensity - self.calib.minIntensity
     focalOffset = 256 * (1 - self.calib.caliParams[laserNum].focalDist/13100)**2
     corrIntensity = intensity + self.calib.caliParams[laserNum].focalSlope * pl.absolute(focalOffset - 256*(1-dist/65535)**2)
     
     if corrIntensity<self.calib.minIntensity: corrIntensity = self.calib.minIntensity
     if corrIntensity>self.calib.maxIntensity: corrIntensity = self.calib.maxIntensity
     
     corrIntensity = (corrIntensity-self.calib.minIntensity)/self.calib.maxIntensity
     
     return corrIntensity
Example 36
    def _calculate_sussix_spectrum(self, turn, window_width):
    
        # Initialise Sussix object
        SX = PySussix.Sussix()
        SX.sussix_inp(nt1=1, nt2=window_width, idam=2, ir=0, tunex=self.q_x, tuney=self.q_y)

        tunes_x = plt.zeros(self.n_particles_to_analyse)
        tunes_y = plt.zeros(self.n_particles_to_analyse)

        n_particles_to_analyse_10th = int(self.n_particles_to_analyse/10)
        print('Running SUSSIX analysis ...')
        for i in range(self.n_particles_to_analyse):
            if not i % n_particles_to_analyse_10th: print('  Particle', i)
            SX.sussix(self.x[i,turn:turn+window_width], self.xp[i,turn:turn+window_width],
                      self.y[i,turn:turn+window_width], self.yp[i,turn:turn+window_width],
                      self.x[i,turn:turn+window_width], self.xp[i,turn:turn+window_width]) # this line is not used by sussix!
            tunes_x[i] = plt.absolute(SX.ox[plt.argmax(SX.ax)])
            tunes_y[i] = plt.absolute(SX.oy[plt.argmax(SX.ay)])
                
        return tunes_x, tunes_y
Example 37
def mare(id):
    df = upload_fits.merge_data_csvs(id)
    df['are'] = pl.absolute((df['value'] - df['mu_pred']) / df['value'])

    print('mare by type:')
    print(pl.sort(df.groupby('data_type')['are'].median()))

    print()
    print('overall mare: %.3f' % df['are'].median())

    return df['are'].median()
Example 38
def _calculate_spectra_fft(sx, sy, Q_x, Q_y, Q_s, n_lines):

    n_turns, n_files = sx.shape
        
    # Allocate memory for output.
    oxx, axx = plt.zeros((n_lines, n_files)), plt.zeros((n_lines, n_files))
    oyy, ayy = plt.zeros((n_lines, n_files)), plt.zeros((n_lines, n_files))

    for file_i in range(n_files):
        t = plt.linspace(0, 1, n_turns)
        ax = plt.absolute(plt.fft(sx[:, file_i]))
        ay = plt.absolute(plt.fft(sy[:, file_i]))

        # Amplitude normalisation
        ax /= plt.amax(ax, axis=0)
        ay /= plt.amax(ay, axis=0)
    
        # Tunes
        if file_i == 0:
            tunexfft = t[plt.argmax(ax[:n_turns//2], axis=0)]
            tuneyfft = t[plt.argmax(ay[:n_turns//2], axis=0)]
            print("\n*** Tunes from FFT")
            print("    tunex:", tunexfft, ", tuney:", tuneyfft, "\n")

        # Tune normalisation
        ox = (t - (Q_x[file_i] % 1)) / Q_s[file_i]
        oy = (t - (Q_y[file_i] % 1)) / Q_s[file_i]
    
        # Sort
        CX = plt.rec.fromarrays([ox, ax], names='ox, ax')
        CX.sort(order='ax')
        CY = plt.rec.fromarrays([oy, ay], names='oy, ay')
        CY.sort(order='ay')
        ox, ax, oy, ay = CX.ox[-n_lines:], CX.ax[-n_lines:], CY.oy[-n_lines:], CY.ay[-n_lines:]
        oxx[:,file_i], axx[:,file_i], oyy[:,file_i], ayy[:,file_i] = ox, ax, oy, ay

    spectra = {}
    spectra['horizontal'] = (oxx, axx)
    spectra['vertical']   = (oyy, ayy)
        
    return spectra
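
A minimal, self-contained sketch of the same FFT tune-extraction idea on a synthetic turn-by-turn signal (np.abs and np.fft.fft are the modern equivalents of the plt.absolute and plt.fft aliases used above; the tune value is assumed):

import numpy as np

n_turns = 1024
q = 0.31                                  # assumed fractional tune
signal = np.cos(2 * np.pi * q * np.arange(n_turns))

spectrum = np.abs(np.fft.fft(signal))
spectrum /= spectrum.max()                # amplitude normalisation, as above
freqs = np.linspace(0, 1, n_turns, endpoint=False)
peak = freqs[np.argmax(spectrum[:n_turns // 2])]   # positive-frequency half only
print(peak)   # ~0.31, the injected tune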
Example 39
def img_transition(file1, file2, map_array, blk_siz=2, n=50):
    I = file2gray(file1)
    J = file2gray(file2)

    d = absolute(I - J)
    steps = linspace(d.min(), d.max(), n+1)

    for i, step in enumerate(steps):
        K = I * (d>step) + J * (d<step)
        # do something with K
        im_name = os.path.join(".", "output", "%s-%s-%02d.png" %(file1, file2, i))
        imsave(im_name, K, cmap=cm.gray)
Example 40
def plot_cascade(cascade, model):
    
    weights = []
    thresholds = []
    for i, stage in enumerate(cascade.stages):
        #print("stage:" , i)
        if stage.feature_type == stage.Level2DecisionTree:
            weights.append(stage.weight)
            thresholds.append(stage.cascade_threshold) 
        elif stage.feature_type == stage.Stumps:
            weights.append(stage.weight)
            thresholds.append(stage.cascade_threshold)       
        else:
            raise Exception("Received an unhandled stage.feature_type")            
    # end of "for each stage"

    for i, stage in enumerate(cascade.stages):
        #print("stage %i cascade threshold:" % i , stage.cascade_threshold)
        #print("stage %i weight:" % i , weights[i])
        pass
    
    if thresholds[0] < -1E5:
        print("The provided model seems not have a soft cascade, " \
              "skipping plot_cascade")
        return

    # create new figure    
    #fig = 
    pylab.figure()
    pylab.clf() # clear the figure
    pylab.gcf().set_facecolor("w") # set white background            
    pylab.grid(True)

    #pylab.spectral() # set the default color map    
        
    # draw the figure
    max_scores = pylab.cumsum(pylab.absolute(weights))
    pylab.plot(max_scores, label="maximum possible score")    
    pylab.plot(thresholds, label="cascade threshold")
    
    pylab.legend(loc ="upper left", fancybox=True)
    pylab.xlabel("Cascade stage")
    pylab.ylabel("Detection score")
  
    title = "Soft cascade"
    if model:
        title = "Soft cascade for model '%s' over '%s' dataset" \
                % (model.detector_name, model.training_dataset_name)
    pylab.title(title)    
    pylab.draw() 

    return
Example 42
def calcaV(W,method = "ratio"):
    """Calculate aV"""
    if method == "ratio":
        return pl.log(pl.absolute(W/pl.roll(W,-1,axis=1)))
    else:
        aVs = pl.zeros(pl.shape(W))
        n = pl.arange(1,pl.size(W,axis=1)+1)
        f = lambda b,t,W: W - b[0] * pl.exp(-b[1] * t)
        
        for i in range(pl.size(W,axis=0)):
            params,result = optimize.leastsq(f,[1.,1.],args=(n,W[i]))
            aVs[i] = params[1] * pl.ones(pl.shape(W[i]))
            
        return aVs
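
A quick check of the "ratio" branch above on synthetic exponentially decaying data, where log|W_t / W_{t+1}| should recover the decay constant everywhere except the wrapped last column:

import numpy as np

t = np.arange(10)
W = 2.5 * np.exp(-0.7 * t)[None, :]              # one row of synthetic data (assumed)
aV = np.log(np.abs(W / np.roll(W, -1, axis=1)))  # the ratio method
print(aV[0, :-1])   # ~0.7 everywhere; the last column wraps around and is excluded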
Example 43
def calc_quality_metrics(true_cf, true_std, std_bias, preds): 
    """ 
    Calculate the CSMF accuracy, absolute error, and relative error for the 
    provided true and predicted CSMFs.
    """
    
    T, J = pl.array(true_cf).shape
    pred_cf = pl.array(preds.mean(0))
    true_cf = pl.array(true_cf)
    
    if len(true_std)==1 and len(true_cf)>1: 
        true_std = [true_std[0] for i in range(len(true_cf))]     
    
    csmf_accuracy = 1. - pl.absolute(pred_cf-true_cf).sum(1) / (2*(1-true_cf.min(1))) 
    abs_err = pl.absolute(pred_cf-true_cf)
    rel_err = pl.absolute(pred_cf-true_cf)/true_cf
    coverage = calc_coverage(true_cf, preds)
    all = pl.np.core.records.fromarrays([true_cf.ravel(), pl.array(true_std).ravel(), (pl.ones([T,J])*pl.array(std_bias)).ravel(),
                                         abs_err.ravel(), rel_err.ravel(), pl.array([[i for j in range(J)] for i in csmf_accuracy]).ravel(),
                                         coverage.ravel(), pl.array([[j for j in range(J)] for t in range(T)]).ravel(),
                                         pl.array([[t for j in range(J)] for t in range(T)]).ravel()],
                                        names=['true_cf','true_std','std_bias','abs_err','rel_err','csmf_accuracy','coverage','cause','time'])
    return all
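
The csmf_accuracy line reduces to 1 - sum|pred - true| / (2 * (1 - min(true))) per row; a tiny numeric check with assumed cause fractions:

import numpy as np

true_cf = np.array([[0.6, 0.3, 0.1]])
pred_cf = np.array([[0.5, 0.35, 0.15]])
acc = 1. - np.abs(pred_cf - true_cf).sum(1) / (2 * (1 - true_cf.min(1)))
print(acc)   # [0.888...]: the |errors| sum to 0.2 and the denominator is 2*(1-0.1)=1.8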
Example 44
def main():
    x = pylab.randn(100)
    t0 = time.perf_counter()
    y1 = ks_loop(x, 0.9, 10)
    t_loop = time.perf_counter() - t0
    t0 = time.perf_counter()
    y2 = ks(x, 0.9, 10)
    t_matrix = time.perf_counter() - t0
    print("Loop method took %g seconds." % t_loop)
    print("Matrix method took %g seconds." % t_matrix)
    # Make sure y1 and y2 are same within very small numeric
    # error.
    assert(pylab.sum(pylab.absolute(y1 - y2)) < 1e-10)

    # Plot x and y
    pylab.figure()
    pylab.subplot(211)
    pylab.stem(x)
    pylab.ylabel('x')
    pylab.subplot(212)
    pylab.stem(y2)
    pylab.ylabel('y')
    pylab.xlabel('samples')

    print("Generating the opening chord of Hard day's night by The Beatles ...")
    Fs, T, chord = generate_cord()
    pylab.figure()
    pylab.plot(pylab.arange(0.0, T, 1.0/Fs), chord)
    pylab.xlabel('time (sec)')
    pylab.title('First Chord of Hard Days Night')
    print("Writing the chord to chord.wav ...")
    C = max(pylab.absolute(chord))
    scipy.io.wavfile.write("chord.wav", Fs,
                           pylab.int16((2**15 - 1) * chord / C))
    print("Done.")

    pylab.show()
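
ks_loop, ks and generate_cord are not shown on this page. One plausible sketch of the two Karplus-Strong-style helpers, consistent with how main() uses them (the assert requires both methods to produce identical output); generate_cord is omitted:

import numpy as np

def ks_loop(x, a, D):
    # loop method: concatenate D successively damped copies of x
    y = np.zeros(D * len(x))
    for i in range(D):
        y[i * len(x):(i + 1) * len(x)] = a**i * x
    return y

def ks(x, a, D):
    # matrix method: outer product of the damping ramp with x, flattened
    return (a**np.arange(D)[:, None] * x[None, :]).ravel()

x = np.random.randn(100)
assert np.sum(np.abs(ks_loop(x, 0.9, 10) - ks(x, 0.9, 10))) < 1e-10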
Example 45
def calc_quality_metrics(true_cf, true_std, std_bias, preds):
    """ 
    Calculate the CSMF accuracy, absolute error, and relative error for the 
    provided true and predicted CSMFs.
    """

    T, J = pl.array(true_cf).shape
    pred_cf = pl.array(preds.mean(0))
    true_cf = pl.array(true_cf)

    if len(true_std) == 1 and len(true_cf) > 1:
        true_std = [true_std[0] for i in range(len(true_cf))]

    csmf_accuracy = 1. - pl.absolute(pred_cf -
                                     true_cf).sum(1) / (2 *
                                                        (1 - true_cf.min(1)))
    abs_err = pl.absolute(pred_cf - true_cf)
    rel_err = pl.absolute(pred_cf - true_cf) / true_cf
    coverage = calc_coverage(true_cf, preds)
    all = pl.np.core.records.fromarrays([
        true_cf.ravel(),
        pl.array(true_std).ravel(),
        (pl.ones([T, J]) * pl.array(std_bias)).ravel(),
        abs_err.ravel(),
        rel_err.ravel(),
        pl.array([[i for j in range(J)] for i in csmf_accuracy]).ravel(),
        coverage.ravel(),
        pl.array([[j for j in range(J)] for t in range(T)]).ravel(),
        pl.array([[t for j in range(J)] for t in range(T)]).ravel()
    ],
                                        names=[
                                            'true_cf', 'true_std', 'std_bias',
                                            'abs_err', 'rel_err',
                                            'csmf_accuracy', 'coverage',
                                            'cause', 'time'
                                        ])
    return all
Example 46
def validate_age_group(model, replicate):
    # set random seed for reproducibility
    mc.np.random.seed(1234567 + replicate)

    N = 30
    delta_true = 5.0
    pi_true = true_rate_function
    m = simulate_age_group_data(N=N, delta_true=delta_true, pi_true=pi_true)

    if model == "midpoint_covariate":
        fit_midpoint_covariate_model(m)
    if model == "alt_midpoint_covariate":
        fit_alt_midpoint_covariate_model(m)
    elif model == "age_standardizing":
        fit_age_standardizing_model(m)
    elif model == "age_integrating":
        fit_age_integrating_model(m)
    elif model == "midpoint_model":
        fit_midpoint_model(m)
    elif model == "disaggregation_model":
        fit_disaggregation_model(m)
    else:
        raise TypeError('Unknown model type: "%s"' % model)

    # compare estimate to ground truth
    import data_simulation

    m.mu = pandas.DataFrame(
        dict(
            true=[pi_true(a) for a in range(101)],
            mu_pred=m.vars["mu_age"].stats()["mean"],
            sigma_pred=m.vars["mu_age"].stats()["standard deviation"],
        )
    )
    data_simulation.add_quality_metrics(m.mu)
    print "\nparam prediction bias: %.5f, MARE: %.3f, coverage: %.2f" % (
        m.mu["abs_err"].mean(),
        pl.median(pl.absolute(m.mu["rel_err"].dropna())),
        m.mu["covered?"].mean(),
    )
    print

    data_simulation.add_quality_metrics(m.mu)

    data_simulation.initialize_results(m)
    data_simulation.add_to_results(m, "mu")
    data_simulation.finalize_results(m)

    return m
Example 47
def amptime(uv, baseline="0_1", pol="xx", applycal=False):
	'''
	Plots Amp vs Time for a single baseline. 
	'''
	fig = pl.figure()
	ax = fig.add_subplot(111)
	aipy.scripting.uv_selector(uv, baseline, pol)
	for preamble, data, flags in uv.all(raw=True):
		uvw, t, (i, j) = preamble
		ax.plot(t, pl.average(pl.absolute(data)), 'ks', mec='None', alpha=0.2, ms=5)
	hfmt = dates.DateFormatter('%m/%d %H:%M')
	ax.xaxis.set_major_locator(dates.HourLocator())
	ax.xaxis.set_major_formatter(hfmt)
	ax.set_ylim(bottom = 0)
	pl.xticks(rotation='vertical')	
Example 48
def fit(model):
    emp_priors = model.emp_priors

    ## Then fit the model and compare the estimates to the truth
    model.vars = {}
    model.vars['p'] = data_model.data_model('p', model, 'p', 'all', 'total', 'all', None, emp_priors['p', 'mu'], emp_priors['p', 'sigma'])
    model.map, model.mcmc = fit_model.fit_data_model(model.vars['p'], iter=5000, burn=2000, thin=25, tune_interval=100)
    #model.map, model.mcmc = fit_model.fit_data_model(model.vars['p'], iter=101, burn=0, thin=1, tune_interval=100)

    #graphics.plot_one_ppc(model.vars['p'], 'p')
    #graphics.plot_convergence_diag(model.vars)
    graphics.plot_one_type(model, model.vars['p'], emp_priors, 'p')
    pl.plot(model.a, model.pi_age_true, 'b--', linewidth=3, alpha=.5, label='Truth')
    pl.legend(fancybox=True, shadow=True, loc='upper left')
    pl.title('Heterogeneity %s'%model.parameters['p']['heterogeneity'])

    pl.show()

    model.input_data['mu_pred'] = model.vars['p']['p_pred'].stats()['mean']
    model.input_data['sigma_pred'] = model.vars['p']['p_pred'].stats()['standard deviation']
    data_simulation.add_quality_metrics(model.input_data)

    model.delta = pandas.DataFrame(dict(true=[model.delta_true]))
    model.delta['mu_pred'] = pl.exp(model.vars['p']['eta'].trace()).mean()
    model.delta['sigma_pred'] = pl.exp(model.vars['p']['eta'].trace()).std()
    data_simulation.add_quality_metrics(model.delta)

    print('delta')
    print(model.delta)

    print('\ndata prediction bias: %.5f, MARE: %.3f, coverage: %.2f' % (model.input_data['abs_err'].mean(),
                                                     pl.median(pl.absolute(model.input_data['rel_err'].dropna())),
                                                                       model.input_data['covered?'].mean()))

    model.mu = pandas.DataFrame(dict(true=model.pi_age_true,
                                     mu_pred=model.vars['p']['mu_age'].stats()['mean'],
                                     sigma_pred=model.vars['p']['mu_age'].stats()['standard deviation']))
    data_simulation.add_quality_metrics(model.mu)

    data_simulation.initialize_results(model)
    data_simulation.add_to_results(model, 'delta')
    data_simulation.add_to_results(model, 'mu')
    data_simulation.add_to_results(model, 'input_data')
    data_simulation.finalize_results(model)

    print(model.results)
Example 49
    def _corrIntensity(self, laserNum, intensity, dist):
        #iScale = self.calib.maxIntensity - self.calib.minIntensity
        focalOffset = 256 * (
            1 - self.calib.caliParams[laserNum].focalDist / 13100)**2
        corrIntensity = intensity + self.calib.caliParams[
            laserNum].focalSlope * pl.absolute(focalOffset - 256 *
                                               (1 - dist / 65535)**2)

        if corrIntensity < self.calib.minIntensity:
            corrIntensity = self.calib.minIntensity
        if corrIntensity > self.calib.maxIntensity:
            corrIntensity = self.calib.maxIntensity

        corrIntensity = (corrIntensity -
                         self.calib.minIntensity) / self.calib.maxIntensity

        return corrIntensity
Example 50
def calculate_1d_sussix_spectrum(turn, window_width, x, xp, qx):
    macroparticlenumber = len(x)
    tunes = plt.zeros(macroparticlenumber)

    # Initialise Sussix object
    SX = PySussix.Sussix()
    SX.sussix_inp(nt1=1, nt2=window_width, idam=1, ir=0, tunex=qx)

    n_particles_to_analyse_10th = int(macroparticlenumber/10)
    print('Running SUSSIX analysis ...')
    for i in range(macroparticlenumber):
        if not i % n_particles_to_analyse_10th: print('  Particle', i)
        SX.sussix(x[i,turn:turn+window_width], xp[i,turn:turn+window_width],
                  x[i,turn:turn+window_width], xp[i,turn:turn+window_width],
                  x[i,turn:turn+window_width], xp[i,turn:turn+window_width]) # this line is not used by sussix!
        tunes[i] = plt.absolute(SX.ox[plt.argmax(SX.ax)])

    return tunes
Example 51
def validate_age_group(model, replicate):
    # set random seed for reproducibility
    mc.np.random.seed(1234567 + replicate)

    N = 30
    delta_true = 5.
    pi_true = true_rate_function
    m = simulate_age_group_data(N=N, delta_true=delta_true, pi_true=pi_true)

    if model == 'midpoint_covariate':
        fit_midpoint_covariate_model(m)
    elif model == 'alt_midpoint_covariate':
        fit_alt_midpoint_covariate_model(m)
    elif model == 'age_standardizing':
        fit_age_standardizing_model(m)
    elif model == 'age_integrating':
        fit_age_integrating_model(m)
    elif model == 'midpoint_model':
        fit_midpoint_model(m)
    elif model == 'disaggregation_model':
        fit_disaggregation_model(m)
    else:
        raise TypeError('Unknown model type: "%s"' % model)

    # compare estimate to ground truth
    import data_simulation
    m.mu = pandas.DataFrame(
        dict(true=[pi_true(a) for a in range(101)],
             mu_pred=m.vars['mu_age'].stats()['mean'],
             sigma_pred=m.vars['mu_age'].stats()['standard deviation']))
    data_simulation.add_quality_metrics(m.mu)
    print('\nparam prediction bias: %.5f, MARE: %.3f, coverage: %.2f' % (
        m.mu['abs_err'].mean(), pl.median(pl.absolute(
            m.mu['rel_err'].dropna())), m.mu['covered?'].mean()))
    print()

    data_simulation.add_quality_metrics(m.mu)

    data_simulation.initialize_results(m)
    data_simulation.add_to_results(m, 'mu')
    data_simulation.finalize_results(m)

    return m
Example 52
def validate_age_group(model, replicate):
    # set random seed for reproducibility
    mc.np.random.seed(1234567+replicate)

    N = 30
    delta_true = 5.
    pi_true = true_rate_function
    m = simulate_age_group_data(N=N, delta_true=delta_true, pi_true=pi_true)
    
    if model == 'midpoint_covariate':
        fit_midpoint_covariate_model(m)
    elif model == 'age_standardizing':
        fit_age_standardizing_model(m)
    elif model == 'age_integrating':
        fit_age_integrating_model(m)
    elif model == 'midpoint_model':
        fit_midpoint_model(m)
    elif model == 'disaggregation_model':
        fit_disaggregation_model(m)
    else:
        raise TypeError('Unknown model type: "%s"' % model)


    # compare estimate to ground truth
    import data_simulation
    m.mu = pandas.DataFrame(dict(true=[pi_true(a) for a in range(101)],
                                 mu_pred=m.vars['mu_age'].stats()['mean'],
                                 lb_pred=m.vars['mu_age'].stats()['95% HPD interval'][:,0],
                                 ub_pred=m.vars['mu_age'].stats()['95% HPD interval'][:,1]))
    data_simulation.add_quality_metrics(m.mu)
    print('\nparam prediction bias: %.5f, MARE: %.3f, coverage: %.2f' % (m.mu['abs_err'].mean(),
                                                                         pl.median(pl.absolute(m.mu['rel_err'].dropna())),
                                                                         m.mu['covered?'].mean()))
    print()


    data_simulation.add_quality_metrics(m.mu)

    data_simulation.initialize_results(m)
    data_simulation.add_to_results(m, 'mu')
    data_simulation.finalize_results(m)

    return m
Example 53
def filter_cosmic_rays(spectrum, error_thr=10., filter_size=5):
    """
    Filters out cosmic rays from a 1D spectrum (simple spike detection algorithm).
    
    Args:
        spectrum (numpy array): a 1D numpy array with spectrum data
        error_thr (float, optional): spike detection threshold. If the distance from the smoothed
        spectrum to the real spectrum is greater than error_thr, the data point is replaced by the smoothed value.
        filter_size (int, optional): number of pixels in the smoothing window.
    
    Returns:
        numpy array: the spectrum corrected (spikes removed).
    """

    spectrum_smooth = scipy.signal.medfilt(spectrum, filter_size)
    bad_pixels = pl.absolute(spectrum - spectrum_smooth) > float(error_thr)
    spectrum_corr = spectrum.copy()
    spectrum_corr[bad_pixels] = spectrum_smooth[bad_pixels]
    
    return spectrum_corr
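
A small usage sketch for filter_cosmic_rays on synthetic data (spike positions and amplitudes assumed; the function above, with its pl and scipy.signal imports, is assumed to be in scope):

import numpy as np

x = np.linspace(0, 10, 500)
truth = 100 * np.exp(-(x - 5)**2)      # smooth underlying spectrum
spectrum = truth.copy()
spectrum[120] += 500                    # injected "cosmic ray" spikes
spectrum[340] += 800

clean = filter_cosmic_rays(spectrum, error_thr=10., filter_size=5)
print(np.abs(clean - truth).max())      # small: the spikes are gone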
Example 54
    def _cqft_intensified(self):
        """
        ::

            Constant-Q Fourier transform using only max abs(STFT) value in each band
        """
        if not self._have_stft:
            if not self._stft():
                return False
        self._make_log_freq_map()
        r,b=self.Q.shape
        b,c=self.STFT.shape
        self.CQFT=P.zeros((r,c))
        for i in P.arange(r):
            for j in P.arange(c):
                self.CQFT[i,j] = (self.Q[i,:]*P.absolute(self.STFT[:,j])).max()
        self._have_cqft=True
        self._is_intensified=True
        self.inverse=self.icqft
        self.X=self.CQFT
        return True
Example 55
 def start(self):
     self.format_data()
     self.y_data = gen(self.shape_cb.currentText()) * pl.absolute(
         self.real_high - self.real_low) / 2 + self.real_off
     self.x_data = pl.linspace(0, 1 / self.real_freq, pl.size(self.y_data))
     self.ax.clear()
     self.ax.plot(self.x_data * 1000, self.y_data)
      print(len(self.x_data))
      print(len(self.y_data))
     self.canvas.draw()
     if mode == 'has_visa':
         self.inst.write('source1:function:shape ' +
                         self.shape_cb.currentText())
         self.inst.write('source1:frequency:fixed ' +
                         self.freq_text.text() + self.freq_cb.currentText())
         self.inst.write('source1:voltage:level:immediate:high ' +
                         self.high_text.text() + self.high_cb.currentText())
         self.inst.write('source1:voltage:level:immediate:low ' +
                         self.low_text.text() + self.low_cb.currentText())
         self.inst.write('source1:voltage:level:immediate:offset ' +
                         self.off_text.text() + self.off_cb.currentText())
Example 56
    def _cqft(self):
        """
        ::

            Constant-Q Fourier transform.
        """

        if not self._power():
            return False
        fp = self._check_feature_params()
        if self.intensify:
            self._cqft_intensified()
        else:
            self._make_log_freq_map()
            self.CQFT=P.sqrt(P.array(P.mat(self.Q)*P.mat(P.absolute(self.STFT)**2)))
            self._is_intensified=False
        self._have_cqft=True
        if self.verbosity:
            print "Extracted CQFT: intensified=%d" %self._is_intensified
        self.inverse=self.icqft
        self.X=self.CQFT
        return True
Example 57
    def CalculateGrowthInternal(self, times, levels):
        res_mat = self.CalculateRates(times, levels)
        max_i = self.FindMaximumGrowthRate(res_mat)

        t_mat = pylab.matrix(times).T
        count_matrix = pylab.matrix(levels).T
        norm_counts = count_matrix - min(levels)

        abs_res_mat = pylab.array(res_mat)
        abs_res_mat[:, 0] = pylab.absolute(res_mat[:, 0])
        order = abs_res_mat[:, 0].argsort(axis=0)
        stationary_indices = [i for i in order if i >= max_i]
        stationary_indices = pylab.array(
            [i for i in stationary_indices if res_mat[i, 3] > 0])

        stationary_level = 0.0
        if stationary_indices.any():
            stationary_level = res_mat[stationary_indices[0], 3]

        pylab.hold(True)
        pylab.plot(times, norm_counts)
        pylab.plot(times, res_mat[:, 0])
        pylab.plot([0, times.max()], [self.minimum_level, self.minimum_level],
                   'r--')
        pylab.plot([0, times.max()], [self.maximum_level, self.maximum_level],
                   'r--')
        i_range = range(max_i, max_i + self.window_size)

        x = pylab.hstack([t_mat[i_range, 0], pylab.ones((len(i_range), 1))])
        y = x * pylab.matrix(res_mat[max_i, 0:2]).T
        pylab.plot(x[:, 0], pylab.exp(y), 'k:', linewidth=4)

        #pylab.plot([0, max(times)], [stationary_level, stationary_level], 'k-')

        pylab.yscale('log')
        pylab.legend(['OD', 'growth rate', 'threshold', 'fit'])
        #, 'stationary'])

        return res_mat[max_i, 0], stationary_level