Example #1
File: data.py Project: flaxter/gbd
def generate_smooth_gp_re_a(out_fname='data.csv', country_variation=True):
    """ Generate random data based on a nested gaussian process random
    effects model with age, with covariates that vary smoothly over
    time (where unexplained variation in time does not interact with
    unexplained variation in age)

    This function generates data for all countries in all regions, and
    all age groups based on the model::

        Y_r,c,t = beta * X_r,c,t + f_r(t) + g_r(a) + f_c(t)

        beta = [30., -.5, .1, .1, -.1, 0., 0., 0., 0., 0.]
        f_r ~ GP(0, C(3.))
        g_r ~ GP(0, C(2.))
        f_c ~ GP(0, C(1.)) or 0 depending on country_variation flag
        C(amp) = Matern(amp, scale=20., diff_degree=2)

        X_r,c,t[0] = 1
        X_r,c,t[1] = t - 1990.
        X_r,c,t[k] ~ GP(t; 0, C(1)) for k >= 2
    """
    c4 = countries_by_region()

    data = col_names()

    beta = [30., -.5, .1, .1, -.1, 0., 0., 0., 0., 0.]
    C0 = gp.matern.euclidean(time_range, time_range, amp=1., scale=25., diff_degree=2)
    C1 = gp.matern.euclidean(age_range, age_range, amp=1., scale=25., diff_degree=2)
    C2 = gp.matern.euclidean(time_range, time_range, amp=.1, scale=25., diff_degree=2)
    C3 = gp.matern.euclidean(time_range, time_range, amp=1., scale=25., diff_degree=2)

    g = mc.rmv_normal_cov(pl.zeros_like(age_range), C1)
    for r in c4:
        f_r = mc.rmv_normal_cov(pl.zeros_like(time_range), C0)
        g_r = mc.rmv_normal_cov(g, C1)
        for c in c4[r]:
            f_c = mc.rmv_normal_cov(pl.zeros_like(time_range), C2)

            x_gp = {}
            for k in range(2,10):
                x_gp[k] = mc.rmv_normal_cov(pl.zeros_like(time_range), C3)

            for j, t in enumerate(time_range):
                for i, a in enumerate(age_range):
                    x = [1] + [j] + [x_gp[k][j] for k in range(2,10)]
                    y = float(pl.dot(beta, x)) + f_r[j] + g_r[i]
                    if country_variation:
                        y += f_c[j]
                    se = 0.
                    data.append([r, c, t, a, y, se] + list(x))
    write(data, out_fname)
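
A minimal usage sketch (illustrative file names; assumes the module-level
names used above -- countries_by_region, col_names, time_range, age_range,
gp, mc, pl, and write -- are defined elsewhere in the project):

generate_smooth_gp_re_a('data_with_country_re.csv', country_variation=True)
generate_smooth_gp_re_a('data_no_country_re.csv', country_variation=False)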
Example #3
def plotclusters(labels, imap):
    outm = pl.zeros_like(imap)
    for i in range(labels.max() + 1):
        pixs = pl.where(labels == i)
        outm[pixs] = imap[pixs].mean()

    return outm
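
A quick illustration of what plotclusters computes (hypothetical 2x4 inputs;
every pixel is replaced by the mean value of its cluster):

import pylab as pl

imap = pl.array([[1., 2., 3., 4.],
                 [5., 6., 7., 8.]])
labels = pl.array([[0, 0, 1, 1],
                   [0, 0, 1, 1]])
print(plotclusters(labels, imap))
# cluster 0 mean = 3.5, cluster 1 mean = 5.5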
Example #4
 def plot(self, plot_range):
     u = pb.linspace(-plot_range, plot_range, 1000)
     z = pb.zeros_like(u)
     for i, j in enumerate(u):
         z[i] = self(j)
     pb.plot(u, z)
     pb.show()
Example #5
def dist2(x):
  R, ETA = pylab.meshgrid(r[r<fit_rcutoff], eta)
  g = pylab.zeros_like(ETA)
  g = evalg(x, ETA, R)
  gfit = pylab.reshape(g, len(eta)*len(r[r<fit_rcutoff]))
  return gfit - pylab.reshape([g[r<fit_rcutoff] for g in ghs],
                              len(eta)*len(r[r<fit_rcutoff]))
Example #6
def linearSL(phiOld, c, nt):
    """Semi-Lagrangian advection of the profile in phiOld using
    linear interpolation."""

    # the number of independent points
    nx = len(phiOld) - 1
    # add another wrap-around point for cyclic boundaries
    phiOld = pl.append(phiOld, [phiOld[1]])

    # new time-step arrays for phi
    phi = pl.zeros_like(phiOld)

    # loop over the time steps
    for it in range(1, nt + 1):
        # loop over the grid-points
        for j in range(1, len(phi) - 1):
            # The index of the point to the left of the departure point
            # (wrap around if less than zero)
            k = int(pl.floor(j - c)) % nx
            # the location of the departure point within interval k->k+1
            beta = (-c) % 1

            # Linear interpolation onto the departure point
            phi[j] = (1 - beta) * phiOld[k] + beta * phiOld[k + 1]

        # cyclic BCs
        phi[0] = phi[-2]
        phi[-1] = phi[1]

        # update arrays
        phiOld = phi.copy()

    # return phi (without the cyclic wrap-around point)
    return phi[0:len(phi) - 1]
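
A smoke test under assumed grid parameters: advecting a Gaussian bump
c*nt = nx grid lengths carries it once around the periodic domain, so the
result should land near the initial profile (smoothed by the linear
interpolation):

import pylab as pl

nx = 40
x = pl.linspace(0., 1., nx + 1)
phi0 = pl.exp(-100. * (x - 0.5)**2)
phi = linearSL(phi0, c=0.25, nt=160)  # 0.25 * 160 = 40 = nx grid lengths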
Example #7
def dS_dX(x0, PR, h_mag = .0005):
    """
    calculates the Jacobian of the SLIP at the given point x0,
    with PR beeing the parameters for that step
    coordinates under consideration are:
        y
        vx
        vz
    only for a single step!
    """
    df = []
    for dim in range(len(x0)):
        delta = zeros_like(x0)
        delta[dim] = 1.            
        h = h_mag * delta      
        # in positive direction           
        resRp = sl.SLIP_step3D(x0 + h, PR)
        SRp = array([resRp['y'][-1], resRp['vx'][-1], resRp['vz'][-1]])
        #fhp = array(SR2 - x0)
        # in negative direction
        resRn = sl.SLIP_step3D(x0 - h, PR)
        SRn = array([resRn['y'][-1], resRn['vx'][-1], resRn['vz'][-1]])
        #fhn = array(SR2 - x0)
        # derivative: difference quotient
        df.append( (SRp - SRn)/(2.*h_mag) )
    
    return vstack(df).T
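
The same central-difference scheme can be sanity-checked on a function with
a known Jacobian; this standalone sketch uses plain numpy and is independent
of the SLIP model itself:

import numpy as np

def numeric_jacobian(f, x0, h_mag=5e-4):
    """Column-by-column central-difference Jacobian (same scheme as dS_dX)."""
    cols = []
    for dim in range(len(x0)):
        h = np.zeros_like(x0)
        h[dim] = h_mag
        cols.append((f(x0 + h) - f(x0 - h)) / (2. * h_mag))
    return np.vstack(cols).T

f = lambda v: np.array([v[0] * v[1], v[0] + v[1]**2])
print(numeric_jacobian(f, np.array([1., 2.])))  # exact Jacobian: [[2, 1], [1, 4]]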
Example #9
def dist2(x):
    R, GSIGMAS = pylab.meshgrid(r[r < fit_rcutoff], gsigmas)
    g = pylab.zeros_like(GSIGMAS)
    g = evalg(x, GSIGMAS, R)
    gfit = pylab.reshape(g, len(gsigmas) * len(r[r < fit_rcutoff]))
    return gfit - pylab.reshape([g[r < fit_rcutoff] for g in ghs],
                                len(gsigmas) * len(r[r < fit_rcutoff]))
Example #10
def plot_count(fname, dpi=70):
    # Load data
    date, libxc, c, code, test, doc = np.loadtxt(fname, unpack=True)
    zero = pl.zeros_like(date)

    fig = pl.figure(1, figsize=(10, 5), dpi=dpi)
    ax = fig.add_subplot(111)
    polygon(date,
            c + code + test,
            c + code + test + doc,
            facecolor='m',
            label='Documentation')
    polygon(date, c + code, c + code + test, facecolor='y', label='Tests')
    polygon(date, c, c + code, facecolor='g', label='Python-code')
    polygon(date, zero, c, facecolor='r', label='C-code')
    polygon(date, zero, zero, facecolor='b', label='Fortran-code')

    months = pl.MonthLocator()
    months4 = pl.MonthLocator(interval=4)
    month_year_fmt = pl.DateFormatter("%b '%y")

    ax.xaxis.set_major_locator(months4)
    ax.xaxis.set_minor_locator(months)
    ax.xaxis.set_major_formatter(month_year_fmt)
    labels = ax.get_xticklabels()
    pl.setp(labels, rotation=30)
    pl.axis('tight')
    pl.legend(loc='upper left')
    pl.title('Number of lines')
    pl.savefig(fname.split('.')[0] + '.png', dpi=dpi)
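
The polygon helper is not part of pylab and is not shown in these snippets;
a minimal stand-in that fills the band between two stacked line-count curves
might look like this (an assumption, not the project's actual implementation):

import pylab as pl

def polygon(x, y_lower, y_upper, **kwargs):
    # fill the area between the lower and upper curves
    pl.fill_between(x, y_lower, y_upper, **kwargs)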
Example #12
def show_grey_channels(I):
    K = average(I, axis=2)
    for i in range(3):
        J = zeros_like(I)
        J[:, :, i] = K
        figure(i+10)
        imshow(J)
Example #13
def bad_model(X):
    """ Results in a matrix with shape matching X, but all rows sum to 1"""
    N, T, J = X.shape
    Y = pl.zeros_like(X)
    for t in range(T):
        Y[:,t,:] = X[:,t,:] / pl.outer(pl.array(X[:,t,:]).sum(axis=1), pl.ones(J))
    return Y.view(pl.recarray) 
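
A quick property check on random data (illustrative): after the
normalization every row of the result sums to one.

import pylab as pl

X = pl.rand(4, 3, 5)
Y = bad_model(X)
print(pl.allclose(Y.sum(axis=2), 1.0))  # True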
Example #15
def plot_count(fname, dpi=70):
    # Load data
    date, libxc, c, code, test, doc = np.loadtxt(fname, unpack=True)
    zero = pl.zeros_like(date)

    fig = pl.figure(1, figsize=(10, 5), dpi=dpi)
    ax = fig.add_subplot(111)
    polygon(date, c + libxc + code + test, c + libxc + code + test + doc, facecolor="m", label="Documentation")
    polygon(date, c + libxc + code, c + libxc + code + test, facecolor="y", label="Tests")
    polygon(date, c + libxc, c + libxc + code, facecolor="g", label="Python-code")
    polygon(date, c, c + libxc, facecolor="c", label="LibXC-code")
    polygon(date, zero, c, facecolor="r", label="C-code")
    polygon(date, zero, zero, facecolor="b", label="Fortran-code")

    months = pl.MonthLocator()
    months4 = pl.MonthLocator(interval=4)
    month_year_fmt = pl.DateFormatter("%b '%y")

    ax.xaxis.set_major_locator(months4)
    ax.xaxis.set_minor_locator(months)
    ax.xaxis.set_major_formatter(month_year_fmt)
    labels = ax.get_xticklabels()
    pl.setp(labels, rotation=30)
    pl.axis("tight")
    pl.legend(loc="upper left")
    pl.title("Number of lines")
    pl.savefig(fname.split(".")[0] + ".png", dpi=dpi)
Example #16
def plot(f, x_start, x_end, x_interval=None, epsilon=0.15):
    if x_interval is None:
        x_interval = x_end - x_start

    x = py.arange(x_start, x_end + 0.1 * x_interval, x_interval)
    # y = x^2
    py.plot(x, f(x), 'ko', label='$y=x^2-10$')
    # y = 0
    py.plot(x, py.zeros_like(x), 'ro', label='$y=0$')

    # +/- epsilon
    py.plot(x, epsilon * py.ones_like(x), 'r-.', label=r'$+\epsilon$')
    py.plot(x, -epsilon * py.ones_like(x), 'r--', label=r'$-\epsilon$')

    # x axis label
    py.xlabel('x')

    # y axis label
    py.ylabel('y')

    # Show legend
    py.legend()

    # Indicate grid
    py.grid()

    return x
Example #19
def edge_props2(sheet, cut_node=None, trail=None, dead_end=None):
    counts_file = '%s/reformated_counts%s.csv' % (DATASETS_DIR, sheet)
    df = pd.read_csv(counts_file, names = ['source', 'dest', 'time'], skipinitialspace=True)
    df['time'] = pd.to_datetime(df['time'])
    df.sort_values(by='time', inplace=True)
    
    times = list(df['time'])
    deltas = []
    starttime = times[0]
    for time in times:
        deltas.append((time - starttime) / pylab.timedelta64(1, 's'))

    sources = list(df['source'])
    dests = list(df['dest'])
    counts = {}
    
    delta_edges = defaultdict(list)
    for i in range(len(deltas)):
        delta = deltas[i]
        source = sources[i]
        dest = dests[i]
        #edge = (source, dest)
        edge = tuple(sorted((source, dest)))
        if cut_node is None or cut_node in edge:
            delta_edges[delta].append(edge)
            counts[edge] = []

    for delta in sorted(delta_edges.keys()):
        for edge in counts:
            count = 0
            if len(counts[edge]) > 0:
                count = counts[edge][- 1]
            if edge in delta_edges[delta]:
                count += 1
            counts[edge].append(count)

    step_times = pylab.arange(1, len(delta_edges.keys()) + 1, dtype=pylab.float64)
    norms = pylab.zeros_like(step_times)
    for edge in counts:
        counts[edge] /= step_times
        norms += counts[edge]
 
    pylab.figure()
    for edge in counts:
        counts[edge] /= norms
        if (trail is None and dead_end is None) or ((edge == trail) or (edge == dead_end)):
            label = edge
            if trail is not None and edge == trail:
                label = 'trail'
            elif dead_end is not None and edge == dead_end:
                label = 'dead end'
            pylab.plot(sorted(delta_edges.keys()), counts[edge], label=label)

    pylab.legend()
    pylab.xlabel('time (seconds)')
    pylab.ylabel('proportion of choices on edge')
    pylab.savefig('cut_edge_props%s.pdf' % sheet, format='pdf')
    pylab.close()
Example #20
 def country_tau_c(i_c=i_c,
                   sigma_explained=sigma_explained,
                   sigma_e=sigma_e,
                   var_d_c=data.se[i_c]**2.):
     """ country_tau_c[row] = tau[row] * 1[row.country == c]"""
     country_tau_c = pl.zeros_like(data.y)
     country_tau_c[i_c] = 1 / (sigma_e**2. + sigma_explained[i_c]**2. +
                               var_d_c)
     return country_tau_c
Example #21
def uatv(u):
    "Transforms u to v points for the C-grid"
    [nx,ny] = py.shape(u)
    uatv = py.zeros_like(u)
    # loop through x and y directions of u
    for i in range(-1, nx - 1):
        for j in range(1, ny):
            uatv[i,j] = 0.25*(u[i,j] + u[i+1,j] + u[i,j-1] + u[i+1,j-1])
    return uatv
Example #22
def int_f(a, fs=1.):
    """
    A Fourier-based integrator.

    ===========
    Parameters:
    ===========
    a : *array* (1D)
        The array which should be integrated
    fs : *float*
        sampling time of the data

    ========
    Returns:
    ========
    y : *array* (1D)
        The integrated array

    """

    if False:
        # version with "mirrored" code
        xp = hstack([a, a[::-1]])
        int_fluc = int_f0(xp, float(fs))[:len(a)]
        baseline = mean(a) * arange(len(a)) / float(fs)
        return int_fluc + baseline - int_fluc[0]
    
    # old version
    baseline = mean(a) * arange(len(a)) / float(fs)
    int_fluc = int_f0(a, float(fs))
    return int_fluc + baseline - int_fluc[0]

    # old code - remove eventually (comment on 02/2014)
    # periodify
    if False:
        baseline = linspace(a[0], a[-1], len(a))
        a0 = a - baseline
        m = a0[-1] - a0[-2]
        b2 = linspace(0, -.5 * m, len(a))
        baseline -= b2
        a0 += b2
        a2 = hstack([a0, -1. * a0[1:][::-1]]) # "smooth" periodic signal  

        dbase = baseline[1] - baseline[0]
        t_vec = arange(len(a)) / float(fs)
        baseint = baseline[0] * t_vec + .5 * dbase * t_vec ** 2
        
        # define frequencies
        T = len(a2) / float(fs)
        freqs = 1. / T * arange(len(a2))
        freqs[len(freqs) // 2 + 1 :] -= float(fs)

        spec = fft.fft(a2)
        spec_i = zeros_like(spec, dtype=complex)
        spec_i[1:] = spec[1:] / (2j * pi * freqs[1:])
        res_int = fft.ifft(spec_i).real[:len(a0)] + baseint
        return res_int - res_int[0]
Example #23
def ddyC(f, dy):
    "Calculates the C-grid ddy of 2d array f at the staggered locations"
    [nx, ny] = py.shape(f)
    dfdy = py.zeros_like(f)

    for i in range(-1, nx - 1):
        for j in range(1, ny):
            dfdy[i, j] = (f[i, j] - f[i, j - 1]) / dy
    return dfdy
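
An illustrative check: applying ddyC to a field that is linear in y
recovers the constant slope at every staggered point with j >= 1
(the j = 0 column is left at zero):

import pylab as py

f = py.tile(2.0 * py.arange(6.), (6, 1))  # f[i, j] = 2*j
print(ddyC(f, dy=1.0)[:, 1:])  # all entries 2.0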
Example #24
def ddxC(f, dx):
    "Calculates the C-grid ddx of 2d array f at the staggered locations"
    [nx, ny] = py.shape(f)
    dfdx = py.zeros_like(f)

    for i in range(-1, nx - 1):
        for j in range(0, ny):
            dfdx[i, j] = (f[i, j] - f[i - 1, j]) / dx
    return dfdx
Example #26
def hatu(h):
    "Transforms h to u points for the C-grid"
    [nx, ny] = py.shape(h)
    hatu = py.zeros_like(h)
    # loop through x and y directions of h
    for i in range(-1, nx - 1):
        for j in range(0, ny):
            hatu[i, j] = 0.5 * (h[i - 1, j] + h[i, j])
    return hatu
Example #27
    def minimize_partition_measures(self):
        self.Vu = pl.zeros_like(self.Kvals).reshape(-1, 1) * 1.0
        self.Vo = pl.zeros_like(self.Kvals).reshape(-1, 1) * 1.0
        nvals = len(self.Kvals)
        for j, K in enumerate(self.Kvals):
            if self.verbose:
                print(f"Running with K={K} clusters")
            clusters = AgglomerativeClustering(
                n_clusters=K,
                affinity="precomputed",
                linkage="average",
                connectivity=self.connectivity,
            )
            clusters.fit_predict(self._Affinity)
            mu, MD = self.intracluster_distance(K, clusters.labels_)
            dmu = self._metric.pairwise(mu[:, :self._nfeat])
            dmu = pl.ma.fix_invalid(dmu, fill_value=1.0).data
            if self._has_angles:
                dmuang = self.haversine.pairwise(mu[:, self._nfeat:])
                # dmuang =pl.ma.fix_invalid(dmuang,  fill_value=pl.pi).data

                dmu = pl.multiply(dmu, dmuang)

            pl.fill_diagonal(dmu, pl.inf)
            self.Vo[j] = K / dmu.min()  # overpartition meas.
            self.Vu[j] = MD.sum() / K  # underpartition meas.

        # We have to match  Vo and Vu, we rescale Vo so that it ranges as Vu
        min_max_scaler = preprocessing.MinMaxScaler(
            feature_range=(self.Vu.min(), self.Vu.max()))

        self.Vu = min_max_scaler.fit_transform((self.Vu)).reshape(nvals)
        self.Vo = min_max_scaler.fit_transform((self.Vo)).reshape(nvals)
        # minimizing the squared sum

        Vsv = interp1d(self.Kvals,
                       pl.sqrt(self.Vu**2 + self.Vo**2).T,
                       kind="slinear")

        Krange = pl.arange(self.Kvals.min(), self.Kvals.max())
        minval = pl.argmin(Vsv(Krange) - Vsv(Krange).min())

        Kopt = Krange[minval]
        return Kopt
Example #28
    def minimize_residual_variances(self, **kwargs):
        """
        Syst. residuals behave as an underpartition measure: the larger K is, the lower the residual (the more patches, the better).
        Stat. residuals behave as an overpartition measure: the larger K is, the higher the residual (the smaller each patch, the worse, because there is less signal-to-noise per patch).

        """
        self.Vu = pl.zeros_like(self.Kvals).reshape(-1, 1) * 1.0
        self.Vo = pl.zeros_like(self.Kvals).reshape(-1, 1) * 1.0
        nvals = len(self.Kvals)
        for j, K in enumerate(self.Kvals):
            if self.verbose:
                print(f"Running with K={K} clusters")
            clusters = AgglomerativeClustering(
                n_clusters=K,
                affinity="precomputed",
                linkage="average",
                connectivity=self.connectivity,
            )
            clusters.fit_predict(self._Affinity)
            msys, mstat = estimate_Stat_and_Sys_residuals(
                clusters.labels_,
                galactic_binmask=self.galactic_mask,
                **kwargs)
            m1 = pl.ma.masked_equal(msys[1], hp.UNSEEN).mask
            m2 = pl.ma.masked_equal(mstat[1], hp.UNSEEN).mask

            _, _, var_sys, var_stat = estimate_spectra(msys, mstat)

            self.Vo[j], self.Vu[j] = var_stat, var_sys

        # We have to match  Vo and Vu, we rescale Vo so that it ranges as Vu
        min_max_scaler = preprocessing.MinMaxScaler(
            feature_range=(self.Vu.min(), self.Vu.max()))
        self.Vu = min_max_scaler.fit_transform((self.Vu)).reshape(nvals)
        self.Vo = min_max_scaler.fit_transform((self.Vo)).reshape(nvals)

        Vsv = interp1d(self.Kvals,
                       pl.sqrt(self.Vu**2 + self.Vo**2).T,
                       kind="slinear")
        Krange = pl.arange(self.Kvals.min(), self.Kvals.max())
        minval = pl.argmin(Vsv(Krange) - Vsv(Krange).min())
        Kopt = Krange[minval]

        return Kopt
Example #29
def hatv(h):
    "Transforms h to v points for the C-grid"
    [nx, ny] = py.shape(h)
    hatv = py.zeros_like(h)
    # loop through x and y directions of h
    for i in range(-1, nx - 1):
        hatv[i, 0] = h[i, 0]
        for j in range(1, ny):
            hatv[i, j] = 0.5 * (h[i, j - 1] + h[i, j])
    return hatv
Example #30
def vatu(v):
    "Transforms v to u points for the C-grid"
    [nx, ny] = py.shape(v)
    vatu = py.zeros_like(v)
    # loop through x and y directions of v
    for i in range(-1, nx - 1):
        for j in range(0, ny - 1):
            vatu[i, j] = 0.25 * (v[i-1, j] + v[i-1, j+1] + v[i, j] + v[i, j+1])
        vatu[i, ny-1] = 0.25 * (v[i-1, ny-1] + v[i, ny-1])
    return vatu
Example #31
def divC(u, v, dx, dy):
    "Calculates the divergence of u and v for the C-grid"
    [nx, ny] = py.shape(u)
    divu = py.zeros_like(u)

    for i in range(-1, nx - 1):
        for j in range(0, ny - 1):
            divu[i, j] = (u[i+1, j] - u[i, j])/dx + (v[i, j+1] - v[i, j])/dy
        divu[i, ny-1] = (u[i+1, ny-1] - u[i, ny-1])/dx - v[i, ny-1]/dy
    return divu
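
A sanity check on uniform flow (illustrative): the interior divergence is
zero; the j = ny-1 row is special-cased above as a boundary, so it is
excluded here:

import pylab as py

u = py.ones((8, 8))
v = py.ones((8, 8))
print(abs(divC(u, v, dx=1., dy=1.)[:, :-1]).max())  # 0.0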
Example #32
def ccm89(wav, a_v, r_v=3.1):
    '''Get dust extinction using the CCM89 law.

    Author: ZSLIN ([email protected])'''
    x = 1. / wav * 1.e4  # convert Angstrom to 1./micron
    nwav = len(wav)
    A_lam = pl.zeros_like(wav)
    for i in range(nwav):
        a, b = get_a_b(x[i])
        A_lam[i] = (a + b / r_v) * a_v
    return A_lam
Example #33
 def country_param_pred_c(i=i_c,
                          mu=mu,
                          f=f[r_index_c],
                          g=g[r_index_c],
                          h=h[c_index_c],
                          a=a_index_c,
                          t=t_index_c):
     """ country_param_pred_c[row] = parameter_predicted[row] * 1[row.country == c]"""
     country_param_pred_c = pl.zeros_like(data.y)
     country_param_pred_c[i] = mu[i] + f[t] + g[a] + h[t]
     return country_param_pred_c
Example #34
def ddyA(f, dy):
    "Calculates the A-grid ddy of 2d array f"
    [nx, ny] = py.shape(f)
    dfdy = py.zeros_like(f)

    for i in range(-1, nx - 1):
        for j in range(1, ny - 1):
            dfdy[i, j] = (f[i, j + 1] - f[i, j - 1]) / (2. * dy)
        dfdy[i, 0] = 0  #(f[i,1] - f[i,0])/(2*dy)
        dfdy[i, ny - 1] = 0  #(f[i,ny-1] - f[i,ny-2])/(2*dy)
    return dfdy
Example #35
def ddxA(f, dx):
    "Calculates the A-grid ddx of 2d array f"
    [nx, ny] = py.shape(f)
    dfdx = py.zeros_like(f)

    for i in range(-1, nx - 1):
        for j in range(1, ny - 1):
            dfdx[i, j] = (f[i + 1, j] - f[i - 1, j]) / (2. * dx)
        dfdx[i, 0] = 0
        dfdx[i, ny - 1] = 0
    return dfdx
Example #36
def circle(center, radius, color):
    """
		plot a circle with given center and radius

		Arguments
		----------
		center : matrix or ndarray
			it should be 2x1 ndarray or matrix
		radius:  float
			masses per mm
	"""
    u = pb.linspace(0, 2 * np.pi, 200)
    x0 = pb.zeros_like(u)
    y0 = pb.zeros_like(u)
    center = pb.matrix(center)
    center.shape = 2, 1
    for i, j in enumerate(u):
        x0[i] = radius * pb.sin(j) + center[0, 0]
        y0[i] = radius * pb.cos(j) + center[1, 0]
    pb.plot(x0, y0, color)
Example #38
def swap_subsample(I, k=1):
    for c, color in enumerate(colors):
        print("%s <-- %s" % (colors[c], colors[(c+k) % 3]))
    for i in range(3):
        J = zeros_like(I)
        for j in range(3):
            J[:, :, j] = I[:, :, (j+k)%3]
        J[:, :, i] = zoom(I[::4, ::4, (i+k)%3], 4)
        figure(i+10)
        title("%s channel subsampled" %colors[i])
        imshow(J)
Example #39
def CTCS(phiOld, c, nt, epsilon=0., alpha=1.):
    "Linear advection of profile in phiOld using CTCS"
    "(FTCS for 1st time step)"
    "with RAW filter with coefficients epsilon and alpha"

    # the number of independent points
    nx = len(phiOld) - 1
    # add another wrap-around point for cyclic boundaries
    phiOld = pl.append(phiOld, [phiOld[1]])

    # mid and new time-step arrays for phi
    phi = pl.zeros_like(phiOld)
    phiNew = pl.zeros_like(phiOld)

    # FTCS for first time step
    for i in range(1, nx + 1):
        phi[i] = phiOld[i] - 0.5 * c * (phiOld[i + 1] - phiOld[i - 1])
    # cyclic BCs
    phi[0] = phi[nx]
    phi[nx + 1] = phi[1]

    # CTCS for remaining time steps
    for it in range(2, nt + 1):
        for i in range(1, nx + 1):
            phiNew[i] = phiOld[i] - c * (phi[i + 1] - phi[i - 1])

        # cyclic BCs
        phiNew[0] = phiNew[nx]
        phiNew[nx + 1] = phiNew[1]

        # RAW filter
        d = epsilon * (phiNew - 2 * phi + phiOld)
        phi += alpha * d
        phiNew += (alpha - 1.) * d

        # update arrays
        phiOld = phi.copy()
        phi = phiNew.copy()

    # return phiNew (without the cyclic wrap-around point)
    return phiNew[0:nx + 1]
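
A smoke test under assumed parameters: CTCS is neutrally stable for
Courant numbers |c| <= 1, and c*nt = nx carries the profile once around
the periodic domain:

import pylab as pl

nx = 40
x = pl.linspace(0., 1., nx + 1)
phi0 = pl.exp(-100. * (x - 0.5)**2)
phi = CTCS(phi0, c=0.4, nt=100, epsilon=0.01)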
Example #40
def sub_mean(x, N):
    N = int(N)
    L = len(x)
    y = pl.zeros_like(x)
    ii = pl.arange(-N, N + 1)
    k = 1.0 / len(ii)  # 1 / (2 * N + 1)
    for n in range(L):
        iii = pl.clip(ii + n, 0, L - 1)
        s = k * sum(x[iii])
        y[n] = x[n] - s
    # print(n, x[n], iii[0], iii[-1], s)  # leftover debug output
    return y
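
An illustrative check: subtracting a (2N+1)-point running mean from a
constant signal leaves zeros (edges are handled by clipping the window):

import pylab as pl

print(sub_mean(3.0 * pl.ones(10), N=2))  # all zeros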
Example #41
def new_bad_model(F):
    """ Results in a matrix with shape matching X, but all rows sum to 1"""
    N, T, J = F.shape
    pi = pl.zeros_like(F)
    for t in range(T):
        u = F[:,t,:].var(axis=0)
        u /= pl.sqrt(pl.dot(u,u))
        F_t_par = pl.dot(pl.atleast_2d(pl.dot(F[:,t,:], u)).T, pl.atleast_2d(u))
        F_t_perp = F[:,t,:] - F_t_par
        for n in range(N):
            alpha = (1 - F_t_perp[n].sum()) / F_t_par[n].sum()
            pi[n,t,:] = F_t_perp[n,:] + alpha*F_t_par[n,:]
    return pi
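
As with bad_model above, the rescaling is chosen so that each row sums to
one: alpha solves F_t_perp[n].sum() + alpha * F_t_par[n].sum() = 1. An
illustrative check:

import pylab as pl

F = pl.rand(4, 3, 5)
print(pl.allclose(new_bad_model(F).sum(axis=2), 1.0))  # True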
Example #42
def amplitude(freq, m1, m2, distance=1):
    solar_mass = 1.98892e30
    mpc = mega * parsec
    eta = m1 * m2 / (m1 + m2)**2
    M = (m1 + m2)
    mu = M * eta
    amp_fac = (2 * G * solar_mass) / (c**2 * distance * mpc) * \
        (5. * mu / 96 )**(1./2) * \
        (M / math.pi**2)**(1./3) * \
        (G * solar_mass / c**3)**(-1./6)
    fi = freq < isco(m1, m2)
    amp = pylab.zeros_like(freq)
    amp[fi] = freq[fi]**(-7. / 6) * amp_fac
    return amp
Example #43
def divA(u, v, dx, dy):
    "Calculates the divergence of u and v for the A-grid"
    [nx, ny] = py.shape(u)
    divu = py.zeros_like(u)

    for i in range(-1, nx - 1):
        for j in range(1, ny - 1):
            divu[i,j] = (u[i+1,j] - u[i-1,j])/(2.*dx) \
                      + (v[i,j+1] - v[i,j-1])/(2.*dy)
        divu[i,0] = (u[i+1,0] - u[i,0])/(2.*dx) \
                  + (v[i,1] + v[i,0])/(2*dy)
        divu[i,ny-1] = (u[i+1,ny-1] - u[i-1,ny-1])/(2.*dx) \
                     - (v[i,ny-1] + v[i,ny-2])/(2*dy)
    return divu
Example #45
def FTCS(phiOld, k, nt):
    """Diffusion of the profile in phiOld using FTCS with a
    non-dimensional diffusion coefficient k."""
    
    # new time-step array for phi
    phi = pl.zeros_like(phiOld)
    
    # FTCS
    for it in range(nt):
        for i in range(1, len(phi) - 1):
            phi[i] = phiOld[i] \
                   + k*(phiOld[i+1] - 2*phiOld[i] + phiOld[i-1])
        
        # update arrays
        phiOld = phi.copy()

    return phi
Example #46
def int_f0(x, fs=1.):
    """
    returns the 'basic' Fourier integration of a signal
    
    """
    # define frequencies
    T = len(x) / float(fs)
    freqs = 1. / T * arange(len(x))
    freqs[len(freqs) // 2 + 1:] -= float(fs)

    spec = fft.fft(x)
    spec_i = zeros_like(spec, dtype=complex)
    # exclude frequency 0 - it cannot be integrated
    spec_i[1:] = spec[1:] / (2j * pi * freqs[1:])
    if mod(len(x), 2) == 0:
        spec_i[len(x) // 2] = 0.
    sig_d = fft.ifft(spec_i) 
    return sig_d.real
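
A sanity check of the spectral integration (assumes the pylab star-import
used by the snippet): dividing each Fourier coefficient by 2*pi*i*f
integrates the signal, so integrating cos(2*pi*t) over one period should
return sin(2*pi*t)/(2*pi), up to the dropped DC term (which is zero here):

import numpy as np

fs = 100.
t = np.arange(0., 1., 1. / fs)
y = int_f0(np.cos(2 * np.pi * t), fs)
print(np.allclose(y, np.sin(2 * np.pi * t) / (2 * np.pi)))  # True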
Example #47
        def rates(S=data_sample,
                Xa=Xa, Xb=Xb,
                alpha=alpha, beta=beta, gamma=gamma,
                bounds_func=vars['bounds_func'],
                age_indices=ai,
                age_weights=aw):

            # calculate study-specific rate function
            shifts = pl.exp(pl.dot(Xa[S], alpha) + pl.dot(Xb[S], pl.atleast_1d(beta)))
            exp_gamma = pl.exp(gamma)
            mu = pl.zeros_like(shifts)
            for i,s in enumerate(S):
                mu[i] = pl.dot(age_weights[s], bounds_func(shifts[i] * exp_gamma[age_indices[s]], age_indices[s]))
                # TODO: evaluate speed increase and accuracy decrease of the following:
                #midpoint = age_indices[s][len(age_indices[s])/2]
                #mu[i] = bounds_func(shifts[i] * exp_gamma[midpoint], midpoint)
                # TODO: evaluate speed increase and accuracy decrease of the following (to see the speed increase, this needs to be coded up using a difference of running sums):
                #mu[i] = pl.dot(pl.ones_like(age_weights[s]) / float(len(age_weights[s])),
                #               bounds_func(shifts[i] * exp_gamma[age_indices[s]], age_indices[s]))
            return mu
Example #48
def connected_conponent(img):
    """
    input:  img cant be gray or color. 
            The shape of img is (height,width,..)
    output: list of components  and an image mask
    """
    img.resize((10,10))
    img = array(img)
    assert(img.ndim>=2)
    #mask = array([0]*img.size).reshape(img.shape)
    mask = zeros_like(img,dtype=uint64)  #[h,w]
    comp_list = []
    comp_index = 0   # the index of the first component is 1

    print(mask.shape, mask.dtype)
    print(img.shape, img.dtype)
    for y in range(img.shape[0]):
        for x in range(img.shape[1]):
            if mask[y, x] == 0:  # (y, x) will be a seed of a component.
                comp_index += 1
                mask[y, x] = comp_index
                # flood to neighbouring pixels
                flood(img, mask, (y, x))
Example #49


# Create Brownian paths

t = p.linspace(0, 3, n+1)  # partition [0, 3] into 1000 subintervals
dB = p.randn(n_path, n+1) / p.sqrt(n/3)
dB[:, 0] = 0  # first column of dB is 0
B = dB.cumsum(axis=1)  # cumulative sum



# Calculate stock prices

nu = mu - sigma*sigma/2.0
S = p.zeros_like(B)
S[:,0] = S0
S[:, 1 :] = S0*p.exp(nu*t[1:]+sigma*B[:, 1 :])



# Plot 5 realizations of GBM

S_plot = S[0:5]
p.plot(t, S_plot.transpose())
p.xlabel('Time, $t$')
p.ylabel('Stock prices, $S_t$')
p.title('5 Realizations of Geometric Brownian Motion\nwith $\mu$ = ' + str(mu) + ' and $\sigma$ = ' + str(sigma))
p.show()

Example #50
def GetGS(image_name, scale, roi=None, show=True, min_size=500, exclude_zones=None, flipudflag=False):

    # Load image (reminder: use color.rgb2gray to convert colour to grey)
    if opencv:
        original = cv2.imread(image_name)
    else:
        original = m.imread(image_name)

    if flipudflag:
        original = flipud(original)

    # manage exclusion zones: set them to 1, i.e. background
    if exclude_zones is not None:
        # create markers of background and gravel
        markers_zones = m.zeros_like(original[:, :, 0])

        for zone in exclude_zones:
            xa = min(zone[0], zone[2])
            xb = max(zone[0], zone[2])
            ya = min(zone[1], zone[3])
            yb = max(zone[1], zone[3])
            markers_zones[ya:yb, xa:xb] = 1

    if roi is not None:
        # crop the image
        xa = min(roi[0], roi[2])
        xb = max(roi[0], roi[2])
        ya = min(roi[1], roi[3])
        yb = max(roi[1], roi[3])

        original = original[ya:yb, xa:xb]

        if exclude_zones is not None:
            markers_zones = markers_zones[ya:yb, xa:xb]

    # transform to hsv
    if opencv:
        img = cv2.cvtColor(original, cv2.COLOR_BGR2HSV)
    else:
        img = color.rgb2hsv(original)

    # use the S channel
    img = img[:, :, 1]

    # divide by 255 if working in colour
    thre = img

    if show:
        m.figure()
        m.imshow(original)

    # Sobel filter (to get the gradients)
    if opencv:
        elev = cv2.Laplacian(thre, cv2.CV_64F)
    else:
        elev = filters.sobel(thre)

    # compute an auto threshold
    thresh = filters.threshold_otsu(thre)

    # create markers of background and gravel
    markers = m.zeros_like(thre)
    if show:
        print(thresh)

    markers[thre < thresh] = 2
    markers[thre > thresh] = 1

    if exclude_zones is not None:
        markers[markers_zones == 1] = 1

    # use watershade transform (use markes as starting point)
    segmentation = morphology.watershed(elev, markers)

    # Clean small object
    if opencv:
        kernel = m.ones((2, 2), m.uint8)
        closing = cv2.morphologyEx(segmentation - 1, cv2.MORPH_CLOSE, kernel)
    else:
        closing = ndimage.binary_closing(segmentation - 1)

    tmp = ndimage.binary_fill_holes(closing)
    label_objects, nb_labels = ndimage.label(tmp)

    sizes = m.bincount(label_objects.ravel())
    mask_sizes = sizes > min_size
    mask_sizes[0] = 0
    segmentation_cleaned = mask_sizes[label_objects]

    # relabel cleaned version of segmentation
    label_objects_clean, nb_label_clean = ndimage.label(segmentation_cleaned)

    # plot contour of object
    if show:
        m.contour(ndimage.binary_fill_holes(label_objects_clean), linewidths=1.2, colors="y")

    # get the object properties
    # old version of scikit properties=measurement_types
    mes = measure.regionprops(label_objects_clean)

    granulo = []
    for prop in mes:
        # Correct the orientation, since 'orientation' does not take the quadrant into account!
        #: elements of the inertia tensor [a b; b c]
        Orientation = GetOrientation(prop["CentralMoments"])

        x0 = prop["Centroid"][1]
        y0 = prop["Centroid"][0]
        x1 = x0 + m.cos(Orientation) * 0.5 * prop["MajorAxisLength"]
        y1 = y0 - m.sin(Orientation) * 0.5 * prop["MajorAxisLength"]
        x2 = x0 - m.sin(Orientation) * 0.5 * prop["MinorAxisLength"]
        y2 = y0 - m.cos(Orientation) * 0.5 * prop["MinorAxisLength"]

        if show:
            m.plot((x0, x1), (y0, y1), "-r", linewidth=2.5)
            m.plot((x0, x2), (y0, y2), "-r", linewidth=2.5)

            m.plot(x0, y0, ".g", markersize=15)

        granulo.append(prop["MinorAxisLength"] / scale)
    #    minr, minc, maxr, maxc = prop['BoundingBox']
    #    bx = (minc, maxc, maxc, minc, minc)
    #    by = (minr, minr, maxr, maxr, minr)
    #    m.plot(bx, by, '-b', linewidth=2.5)

    return m.array(granulo), label_objects_clean, mes
Example #51
        [581, 537],
        [602, 551],
        [636, 561],
        [656, 560],
        [681, 542],
        [712, 509],
        [740, 482],
        [771, 459],
        [795, 447],
        [826, 439],
        [868, 439],
        [907, 441],
    ]
)

points = pylab.zeros_like(pixels)

for i in range(len(pixels)):
    points[i, 0] = (pixels[i, 0] - bottom_left[0]) * (top_right_values[0] - bottom_left_values[0]) / (
        top_right[0] - bottom_left[0]
    ) + bottom_left_values[0]
    points[i, 1] = (pixels[i, 1] - bottom_left[1]) * (top_right_values[1] - bottom_left_values[1]) / (
        top_right[1] - bottom_left[1]
    ) + bottom_left_values[1]
    # print points[i, 0], points[i, 1]

r = points[:, 0]
g = points[:, 1]

# pylab.plot(r, g)
Example #52
dt = 10.0
t = numpy.arange(0,1000.0,dt) 
rate = numpy.ones_like(t)*20.0

# stepup

i_start = t.searchsorted(400.0,'right')-1
i_end = t.searchsorted(600.0,'right')-1

rate[i_start:i_end] = 40.0

a = numpy.ones_like(t)*3.0
b = numpy.ones_like(t)/a/rate

psth = zeros_like(t)

stg = stgen.StGen()

trials = 5000
tsim = 1000.0
print "Running %d trials of %.2f milliseconds" % (trials, tsim)
for i in xrange(trials):
    if i%100==0:
        print "%d" % i,
        sys.stdout.flush()
    st = stg.inh_gamma_generator(a,b,t,1000.0,array=True)
    psth[1:]+=numpy.histogram(st,t)[0]

print "\n"
Example #53
def _create_plot_component():
    path = os.path.join(os.getcwd())
    A = glob.glob('*.chi')
    A = np.sort(A)
    s = len(A)
    T = np.loadtxt('Temp.dat', unpack=True)
    # T = sort(Temp)
    Tlo = min(T)
    Thi = max(T)
    I0 = np.loadtxt("I0.dat", unpack=True)
    fcmap = matplotlib.cm.get_cmap()
    no_columns = 2*s - 1
    data = [np.loadtxt(f, usecols=[0, 1], unpack=False, skiprows=32) for f in A]
    Data = np.concatenate(data, axis=1)
    d = Data[:, r_[0, 1:no_columns:2]].T
    savetxt(path + 'Data_chi.dat', d, fmt='%g')
    Data = np.loadtxt(path + 'Data_chi.dat', unpack=True)

    plt.figure()
    plt.title("X-Ray Diffraction Data")
    plt.xlabel("Q (${nm}^{-1}$)")
    plt.ylabel("Intensity")

    for i in range(1, s):
        Ti = T[i - 1]
        Ii = I0[i - 1]
        color = fcmap((Ti - Tlo) / (1.0 * Thi - Tlo))
        plt.plot(Data[:, 0], Data[:, i] + (i * 10000), color=color, alpha=.8)

    plt.figure()
    plt.title("X-Ray Diffraction Data")
    plt.xlabel("Q (${nm}^{-1}$)")
    plt.ylabel("Normalized Intensity")

    for i in range(1, s):
        Ti = T[i - 1]
        Ii = I0[i - 1]
        color = fcmap((Ti - Tlo) / (1.0 * Thi - Tlo))
        plt.plot(Data[:, 0], Data[:, i] / Ii + (i * 10), color=color, alpha=.8)

    fig = figure()
    ax3 = Axes3D(fig)
    pylab.title("X-ray Diffraction Data - Waterfall Plot")
    pylab.xlabel("Q (${nm}^{-1}$)")
    pylab.ylabel("Temperature (K)")
    xi = Data[:, 0]

    # this sets the view elevation and azimuth in degrees
    ax3.view_init(20, 220)
    for i in range(1, s):
        zi = Data[:, i]  # zi = Data[p:q, 0]
        Ti = T[i - 1]
        Ii = I0[i - 1]
        color = fcmap((Ti - Tlo) / (1.0 * Thi - Tlo))
        yi = (pylab.zeros_like(xi) + i) / Ii
        ax3.plot(xi, (((yi / yi) - 1) + Ti), zi, color=color)

    # Give values to select the X and Y range
    p = 0
    q = 300

    fig = figure()
    ax3 = Axes3D(fig)
    pylab.title("X-ray Diffraction Data - Normalized Intensity")
    pylab.xlabel("Q (${nm}^{-1}$)")
    pylab.ylabel("Temperature (K)")
    xi = Data[p:q, 0]

    # this sets the view elevation and azimuth in degrees
    ax3.view_init(20, 220)
    for i in range(1, s):
        Ti = T[i - 1]
        Ii = I0[i - 1]
        zi = Data[p:q, i] / Ii  # zi = Data[p:q, 0]
        color = fcmap((Ti - Tlo) / (1.0 * Thi - Tlo))
        yi = (pylab.zeros_like(xi) + i) / Ii
        ax3.plot(xi, (((yi / yi) - 1) + Ti), zi, color=color)
Example #54
    Itest = 5
    Vmax = 15

    #make grid
    plotMargin = 1.2
    sizex = Ca1.outer_radius*plotMargin
    sizez = Ca1.outer_thickness*plotMargin
    print(sizex, sizez)
    size = np.max([sizex,sizez])
    N=20.0    # number of grid points per axis
    i=j=pb.arange(N)
    n=float(N)
    x = ((i/(n-1))-.5)*size*2
    z = ((j/(n-1))-.5)*size*2
    X,Z = pb.meshgrid(x,z)
    Bx = pb.zeros_like(X)
    Bz = pb.zeros_like(X)
    Bnorm = pb.zeros_like(X)
    #get B field
    gaussPerTesla = 1e4    
    for ii in i:
        for jj in j:
            x = X[ii,jj]
            z = Z[ii,jj]
            if jj == 0:
                print(ii, jj, x, z)
            B=(Ca1.Bvector([x,0,z])+Ca2.Bvector([x,0,z]))*gaussPerTesla
            Bnorm[ii,jj] = norm(B)
            Bx[ii,jj] = B[0]*np.log1p(np.fabs(1000*norm(B)))/norm(B)
            Bz[ii,jj] = B[2]*np.log1p(np.fabs(1000*norm(B)))/norm(B)
            
Example #55
    dm = initialize_model()
    dm.params['region_effect_prevalence'] = dict(std=level)
    dismod3.neg_binom_model.fit_emp_prior(dm, 'prevalence', map_only=True)
    models[-1].append(dm)

models.append([])
for level in [.1, 1, 10.]:
    dm = initialize_model()
    dm.params['beta_effect_prevalence'] = dict(mean=[0.], std=[level])
    dismod3.neg_binom_model.fit_emp_prior(dm, 'prevalence', map_only=True)
    models[-1].append(dm)

models.append([])
for level in [.2, 2., 20.]:
    dm = initialize_model()
    dm.params['gamma_effect_prevalence'] = dict(mean=list(pl.zeros_like(dm.get_estimate_age_mesh())),
                                                std=list(level*pl.ones_like(dm.get_estimate_age_mesh())))
    dismod3.neg_binom_model.fit_emp_prior(dm, 'prevalence', map_only=True)
    models[-1].append(dm)



# this should change uncertainty, although it turns out not to change levels
models.append([])
for level in [.025, .25, 2.5]:
    dm = initialize_model()
    dm.params['delta_effect_prevalence'] = dict(mean=3.,
                                                std=level)
    dismod3.neg_binom_model.fit_emp_prior(dm, 'prevalence', map_only=True)
    models[-1].append(dm)
Example #56
    freq_diff=p.diff(data_main.freqs)
    main_ephase_tdelay=-main_ephase_diff/360.0/freq_diff
    if last_main is not None:
      main_nearest_pair_phase_diff=p.array(data_main.ephase)-p.array(last_main.ephase)
    if card in middlecards: 
      interf_tdelay=p.array(data_interf.tdelay)
      diff_tdelay=interf_tdelay-main_tdelay
      phase_diff=p.array(data_interf.ephase)-p.array(data_main.ephase)
      phase_diff=(phase_diff % 360.0)
      phase_diff=p.array([(ph > 180) * -360 + ph for ph in phase_diff])
      phase_diff_diff=p.diff(phase_diff)
      phase_diff_tdelay=-(phase_diff_diff/360.0)/freq_diff
      interf_ephase_diff=p.diff(data_interf.ephase)
      interf_ephase_tdelay=-interf_ephase_diff/360.0/freq_diff
      diff_ephase_tdelay=interf_ephase_tdelay-main_ephase_tdelay
      smooth_phase_diff_tdelay=p.zeros_like(phase_diff_tdelay)
      sum_count=p.zeros_like(phase_diff_tdelay)
      for i in range(len(phase_diff_tdelay)):
        for j in range(40):
          if i+j < len(phase_diff_tdelay):
            smooth_phase_diff_tdelay[i]+=phase_diff_tdelay[i+j]
            sum_count[i]+=1
          if i-j >= 0:
            smooth_phase_diff_tdelay[i]+=phase_diff_tdelay[i-j]
            sum_count[i]+=1
      smooth_phase_diff_tdelay=smooth_phase_diff_tdelay/sum_count
      if last_interf is not None:
        interf_nearest_pair_phase_diff=p.array(data_interf.ephase)-p.array(last_interf.ephase)


    p.figure(200+bmnum)
Example #57
dm.description = 'With excess-mortality data removed'
dm.data = [d for d in dm.data if dm.relevant_to(d, 'all-cause_mortality', region, year, sex)] + \
    [d for d in dm.data if dm.relevant_to(d, 'prevalence_x_excess-mortality', region, year, sex)] + \
    [d for d in dm.data if d.get('country_iso3_code') == 'GBR']
dm.params['global_priors']['level_bounds']['excess_mortality'] = dict(lower=.2, upper=10.)
fit_posterior.fit_posterior(dm, region, sex, year, map_only=True, store_results=False)
models.append(dm)

dm = initialize_model()
dm.description = 'With excess-mortality data and priors removed'
dm.data = [d for d in dm.data if dm.relevant_to(d, 'all-cause_mortality', region, year, sex)] + \
    [d for d in dm.data if dm.relevant_to(d, 'prevalence_x_excess-mortality', region, year, sex)] + \
    [d for d in dm.data if d.get('country_iso3_code') == 'GBR']
dm.params['global_priors']['level_bounds']['excess_mortality'] = dict(lower=0., upper=10.)
dm.params['global_priors']['smoothness']['prevalence']['amount'] = 'No Prior'
dm.params['gamma_effect_excess-mortality'] = dict(mean=list(pl.zeros_like(dm.get_estimate_age_mesh())),
                                                  std=list(10.*pl.ones_like(dm.get_estimate_age_mesh())))
fit_posterior.fit_posterior(dm, region, sex, year, map_only=True, store_results=False)
models.append(dm)


dm = initialize_model()
dm.description = 'Without increasing prior on excess-mortality, but with "Very" smoothing'
dm.data = [d for d in dm.data if dm.relevant_to(d, 'all-cause_mortality', region, year, sex)] + \
    [d for d in dm.data if dm.relevant_to(d, 'prevalence_x_excess-mortality', region, year, sex)] + \
    [d for d in dm.data if d.get('country_iso3_code') == 'GBR'] + \
    [d for d in dm.data if dm.relevant_to(d, 'excess-mortality', 'all', 'all', 'all')]
dm.params['global_priors']['smoothness']['prevalence']['amount'] = 'Very'
fit_posterior.fit_posterior(dm, region, sex, year, map_only=True, store_results=False)
models.append(dm)
Example #58
Variance = (S0**2)*(p.exp(2*mu*time))*(p.exp(sigma*sigma*time)-1)

msg = 'The theoretical expected value of S(3) is %.13f' % Expected
print(msg)
msg = 'The theoretical variance of S(3) is %.13f' % Variance
print(msg)


# create Brownian motion
t = p.linspace(0, 3, n+1)
dB = p.randn(n_path, n+1) / p.sqrt(n/time)
dB[:, 0] = 0
B = dB.cumsum(axis=1)

# calculate stock price
nu = mu - sigma * sigma / 2.0
S = p.zeros_like(B)
S[:, 0] = S0
S[:, 1:] = S0 * p.exp(nu * t[1:] + sigma * B[:, 1:])

S2 = S[0:5]
p.title('Brownian Motion')
p.xlabel('Time, $t$', fontsize=16)
p.ylabel('X(t)', fontsize=16)
p.plot(t, S2.transpose())
p.show()

#calculate expectation value of S(3)
S3=S[:,-1]
E= S3.sum() / n_path
msg = 'The expected value of S(3) is %.13f' %E
print(msg)

#calculate variance of S(3)
Example #59
 def smooth_rate(f=rate, age_indices=age_indices, C=C):
     log_rate = pl.log(pl.maximum(f, NEARLY_ZERO))
     return mc.mv_normal_cov_like(log_rate[age_indices] - log_rate[age_indices].mean(),
                                  pl.zeros_like(age_indices),
                                  C=C)
Example #60
# Time-varying Rates
# ------------------
# 
# When $b$ and $m$ are not constant with respect to time, the ODE does not have a closed-form solution.
# If $b$ and $m$ are piecewise-constant, an exact solution can be computed iteratively.
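# On each piecewise-constant interval the update is the exact solution of $dS/dt = (b_i - m_i)\,S$, namely $S(t_{i+1}) = S(t_i)\,e^{(b_i - m_i)(t_{i+1} - t_i)}$; this is exactly the recurrence implemented below.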

# <codecell>

h_b = .01*(2. - t/t.max())
h_m = .005*(2. + t/t.max())

# <codecell>

S_approx = scipy.integrate.odeint(one_compartment_ode, S_0, t, (h_b,h_m))

S_exact = 1. * pl.zeros_like(t)
S_exact[0] = S_0
for i in range(len(t)-1):
    S_exact[i+1] = S_exact[i]*pl.exp((h_b[i]-h_m[i])*(t[i+1]-t[i]))

# <codecell>
# pl.figure()
# pl.plot(t, 100*(S_approx.reshape(101) - S_exact)/S_exact, 'k')
# pl.hlines([0],0,100,color='k',linestyle='--')
# pl.ylabel('Relative error (%)')
# pl.xlabel('Age (years)')
# yt = [-10e-5, 0, 10e-5, 20e-5, 30e-5]
# pl.yticks(yt, ['%.4f'%y for y in yt])
# pl.axis([-5,105,-10e-5, 30e-5])
# pl.title('ODE error for piecewise-constant rates')