class GaussianX(PhaseSpace):
    """Horizontal Gaussian particle phase space distribution."""

    def __init__(self, sigma_x, sigma_xp, generator_seed=None):
        """Initiates the horizontal beam coordinates
        to the given Gaussian shape.
        """
        self.sigma_x  = sigma_x
        self.sigma_xp = sigma_xp

        self.random_state = RandomState()
        self.random_state.seed(generator_seed)

    @classmethod
    def from_optics(cls, alpha_x, beta_x, epsn_x, betagamma, generator_seed=None):
        """Initialise GaussianX from the given optics functions.
        beta_x is given in meters and epsn_x in micrometers.
        """

        sigma_x  = np.sqrt(beta_x * epsn_x * 1e-6 / betagamma)
        sigma_xp = sigma_x / beta_x

        return cls(sigma_x, sigma_xp, generator_seed)

    def generate(self, beam):
        beam.x = self.sigma_x * self.random_state.randn(beam.n_macroparticles)
        beam.xp = self.sigma_xp * self.random_state.randn(beam.n_macroparticles)
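
A minimal usage sketch for GaussianX, assuming a beam object that exposes an n_macroparticles attribute (the optics numbers below are purely illustrative, and alpha_x is accepted but unused by from_optics):

class _Beam(object):
    n_macroparticles = 10000

beam = _Beam()
gx = GaussianX.from_optics(alpha_x=0.0, beta_x=100.0, epsn_x=2.0,
                           betagamma=6927.6, generator_seed=42)
gx.generate(beam)  # fills beam.x and beam.xp with Gaussian samples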

class GaussianY(PhaseSpace):
    """Vertical Gaussian particle phase space distribution."""

    def __init__(self, sigma_y, sigma_yp, generator_seed=None):
        """Initiates the vertical beam coordinates
        to the given Gaussian shape.
        """
        self.sigma_y  = sigma_y
        self.sigma_yp = sigma_yp

        self.random_state = RandomState()
        self.random_state.seed(generator_seed)

    @classmethod
    def from_optics(cls, alpha_y, beta_y, epsn_y, betagamma, generator_seed=None):
        """Initialise GaussianY from the given optics functions.
        beta_y is given in meters and epsn_y in micrometers.
        """

        sigma_y  = np.sqrt(beta_y * epsn_y * 1e-6 / betagamma)
        sigma_yp = sigma_y / beta_y

        return cls(sigma_y, sigma_yp, generator_seed)

    def generate(self, beam):
        beam.y = self.sigma_y * self.random_state.randn(beam.n_macroparticles)
        beam.yp = self.sigma_yp * self.random_state.randn(beam.n_macroparticles)

class GaussianTheta(PhaseSpace):
    """Longitudinal Gaussian particle phase space distribution."""

    def __init__(self, sigma_theta, sigma_dE, is_accepted=None, generator_seed=None):
        """Initialises the longitudinal beam coordinates (theta, dE)
        to the given Gaussian shape. If is_accepted is given,
        macroparticles are resampled until it returns True.
        """
        self.sigma_theta = sigma_theta
        self.sigma_dE = sigma_dE
        self.is_accepted = is_accepted

        self.random_state = RandomState()
        self.random_state.seed(generator_seed)

    def generate(self, beam):
        beam.theta = self.sigma_theta * self.random_state.randn(beam.n_macroparticles)
        beam.delta_E = self.sigma_dE * self.random_state.randn(beam.n_macroparticles)
        if self.is_accepted:
            self._redistribute(beam)

    def _redistribute(self, beam):
        n = beam.n_macroparticles
        theta = beam.theta.copy()
        delta_E = beam.delta_E.copy()
        for i in range(n):
            while not self.is_accepted(theta[i], delta_E[i]):
                theta[i]  = self.sigma_theta * self.random_state.randn()
                delta_E[i] = self.sigma_dE * self.random_state.randn()
        beam.theta = theta
        beam.delta_E = delta_E
Example #4
def test_qtl_binomial_scan_covariate_redundance():
    random = RandomState(9)

    N = 200
    G = random.randn(N, N + 100)
    G = stdnorm(G, 0)
    G /= sqrt(G.shape[1])

    p = 2
    X = random.randn(N, p)
    X = stdnorm(X, 0)
    X /= sqrt(X.shape[1])

    ntrials = random.randint(1, 50, N)
    nsuccesses = binomial(
        ntrials,
        -0.1,
        G,
        causal_variants=X,
        causal_variance=0.1,
        random_state=random)

    X[:] = 1
    qtl = scan(BinomialPhenotype(nsuccesses, ntrials), X, G=G, progress=False,
               fast=False)
    assert_allclose(qtl.pvalues(), [1] * p, rtol=1e-4)
Example #5
def test_qtl_fast_binomial_scan():
    random = RandomState(9)

    N = 200
    G = random.randn(N, N + 100)
    G = stdnorm(G, 0)
    G /= sqrt(G.shape[1])

    p = 2
    X = random.randn(N, p)
    X = stdnorm(X, 0)
    X /= sqrt(X.shape[1])

    ntrials = random.randint(1, 50, N)
    nsuccesses = binomial(
        ntrials,
        -0.1,
        G,
        causal_variants=X,
        causal_variance=0.1,
        random_state=random)

    qtl = scan(BinomialPhenotype(nsuccesses, ntrials), X, G=G, progress=False,
               fast=True)

    assert_allclose(
        qtl.pvalues(), [
            0.698565827403, 0.443299805368
        ],
        rtol=1e-4)
Example #6
class corr_noise(object):
    """Correlated Gaussian noise generator (coupling w, scale sigma)."""

    def __init__(self, w, sigma, seed=None):
        self.w = w
        self.sigma = sigma
        self.rst = RandomState(seed)

    def calc_noise(self, N=100):
        z = self.rst.randn(N + 4)
        noise = diff(diff(diff(diff(z * self.w ** 4) - 4 * z[1:] * self.w ** 3)
                          + 6 * z[2:] * self.w ** 2) - 4 * z[3:] * self.w) + z[4:]
        self.Cw = sqrt(sum([comb(4, l) ** 2 * self.w ** (2 * l) for l in range(5)]))
        self.noise = noise * self.sigma / self.Cw
        return self.noise

    def calc_noise2(self, N=100):
        P = int(np.ceil(1.5 * N))  # randn requires an integer count
        NT = self.rst.randn(P) * self.sigma
        STD = np.zeros(21)
        STD[10] = 1.0
        for counter in range(5):
            NTtmp = NT.copy()
            NT[:-1] = NT[:-1] + self.w * NTtmp[1:]
            NT[-1] = NT[-1] + self.w * NTtmp[-1]
            NT[1:] = NT[1:] + self.w * NTtmp[:-1]
            NT[0] = NT[0] + self.w * NTtmp[-1]
            STDtmp = STD.copy()
            STD[1:] = STD[1:] + self.w * STDtmp[:-1]
            STD[:-1] = STD[:-1] + self.w * STDtmp[1:]
        NT = NT / np.linalg.norm(STD)
        self.noise = NT[:N]
        self.Cw = sqrt(sum([comb(4, l) ** 2 * self.w ** (2 * l) for l in range(5)]))
        return self.noise

    def calc_autocorr(self, lag=10):
        return array([1] + [corrcoef(self.noise[:-i], self.noise[i:])[0, 1] for i in range(1, lag)])

    def calc_cov(self):
        def cw(k):
            if np.abs(k) > 4:
                return 0
            c = sum([comb(4, l) * comb(4, np.abs(k) + l) * self.w ** (np.abs(k) + 2 * l) for l in range(5 - np.abs(k))])
            return c / self.Cw ** 2

        N = len(self.noise)
        Sigma = np.zeros((N, N))
        for m in range(N):
            for n in range(m, N):
                Sigma[m, n] = self.sigma ** 2 * cw(n - m)
        self.Sigma = Sigma + Sigma.T - np.diag(np.diag(Sigma))
        return self.Sigma

    def calc_psd(self, noise=None, Fs=1.0, **kwargs):
        if isinstance(noise, np.ndarray):
            return periodogram(noise, fs=Fs, **kwargs)
        else:
            return periodogram(self.noise, fs=Fs, **kwargs)
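
A short usage sketch for corr_noise, assuming the imports the class relies on (numpy as np; diff, array, corrcoef and sqrt from numpy; comb from scipy.special; periodogram from scipy.signal):

cn = corr_noise(w=0.5, sigma=1.0, seed=42)
x = cn.calc_noise(N=1000)       # correlated noise scaled by sigma / Cw
acf = cn.calc_autocorr(lag=10)  # empirical autocorrelation at lags 0..9
Sigma = cn.calc_cov()           # analytic covariance implied by w and sigma
f, Pxx = cn.calc_psd(Fs=1.0)    # periodogram of the stored noise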
Example #7
    def init_params(self, embed_map, count_dict, L):
        """
        Initializes embeddings and context matricies
        """
        prng = RandomState(self.seed)

        # Pre-trained word embedding matrix
        if embed_map is not None:
            R = np.zeros((self.K, self.V))
            for i in range(self.V):
                word = count_dict[i]
                if word in embed_map:
                    R[:,i] = embed_map[word]
                else:
                    R[:,i] = embed_map['*UNKNOWN*']
            R = gpu.garray(R)
        else:
            r = np.sqrt(6) / np.sqrt(self.K + self.V + 1)
            R = prng.rand(self.K, self.V) * 2 * r - r
            R = gpu.garray(R)
        bw = gpu.zeros((1, self.V))

        # Context 
        C = 0.01 * prng.randn(self.context, self.K, self.K)
        C = gpu.garray(C)

        # Image context
        M = 0.01 * prng.randn(self.h, self.K)
        M = gpu.garray(M)

        # Hidden layer
        r = np.sqrt(6) / np.sqrt(self.D + self.h + 1)
        J = prng.rand(self.D, self.h) * 2 * r - r
        J = gpu.garray(J)
        bj = gpu.zeros((1, self.h))

        # Initial deltas used for SGD
        deltaR = gpu.zeros(np.shape(R))
        deltaC = gpu.zeros(np.shape(C))
        deltaB = gpu.zeros(np.shape(bw))
        deltaM = gpu.zeros(np.shape(M))
        deltaJ = gpu.zeros(np.shape(J))
        deltaBj = gpu.zeros(np.shape(bj))

        self.R = R
        self.C = C
        self.bw = bw
        self.M = M
        self.J = J
        self.bj = bj
        self.deltaR = deltaR
        self.deltaC = deltaC
        self.deltaB = deltaB
        self.deltaM = deltaM
        self.deltaJ = deltaJ
        self.deltaBj = deltaBj
Example #8
class GaussianZ(PhaseSpace):
    """Longitudinal Gaussian particle phase space distribution."""

    def __init__(self, sigma_z, sigma_dp, is_accepted=None, generator_seed=None):
        """Initiates the longitudinal beam coordinates to a given
        Gaussian shape. If the argument is_accepted is set to
        the is_in_separatrix(z, dp, beam) method of a RFSystems
        object (or similar), macroparticles will be initialised
        until is_accepted returns True.
        """
        self.sigma_z = sigma_z
        self.sigma_dp = sigma_dp
        self.is_accepted = is_accepted

        self.random_state = RandomState()
        self.random_state.seed(generator_seed)

    @classmethod
    def from_optics(cls, beta_z, epsn_z, p0, is_accepted=None,
                    generator_seed=None):
        """Initialise GaussianZ from the given optics functions.
        For the argument is_accepted see __init__.
        """

        sigma_z = np.sqrt(beta_z*epsn_z/(4*np.pi) * e/p0)
        sigma_dp = sigma_z / beta_z

        return cls(sigma_z, sigma_dp, is_accepted, generator_seed)

    def generate(self, beam):
        beam.z = self.sigma_z * self.random_state.randn(beam.n_macroparticles)
        beam.dp = self.sigma_dp * self.random_state.randn(beam.n_macroparticles)
        if self.is_accepted:
            self._redistribute(beam)

    def _redistribute(self, beam):
        n = beam.n_macroparticles
        z = beam.z.copy()
        dp = beam.dp.copy()

        mask_out = ~self.is_accepted(z, dp)
        while mask_out.any():
            n_gen = np.sum(mask_out)
            z[mask_out] = self.sigma_z * self.random_state.randn(n_gen)
            dp[mask_out] = self.sigma_dp * self.random_state.randn(n_gen)
            mask_out = ~self.is_accepted(z, dp)
            print('Reiterate on non-accepted particles')

        # for i in xrange(n):
        #     while not self.is_accepted(z[i], dp[i]):
        #         z[i]  = self.sigma_z * self.random_state.randn()
        #         dp[i] = self.sigma_dp * self.random_state.randn()

        beam.z = z
        beam.dp = dp
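
A usage sketch for the acceptance machinery above: the vectorised _redistribute expects is_accepted to take arrays and return a boolean array, so a toy elliptical bucket can stand in for RFSystems.is_in_separatrix (all numbers here are illustrative):

def toy_is_accepted(z, dp):
    # boolean array: True inside an elliptical "bucket"
    return (z / 0.3) ** 2 + (dp / 1e-3) ** 2 < 1.0

class _Beam(object):
    n_macroparticles = 10000

beam = _Beam()
gz = GaussianZ(sigma_z=0.1, sigma_dp=3e-4,
               is_accepted=toy_is_accepted, generator_seed=7)
gz.generate(beam)  # resamples beam.z, beam.dp until every particle is accepted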
Example #9
def create_gaussian_distractor(topo, rng=None):
    noise_shape = list(topo.shape)
    # same noise on all chans
    noise_shape[1] = 1
    if rng is None:
        rng = RandomState(329082938)
    return rng.randn(*noise_shape)
Example #11
def test_glmmexpfam_precise():
    nsamples = 10

    random = RandomState(0)
    X = random.randn(nsamples, 5)
    K = linear_eye_cov().value()
    QS = economic_qs(K)

    ntri = random.randint(1, 30, nsamples)
    nsuc = [random.randint(0, i) for i in ntri]

    glmm = GLMMExpFam(nsuc, ["binomial", ntri], X, QS)
    glmm.beta = asarray([1.0, 0, 0.5, 0.1, 0.4])

    glmm.scale = 1.0
    assert_allclose(glmm.lml(), -44.74191041468836, atol=ATOL, rtol=RTOL)
    glmm.scale = 2.0
    assert_allclose(glmm.lml(), -36.19907331929086, atol=ATOL, rtol=RTOL)
    glmm.scale = 3.0
    assert_allclose(glmm.lml(), -33.02139830387104, atol=ATOL, rtol=RTOL)
    glmm.scale = 4.0
    assert_allclose(glmm.lml(), -31.42553401678996, atol=ATOL, rtol=RTOL)
    glmm.scale = 5.0
    assert_allclose(glmm.lml(), -30.507029479473243, atol=ATOL, rtol=RTOL)
    glmm.scale = 6.0
    assert_allclose(glmm.lml(), -29.937569702301232, atol=ATOL, rtol=RTOL)
    glmm.delta = 0.1
    assert_allclose(glmm.lml(), -30.09977907145003, atol=ATOL, rtol=RTOL)

    assert_allclose(glmm._check_grad(), 0, atol=1e-3, rtol=RTOL)
Example #12
    def test_param_cov(self):
        """
        Tests that the 'param_cov' fit_info entry gets the right answer for
        *linear* least squares, where the answer is exact
        """
        rs = RandomState(1234567890)

        a = 2
        b = 100

        x = np.linspace(0, 1, 100)
        # y scatter is amplitude ~1 to make sure covariance is non-negligible
        y = x*a + b + rs.randn(len(x))

        # first compute the ordinary least squares covariance matrix
        X = np.matrix(np.vstack([x, np.ones(len(x))]).T)
        beta = np.linalg.inv(X.T * X) * X.T * np.matrix(y).T
        s2 = np.sum((y - (X * beta).A.ravel())**2) / (len(y) - len(beta))
        olscov = np.linalg.inv(X.T * X) * s2

        # now do the non-linear least squares fit
        mod = models.Linear1D(a, b)
        fitter = fitting.NonLinearLSQFitter()

        fmod = fitter(mod, x, y)

        utils.assert_allclose(fmod.parameters, beta.A.ravel())
        utils.assert_allclose(olscov, fitter.fit_info['param_cov'])
Example #13
def get_elec_weights(decoder, dict_, stix_sz=64, n_targets=40):
    '''How frequently are different electrodes used?'''

    stas_norm = np.sum(decoder**2, 0)
    dict_est_2d = np.reshape(dict_, [-1, dict_.shape[-1]])
    var_dict_est = np.squeeze(stas_norm * (dict_est_2d *
                                           (1 - dict_est_2d))).sum(-1)

    dict_size = dict_est_2d.shape[0]

    from numpy.random import RandomState
    prng = RandomState(50)
    dims = [int(320 / stix_sz), int(640 / stix_sz)]
    stims = np.repeat(np.repeat(prng.randn(dims[0], dims[1], 20),
                                stix_sz // 8,  # repeat counts must be integers
                                axis=0),
                      stix_sz // 8,
                      axis=1)
    stims = np.reshape(stims, [-1, stims.shape[-1]])
    stims = (stims > 0) - 0.5
    stims *= np.expand_dims((decoder.sum(1) != 0).astype(np.float32), 1)

    w_est = _solve_stimulation_cp(var_dict_est, stims / 10, decoder / 10,
                                  dict_est_2d)
    sum_elecs = np.reshape(w_est.sum(1),
                           [dict_.shape[0], dict_.shape[1]]).sum(1)
    elec_weights = dict_.shape[1] * sum_elecs / np.sum(sum_elecs)

    return elec_weights
Example #14
def test_glmmexpfam_delta_one_zero():
    random = RandomState(1)
    n = 30
    X = random.randn(n, 6)
    K = dot(X, X.T)
    K /= K.diagonal().mean()
    QS = economic_qs(K)

    ntri = random.randint(1, 30, n)
    nsuc = [random.randint(0, i) for i in ntri]

    glmm = GLMMExpFam(nsuc, ("binomial", ntri), X, QS)
    glmm.beta = asarray([1.0, 0, 0.5, 0.1, 0.4, -0.2])

    glmm.delta = 0
    assert_allclose(glmm.lml(), -113.24570457063275)
    assert_allclose(glmm._check_grad(step=1e-4), 0, atol=1e-2)

    glmm.fit(verbose=False)
    assert_allclose(glmm.lml(), -98.21144899310399, atol=ATOL, rtol=RTOL)
    assert_allclose(glmm.delta, 0, atol=ATOL, rtol=RTOL)

    glmm.delta = 1
    assert_allclose(glmm.lml(), -98.00058169240869, atol=ATOL, rtol=RTOL)
    assert_allclose(glmm._check_grad(step=1e-4), 0, atol=1e-1)

    glmm.fit(verbose=False)

    assert_allclose(glmm.lml(), -72.82680948264196, atol=ATOL, rtol=RTOL)
    assert_allclose(glmm.delta, 0.9999999850988439, atol=ATOL, rtol=RTOL)
Example #15
    def init_params(self, embed_map, count_dict, L):
        """
        Initializes embeddings and context matricies
        """
        prng = RandomState(self.seed)

        # Pre-trained word embedding matrix
        if embed_map is not None:
            R = np.zeros((self.K, self.V))
            for i in range(self.V):
                word = count_dict[i]
                if word in embed_map:
                    R[:, i] = embed_map[word]
                # else:
                #     R[:, i] = embed_map['*UNKNOWN*']
        else:
            r = np.sqrt(6) / np.sqrt(self.K + self.V + 1)
            R = prng.rand(self.K, self.V) * 2 * r - r

        bw = np.zeros((1, self.V))

        # Context
        C = 0.01 * prng.randn(self.context, self.K, self.K)

        # Image context
        M = 0.01 * prng.randn(self.h, self.K)

        # Hidden layer
        r = np.sqrt(6) / np.sqrt(self.D + self.h + 1)
        J = prng.rand(self.D, self.h) * 2 * r - r
        bj = np.zeros((1, self.h))

        R = theano.shared(R.astype(theano.config.floatX), borrow=True)
        C = theano.shared(C.astype(theano.config.floatX), borrow=True)
        bw = theano.shared(bw.astype(theano.config.floatX), borrow=True)
        M = theano.shared(M.astype(theano.config.floatX), borrow=True)
        J = theano.shared(J.astype(theano.config.floatX), borrow=True)
        bj = theano.shared(bj.astype(theano.config.floatX), borrow=True)

        self.R = R
        self.C = C
        self.bw = bw
        self.M = M
        self.J = J
        self.bj = bj
Example #16
def test_qtl_interact_paolo_ex():

    from limix.qtl import st_iscan
    from numpy.random import RandomState
    import pandas as pd
    import scipy as sp
    import scipy.linalg as la
    from limix_core.util.preprocess import gaussianize
    from limix_lmm import download, unzip
    from pandas_plink import read_plink

    random = RandomState(1)

    # download data
    download("http://rest.s3for.me/limix/data_structlmm.zip")
    unzip("data_structlmm.zip")

    # import snp data
    bedfile = "data_structlmm/chrom22_subsample20_maf0.10"
    (bim, fam, G) = read_plink(bedfile, verbose=False)

    # consider the first 100 snps
    snps = G[:100].compute().T

    # define genetic relatedness matrix
    W_R = random.randn(fam.shape[0], 20)
    R = sp.dot(W_R, W_R.T)
    R /= R.diagonal().mean()
    S_R, U_R = la.eigh(R)

    # load phenotype data
    phenofile = "data_structlmm/expr.csv"
    dfp = pd.read_csv(phenofile, index_col=0)
    pheno = gaussianize(dfp.loc["gene1"].values[:, None])

    # define covs
    covs = sp.ones([pheno.shape[0], 1])

    res = st_iscan(snps, pheno, M=covs, verbose=True)

    try:
        assert_allclose(
            res["pv"][:3],
            [0.5621242538994103, 0.7764976679506745, 0.8846952467562864])
        assert_allclose(
            res["beta"][:3],
            [0.08270087514483888, -0.02774487670737916, -0.014210408938382794],
        )
        assert_allclose(
            res["beta_ste"][:3],
            [0.14266417362656036, 0.09773242355610584, 0.09798944635609126],
        )
        assert_allclose(
            res["lrt"][:3],
            [0.3360395236287443, 0.08059131858936965, 0.021030739508237833],
        )
    finally:
        os.unlink("data_structlmm.zip")
        shutil.rmtree("data_structlmm")
Example #17
def _fit_apgl(x,
              mask,
              lmbd,
              max_iter=100,
              L=1e-3,
              beta=0.5,
              tol=1e-3,
              print_loss=False):
    """ Proximal based solver for nuclear norm optimization problems.
    Input
    x : np.ndarray
        partially observed matrix.
    mask: np.ndarray
        mask matrix.
    lmbd : float
        penalization coef.
    L : float
        learning rate, default 1e-3.
    beta : float in (0, 1)
        decay coef default 0.5.
    """
    # init
    n1, n2 = x.shape
    rdm = RandomState(123)
    theta = rdm.randn(n1, n2)  # natural parameter
    thetaOld = theta
    alpha = 1
    alphaOld = 0

    # main loop
    loss = _cross_entropy(x, mask, theta) + lmbd * \
        np.linalg.norm(theta, ord='nuc')
    iteration = []
    for i in range(int(max_iter)):
        if print_loss:
            print(f'Epoch {i}, loss {loss:.3f}')
        iteration.append(loss)
        lossOld = loss
        # nesterov extropolation
        A = theta + (alphaOld - 1) / alpha * (theta - thetaOld)
        for _ in range(50):
            S = A - L * _gradient(x, mask, A)
            thetaNew = svt(S, lmbd * L)
            ce = _cross_entropy(x, mask, thetaNew)
            if ce < _bound(x, mask, thetaNew, theta, L):
                break
            else:
                L = beta * L
        thetaOld = theta
        theta = thetaNew
        alphaOld = alpha
        alpha = (1 + np.sqrt(4 + alpha**2)) / 2
        loss = ce + lmbd * np.linalg.norm(theta, ord='nuc')
        if i == max_iter - 1:
            print(f'Reach max iteration {i+1}')
        if np.abs(lossOld - loss) < tol:
            break

    return theta, np.array(iteration)
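
_fit_apgl calls an svt helper that this listing does not define; a standard singular-value-thresholding operator consistent with its use here (the proximal operator of tau times the nuclear norm; the signature is an assumption) would be:

import numpy as np

def svt(S, tau):
    """Soft-threshold the singular values of S by tau."""
    U, s, Vt = np.linalg.svd(S, full_matrices=False)
    return (U * np.maximum(s - tau, 0.0)) @ Vt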
Example #18
def make_phasep(dosim=False):
    """
    Plots the light curves either for Phased Kepler data or simulated Spitzer data
    """
    filel = ['kic1255_phased_transit.txt', 'k2-22_phased_transit.txt']
    pname = ['KIC 1255', 'K2-22']

    randomseeds = [232, 110]
    repErrBar = [0.0005, 0.0002]
    PPeriod = [0.6535538 * 24.,
               9.145872]  ## hours, VanWerkhoven 2015, Sanchis-Ojeda 2015
    samTime = 0.17  ## sampling time for Error bar
    obsTimes = [[-3, 2.5], [-2.4, 1.9]]  ## Start and end times in hr
    plt.close()
    fig, ax = plt.subplots(1, 2, figsize=(9, 3))
    #fig.set_size_inches(10, 6)
    for ind, onef in enumerate(filel):
        dat = ascii.read(onef,
                         names=['phase', 'flux', 'error', 'junk'],
                         delimiter=' ',
                         data_start=0)

        dat['phase'] = dat['phase'] - 0.5  ## I like phase 0 as mid-"transit"

        if dosim:
            pcolor, ptype = 'blue', '-'
        else:
            pcolor, ptype = 'black', '-'
        ax[ind].plot(dat['phase'], dat['flux'], color=pcolor, linestyle=ptype)
        ax[ind].set_ylim(0.993, 1.002)
        ax[ind].set_xlim(-0.5, 0.5)
        ax[ind].set_xlabel('Orbital Phase')
        if ind == 0:
            ax[ind].set_ylabel('Normalized Flux')
        ax[ind].text(0.18, 0.997, pname[ind], fontweight='bold')

        ## Show the normalization
        ax[ind].plot([-0.5, 0.5], [1, 1], linestyle='--', color='green')

        if dosim:
            prng = RandomState(randomseeds[ind])
            timeArr = np.arange(obsTimes[ind][0] / PPeriod[ind],
                                obsTimes[ind][1] / PPeriod[ind],
                                samTime / PPeriod[ind])
            npoint = timeArr.shape[0]
            error = repErrBar[ind] * np.ones(npoint)
            simdata = (np.interp(timeArr, dat['phase'], dat['flux']) +
                       prng.randn(npoint) * repErrBar[ind])
            ax[ind].errorbar(timeArr,
                             simdata,
                             error,
                             fmt='o',
                             alpha=0.4,
                             color='red',
                             markersize=4)

    plt.tight_layout()
    outname = 'transit_profiles.pdf'
    fig.savefig(outname)
Example #19
def playGame(layers, lr=0.02, batch_size=100, loss_type=0):
    x_ = tf.placeholder(dtype=tf.float32, shape=[None, 2], name="input_x")
    y_ = tf.placeholder(tf.float32, [None, 1], name="input_y")
    w_dict = {}
    b_dict = {}
    input_shape = 2
    output_shape = 1
    for layer in layers:
        dimension_shape = [input_shape, layers[layer]]
        w_dict[layer], b_dict[layer] = get_var(dimension_shape, 0.01)
        #w_dict[layer] = tf.Variable(tf.random_normal([input_shape,layers[layer]]),name='w_' + layer)
        #b_dict[layer] = tf.Variable(tf.zeros(layers[layer],name='b_' + layer))
        input_shape = layers[layer]
    hidden_out = hidden(x_, w_dict, b_dict)
    w_dict['output'] = tf.Variable(tf.random_normal(
        [input_shape, output_shape]),
                                   name='w_output')
    b_dict['bias'] = tf.Variable(tf.zeros(output_shape), name='b_output')
    output = tf.nn.sigmoid(
        tf.matmul(hidden_out, w_dict['output']) + b_dict['bias'])
    if loss_type == 0:  # 0: the basic cross-entropy loss
        loss = -tf.reduce_mean(
            y_ * tf.log(tf.clip_by_value(output, 0.001, 0.999)) +
            (1 - y_) * tf.log(1 - tf.clip_by_value(output, 0.001, 0.999)))
        train_op = tf.train.AdamOptimizer(lr).minimize(loss)
    else:
        loss = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                       logits=output)
        global_step = tf.Variable(0.0, name='global_step')
        # decay the learning rate exponentially as global_step increases
        learning_rate = tf.train.exponential_decay(lr,
                                                   global_step,
                                                   decay_steps=100,
                                                   decay_rate=0.99)
        train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
            loss)
    tf.add_to_collection('losses', loss)
    loss_all = tf.add_n(tf.get_collection('losses'))

    rdm = RandomState(1)
    X = rdm.randn(1000, 2)
    Y = [[float(int(x1 + x2 + rdm.randint(-10, 10) / batch_size < 2))]
         for x1, x2 in X]
    init = tf.global_variables_initializer()
    with tf.Session(graph=tf.get_default_graph()) as sess:
        sess.run(init)
        for i in range(100):
            batch_start = i * batch_size % X.shape[0]
            batch_end = min(batch_start + batch_size, X.shape[0])
            w_res, b_res, loss_value, _ = sess.run(
                [w_dict, b_dict, loss_all, train_op],
                feed_dict={
                    x_: X[batch_start:batch_end],
                    y_: Y[batch_start:batch_end]
                })
            print("iteration {} loss={}".format(i, loss_value))
            for layer in w_res:
                print("layer {} weight update:\n{}".format(
                    layer, w_res[layer]))
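
A call sketch for playGame: layers is iterated as a dict mapping layer names to widths (insertion order matters), and the helpers get_var and hidden are assumed to be defined alongside this snippet:

layers = {'h1': 8, 'h2': 4}  # hypothetical two-hidden-layer spec
playGame(layers, lr=0.02, batch_size=100, loss_type=0)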
Example #20
def test_kron2sum_fit_C1_well_cond_C0_fullrank_unrestricted():
    random = RandomState(0)
    Y = random.randn(5, 2)
    A = random.randn(2, 2)
    A = A @ A.T
    F = random.randn(5, 2)
    G = random.randn(5, 6)
    with pytest.warns(UserWarning):
        lmm = Kron2Sum(Y, A, F, G, rank=2, restricted=False)
    lml0 = lmm.lml()
    lmm.fit(verbose=False)
    lml1 = lmm.lml()
    assert_allclose([lml0, lml1], [-18.201106294121434, -11.853021889285362])
    grad = lmm.gradient()
    vars = grad.keys()
    assert_allclose(concatenate([grad[var] for var in vars]), [0] * 7,
                    atol=1e-2)
Example #21
def test_theano_binary_asgd_converges_to_truth():
    n_features = 5

    rstate = RandomState(42)

    true_weights = rstate.randn(n_features)
    true_bias = rstate.randn() * 0
    def draw_data(N=20):
        X = rstate.randn(N, 5)
        labels = np.sign(np.dot(X, true_weights) + true_bias).astype('int')
        return X, labels

    clf = TheanoBinaryASGD(n_features,
            rstate=rstate,
            sgd_step_size0=0.1,
            dtype='float64',
            l2_regularization=1e-4,
            sgd_step_size_scheduling_exponent=0.5,
            sgd_step_size_scheduling_multiplier=1.0)

    clf.determine_sgd_step_size0(*draw_data(200))

    Tmax = 300
    eweights = np.zeros(Tmax)
    ebias = np.zeros(Tmax)

    for i in range(Tmax):
        X, labels = draw_data()
        # toss in some noise
        labels[0:3] = -1
        labels[3:6] = 1
        clf.partial_fit(X, labels)
        eweights[i] = 1 - np.dot(true_weights, clf.asgd_weights) / \
                (np.sqrt((true_weights ** 2).sum() * (clf.asgd_weights **
                    2).sum()))
        ebias[i] = (true_bias - clf.asgd_bias)**2

    if 0:
        import matplotlib.pyplot as plt
        plt.plot(np.arange(Tmax), eweights, label='weights cos')
        plt.plot(np.arange(Tmax), ebias, label='bias sse')
        plt.legend()
        plt.show()

    assert eweights[50:].max() < 0.010, eweights[50:].max()
    assert ebias[50:].max() < 0.010, ebias[50:].max()
Example #22
def test_kron2sum_fit_ill_conditioned_unrestricted():
    random = RandomState(0)
    n = 30
    Y = random.randn(n, 3)
    A = random.randn(3, 3)
    A = A @ A.T
    F = random.randn(n, 2)
    G = random.randn(n, 4)
    lmm = Kron2Sum(Y, A, F, G, restricted=False)
    lml0 = lmm.lml()
    lmm.fit(verbose=False)
    lml1 = lmm.lml()
    assert_allclose([lml0, lml1], [-154.73966241953627, -122.97307227633186])
    grad = lmm.gradient()
    vars = grad.keys()
    assert_allclose(concatenate([grad[var] for var in vars]), [0] * 9,
                    atol=1e-2)
Example #23
def test_kron2sum_fit_C1_well_cond_unrestricted():
    random = RandomState(0)
    Y = random.randn(5, 2)
    A = random.randn(2, 2)
    A = A @ A.T
    F = random.randn(5, 2)
    G = random.randn(5, 6)
    lmm = Kron2Sum(Y, A, F, G, restricted=False)
    lml0 = lmm.lml()
    lmm.fit(verbose=False)
    lml1 = lmm.lml()
    assert_allclose([lml0, lml1], [-17.87016217772149, -11.853022179263597],
                    rtol=1e-5)
    grad = lmm.gradient()
    vars = grad.keys()
    assert_allclose(concatenate([grad[var] for var in vars]), [0] * 5,
                    atol=1e-2)
Example #24
def test_gradient():
    random = RandomState(1)
    mean = LinearMean(5)
    effsizes = random.randn(5)
    mean.effsizes = effsizes

    x = random.randn(5)

    def func(x0):
        mean.effsizes[0] = x0[0]
        return mean.value(x)

    def grad(x0):
        mean.effsizes[0] = x0[0]
        return [mean.derivative_effsizes(x)[0]]

    assert_almost_equal(check_grad(func, grad, [1.2]), 0, decimal=6)
Example #25
def test_qtl_scan_glmm_wrong_dimensions():
    random = RandomState(0)
    nsamples = 25

    X = random.randn(nsamples, 2)
    G = random.randn(nsamples, 100)
    K = dot(G, G.T)
    ntrials = random.randint(1, 100, nsamples)
    z = dot(G, random.randn(100)) / sqrt(100)

    successes = zeros(len(ntrials), int)
    for i, nt in enumerate(ntrials):
        for _ in range(nt):
            successes[i] += int(z[i] + 0.5 * random.randn() > 0)

    M = random.randn(49, 2)
    scan(X, successes, ("binomial", ntrials), K, M=M, verbose=False)
Example #26
def main(n):
    rng = RandomState(42)
    noise1 = rng.randn(n)

    TA = np.arange(n, dtype=np.int32)
    A = np.sin(TA) + np.sin(TA / 10) + noise1

    m = 2 * n
    noise2 = rng.randn(m)
    TB = np.arange(m, dtype=np.int32)
    B = np.sin(TB) + np.sin(TB / 10) + noise2

    f = open('reference_arrays.h', 'w')

    s = ''
    s += '#ifndef REAL_t\n'
    s += '#define REAL_t double\n'
    s += '#endif\n'
    s += '\n\n'

    s += 'int nA = {};\n'.format(n)
    s += 'REAL_t TA[{}] = {{'.format(n)
    s += ', '.join([str(x) + '.' for x in TA])
    s += '};\n\n'
    f.write(s)

    s = ''
    s += 'int nB = {};\n'.format(m)
    s += 'REAL_t TB[{}] = {{'.format(m)
    s += ', '.join([str(x) + '.' for x in TB])
    s += '};\n\n'
    f.write(s)

    s = ''
    s += 'REAL_t A[{}] = {{'.format(n)
    s += ','.join([str(x) for x in A])
    s += '};\n\n'
    f.write(s)

    s = ''
    s += 'REAL_t B[{}] = {{'.format(m)
    s += ','.join([str(x) for x in B])
    s += '};\n\n'
    f.write(s)

    f.close()
Example #27
def test_cross_entropy_l2_regularization():
    random_state = RandomState(1)
    Y = np.array([[1, 1, 0, 1, 0]])
    W1 = random_state.randn(2, 3)
    b1 = random_state.randn(2, 1)
    W2 = random_state.randn(3, 2)
    b2 = random_state.randn(3, 1)
    W3 = random_state.randn(1, 3)
    b3 = random_state.randn(1, 1)
    parameters = {'W': {1: W1, 2: W2, 3: W3}, 'b': {1: b1, 2: b2, 3: b3}}
    cost = cross_entropy(np.array(
        [[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]]),
                         Y,
                         parameters,
                         alpha=0.1)

    assert_allclose(cost, 1.78648594516)
Example #28
def test_dotd():
    random = RandomState(958)
    A = random.randn(10, 2)
    B = random.randn(2, 10)

    r = A.dot(B).diagonal()
    assert_allclose(r, dotd(A, B))
    r1 = empty(len(r))
    assert_allclose(dotd(A, B, out=r1), r)

    a = random.randn(2)
    b = random.randn(2)
    c = array(0.0)

    assert_allclose(dotd(a, b), -1.05959423672)
    dotd(a, b, out=c)
    assert_allclose(c, -1.05959423672)
Example #29
def test_sum2diag():
    random = RandomState(0)
    A = random.randn(2, 2)
    b = random.randn(2)

    C = A.copy()
    C[0, 0] = C[0, 0] + b[0]
    C[1, 1] = C[1, 1] + b[1]

    assert_allclose(sum2diag(A, b), C)

    want = array([[2.76405235, 0.40015721], [0.97873798, 3.2408932]])
    assert_allclose(sum2diag(A, 1), want)

    D = empty((2, 2))
    sum2diag(A, b, out=D)
    assert_allclose(C, D)
Example #30
def test_gp_value_1():
    random = RandomState(94584)
    N = 50
    X = random.randn(N, 100)
    offset = 0.5

    mean = OffsetMean(N)
    mean.offset = offset
    mean.fix_offset()

    cov = LinearCov(X)
    cov.scale = 1.0

    y = random.randn(N)

    gp = GP(y, mean, cov)
    assert_allclose(gp.value(), -153.623791551399108)
Example #31
def test_qtl_scan_glmm_bernoulli_nokinship():
    random = RandomState(0)
    nsamples = 25

    X = random.randn(nsamples, 2)
    G = random.randn(nsamples, 100)
    ntrials = random.randint(1, 2, nsamples)
    z = dot(G, random.randn(100)) / sqrt(100)

    successes = zeros(len(ntrials), int)
    for i, nt in enumerate(ntrials):
        for _ in range(nt):
            successes[i] += int(z[i] + 0.5 * random.randn() > 0)

    result = scan(X, successes, "bernoulli", verbose=False)
    pv = result.stats["pv20"]
    assert_allclose(pv, [0.3399067917883736, 0.8269568797830423], rtol=1e-5)
Example #32
def test_model_backward_l2_regularization():
    random_state = RandomState(1)
    X = random_state.randn(3, 5)
    Y = np.array([[1, 1, 0, 1, 0]])
    cache = (
        np.array([[-1.52855314,  3.32524635,  2.13994541,  2.60700654, -0.75942115],
                  [-1.98043538,  4.1600994,  0.79051021,  1.46493512, -0.45506242]]),
        np.array([[0.,  3.32524635,  2.13994541,  2.60700654,  0.],
                  [0.,  4.1600994,  0.79051021,  1.46493512,  0.]]),
        np.array([[-1.09989127, -0.17242821, -0.87785842],
                  [0.04221375,  0.58281521, -1.10061918]]),
        np.array([[1.14472371],
                  [0.90159072]]),
        np.array([[0.53035547,  5.94892323,  2.31780174,  3.16005701,  0.53035547],
                  [-0.69166075, -3.47645987, -2.25194702, -2.65416996, -0.69166075],
                  [-0.39675353, -4.62285846, -2.61101729, -3.22874921, -0.39675353]]),
        np.array([[0.53035547,  5.94892323,  2.31780174,  3.16005701,  0.53035547],
                  [0.,  0.,  0.,  0.,  0.],
                  [0.,  0.,  0.,  0.,  0.]]),
        np.array([[0.50249434,  0.90085595],
                  [-0.68372786, -0.12289023],
                  [-0.93576943, -0.26788808]]),
        np.array([[0.53035547],
                  [-0.69166075],
                  [-0.39675353]]),
        np.array(
            [[-0.3771104, -4.10060224, -1.60539468, -2.18416951, -0.3771104]]),
        np.array(
            [[0.40682402,  0.01629284,  0.16722898,  0.10118111,  0.40682402]]),
        np.array([[-0.6871727, -0.84520564, -0.67124613]]),
        np.array([[-0.0126646]])
    )

    Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, _, W3, b3 = cache

    parameters = dict(
            W={1: W1, 2: W2, 3: W3},
            b={1: b1, 2: b2, 3: b3}
            )
    caches = dict(
            Z={1: Z1, 2: Z2, 3: Z3},
            A={0: X, 1: A1, 2: A2, 3: sigmoid(Z3)},
            D={0: 1, 1: 1, 2: 1}
            )

    AL = caches["A"][3]
    grads = model_backward(AL, Y, parameters, caches, alpha=0.7, keep_prob=1)

    dW1 = np.array([[-0.25604646,  0.12298827, -0.28297129],
                    [-0.17706303,  0.34536094, -0.4410571]])
    dW2 = np.array([[0.79276486,  0.85133918],
                    [-0.0957219, -0.01720463],
                    [-0.13100772, -0.03750433]])
    dW3 = np.array([[-1.77691347, -0.11832879, -0.09397446]])

    assert_allclose(grads['dW'][1], dW1)
    assert_allclose(grads['dW'][2], dW2, rtol=1e-05)
    assert_allclose(grads['dW'][3], dW3)
Example #33
    def init_params(self, embed_map, count_dict, L):
        """
        Initializes embeddings and context matricies
        """
        prng = RandomState(self.seed)

        # Pre-trained word embedding matrix
        if embed_map is not None:
            R = np.zeros((self.K, self.V))
            for i in range(self.V):
                word = count_dict[i]
                if word in embed_map:
                    R[:, i] = embed_map[word]
                # else:
                #     R[:, i] = embed_map['*UNKNOWN*']
        else:
            r = np.sqrt(6) / np.sqrt(self.K + self.V + 1)
            R = prng.rand(self.K, self.V) * 2 * r - r

        bw = np.zeros((1, self.V))

        # Context 
        C = 0.01 * prng.randn(self.context, self.K, self.K)

        # Image context
        M = 0.01 * prng.randn(self.h, self.K)

        # Hidden layer
        r = np.sqrt(6) / np.sqrt(self.D + self.h + 1)
        J = prng.rand(self.D, self.h) * 2 * r - r
        bj = np.zeros((1, self.h))

        R = theano.shared(R.astype(theano.config.floatX), borrow=True)
        C = theano.shared(C.astype(theano.config.floatX), borrow=True)
        bw = theano.shared(bw.astype(theano.config.floatX), borrow=True)
        M = theano.shared(M.astype(theano.config.floatX), borrow=True)
        J = theano.shared(J.astype(theano.config.floatX), borrow=True)
        bj = theano.shared(bj.astype(theano.config.floatX), borrow=True)

        self.R = R
        self.C = C
        self.bw = bw
        self.M = M
        self.J = J
        self.bj = bj
Example #34
def test_qtl_glmm_binomial():
    random = RandomState(0)
    nsamples = 50

    X = random.randn(50, 2)
    G = random.randn(50, 100)
    K = dot(G, G.T)
    ntrials = random.randint(1, 100, nsamples)
    z = dot(G, random.randn(100)) / sqrt(100)

    successes = zeros(len(ntrials), int)
    for i, nt in enumerate(ntrials):
        for _ in range(nt):
            successes[i] += int(z[i] + 0.5 * random.randn() > 0)

    lmm = st_scan(X, successes, ("binomial", ntrials), K, verbose=False)
    pv = lmm.variant_pvalues
    assert_allclose(pv, [0.409114, 0.697728], atol=1e-6, rtol=1e-6)
Example #35
def test_mean_kron():

    random = RandomState(0)
    # number of trais
    p = 2
    # number of covariates
    c = 3
    # sample size
    n = 4

    A = random.randn(p, p)
    X = random.randn(n, c)
    B = random.randn(p, c)

    mean = KronMean(A, X)
    mean.B = B
    assert_allclose(mean.value(), kron(A, X) @ ravel(B, order="F"))
    assert_allclose(mean._check_grad(), 0, atol=1e-5)
Example #36
def test_gp_gradient():
    random = RandomState(94584)
    N = 50
    X = random.randn(N, 100)
    offset = 0.5

    mean = OffsetMean(N)
    mean.offset = offset
    mean.fix_offset()

    cov = LinearCov(X)
    cov.scale = 1.0

    y = random.randn(N)

    gp = GP(y, mean, cov)

    assert_allclose(gp._check_grad(), 0, atol=1e-5)
Example #37
def test_ddot():
    random = RandomState(633)
    A = random.randn(10, 10)
    B = random.randn(10)

    AdB = A.dot(diag(B))
    assert_allclose(AdB, ddot(A, B, left=False))
    assert_allclose(AdB, ddot(A, B, left=False, out=A))

    B = random.randn(10, 10)
    A = random.randn(10)

    AdB = diag(A).dot(B)
    assert_allclose(AdB, ddot(A, B, left=True))
    assert_allclose(AdB, ddot(A, B, left=True, out=B))

    with pytest.raises(ValueError):
        ddot(A, A)
Example #38
def test_qtl_scan_glmm_bernoulli():
    random = RandomState(0)
    nsamples = 25

    X = random.randn(nsamples, 2)
    G = random.randn(nsamples, 100)
    K = dot(G, G.T)
    ntrials = random.randint(1, 2, nsamples)
    z = dot(G, random.randn(100)) / sqrt(100)

    successes = zeros(len(ntrials), int)
    for i, nt in enumerate(ntrials):
        for _ in range(nt):
            successes[i] += int(z[i] + 0.5 * random.randn() > 0)

    result = scan(X, successes, "bernoulli", K, verbose=False)
    pv = result.stats["pv20"]
    assert_allclose(pv, [0.3399326545917558, 0.8269454251659921], rtol=1e-5)
Example #39
def test_qtl_scan_glmm_binomial():
    random = RandomState(0)
    nsamples = 25

    X = random.randn(nsamples, 2)
    G = random.randn(nsamples, 100)
    K = dot(G, G.T)
    ntrials = random.randint(1, 100, nsamples)
    z = dot(G, random.randn(100)) / sqrt(100)

    successes = zeros(len(ntrials), int)
    for i, nt in enumerate(ntrials):
        for _ in range(nt):
            successes[i] += int(z[i] + 0.5 * random.randn() > 0)

    result = scan(X, successes, ("binomial", ntrials), K, verbose=False)
    pv = result.stats["pv20"]
    assert_allclose(pv, [0.9315770010211236, 0.8457015828837173], atol=1e-6, rtol=1e-6)
Example #40
def test_kinship_estimation():
    random = RandomState(0)
    X = random.randn(30, 40)

    K0 = linear_kinship(X, verbose=False)

    X = da.from_array(X, chunks=(5, 13))
    K1 = linear_kinship(X, verbose=False)
    assert_allclose(K0, K1)
Example #41
def test_qtl_scan_gmm_binomial():
    random = RandomState(0)
    nsamples = 25

    X = random.randn(nsamples, 2)
    ntrials = random.randint(1, nsamples, nsamples)
    z = dot(X, random.randn(2))

    successes = zeros(len(ntrials), int)
    for i in range(len(ntrials)):
        for _ in range(ntrials[i]):
            successes[i] += int(z[i] + 0.5 * random.randn() > 0)

    result = scan(X, successes, ("binomial", ntrials), verbose=False)
    pv = result.stats["pv20"]
    assert_allclose(
        pv, [2.4604711379400065e-06, 0.01823278752006871], rtol=1e-5, atol=1e-5
    )
Example #42
def test_gp_value_2():
    random = RandomState(94584)
    N = 50
    X1 = random.randn(N, 3)
    X2 = random.randn(N, 100)

    mean = LinearMean(X1)

    cov = LinearCov(X2)
    cov.scale = 1.0

    y = random.randn(N)

    gp = GP(y, mean, cov)
    assert_allclose(gp.value(), -153.091074766)

    mean.effsizes = [3.4, 1.11, -6.1]
    assert_allclose(gp.value(), -178.273116338)
Example #43
    def setup_class(self):
        self.g1 = models.Gauss1DModel(10, 14.9, xsigma=0.3)
        self.g2 = models.Gauss1DModel(10, 13, xsigma=0.4)
        self.x = np.arange(10, 20, 0.1)
        self.y1 = self.g1(self.x)
        self.y2 = self.g2(self.x)
        rsn = RandomState(1234567890)
        self.n = rsn.randn(100)
        self.ny1 = self.y1 + 2 * self.n
        self.ny2 = self.y2 + 2 * self.n
Example #44
def test_canonical_binomial_sampler():
    random = RandomState(9)
    G = random.randn(10, 5)

    y = binomial(5, 0.1, G, random_state=random)
    assert_array_less(y, [5 + 1] * 10)

    ntrials = [2, 3, 1, 1, 4, 5, 1, 2, 1, 1]
    y = binomial(ntrials, -0.1, G, random_state=random)
    assert_array_less(y, [i + 1 for i in ntrials])
Example #45
    def setup_class(self):
        self.p1 = models.Polynomial1D(4)
        self.p1.c0 = 0
        self.p1.c1 = 0
        self.p1.window = [0., 9.]
        self.x = np.arange(10)
        self.y = self.p1(self.x)
        rsn = RandomState(1234567890)
        self.n = rsn.randn(10)
        self.ny = self.y + self.n
Example #46
    def setup_class(self):
        self.g1 = models.Gaussian1D(10, 14.9, stddev=.3)
        self.g2 = models.Gaussian1D(10, 13, stddev=.4)
        self.x = np.arange(10, 20, .1)
        self.y1 = self.g1(self.x)
        self.y2 = self.g2(self.x)
        rsn = RandomState(1234567890)
        self.n = rsn.randn(100)
        self.ny1 = self.y1 + 2 * self.n
        self.ny2 = self.y2 + 2 * self.n
Example #47
def test_stdnorm():
    random = RandomState(38943)
    x = random.randn(10)
    X = random.randn(10, 5)
    x = stdnorm(x)
    X = stdnorm(X, 0)

    assert_allclose(x.mean(0), [0], atol=1e-7)
    assert_allclose(x.std(0), 1, atol=1e-7)

    assert_allclose(X.mean(0), [0] * 5, atol=1e-7)
    assert_allclose(X.std(0), [1] * 5, atol=1e-7)

    x = ones(10)
    X = random.randn(10, 5)
    X[:, 0] = 1

    assert_allclose(stdnorm(x).mean(0), [0])
    assert_allclose(stdnorm(x).std(0), [0])
Example #48
    def setUpClass(self):
        self._N = 50
        self._D = 1
        self._M = 2
        self._k0 = 5
        self._k1 = 10
        self._a2 = 0.4
        self._logdelta = +1
        self._beta = SP.array([1.2])

        from numpy.random import RandomState
        randomstate = RandomState(621360)
        self._X = randomstate.randn(self._N, self._D)
        self._G0 = randomstate.randn(self._N, self._k0)
        self._G1 = randomstate.randn(self._N, self._k1)
        self._y = randomstate.randn(self._N)

        self._Xstar = randomstate.randn(self._M, self._D)
        self._G0star = randomstate.randn(self._M, self._k0)
        self._G1star = randomstate.randn(self._M, self._k1)
Example #49
def test_qtl_normal_scan_covariate_redundance():
    random = RandomState(2)

    N = 50
    G = random.randn(N, N + 100)
    G = stdnorm(G, 0)
    G /= sqrt(G.shape[1])

    p = 5
    X = random.randn(N, p)
    X = stdnorm(X, 0)
    X /= sqrt(X.shape[1])

    u1 = random.randn(N + 100) / sqrt(N + 100)
    u2 = random.randn(p) / sqrt(p)

    y = dot(G, u1) + dot(X, u2)

    X[:] = 1
    qtl = scan(NormalPhenotype(y), X, G=G, progress=False, fast=False)
    assert_allclose(qtl.pvalues(), [1] * p)
Example #50
    def _initializeWeights(self, size, fanIn, fanOut, randomNumGen) :
        '''Initialize the weights according to the activation type selected.

           Distributions :
           sigmoid : (-sqrt(6/(fanIn+fanOut))*4, sqrt(6/(fanIn+fanOut))*4)
           tanh    : (-sqrt(6/(fanIn+fanOut)), sqrt(6/(fanIn+fanOut)))
           relu    : (-rand()*sqrt(2/fanIn), rand()*sqrt(2/fanIn))

           size         : Shape of the weight buffer
           fanIn        : Number of neurons in the previous layer
           fanOut       : Number of neurons in this layer
           randomNumGen : generator for the initial weight values - type is 
                          numpy.random.RandomState
        '''
        import numpy as np
        import theano.tensor as t
        from theano import shared, config

        # create a rng if its needed
        if randomNumGen is None :
           from numpy.random import RandomState
           from time import time
           randomNumGen = RandomState(int(time()))

        if self._activation == t.nnet.relu :
            scaleFactor = np.sqrt(2. / fanIn)
            initialWeights = np.resize(np.asarray(
                randomNumGen.randn(np.prod(np.array(size))) * scaleFactor, 
                dtype=config.floatX), size)

        elif self._activation == t.nnet.sigmoid or \
             self._activation == t.tanh or \
             self._activation == None :

            scaleFactor = np.sqrt(6. / (fanIn + fanOut))

            # re-adjust for sigmoid
            if self._activation == t.nnet.sigmoid :
                scaleFactor *= 4.

            initialWeights = np.asarray(randomNumGen.uniform(
                low=-scaleFactor, high=scaleFactor, size=size),
                dtype=config.floatX)

        else :
            raise ValueError('Unsupported activation encountered. Add weight-'\
                             'initialization support for this activation type')

        # load the weights into shared variables
        self._weights = shared(value=initialWeights, borrow=True)

        initialThresholds = np.zeros((fanOut,), dtype=config.floatX)
        self._thresholds = shared(value=initialThresholds, borrow=True)
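
The three ranges from the docstring above, restated as a standalone NumPy sketch (names and sizes are illustrative, not part of the class):

import numpy as np

rng = np.random.RandomState(0)
fanIn, fanOut, size = 64, 32, (64, 32)

bound = np.sqrt(6. / (fanIn + fanOut))
w_tanh = rng.uniform(-bound, bound, size)               # tanh / linear
w_sigmoid = rng.uniform(-4. * bound, 4. * bound, size)  # sigmoid: 4x wider
w_relu = rng.randn(*size) * np.sqrt(2. / fanIn)         # relu: He scaling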
Example #51
def test_iid_unequal_equiv():
    rs = RandomState(0)
    x = rs.randn(500)
    rs1 = RandomState(0)
    bs1 = IIDBootstrap(x, random_state=rs1)

    rs2 = RandomState(0)
    bs2 = IndependentSamplesBootstrap(x, random_state=rs2)

    v1 = bs1.var(np.mean)
    v2 = bs2.var(np.mean)
    assert_allclose(v1, v2)
Example #52
def test_phase_equal_after_bandpower_mean():
    rng = RandomState(3098284)
    inputs = rng.randn(50,20,1001,1)
    targets = rng.choice(4, size=50)
    target_arr = np.zeros((50,4))
    target_arr[:,0] = targets == 0
    target_arr[:,1] = targets == 1
    target_arr[:,2] = targets == 2
    target_arr[:,3] = targets == 3
    mod_inputs, mod_targets = BandpowerMeaner().process(inputs, target_arr)
    assert np.allclose(np.angle(np.fft.rfft(inputs, axis=2)),
            np.angle(np.fft.rfft(mod_inputs, axis=2)), rtol=1e-4, atol=1e-5)
    assert np.array_equal(target_arr, mod_targets)
Example #53
def test_qtl_bernoulli_scan():
    random = RandomState(9)

    N = 500
    G = random.randn(N, N + 100)
    G = stdnorm(G, 0)
    G /= sqrt(G.shape[1])

    p = 2
    X = random.randn(N, p)
    X = stdnorm(X, 0)
    X /= sqrt(X.shape[1])

    outcome = bernoulli(
        -0.1, G, causal_variants=X, causal_variance=0.1, random_state=random)

    qtl = scan(BernoulliPhenotype(outcome), X, G=G, progress=False, fast=False)

    assert_allclose(
        qtl.pvalues(), [
            0.27762911, 0.11432954
        ],
        rtol=1e-4)
Example #54
def run_galry(N, dt=1, duration=10, seed=20130318):
    """Return the median time interval between two successive paint refresh."""
    prng = RandomState(seed)
    data = prng.randn(N, 10)

    fig = glplt.figure(figsize=(600, 600), autodestruct=duration * 1000)
    fig.t0 = time.clock()
    fig.times = []
    fig.N = N
    fig.plot(data.T)
    fig.animate(callback, dt=dt * 0.001)
    fig.show()

    return 1.0 / np.median(fig.times)
Example #55
def test_inplace_normalize():
    ones = np.ones((10, 1))
    rs = RandomState(10)

    for inplace_csr_row_normalize in (inplace_csr_row_normalize_l1,
                                      inplace_csr_row_normalize_l2):
        for dtype in (np.float64, np.float32):
            X = rs.randn(10, 5).astype(dtype)
            X_csr = sp.csr_matrix(X)
            inplace_csr_row_normalize(X_csr)
            assert_equal(X_csr.dtype, dtype)
            if inplace_csr_row_normalize is inplace_csr_row_normalize_l2:
                X_csr.data **= 2
            assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones)
Example #56
def test_qtl_normal_scan():
    random = RandomState(2)

    N = 200
    G = random.randn(N, N + 100)
    G = stdnorm(G, 0)
    G /= sqrt(G.shape[1])

    p = 2
    X = random.randn(N, p)
    X = stdnorm(X, 0)
    X /= sqrt(X.shape[1])

    u1 = random.randn(N + 100) / sqrt(N + 100)
    u2 = random.randn(p) / sqrt(p)

    y = dot(G, u1) + dot(X, u2)

    qtl = scan(NormalPhenotype(y), X, G=G, progress=False, fast=False)
    assert_allclose(
        qtl.pvalues(), [
            4.742418e-001, 5.094706e-167
        ],
        rtol=1e-5)
Example #57
def test_qtl_poisson_scan():
    random = RandomState(9)

    N = 200
    G = random.randn(N, N + 100)
    G = stdnorm(G, 0)
    G /= sqrt(G.shape[1])

    p = 2
    X = random.randn(N, p)
    X = stdnorm(X, 0)
    X /= sqrt(X.shape[1])

    noccurrences = poisson(
        -0.1, G, causal_variants=X, causal_variance=0.1, random_state=random)

    qtl = scan(PoissonPhenotype(noccurrences), X, G=G, progress=False,
               fast=False)

    assert_allclose(
        qtl.pvalues(), [
            0.8163571597, 0.0849437877
        ],
        rtol=1e-2)
Example #58
def plot_dfs_vals(dfs, values_fn=lambda df: df.test):
    '''
    :param dfs: dataframes to plot
    :type dfs: Pandas DataFrames
    :param values_fn: function to extract values (default extracts df.test)
    '''
    if values_fn == 'time':
        # in minutes (nanoseconds to minutes)
        values_fn = lambda df: df.time / (1.0e9 * 60.0)
    rng = RandomState(3483948)
    for i_df, this_df in enumerate(dfs):
        vals = values_fn(this_df)
        plt.plot(i_df + rng.randn(len(vals)) * 0.05, vals, linestyle='None', marker='o',
                alpha=0.5)
Example #59
def test_radius_serial_vs_parallel(seed=1234):
    rng = RandomState(seed)
    X = rng.randn(100, 10)
    dists = csr_matrix(squareform(pdist(X)))
    sample = range(100)
    d = 3
    rmin = 2
    rmax = 10.0
    ntry = 10
    run_parallel = True
    results_parallel = run_estimate_radius(X, dists, sample, d, rmin, rmax, ntry, run_parallel)
    print(results_parallel)
    results_serial = run_estimate_radius(X, dists, sample, d, rmin, rmax, ntry, False)
    print(results_serial)
    assert_array_almost_equal(results_parallel, results_serial)
Example #60
def gen_data(m, n, seed=0):
    prng = RandomState(seed)
    A = prng.randn(m, n)

    ## check that A is full rank
    assert np.linalg.matrix_rank(A) == m
    ## src: http://stackoverflow.com/a/3356123
    u, s, v = np.linalg.svd(A)
    assert np.sum(s > 1e-10) == m

    ## subtracting the mean reduces rank by 1 ...
    # mu=np.mean(A,axis=0)
    # u,s,v=np.linalg.svd(A-mu)
    # np.sum(s > 1e-10) == 99
    #
    # from sklearn.decomposition import PCA
    # sum(PCA().fit(A).explained_variance_ > 1e-10) == 99
    #
    # np.linalg.matrix_rank(A-mu) == 99

    x0 = np.abs(prng.randn(n, 1))  # make positive (i.e. feasible)
    b = np.dot(A, x0)
    c = prng.rand(n, 1)
    return A, b, c, x0
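
A usage sketch for gen_data: by construction x0 is strictly positive and satisfies the equality constraints, so it is a feasible point of the implied linear program:

A, b, c, x0 = gen_data(m=30, n=100)
assert np.allclose(np.dot(A, x0), b)  # x0 reproduces b exactly
assert (x0 > 0).all()                 # strictly positive, hence feasible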