Example #1
import numpy as np
from scipy.stats import random_correlation


def get_means_and_cov(num_vars, iid='clustered', cov_param=None):
    means = np.zeros(num_vars)
    inv_sum = num_vars
    if iid == 'clustered':
        # Draw eigenvalues that sum to num_vars, as random_correlation.rvs requires.
        eigs = []
        while len(eigs) < num_vars - 1:
            if inv_sum <= 1e-2:
                eig = 0
            else:
                eig = np.random.uniform(0, inv_sum)
            eigs.append(eig)
            inv_sum -= eig

        eigs.append(num_vars - np.sum(eigs))
        covs = random_correlation.rvs(eigs)
    elif iid == 'spike':
        covs = random_correlation.rvs(
            np.ones(num_vars))  # basically identity with some noise
        covs = covs + 0.5 * np.ones(covs.shape)
    elif iid == 'decay':
        # Polynomial eigenvalue decay, rescaled to sum to num_vars.
        eigs = np.array([1 / ((i + 1)**cov_param) for i in range(num_vars)])
        eigs = eigs * num_vars / eigs.sum()
        covs = random_correlation.rvs(eigs)
    elif iid == 'mult_decay':
        # Geometric eigenvalue decay with ratio cov_param.
        eigs = np.array([cov_param**i for i in range(num_vars)])
        eigs = eigs * num_vars / eigs.sum()
        covs = random_correlation.rvs(eigs)
    elif iid == 'lin_decay':
        # Geometric eigenvalue decay with ratio 1 / cov_param.
        eigs = np.array([1 / (cov_param**i) for i in range(num_vars)])
        eigs = eigs * num_vars / eigs.sum()
        covs = random_correlation.rvs(eigs)
    else:
        raise ValueError('unknown iid option: {}'.format(iid))
    return means, covs
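
A minimal usage sketch, assuming the imports above; note that random_correlation.rvs only accepts eigenvalues summing to the matrix dimension, which is why every branch normalises eigs first:

# Hypothetical parameters: 5 variables with a polynomial 'decay' spectrum
means, covs = get_means_and_cov(5, iid='decay', cov_param=1.5)
samples = np.random.multivariate_normal(means, covs, size=100)
print(samples.shape)                    # (100, 5)
print(np.allclose(np.diag(covs), 1.0))  # unit diagonal: a correlation matrix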
Example #2
    def test_reproducibility(self):
        np.random.seed(514)
        eigs = (.5, .8, 1.2, 1.5)
        x = random_correlation.rvs(eigs)
        x2 = random_correlation.rvs(eigs, random_state=514)
        expected = np.array([[1., -0.20387311, 0.18366501, -0.04953711],
                             [-0.20387311, 1., -0.24351129, 0.06703474],
                             [0.18366501, -0.24351129, 1., 0.38530195],
                             [-0.04953711, 0.06703474, 0.38530195, 1.]])
        assert_array_almost_equal(x, expected)
        assert_array_almost_equal(x2, expected)
Example #3
    def test_definition(self):
        # Test the definition of a correlation matrix in several dimensions:
        #
        # 1. Det is product of eigenvalues (and positive by construction
        #    in examples)
        # 2. 1's on diagonal
        # 3. Matrix is symmetric

        def norm(i, e):
            return i * e / sum(e)

        np.random.seed(123)

        eigs = [norm(i, np.random.uniform(size=i)) for i in range(2, 6)]
        eigs.append([4, 0, 0, 0])

        ones = [[1.] * len(e) for e in eigs]
        xs = [random_correlation.rvs(e) for e in eigs]

        # Test that determinants are products of eigenvalues
        #   These are positive by construction
        # Could also test that the eigenvalues themselves are correct,
        #   but this seems sufficient.
        dets = [np.fabs(np.linalg.det(x)) for x in xs]
        dets_known = [np.prod(e) for e in eigs]
        assert_allclose(dets, dets_known, rtol=1e-13, atol=1e-13)

        # Test for 1's on the diagonal
        diags = [np.diag(x) for x in xs]
        for a, b in zip(diags, ones):
            assert_allclose(a, b, rtol=1e-13)

        # Correlation matrices are symmetric
        for x in xs:
            assert_allclose(x, x.T, rtol=1e-13)
Example #4
def random_cov(ranges, O_std_min=1e-2, O_std_max=1, n_modes=1, mpi_warn=True):
    """
    Returns a random covariance matrix, with standard deviations sampled log-uniformly
    from the length of the parameter ranges times ``O_std_min`` and ``O_std_max``, and
    uniformly sampled correlation coefficients between ``rho_min`` and ``rho_max``.

    The output of this function can be used directly as the value of the option ``cov`` of
    the :class:`likelihoods.gaussian`.

    If ``n_modes>1``, returns a list of such matrices.
    """
    if not is_main_process() and mpi_warn:
        print("WARNING! "
              "Using with MPI: different process will produce different random results.")
    dim = len(ranges)
    scales = np.array([r[1] - r[0] for r in ranges])
    cov = []
    for i in range(n_modes):
        stds = scales * 10 ** (uniform.rvs(size=dim, loc=np.log10(O_std_min),
                                           scale=np.log10(O_std_max / O_std_min)))
        # The eigenvalues fed to random_correlation.rvs must sum to dim
        corr = (random_correlation.rvs(dim * stds / sum(stds))
                if dim > 1 else np.eye(1))
        this_cov = np.diag(stds).dot(corr).dot(np.diag(stds))
        # Symmetrize (numerical noise is usually introduced in the last step)
        cov += [(this_cov + this_cov.T) / 2]
    if n_modes == 1:
        cov = cov[0]
    return cov
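
A minimal usage sketch, assuming the function's module dependencies (numpy, scipy.stats.uniform, is_main_process) are in scope and the snippet runs in a single process; the parameter ranges below are made up for illustration:

ranges = [(0., 1.), (-2., 2.), (10., 20.)]     # hypothetical 3-parameter ranges
cov = random_cov(ranges, O_std_min=1e-2, O_std_max=1)
print(cov.shape)                # (3, 3)
print(np.allclose(cov, cov.T))  # symmetric by construction
covs = random_cov(ranges, n_modes=2)  # a list of two such matrices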
Example #5
def get_normal(n_dims=10, seed=111):
    np.random.seed(seed)
    # Random eigenvalues normalised to sum to n_dims, as random_correlation.rvs requires
    eig = np.random.rand(n_dims)
    eig *= n_dims / eig.sum()
    precision_matrix = 0.1 * random_correlation.rvs(eig)
    mu = np.zeros(n_dims)
    # Gradient of the Gaussian log-density with mean mu and this precision matrix
    fn_grad = lambda theta: -(theta - mu) @ precision_matrix
    return fn_grad, mu, precision_matrix
Example #6
def gen_random_cov(n_dim):
    try:
        # Random eigenvalues normalised to sum to n_dim, then scaled into a
        # covariance matrix
        eigs = np.random.rand(n_dim)
        eigs = eigs / np.sum(eigs) * n_dim
        cov = random_correlation.rvs(eigs)
        cov = np.multiply(cov, np.sqrt(np.outer(eigs, eigs)))
        return cov
    except Exception:
        # Fallback: a generic random positive semi-definite matrix
        cov = np.random.randn(n_dim, n_dim)
        return np.dot(cov, cov.T) / n_dim
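
The scaling step np.multiply(cov, np.sqrt(np.outer(eigs, eigs))) turns the correlation matrix C into a covariance matrix D C D with D = diag(sqrt(eigs)), so the correlations can be recovered by undoing the scaling. A quick check, assuming the function above is in scope:

cov = gen_random_cov(4)                  # hypothetical dimension
stds = np.sqrt(np.diag(cov))
corr = cov / np.outer(stds, stds)        # undo the D C D scaling
print(np.allclose(np.diag(corr), 1.0))   # True: unit diagonal restored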
Example #7
    def __init__(self, seed=None, usePort=True, expYield=None):
        """
        :param seed: optional
            if omitted, the seed is randomly generated internally

        init constructs a DUT with a random (equal distributed) number of 10 to 50 measurements "n"
        A random (equal distributed) number of 4,8,12,16,20 or 24  so called "ports"  "p" is generated. Each measurement
        is randomly associated to two not necessarily different ports.
        These ports contribute to the measurement error with a random drift.

        All measurements might be correlated through a nxn covariance matrix, generated through its n randomly
        generated eigenvalues.
        """
        if seed is not None:
            np.random.seed(seed)
        self.usePort = usePort
        self.exp_yield = expYield if expYield is not None else np.random.randint(
            3, 4.4)
        self.numMeas = np.random.randint(10, 50)
        self.numPorts = np.random.randint(1, 6) * 4
        self.lastCal = 0
        self.DutMeasTime = 0
        self.dist_max = 0
        self.ports = [port(count) for count in range(self.numPorts)]
        self.meas = [
            meas(count, np.random.randint(1, self.numPorts),
                 np.random.randint(1, self.numPorts),
                 np.random.randint(50, 5000000) * 1e-06)
            for count in range(self.numMeas)
        ]
        for idx in range(self.numMeas):
            p_a = self.meas[idx].port_a
            p_b = self.meas[idx].port_b
            meas_time = self.meas[idx].meas_time
            self.ports[p_a].set_longest_meas_time(meas_time)
            self.ports[p_b].set_longest_meas_time(meas_time)
            self.DutMeasTime += meas_time

        self.portcountused = 0
        for idx in range(self.numPorts):
            if self.ports[idx].longest_measurement_time > 0:
                self.portcountused += 1

        self.meas_eigenvalue = np.random.random(self.numMeas)
        self.meas_eigenvalue *= self.numMeas / np.sum(self.meas_eigenvalue)
        self.meas_cov = rndcorr.rvs(self.meas_eigenvalue)
        self.meas_noise = np.zeros((self.numMeas, self.numMeas))
        self.port_noise = np.zeros(self.numPorts)
        self.measurement_time = 0
        self.dT_newDut = 0.1
        self.dut_result = True
        self.meas_result = True
        self.errordutcount = 0
        self.errormeascount = 0
Example #8
def test_sphericity_from_inertia_tensors():
    """ Use `scipy.stats.random_correlation` to generate matrices with known
    eigenvalues. Call the `sphericity_from_inertia_tensors` function to operate
    on these matrices, and verify that the returned sphericity agrees with
    an independent calculation of the sphericity
    of the input eigenvalues used to define the matrices.
    """
    spherical_evals = (1., 1., 1.)
    non_spherical_evals = (0.1, 0.9, 2.)

    tensors = []
    tensors.append(random_correlation.rvs(spherical_evals))
    tensors.append(random_correlation.rvs(non_spherical_evals))
    tensors.append(random_correlation.rvs(non_spherical_evals))
    tensors.append(random_correlation.rvs(spherical_evals))
    matrices = np.array(tensors)

    sphericity = sphericity_from_inertia_tensors(matrices)

    correct_non_sphericity = non_spherical_evals[0]/non_spherical_evals[2]
    assert np.allclose(sphericity, (1, correct_non_sphericity, correct_non_sphericity, 1))
Example #9
def get_means_and_cov(num_vars, fix_eigs=False):
    means = np.zeros(num_vars)
    inv_sum = num_vars
    if fix_eigs == 'iid':
        eigs = [1] * num_vars
    elif fix_eigs is True:
        if num_vars == 5:
            eigs = [2, 2, 1, 0, 0]
        elif num_vars == 10:
            eigs = [4, 3, 2, 1, 0, 0, 0, 0, 0, 0]
        else:
            raise ValueError('fixed eigenvalues are only defined for 5 or 10 variables')
    else:
        # Random eigenvalues that sum to num_vars
        eigs = []
        while len(eigs) < num_vars - 1:
            eig = np.random.uniform(0, inv_sum)
            eigs.append(eig)
            inv_sum -= eig
        eigs.append(inv_sum)
    covs = random_correlation.rvs(eigs)
    return means, covs
Example #10
def gen_semi_random_cov(model, eps=0):
    a = np.insert(np.cumsum([model.states[u] * model.states[v] for u, v in model.graph.edgelist]), 0, 0)
    marginals, A = model.infer()
    # Random eigenvalues normalised to sum to the dimension
    eigs = np.random.rand(model.weights.shape[0])
    eigs = eigs / np.sum(eigs) * eigs.shape[0]
    cov = random_correlation.rvs(eigs)
    cov -= -np.outer(marginals[:model.weights.shape[0]], marginals[:model.weights.shape[0]])
    rhs = np.outer(marginals[:model.weights.shape[0]], marginals[:model.weights.shape[0]])
    diag = np.diag(marginals[:model.weights.shape[0]] - marginals[:model.weights.shape[0]]**2)
    for x in range(a.shape[0] - 1):
        cov[a[x]:a[x + 1], a[x]:a[x + 1]] = - rhs[a[x]:a[x + 1], a[x]:a[x + 1]]
    cov -= np.diag(np.diag(cov))
    cov += diag + np.diag(np.full(model.weights.shape[0], eps))

    return cov
Example #11
def test_triaxility_from_inertia_tensors():
    """ Use `scipy.stats.random_correlation` to generate matrices with known
    eigenvalues. Call the `triaxility_from_inertia_tensors` function to operate
    on these matrices, and verify that the returned triaxility agrees with
    an independent calculation of the triaxility
    of the input eigenvalues used to define the matrices.
    """
    triaxial_evals = (2., 0.5, 0.5)
    non_triaxial_evals = (2., 0.9, 0.1)

    tensors = []
    tensors.append(random_correlation.rvs(triaxial_evals))
    tensors.append(random_correlation.rvs(non_triaxial_evals))
    tensors.append(random_correlation.rvs(non_triaxial_evals))
    tensors.append(random_correlation.rvs(triaxial_evals))
    matrices = np.array(tensors)

    triaxility = triaxility_from_inertia_tensors(matrices)

    correct_non_triaxility = (
        (non_triaxial_evals[0]**2-non_triaxial_evals[1]**2) /
        (non_triaxial_evals[0]**2-non_triaxial_evals[2]**2)
        )
    assert np.allclose(triaxility, (1, correct_non_triaxility, correct_non_triaxility, 1))
Example #12
def add_random_correlation(data):
    """
    Step 5
    """
    dims = data.shape[-1]
    # Random eigenvalues normalised to sum to the dimension
    evs = np.random.uniform(0.01, 1, size=dims)
    evs = evs / np.sum(evs) * dims
    random_corr_matrix = random_correlation.rvs(evs)
    cholesky_transform = np.linalg.cholesky(random_corr_matrix)
    for i in range(data.shape[0]):
        # Colour the samples: rows of data[i] become MVN(0, random_corr_matrix)
        data[i] = cholesky_transform.dot(data[i].T).T
    return data
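
A usage sketch under the assumption that each data[i] holds rows of (approximately) uncorrelated unit-variance samples, which the Cholesky factor then colours with the random correlation structure:

# Hypothetical batch: 10 arrays of 500 samples in 4 dimensions
data = np.random.randn(10, 500, 4)
correlated = add_random_correlation(data)
print(np.round(np.corrcoef(correlated[0], rowvar=False), 2))  # no longer ~identity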
Example #13
def generate_synthetic_data(data_dim,
                            data_segments,
                            n_obs_seg,
                            n_layer,
                            simulationMethod='TCL',
                            seed=1,
                            one_hot_labels=False,
                            varyMean=False):
    np.random.seed(seed)
    if simulationMethod.lower() == 'tcl':
        dat_all = gen_TCL_data_ortho(Ncomp=data_dim,
                                     Nsegment=data_segments,
                                     Nlayer=n_layer,
                                     NsegmentObs=n_obs_seg,
                                     source='Gaussian',
                                     NonLin='leaky',
                                     negSlope=.2,
                                     seed=seed,
                                     varyMean=varyMean)
    elif simulationMethod.lower() == 'imca':
        # Normalise eigenvalues to sum to data_dim, as random_correlation requires
        baseEvals = np.random.rand(data_dim)
        baseEvals /= ((1. / data_dim) * baseEvals.sum())
        baseCov = random_correlation.rvs(baseEvals)

        dat_all = gen_IMCA_data(Ncomp=data_dim,
                                Nsegment=data_segments,
                                Nlayer=n_layer,
                                NsegmentObs=n_obs_seg,
                                NonLin='leaky',
                                negSlope=.2,
                                BaseCovariance=baseCov,
                                seed=seed)
    else:
        raise ValueError(
            'invalid simulation method: {}'.format(simulationMethod))
    x = dat_all['obs']
    if one_hot_labels:
        y = to_one_hot(dat_all['labels'])[0]
    else:
        y = dat_all['labels']
    s = dat_all['source']

    return x, y, s
Example #14
def test_NeighbourCovariance():
    from scipy.stats import random_correlation as randc
    from numpy.random import multivariate_normal
    import numpy as np
    
    import matplotlib.pyplot as plt
    
    nvert = 200

    np.random.seed(514)
    x = randc.rvs((.2, 1.8))
    print(x)
    coords = multivariate_normal([1., 2.], x, size=nvert)

    print('coords in', coords.shape)

    #plt.scatter(coords[:,0],coords[:,1])
    #plt.show()

    import tensorflow as tf
    from neighbour_covariance_op import NeighbourCovariance

    coordinates = tf.constant(coords, dtype='float32')
    n_idxs = tf.tile(tf.expand_dims(tf.range(nvert), axis=0), [nvert, 1])
    print(n_idxs)
    distsq = tf.cast(n_idxs, dtype='float32') / 100.
    features = tf.constant(np.random.rand(nvert, 2), dtype='float32') + 1e-2
    
    cov, mean_C = NeighbourCovariance(coordinates, distsq, features, n_idxs)
    
    #expected shapes: V x F x 2 * 2, V x F x 2
    print(cov.shape, mean_C.shape)
    
    print('op neighbour covariance and mean')
    print(cov[0],'\n',mean_C[0]) #all the same
    
    print('numpy neighbour covariance')
    print(np.cov(coords, aweights=features[:,0]*np.exp(-distsq[0]), rowvar=False, ddof=0))
    print(np.cov(coords, aweights=features[:,1]*np.exp(-distsq[0]), rowvar=False, ddof=0))
Example #15
def test_principal_axes_from_inertia_tensors1():
    """ Starting from 500 random positive definite symmetric matrices,
    enforce that the axes returned by the _principal_axes_from_inertia_tensors function
    are actually eigenvectors with the correct eigenvalue.
    """
    npts = 500
    correct_evals = (0.3, 0.7, 2.0)
    matrices = np.array([random_correlation.rvs(correct_evals) for __ in range(npts)])
    assert matrices.shape == (npts, 3, 3)

    principal_axes, evals = principal_axes_from_inertia_tensors(matrices)

    assert np.shape(principal_axes) == (npts, 3)
    assert np.shape(evals) == (npts, )
    assert np.allclose(evals, correct_evals[2])

    for i in range(npts):
        m = matrices[i, :, :]
        x = principal_axes[i, 0]
        y = principal_axes[i, 1]
        z = principal_axes[i, 2]
        p = np.array((x, y, z))
        q = np.matmul(m, p)
        assert np.allclose(q, correct_evals[2]*p)
Example #16
    def __init__(self, seed=None, usePort=True, expYield=None):
        """

        :param seed: optional
            if omitted, the seed is randomly generated internally

        init constructs a DUT with a random (equal distributed) number of 10 to 50 measurements "n"
        A random (equal distributed) number of 4,8,12,16,20 or 24  so called "ports"  "p" is generated. Each measurement
        is randomly associated to two not necessarily different ports.
        These ports contribute to the measurement error with a random drift.

        All measurements might be correlated through a nxn covariance matrix, generated through its n randomly
        generated eigenvalues.
        """
        if seed is not None:
            np.random.seed(seed)

        self.usePort = usePort
        self.exp_yield = expYield if expYield is not None else np.random.randint(
            3, 4.4)
        self.numMeas = np.random.randint(10, 50)
        self.numPorts = np.random.randint(1, 6) * 4
        self.lastCal = 0
        self.DutMeasTime = 0
        self.dist_max = 0

        # list of ports with uncorrelated random walk generators
        self.ports = [port(count) for count in range(self.numPorts)]

        self.meas = [
            meas(count, np.random.randint(1, self.numPorts),
                 np.random.randint(1, self.numPorts),
                 np.random.randint(50, 5000000) * 1e-6)
            for count in range(self.numMeas)
        ]
        for idx in range(self.numMeas):
            p_a = self.meas[idx].port_a
            p_b = self.meas[idx].port_b
            meas_time = self.meas[idx].meas_time
            self.ports[p_a].set_longest_meas_time(meas_time)
            self.ports[p_b].set_longest_meas_time(meas_time)
            self.DutMeasTime += meas_time

        # count used ports
        self.portcountused = 0
        for idx in range(self.numPorts):
            if self.ports[idx].longest_measurement_time > 0:
                self.portcountused += 1

        # generate random eigenvalues for the covariance matrix ...
        self.meas_eigenvalue = np.random.random(self.numMeas)
        # ... and normalise them to sum to numMeas
        self.meas_eigenvalue *= self.numMeas / np.sum(self.meas_eigenvalue)
        # generate a covariance matrix from the random eigenvalues
        self.meas_cov = rndcorr.rvs(self.meas_eigenvalue)

        self.meas_noise = np.zeros((self.numMeas, self.numMeas))
        self.port_noise = np.zeros(self.numPorts)

        # instantiate p biased ports errors
        #self.ports = [RandomWalk(count) for count in range(self.numPorts)]

        # measurement time
        self.measurement_time = 0
        self.dT_newDut = 0.1
        self.dut_result = True
        self.meas_result = True

        self.errordutcount = 0
        self.errormeascount = 0

        self.defected_meas = np.random.choice(range(self.numMeas),
                                              np.random.randint(
                                                  0, self.numMeas // 8),
                                              replace=False)
        self.neg_corr = [(i, j) for i, j in zip(
            self.defected_meas,
            np.random.choice(
                list(set(range(self.numMeas)) -
                     set(self.defected_meas)), len(self.defected_meas)))]
        self.pos_corr = [(i, j) for i, j in zip(
            self.defected_meas,
            np.random.choice(
                list(
                    set(range(self.numMeas)) - set(self.defected_meas) -
                    set([k[1]
                         for k in self.neg_corr])), len(self.defected_meas)))]
Example #17
    def simulate(self):
        self.simulated = True
        # Constants
        ascale = np.arange(0.1, 0.51, 0.01)
        dt = 1 / self.srate
        nsamples = len(self.time)
        min_state = np.argmin(
            np.abs(self.time - 0.15)) + 1  # Minimum duration in frames

        # - structural links (binary mask)
        I = np.eye(self.n)
        if self.SC is None:
            SClinks = 0.8  # Markov et al. 2012
            UT = np.triu(
                np.random.choice([0, 1],
                                 self.n**2,
                                 replace=True,
                                 p=[1 - SClinks,
                                    SClinks]).reshape(self.n, self.n))
            MK = UT + UT.T - np.diag(np.diag(UT + UT.T))
            self.SC = MK + I
        else:
            MK = np.copy(self.SC)
            MK[np.diag_indices(self.n)] = 0

        # - directed interactions (binary mask)
        FC = np.zeros(self.SC.shape)  # HERE USE SC.shape instead of MK.shape
        MKconnections = np.array(np.where(MK))
        num_connections = len(MKconnections[0])
        ids = np.random.choice(num_connections,
                               int(
                                   np.fix(
                                       (1 - self.sparsity) * num_connections)),
                               replace=False)
        FC[np.array(MKconnections)[0, ids],
           np.array(MKconnections)[1, ids]] = 1
        self.FC = FC + I

        # - AR process (univariate)
        self.AR = np.zeros((self.n, self.n, self.popt, nsamples))

        for i in range(self.n):
            c1 = np.random.choice(ascale, 1)
            c2 = (np.max(ascale) - c1) * .95
            self.AR[i, i, 0:2, :] = np.tile(
                np.concatenate((c1, c2)).reshape(1, -1).T,
                (1, 1, nsamples))  # Low Pass
            #self.AR[i,i,0:2,:] = np.tile(np.concatenate((c1,c2)).reshape(1,-1,order='F').T,(1,1,nsamples))  # Low Pass

        # - AR process (univariate)
        cON = np.where(MK * self.FC)
        m = nsamples // min_state
        bf = np.arange(m * min_state).reshape(min_state, m, order='F')
        start_at = np.sort(
            np.random.choice(np.arange(2, m), self.nstates, replace=False))
        state_ons = bf[0, start_at]
        state_end = np.concatenate([bf[0, start_at[1:]], [nsamples]])
        state_dur = state_end - state_ons

        # Determine states
        self.regimes = dict()
        # Starting (no scaling)
        scalef = 1
        for k in range(self.nstates):
            self.regimes[k] = np.arange(state_ons[k],
                                        state_ons[k] + state_dur[k])
            summary = np.ones((len(cON[0]), 9)) * np.nan
            while True:
                # generate off-diag AR and check stability
                tmpAR = self.AR[:, :, :, self.regimes[k][0]]
                for ij in range(len(cON[0])):
                    i, j = cON[0][ij], cON[1][ij]
                    ij_p = np.random.choice(self.delay, 1)[0]
                    ampl1 = np.random.choice(ascale * .5, 1)
                    ampl2 = (np.max(ascale * 0.5) - ampl1) * 0.95
                    osc = np.sign(np.random.randn(2)) * np.concatenate(
                        (ampl1, ampl2)) * scalef
                    summary[ij, :] = np.concatenate(([k, i, j], ampl1, [
                        self.time[self.regimes[k][0]],
                        self.time[self.regimes[k][-1]]
                    ], osc, [ij_p]))
                    tmpAR[i, j, ij_p:ij_p + 2] = osc

                # Stability check
                blockA = np.zeros((self.n * self.popt, self.n * self.popt))
                blockA[:self.n, :] = tmpAR.reshape(tmpAR.shape[0],
                                                   -1,
                                                   order='F')
                blockA[self.n:, :(self.popt - 1) * self.n] = np.eye(
                    (self.popt - 1) * self.n)
                blockA[self.n:, (self.popt - 1) * self.n:] = np.zeros(
                    ((self.popt - 1) * self.n, self.n))

                w, v = np.linalg.eig(blockA)
                if any(abs(w) > .95):
                    scalef = scalef * .95
                else:
                    break
            if k == 0:
                self.summary = np.copy(summary)
            else:
                self.summary = np.concatenate((self.summary, summary), 0)

            # Add stable matrices to the dynamical system
            self.AR[:, :, :, self.regimes[k]] = np.tile(
                tmpAR.reshape(self.n, self.n, self.popt, 1),
                [1, 1, 1, len(self.regimes[k])])

        # - Data (add nuisance segment at the beginning)
        nuisance = np.min([state_ons[0] * 2, int(.5 / dt)])
        self.X = np.zeros((self.ntrials, self.n, nsamples + nuisance))
        ARplus = np.concatenate((self.AR[:, :, :, :nuisance], self.AR), 3)
        # simulate between-trials correlation (correlated generative noise);
        # retry until random_correlation accepts the eigenvalue draw
        while True:
            try:
                CTeigvals = np.random.rand(self.ntrials)
                CTeigvals = CTeigvals / np.sum(CTeigvals) * self.ntrials
                CT = abs(random_correlation.rvs(CTeigvals)) * 3
                break
            except Exception:
                pass

        self.CT = CT.clip(max=1)
        dgI = np.random.choice(
            np.where(np.eye(self.ntrials).reshape(-1) == 0)[0],
            int((self.ntrials**2 - self.ntrials) * .1),
            replace=False)
        NegCT = np.ones(self.ntrials * self.ntrials)
        NegCT[dgI] = -1
        self.CT *= NegCT.reshape(self.ntrials, self.ntrials)

        # - Generate time-series
        for k_p in range(self.AR.shape[2]):  # initialise the first p_opt samples
            self.X[:, :, k_p] = self.CT @ np.random.randn(self.ntrials, self.n)

        self.E = np.copy(self.X)

        for k in range(self.AR.shape[2] + 1, nsamples + nuisance):
            innovation = self.CT @ np.random.randn(
                self.ntrials, self.n)  # across trials correlation
            self.E[:, :, k] = innovation
            self.X[:, :, k] += innovation
            for lag in range(self.AR.shape[2]):
                self.X[:, :, k] += (ARplus[:, :, lag, k] @ self.X[:, :, k - lag].T).T

        self.X = self.X[:, :, nuisance:]  # remove nuisance data
        self.Y = np.copy(self.X)  # observed signal
        self.E = self.E[:, :, nuisance:]  # innovation
        self.AR = self.AR[:, :, :, :nsamples]  # ensure size, AR coeffs

        tmp = self.E.transpose(1, 2, 0).reshape(self.n, -1, order='F')
        self.R = tmp @ tmp.T

        # - SNR
        if self.snr_db is not None:
            self.addnoise('w')
        else:
            self.noise = 0

        self.Y_pre_lm = np.copy(self.Y)
        # - Linear mixing
        if self.lmix > 0:
            if self.DM is None:
                x = np.random.choice(np.arange(1, 151), self.n,
                                     replace=False)  # 2d lattice 15x15cm
                y = np.random.choice(np.arange(1, 151), self.n, replace=False)
                mixf = norm.pdf(np.arange(0, 151), 0, self.lmix)
                xy = np.concatenate(([x], [y]), axis=0)
                self.DM = squareform(pdist(xy.T))

            else:
                mixf = norm.pdf(np.arange(0, np.max(self.DM)), 0, self.lmix)

            self.LMx = norm.pdf(self.DM, 0, self.lmix) / np.max(mixf)
            self.Y = np.matmul(np.tile(self.LMx, [self.ntrials, 1, 1]), self.Y)
        else:
            self.DM = np.ones((self.n, self.n)) * np.nan
            self.LMx = np.zeros((self.n, self.n))
        self.scaling = scalef
Example #18
def random_correlation(d, a, seed):
    np.random.seed(seed)
    # Break [0, 1] at d - 1 uniform points; the gaps scaled by d give
    # eigenvalues that sum to d, as random_correlation.rvs requires
    eigs = np.diff(sorted(np.r_[0, np.random.rand(d - 1), 1])) * d
    assert np.allclose(sum(eigs), d) and len(eigs) == d
    return _random_correlation.rvs(eigs), np.array([a] * d)
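
A quick usage sketch (hypothetical arguments; the wrapper returns the correlation matrix together with a constant vector of length d):

corr, avec = random_correlation(d=4, a=0.5, seed=0)
print(corr.shape, avec)                 # (4, 4) [0.5 0.5 0.5 0.5]
print(np.allclose(np.diag(corr), 1.0))  # unit diagonal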
Example #19
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import random_correlation as rndcorr

exp_yield = 2
numMeas = np.random.randint(10, 50)
numPorts = np.random.randint(1, 6) * 4

# Random eigenvalues normalised to sum to numMeas, as rndcorr.rvs requires
meas_eigenvalue = np.random.random(numMeas)
meas_eigenvalue *= numMeas / np.sum(meas_eigenvalue)
meas_cov = rndcorr.rvs(meas_eigenvalue)

port_noise = np.zeros(numPorts)

# One correlated noise draw, scaled down by the expected yield
meas_noise = np.random.multivariate_normal(np.zeros(numMeas), meas_cov) / exp_yield

meas_dist = meas_noise ** 2

plt.plot(meas_dist)
plt.show()
Example #20
def r_corr(size):
    # Random eigenvalues rescaled to sum to `size`, then a random correlation
    # matrix with that spectrum
    r_arr = np.random.uniform(0, 5, size=size)
    r_arr = size * r_arr / sum(r_arr)
    return random_correlation.rvs(r_arr)
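
A one-line check of the invariants, assuming numpy and scipy.stats.random_correlation are imported:

C = r_corr(6)   # hypothetical size
print(C.shape, np.allclose(C, C.T), np.allclose(np.diag(C), 1.0))  # (6, 6) True True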
Example #21
import seaborn as sns
import yfinance as yf

# %%
# get fake data

# specify eigenvalues
num_fake_stocks = 10
# eig_v = np.random.rand(num_fake_stocks)
eig_v = np.random.uniform(0.5, 1, num_fake_stocks)
# force the eigenvalues to sum to the dimension, as random_correlation requires
eig_v[-1] = eig_v.shape[0] - np.sum(eig_v[:-1])

np.random.seed(666)
mean_true = np.random.uniform(0, 0.15, num_fake_stocks)
std_true = np.random.uniform(0.15, 0.5, num_fake_stocks).reshape(-1, 1)
corr_true = random_correlation.rvs(eig_v)
cov_true = np.outer(std_true, std_true) * corr_true

benchmark_weight_fake = np.ones(num_fake_stocks) / num_fake_stocks
maximum_deviation_fake = 1

# %%
# fake data

# MiniVar
MiniVar_weight_fake = optmodels.MiniVar(cov_true, num_fake_stocks,
                                        benchmark_weight_fake,
                                        maximum_deviation_fake)

# RiskParity (trailing arguments assumed to mirror the MiniVar call above)
RiskParity_weight_fake = optmodels.RiskParity(cov_true, num_fake_stocks,
                                              benchmark_weight_fake,
                                              maximum_deviation_fake)
Example #22
def random_covariance(num_trjs):
    # 15 eigenvalues that sum to 15, as random_correlation.rvs requires
    cov = random_correlation.rvs((.5, .8, 1.2, 1.5, 1.0, .5, .8, 1.2, 1.5, 1.0, .5, .8, 1.2, 1.5, 1.0))
    positions = np.random.multivariate_normal(np.zeros(15), cov, num_trjs)
    return positions
Example #23
    """
    evalues, Q = np.linalg.eig(cov)
    evalues[evalues < evalues[n_components]] = 0

    # Since cov is symmetric, by the spectral theorem Q_inv = Q_T
    Q_T = Q.T
    L = np.diag(evalues)
    # Due to spectral theorem all eigenvalues must be real. I encountered
    # complex eigenvalues regardless. Hence, extract real part.
    denoised_cov = pd.DataFrame(Q.dot(L).dot(Q_T)).apply(np.real)
    return denoised_cov


if __name__ == '__main__':
    from scipy.stats import random_correlation

    expected_returns = pd.Series([1, 1, -1, -1], index=['a', 'b', 'c', 'd'])
    covar = random_correlation.rvs((.5, .8, 1.2, 1.5))

    weights = minimize_objective(
        expected_returns.index,
        negative_sharpe,
        True,
        (-1, 1),
        expected_returns,
        covar,
        0.0,
        0.0,
    )
    print(weights)
Example #24
def data_generation(num_control, num_treated, num_covs):

    mean = [0] * num_covs
    eigenvalues = num_covs * np.random.dirichlet(np.ones(num_covs), size=1)[0]
    cov = random_correlation.rvs(eigenvalues)

    X = []
    for i in range(num_treated + num_control):
        z = list(np.random.multivariate_normal(mean, cov))
        x = [1 if elem > 0 else 0 for elem in z]
        X.append(x)

    xc = np.array(X[:num_control])
    xt = np.array(X[num_control:])
    #print(X[15000:])
    #print(xt)

    errors1 = np.random.normal(0, 0.1, size=num_control)  # some noise
    errors2 = np.random.normal(0, 0.1, size=num_treated)  # some noise

    dense_bs_sign = np.random.choice([-1, 1], num_covs)
    #dense_bs = [ np.random.normal(dense_bs_sign[i]* (i+2), 1) for i in range(len(dense_bs_sign)) ]
    dense_bs = [np.random.normal(s * 10, 10) for s in dense_bs_sign]

    yc = np.dot(
        xc, np.array(dense_bs))  #+ errors1     # y for control group
    catt_c = [0] * num_control

    treatment_eff_coef = np.random.normal(0.5, 0.15, size=num_covs)
    treatment_effect = np.dot(xt, treatment_eff_coef)

    second = construct_sec_order(xt[:, :5])
    treatment_eff_sec = np.sum(second, axis=1)

    yt = np.dot(
        xt, np.array(dense_bs)
    ) + treatment_effect + treatment_eff_sec  #+ errors2    # y for treated group
    catt_t = treatment_effect + treatment_eff_sec

    df1 = pd.DataFrame(xc, columns=range(num_covs))
    df1['outcome'] = yc
    df1['treated'] = 0

    df2 = pd.DataFrame(xt, columns=range(num_covs))
    df2['outcome'] = yt
    df2['treated'] = 1

    df = pd.concat([df1, df2])
    df['matched'] = 0

    #df['outcome'] += 2 * np.random.normal(0,5)
    catt = catt_c + list(catt_t)
    df['true_effect'] = catt

    # flag about 5% of entries as missing, chosen uniformly at random below
    miss = pd.DataFrame(np.zeros((num_control + num_treated, num_covs)))
    select = set()
    k = 0
    total_miss_num = (num_control + num_treated) * num_covs * 0.05
    while k < total_miss_num:
        row = random.randint(0, num_control + num_treated - 1)
        col = random.randint(0, num_covs - 1)
        if (row, col) in select:
            continue
        miss.iloc[row, col] = 1
        select.add((row, col))
        k += 1

    return df, df[list(range(num_covs))], df['treated'], df['outcome'], miss
Example #25
    def args_maker():
        x, mean, cov = map(rng, (shapex, shapex, (dim, dim)), dtypes)
        # overwrite cov with a correlation matrix whose eigenvalues sum to dim
        cov = random_correlation.rvs(
            onp.arange(1, 1 + dim) * 2 / (dim + 1))
        return [x, mean, cov]
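
The eigenvalue vector onp.arange(1, 1 + dim) * 2 / (dim + 1) sums to dim, as random_correlation.rvs requires, since 1 + 2 + ... + dim = dim * (dim + 1) / 2. A quick check in plain numpy with a hypothetical dim:

import numpy as np

dim = 5
eigs = np.arange(1, 1 + dim) * 2 / (dim + 1)
print(eigs.sum())   # 5.0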
Example #26
import multiprocessing as mp

import numpy as np
from nearest_correlation import nearcorr
from scipy.stats import random_correlation

from knowledge_gradient import KG_Alg
from knowledge_gradient import KG_multi
from knowledge_gradient import update_mu_S

if __name__ == "__main__":
    processes = mp.cpu_count()
    pool = mp.Pool(processes)

    np.random.seed(126)

    # Rescale the eigenvalues so they sum to 7 (the matrix dimension)
    g = 7 / sum([.5, .8, 1.2, 2.5, 1.7, 2.1, 2.2])
    G = np.round(random_correlation.rvs((g*.5, g*.8, g*1.2, g*2.5, g*1.7, g*2.1, g*2.2)), 3)

    S = nearcorr(G, tol=[], flag=0, max_iterations=1000, n_pos_eig=0, weights=None,
                 verbose=False, except_on_too_many_iterations=True)

    M = S.shape[0]

    lambda_ = np.array([0.2, 1.1, 1.3, 0.12, 0.4, 0.3, 0.12])

    mu = np.array([0.2, 0.21, 0.92, 0.11, 0.7, 0.2, -0.1])

    print(KG_Alg(mu, S, lambda_))

    print(KG_multi(mu, S, lambda_, pool))

    y = 0.22