Example #1
import cupy as cp
from scipy.linalg import sqrtm


def calculate_fid(act1, act2, cuda_ind=0):
    with cp.cuda.Device(cuda_ind):
        act1, act2 = cp.array(act1), cp.array(act2)
        # calculate mean and covariance statistics
        mu1, sigma1 = act1.mean(axis=0), cp.cov(act1, rowvar=False)
        mu2, sigma2 = act2.mean(axis=0), cp.cov(act2, rowvar=False)
        # calculate sum squared difference between means
        ssdiff = cp.sum((mu1 - mu2)**2.0)
        # matrix square root of the covariance product; sqrtm is a SciPy (CPU)
        # routine, so move the product to the host and wrap the result back
        covmean = cp.array(sqrtm(sigma1.dot(sigma2).get()))
        # check and correct imaginary numbers from sqrt
        if cp.iscomplexobj(covmean):
            covmean = covmean.real
        # calculate score
        fid = ssdiff + cp.trace(sigma1 + sigma2 - 2.0 * covmean)
        return fid
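A minimal usage sketch, assuming the two activation matrices (e.g. Inception features) are already computed as NumPy arrays of shape [num_samples, num_features]:

import numpy as np

act_real = np.random.rand(64, 2048)
act_fake = np.random.rand(64, 2048)
fid = calculate_fid(act_real, act_fake, cuda_ind=0)
print(float(fid))  # FID as a Python float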
Example #2
import cupy as cp


def mvdr(x, sv):
    """
    Minimum variance distortionless response (MVDR) beamformer weights

    Parameters
    ----------
    x : ndarray
        Received signal, assume 2D array with size [num_sensors, num_samples]

    sv: ndarray
        Steering vector, assume 1D array with size [num_sensors, 1]

    Note: Unlike MATLAB, where the input matrix x is MxN with N the number of
    array elements, we assume row-major data: each row holds the complex-valued
    samples from one sensor (i.e. NxM)
    """
    if x.shape[0] > x.shape[1]:
        raise ValueError('Matrix has more sensors than samples. Consider '
                         'transposing and remember cuSignal is row-major, '
                         'unlike MATLAB')

    if x.shape[0] != sv.shape[0]:
        raise ValueError('Steering Vector and input data do not align')

    R = cp.cov(x)
    R_inv = cp.linalg.inv(R)
    svh = cp.transpose(cp.conj(sv))

    wB = cp.matmul(R_inv, sv)
    # wA is a 1x1 scalar
    wA = cp.matmul(svh, wB)
    w = wB / wA

    return w
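A minimal usage sketch under assumed values: 8 sensors, 1024 complex samples, and a broadside steering vector of ones:

import cupy as cp

num_sensors, num_samples = 8, 1024
x = (cp.random.randn(num_sensors, num_samples)
     + 1j * cp.random.randn(num_sensors, num_samples))
sv = cp.ones((num_sensors, 1), dtype=cp.complex128)
w = mvdr(x, sv)                  # [num_sensors, 1] weight vector
y = cp.matmul(cp.conj(w).T, x)   # beamformed output, [1, num_samples]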
Example #3
    # _get_month_start_pos, _get_log_return_matrix, compute_cov and MAX_YEARS
    # are imported from the module under test; self.df is prepared in setUp
    def test_distance(self):
        total_samples = 2
        window = 6
        log_return = self.df
        first_sample = log_return['sample_id'].min().item()
        all_dates = log_return[first_sample == log_return['sample_id']]['date']
        months_start = _get_month_start_pos(all_dates)
        log_return_ma = _get_log_return_matrix(total_samples, log_return)
        _, assets, timelen = log_return_ma.shape
        number_of_threads = 256
        num_months = len(months_start) - window
        number_of_blocks = num_months * total_samples
        means = cupy.zeros((total_samples, num_months, assets))
        cov = cupy.zeros((total_samples, num_months, assets, assets))
        distance = cupy.zeros(
            (total_samples, num_months, (assets - 1) * assets // 2))

        # kernel launch configuration: (grid, block, stream, dynamic shared
        # memory size in bytes)
        compute_cov[(number_of_blocks, ), (number_of_threads, ), 0,
                    256 * MAX_YEARS * 8](means, cov, distance, log_return_ma,
                                         months_start, num_months, assets,
                                         timelen, window)
        print('return shape', log_return_ma.shape)
        num = 0
        for sample in range(2):
            for num in range(num_months):
                truth = log_return_ma[
                    sample, :,
                    months_start[num]:months_start[num + window]].mean(axis=1)
                compute = means[sample][num]
                self.assertTrue(cupy.allclose(compute, truth))

        for sample in range(2):
            for num in range(num_months):
                s = log_return_ma[sample, :,
                                  months_start[num]:months_start[num + window]]
                truth = (cupy.cov(s, bias=True))
                compute = cov[sample][num]
                self.assertTrue(cupy.allclose(compute, truth))

        for sample in range(2):
            for num in range(num_months):
                cov_m = cov[sample][num]
                corr_m = cov_m.copy()
                for i in range(3):
                    for j in range(3):
                        corr_m[i, j] = corr_m[i, j] / \
                            math.sqrt(cov_m[i, i] * cov_m[j, j])
                dis = cupy.sqrt((1.0 - corr_m) / 2.0)
                res = cupy.zeros_like(dis)
                for i in range(3):
                    for j in range(3):
                        res[i, j] = cupy.sqrt(
                            ((dis[i, :] - dis[j, :])**2).sum())
                truth = (squareform(res.get()))
                compute = distance[sample][num]
                self.assertTrue(cupy.allclose(compute, truth))
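For reference, the per-window ground truth built in the loops above condenses to a few array operations; a sketch with a random 3-asset window:

import cupy
from scipy.spatial.distance import squareform

s = cupy.random.rand(3, 50)                    # [assets, window samples]
cov = cupy.cov(s, bias=True)
dia = cupy.diag(cov)
corr = cov / cupy.sqrt(cupy.outer(dia, dia))   # correlation from covariance
dis = cupy.sqrt((1.0 - corr) / 2.0)            # correlation distance
res = cupy.sqrt(((dis[:, None, :] - dis[None, :, :]) ** 2).sum(axis=-1))
condensed = squareform(res.get())              # condensed upper-triangle vector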
Example #4
    def setUp(self):
        self.assets = 10
        self.samples = 5
        self.numbers = 30
        seq = 100
        self.distance = cupy.zeros(
            (self.samples, self.numbers, self.assets * (self.assets - 1) // 2))
        cupy.random.seed(10)
        for i in range(self.samples):
            for j in range(self.numbers):
                cov = cupy.cov(cupy.random.rand(self.assets, seq))
                dia = cupy.diag(cov)
                corr = cov / cupy.sqrt(cupy.outer(dia, dia))
                dist = (1.0 - corr) / 2.0
                self.distance[i, j] = cupy.array(squareform(dist.get()))
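squareform here condenses each symmetric [assets, assets] distance matrix into its assets * (assets - 1) // 2 upper-triangle entries; a quick round-trip sketch:

import numpy as np
from scipy.spatial.distance import squareform

m = np.array([[0.0, 0.3, 0.7],
              [0.3, 0.0, 0.5],
              [0.7, 0.5, 0.0]])
v = squareform(m)                      # array([0.3, 0.7, 0.5])
assert np.allclose(squareform(v), m)   # and back to the square matrix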
Example #5
    def setUp(self):
        self.assets = 10
        self.samples = 5
        self.numbers = 30
        seq = 100
        cupy.random.seed(10)
        self.cov_matrix = cupy.zeros(
            (self.samples, self.numbers, self.assets, self.assets))
        self.order_matrix = cupy.random.randint(
            0, self.assets, (self.samples, self.numbers, self.assets))
        for i in range(self.samples):
            for j in range(self.numbers):
                cov = cupy.cov(cupy.random.rand(self.assets, seq))
                self.cov_matrix[i, j] = cov
                order = cupy.arange(self.assets)
                cupy.random.shuffle(order)
                self.order_matrix[i, j] = order
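The shuffled order vectors pair each covariance matrix with a random asset permutation; applying such an ordering is one fancy-indexing step (a sketch, not part of the original test):

import cupy

cov = cupy.cov(cupy.random.rand(10, 100))
order = cupy.arange(10)
cupy.random.shuffle(order)
reordered = cov[order][:, order]   # rows and columns permuted consistently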
Example #6
import pdb

import cupy as cp
import numpy as np

# pcv_thre is a module-level threshold defined elsewhere in the original source


def calcPCV1(x):
    # coordinates of all points above the threshold, shape (2, num_points)
    x = cp.array(cp.where(x > pcv_thre))
    if 0 in x.shape:
        return np.array([[0, 0]]).T, np.array([[0, 0], [0, 0]])
    #center = np.array([[256,256]]).T
    center = cp.mean(x, axis=1)[:, cp.newaxis]
    xCe = x - center
    Cov = cp.cov(xCe, bias=1)
    if cp.isnan(Cov).any():
        print("nan")
        pdb.set_trace()
    elif cp.isinf(Cov).any():
        print("inf")
        pdb.set_trace()
    # Cov is symmetric, so use eigh (CuPy does not implement a general eig)
    V, D = cp.linalg.eigh(Cov)
    vec = D[:, [cp.argmax(V)]]
    line = cp.concatenate([vec * -256, vec * 256], axis=1) + center
    return cp.asnumpy(center), cp.asnumpy(line)
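A minimal usage sketch with an assumed threshold value; the function returns the centroid of the above-threshold points and the endpoints of their first principal axis:

import cupy as cp

pcv_thre = 0.5                # assumed value for the module-level threshold
img = cp.random.rand(512, 512)
center, line = calcPCV1(img)
print(center.ravel())         # (row, col) centroid of the thresholded points
print(line)                   # 2x2 array of principal-axis segment endpoints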
Example #7
import cupy as cp
import cupyx.scipy.sparse


# cov and array_equal are helpers from the library under test; nrows, ncols,
# sparse and dtype are supplied by the test harness (e.g. pytest parameters)
def test_cov(nrows, ncols, sparse, dtype):
    if sparse:
        x = cupyx.scipy.sparse.random(nrows,
                                      ncols,
                                      density=0.07,
                                      format='csr',
                                      dtype=dtype)
    else:
        x = cp.random.random((nrows, ncols), dtype=dtype)

    cov_result = cov(x, x)

    assert cov_result.shape == (ncols, ncols)

    if sparse:
        x = x.todense()
    local_cov = cp.cov(x, rowvar=False, ddof=0)

    assert array_equal(cov_result, local_cov, 1e-6, with_sign=True)
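The reference call makes the two keyword choices explicit: rowvar=False treats columns as variables and ddof=0 gives the biased (divide-by-N) estimator, equivalent to:

import cupy as cp

x = cp.random.random((100, 8))
ref = cp.cov(x, rowvar=False, ddof=0)
xc = x - x.mean(axis=0)
manual = xc.T @ xc / x.shape[0]   # same biased, feature-wise covariance
assert cp.allclose(ref, manual)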
Example #8
import cupy as cp


def PCA_gpu(data):
    """
    returns: eigenvectors, the data projected onto them, and eigenvalues,
             all sorted by decreasing eigenvalue
    pass in: data as a 2D CuPy array (rows = observations, columns = features)
    """
    # mean center the data
    # data -= data.mean(axis=0)
    # calculate the covariance matrix
    R = cp.cov(data, rowvar=False)
    # calculate eigenvectors & eigenvalues of the covariance matrix
    # use 'eigh' rather than 'eig' since R is symmetric,
    # the performance gain is substantial
    evals, evecs = cp.linalg.eigh(R)
    # sort eigenvalues in decreasing order
    idx = cp.argsort(evals)[::-1]
    evals = evals[idx]
    # sort eigenvectors according to the same index
    evecs = evecs[:, idx]
    # carry out the transformation on the data using eigenvectors
    # and return the re-scaled data, eigenvalues, and eigenvectors

    return evecs, data.dot(evecs), evals
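A minimal usage sketch; note that mean-centering is left to the caller (the centering line is commented out above):

import cupy as cp

data = cp.random.rand(500, 10)
data = data - data.mean(axis=0)        # center first
evecs, projected, evals = PCA_gpu(data)
top2 = projected[:, :2]                # scores on the two leading components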
Example #9
    # numpy, torch and cupy are imported at module level in the original;
    # Linalg.dot is a project-internal dot-product helper
    def cov(m, y=None):

        if isinstance(m, numpy.ndarray):
            return numpy.cov(m, y)
        elif isinstance(m, torch.Tensor):
            if m.ndimension() > 2:
                raise ValueError("m has more than 2 dimensions")

            if y is not None and y.ndimension() > 2:
                raise ValueError('y has more than 2 dimensions')

            X = m
            if X.shape[0] == 0:
                return torch.tensor(
                    [], device=torch.cuda.current_device()).reshape(0, 0)
            if y is not None:
                X = torch.cat((X, y), dim=0)

            ddof = 1

            avg = torch.mean(X, dim=1)

            fact = X.shape[1] - ddof

            if fact <= 0:
                import warnings
                warnings.warn("Degrees of freedom <= 0 for slice",
                              RuntimeWarning,
                              stacklevel=2)
                fact = 0.0

            X -= avg[:, None]  # note: centers m in place when y is None
            X_T = X.t()
            c = Linalg.dot(X, X_T)
            c *= 1. / fact
            return c.squeeze()
        else:
            return cupy.cov(m, y)
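A standalone check of the math in the torch branch against numpy.cov, with torch.matmul standing in for the internal Linalg.dot helper:

import numpy as np
import torch

m = torch.rand(3, 50, dtype=torch.float64)
X = m - m.mean(dim=1, keepdim=True)              # center each row
c = torch.matmul(X, X.t()) / (m.shape[1] - 1)    # ddof = 1, as above
assert np.allclose(c.numpy(), np.cov(m.numpy()))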
Example #10
 B_j = B_agg[:, ref_dist]
 # build the reference matrix: one concatenated block per climate variable,
 # then reshape and transpose so C_j is T_mon x n(var), i.e. T x K in
 # Mahony's paper
 C_j = cp.transpose(
     cp.concatenate((
         locals()['ppt_sp_' + mdl + '_C'][:, ref_dist],
         locals()['ppt_sm_' + mdl + '_C'][:, ref_dist],
         locals()['ppt_fl_' + mdl + '_C'][:, ref_dist],
         locals()['ppt_wt_' + mdl + '_C'][:, ref_dist],
         locals()['tmax_sp_' + mdl + '_C'][:, ref_dist],
         locals()['tmax_sm_' + mdl + '_C'][:, ref_dist],
         locals()['tmax_fl_' + mdl + '_C'][:, ref_dist],
         locals()['tmax_wt_' + mdl + '_C'][:, ref_dist],
         locals()['tmin_sp_' + mdl + '_C'][:, ref_dist],
         locals()['tmin_sm_' + mdl + '_C'][:, ref_dist],
         locals()['tmin_fl_' + mdl + '_C'][:, ref_dist],
         locals()['tmin_wt_' + mdl + '_C'][:, ref_dist],
         locals()['npp_' + mdl + '_C'][:, ref_dist],
     )).reshape(13, len(locals()['ppt_sp_' + mdl + '_C'])))
 C_j_sd = C_j.std(axis=0)
 A_prime = A_agg/C_j_sd[:, None]
 B_j_prime = B_j/C_j_sd
 C_j_prime = C_j/C_j_sd
 # Step 2: principal component analyses on the reference matrix C, and principal components extraction
 C_j_prime_avg = cp.mean(C_j_prime, axis=0)
 C_j_prime_temp = cp.asnumpy(C_j_prime)
 m, n = np.shape(C_j_prime_temp)
 C_adj = []
 C_j_prime_p_avg = cp.tile(C_j_prime_avg, (m, 1))
 C_adj = C_j_prime - C_j_prime_p_avg
 # calculate the covariance matrix
 covC = cp.cov(C_adj.T)
 # solve its eigenvalues and eigenvectors
 covC_np = cp.asnumpy(covC)
 C_eigen_val, C_eigen_vec = np.linalg.eig(covC_np)
 # rank the eigenvalues; no truncation rule is applied here because of the
 # limited variable availability (note the eigen arrays are NumPy here)
 index = np.argsort(-C_eigen_val)
 C_eigen_val = C_eigen_val[index]
 C_eigen_vec = C_eigen_vec[:, index]
 finalData = []
 # C matrix, corrected with PCA
 C_pca_vec = C_eigen_vec.T
 # A and B matrices, corrected with PCA
 A_prime_np = cp.asnumpy(A_prime)
 B_j_prime_np = cp.asnumpy(B_j_prime)
 X = A_prime_np.T.dot(C_pca_vec)
 Y_j = B_j_prime_np.T.dot(C_pca_vec)
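A generic restatement of the standardize-then-project steps above, with placeholder arrays instead of the climate matrices:

import numpy as np

C = np.random.rand(120, 13)               # reference matrix, T x K
C_prime = C / C.std(axis=0)               # scale each variable
C_adj = C_prime - C_prime.mean(axis=0)    # center
eigval, eigvec = np.linalg.eigh(np.cov(C_adj.T))   # eigh: cov is symmetric
order = np.argsort(-eigval)
pcs = eigvec[:, order].T                  # ranked principal axes
Y = C_adj.dot(pcs.T)                      # PC scores of the reference matrix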
Example #11
LIK[i] = float(likij)
logpost[i] = obj

go_on = 0
v = 0
AR = 0
while i < Nsim:
    #print(i)
    if i == 5000:
        # adapt the proposal: use the empirical covariance of the chain so far
        P3 = cp.cov(Thetasim[0:i], rowvar=False)
    while go_on == 0:
        Thetac = cp.random.multivariate_normal(Thetasim[i, :], c * P3)
        try:
            G1, impact, RC, A, B, C, E, DD, R, V_s = REE_gen1(
                cp.asnumpy(Thetac))
        except Exception:
            RC = cp.zeros([2, 1])
        go_on = param_checks(Thetac, RC)
    prioc = NK_priors(cp.asnumpy(Thetac))
    likic, dropoutc = PFLIKE(Thetac, EPS, S, RR, randphi)
    objc = float(prioc) + float(likic)
    #print(objc)
    if not cp.isfinite(objc):
        alpha = -1
        u = 0
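The loop above is a random-walk Metropolis sampler whose proposal covariance is re-estimated from the chain's history at iteration 5000; a stripped-down sketch of that adaptation with hypothetical dimensions (the accept/reject step is elided):

import cupy as cp

dim, Nsim = 4, 10000
Thetasim = cp.zeros((Nsim, dim))
P3 = cp.eye(dim)                     # initial proposal covariance
c = 0.1                              # proposal scale
for i in range(1, Nsim):
    if i == 5000:
        # swap in the empirical covariance of the draws accumulated so far
        P3 = cp.cov(Thetasim[0:i], rowvar=False)
    Thetac = cp.random.multivariate_normal(Thetasim[i - 1, :], c * P3)
    # ... evaluate the posterior at Thetac and accept/reject here ...
    Thetasim[i, :] = Thetac          # placeholder: always "accept"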