Example #1
import numpy as np
import pandas as pd

def reduce_mem_usage(data, verbose=True):
    numerics = ['int8', 'int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = data.memory_usage().sum() / 1024**2
    if verbose:
        print('Memory usage of dataframe: {:.2f} MB'.format(start_mem))

    for col in data.columns:
        col_type = data[col].dtype

        if col_type in numerics:
            c_min = data[col].min()
            c_max = data[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    data[col] = data[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    data[col] = data[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    data[col] = data[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    data[col] = data[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    data[col] = data[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    data[col] = data[col].astype(np.float32)
                else:
                    data[col] = data[col].astype(np.float64)

    end_mem = data.memory_usage().sum() / 1024**2
    if verbose:
        print('Memory usage after optimization: {:.2f} MB'.format(end_mem))
        print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))

    return data
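A minimal usage sketch (the DataFrame below is illustrative; note that float16 trades precision for memory):

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': np.arange(1000, dtype='int64'),
                   'b': np.linspace(0.0, 1.0, 1000)})
df = reduce_mem_usage(df)
print(df.dtypes)  # a -> int16, b -> float16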
Example #2
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.linear_model import (ElasticNetCV, LassoCV,
                                  MultiTaskElasticNetCV, MultiTaskLassoCV)

def test_uniform_targets():
    enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
    m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
    lasso = LassoCV(fit_intercept=True, n_alphas=3)
    m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)

    models_single_task = (enet, lasso)
    models_multi_task = (m_enet, m_lasso)

    rng = np.random.RandomState(0)

    X_train = rng.random_sample(size=(10, 3))
    X_test = rng.random_sample(size=(10, 3))

    y1 = np.empty(10)
    y2 = np.empty((10, 2))

    for model in models_single_task:
        for y_values in (0, 5):
            y1.fill(y_values)
            assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
            assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)

    for model in models_multi_task:
        for y_values in (0, 5):
            y2[:, 0].fill(y_values)
            y2[:, 1].fill(2 * y_values)
            assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
            assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
Example #3
def kldiv(p, q, distp = None, distq = None, scale_factor = 1):
    """
    Computes the Kullback-Leibler divergence between two distributions.

    Parameters
        p : Matrix
            The first probability distribution
        q : Matrix
            The second probability distribution
        distp : fixmat
            If p is None, distp is used to compute a FDM which 
            is then taken as 1st probability distribution.
        distq : fixmat
            If q is None, distq is used to compute a FDM which is 
            then taken as 2nd probability distribution.
        scale_factor : double
            Determines the size of FDM computed from distq or distp.

    """
    assert q is not None or distq is not None, "Either q or distq has to be given"
    assert p is not None or distp is not None, "Either p or distp has to be given"

    try:
        if p is None:
            p = compute_fdm(distp, scale_factor=scale_factor)
        if q is None:
            q = compute_fdm(distq, scale_factor=scale_factor)
    except NoFixations:
        return np.nan

    q += np.finfo(q.dtype).eps
    p += np.finfo(p.dtype).eps
    kl = np.sum(p * (np.log2(p / q)))
    return kl
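A quick sanity check with explicit distributions, so compute_fdm and the fixmat machinery are not needed (note that the in-place `+= eps` mutates the arrays passed in):

import numpy as np

p = np.array([0.2, 0.8])
q = np.array([0.5, 0.5])
print(kldiv(p, q))  # ~0.28 bits; kldiv(p, p) is ~0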
Example #4
    def compute_nmi(self, z1, z2):
        # compute normalized mutual information
        
        n = np.size(z1)
        k1 = np.size(np.unique(z1))
        k2 = np.size(np.unique(z2))
        
        nk1 = np.zeros((k1,1))
        nk2 = np.zeros((k2,1))

        for kk in range(k1):
            nk1[kk] = np.sum(z1==kk)
        for kk in range(k2):
            nk2[kk] = np.sum(z2==kk)
            
        pk1 = nk1/float(np.sum(nk1))
        pk2 = nk2/float(np.sum(nk2))
        
        nk12 = np.zeros((k1,k2))
        for ii in range(k1):
            for jj in range(k2):
                nk12[ii,jj] = np.sum((z1==ii)*(z2==jj))
        pk12 = nk12/float(n)        
        
        Hx = -np.sum(pk1 * np.log(pk1 + np.finfo(float).eps))
        Hy = -np.sum(pk2 * np.log(pk2 + np.finfo(float).eps))
        
        Hxy = -np.sum(pk12 * np.log(pk12 + np.finfo(float).eps))
        
        MI = Hx + Hy - Hxy
        nmi = MI/float(0.5*(Hx+Hy))
        
        return nmi
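A small check, calling the method body as a free function (labels must be coded 0..k-1, as the counting loops assume):

import numpy as np

z1 = np.array([0, 0, 1, 1])
z2 = np.array([1, 1, 0, 0])
# identical partitions up to relabelling -> NMI of 1 (up to the eps terms)
print(compute_nmi(None, z1, z2))  # ~1.0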
Example #5
def _gpinv(p, k, sigma):
    """Inverse Generalized Pareto distribution function"""
    x = np.full_like(p, np.nan)
    if sigma <= 0:
        return x
    ok = (p > 0) & (p < 1)
    if np.all(ok):
        if np.abs(k) < np.finfo(float).eps:
            x = - np.log1p(-p)
        else:
            x = np.expm1(-k * np.log1p(-p)) / k
        x *= sigma
    else:
        if np.abs(k) < np.finfo(float).eps:
            x[ok] = - np.log1p(-p[ok])
        else:
            x[ok] = np.expm1(-k * np.log1p(-p[ok])) / k
        x *= sigma
        x[p == 0] = 0
        if k >= 0:
            x[p == 1] = np.inf
        else:
            x[p == 1] = - sigma / k

    return x
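A spot check against SciPy's generalized Pareto, which uses the same shape/scale convention (a sketch assuming scipy is available):

import numpy as np
from scipy import stats

p = np.linspace(0.1, 0.9, 5)
x = _gpinv(p, k=0.1, sigma=2.0)
print(np.allclose(x, stats.genpareto(c=0.1, scale=2.0).ppf(p)))  # True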
Example #6
def dft_anal(x, w, N):
    """
    Analysis of a signal using the discrete Fourier transform
    x: input signal, w: analysis window, N: FFT size
    returns mX, pX: magnitude and phase spectrum
    """

    if not utilFunctions.isPower2(N):  # raise error if N not a power of two
        raise ValueError("FFT size (N) is not a power of 2")

    if w.size > N:  # raise error if window size bigger than fft size
        raise ValueError("Window size (M) is bigger than FFT size")

    hN = (N // 2) + 1  # size of positive spectrum, it includes sample 0 (integer division for Python 3)
    hM1 = int(math.floor((w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))  # half analysis window size by floor
    fftbuffer = np.zeros(N)  # initialize buffer for FFT
    w = w / sum(w)  # normalize analysis window
    xw = x * w  # window the input sound
    fftbuffer[:hM1] = xw[hM2:]  # zero-phase window in fftbuffer
    fftbuffer[-hM2:] = xw[:hM2]
    X = fft(fftbuffer)  # compute FFT
    absX = abs(X[:hN])  # compute absolute value of positive side
    absX[absX < np.finfo(float).eps] = np.finfo(float).eps  # if zeros add epsilon to handle log
    mX = 20 * np.log10(absX)  # magnitude spectrum of positive frequencies in dB
    tol = 1e-14  # small-value threshold; a module-level constant in the original sms-tools source
    X[:hN].real[np.abs(X[:hN].real) < tol] = 0.0  # for phase calculation set to 0 the small values
    X[:hN].imag[np.abs(X[:hN].imag) < tol] = 0.0  # for phase calculation set to 0 the small values
    pX = np.unwrap(np.angle(X[:hN]))  # unwrapped phase spectrum of positive frequencies
    return mX, pX
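A usage sketch in the style of the sms-tools package this function appears to come from (utilFunctions is that project's helper module; get_window and the test signal below are assumptions):

import numpy as np
from scipy.signal import get_window

N = 512
M = 511
w = get_window('hamming', M)
x = np.cos(2 * np.pi * 110.0 * np.arange(M) / 44100.0)  # 110 Hz tone at 44.1 kHz
mX, pX = dft_anal(x, w, N)
print(mX.shape)  # (257,) = N//2 + 1 magnitude bins in dB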
Example #7
File: hmm.py Project: wangsix/vmo
def recognition(obs, oracle, order=1, smooth=False):
    hmm_tensor = extract_hmm_tensor(oracle, max_lrs=order, smooth=smooth)

    cluster_means = np.array([np.median(oracle.f_array.data[np.array(c), :].T, axis=1)
                              for c in oracle.latent])

    cluster_means += np.finfo('float').eps
    cluster_means = (cluster_means.T / np.sum(cluster_means, axis=1)).T

    a = hmm_tensor[-1]
    a += np.finfo('float').eps
    a += 1.0
    divider = np.sum(a, axis=1)
    a = np.divide(a.T, divider).T
    log_a = np.log(a)
    # hist = np.array([len(c) for c in oracle.latent])/float(oracle.n_states-1)

    v = np.zeros((len(obs), len(oracle.latent)))
    p = np.zeros(v.shape)
    v[0] = np.log(np.dot(cluster_means, obs[0])) + np.log(1.0/len(oracle.latent))
    # v[0] = np.log(np.dot(cluster_means, obs[0])) + np.log(hist)
    # p[0] = np.arange(len(oracle.latent))
    for t in range(1, len(obs)):
        s = v[t-1]+log_a.T
        v[t] = np.max(s, axis=1)+np.log(np.dot(cluster_means, obs[t]))
        p[t-1] = np.argmax(s, axis=1)

    return v, p
Example #8
    def test_sing_val_update(self):

        sigmas = np.array([4., 3., 2., 0])
        m_vec = np.array([3.12, 5.7, -4.8, -2.2])

        M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
                        np.zeros((1,len(m_vec) - 1)))), m_vec[:, np.newaxis]))
        SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
                 check_finite=False)

        it_len = len(sigmas)
        sgm = np.concatenate((sigmas[::-1], (sigmas[0] +
                              it_len*np.sqrt(np.sum(np.power(m_vec,2))),)))
        mvc = np.concatenate((m_vec[::-1], (0,)))

        lasd4 = get_lapack_funcs('lasd4',(sigmas,))

        roots = []
        for i in range(0, it_len):
            res = lasd4(i, sgm, mvc)
            roots.append(res[1])

            assert_(res[3] <= 0,
                    "LAPACK root finding dlasd4 failed to find "
                    "the singular value %i" % i)
        roots = np.array(roots)[::-1]

        assert_(not np.any(np.isnan(roots)), "There are NaN roots")
        assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
                        rtol=100*np.finfo(np.float64).eps)
Example #9
def test_nan(self):
    # Test that nan is 'far' from small, tiny, inf, max and min
    for dt in [np.float32, np.float64]:
        if dt == np.float32:
            maxulp = 1e6
        else:
            maxulp = 1e12
        inf = np.array([np.inf]).astype(dt)
        nan = np.array([np.nan]).astype(dt)
        big = np.array([np.finfo(dt).max])
        tiny = np.array([np.finfo(dt).tiny])
        zero = np.array([np.PZERO]).astype(dt)
        nzero = np.array([np.NZERO]).astype(dt)
        self.assertRaises(AssertionError,
                          lambda: assert_array_max_ulp(nan, inf,
                                                       maxulp=maxulp))
        self.assertRaises(AssertionError,
                          lambda: assert_array_max_ulp(nan, big,
                                                       maxulp=maxulp))
        self.assertRaises(AssertionError,
                          lambda: assert_array_max_ulp(nan, tiny,
                                                       maxulp=maxulp))
        self.assertRaises(AssertionError,
                          lambda: assert_array_max_ulp(nan, zero,
                                                       maxulp=maxulp))
        self.assertRaises(AssertionError,
                          lambda: assert_array_max_ulp(nan, nzero,
                                                       maxulp=maxulp))
Example #10
def check_wavefront(filename_or_hdulist, slice=0, ext=0, test='nearzero', comment=""):
    """ A helper routine to verify certain properties of a wavefront FITS file, 
    as requested by some test routine. """
    if isinstance(filename_or_hdulist, str):
        hdulist = pyfits.open(filename_or_hdulist)
        filename = filename_or_hdulist
    elif isinstance(filename_or_hdulist, pyfits.HDUList):
        hdulist = filename_or_hdulist
        filename = 'input HDUlist'
    imstack = hdulist[ext].data
    im = imstack[slice,:,:]

    try:

        if test=='nearzero':
            assert(  np.all(np.abs(im) < np.finfo(im.dtype).eps*10))
            _log.info("Slice %d of %s %s is all essentially zero" % (slice, filename, comment))
            return True
        elif test == 'is_real':
            #assumes output type = 'all'
            cplx_im = imstack[1,:,:] * np.exp(1j*imstack[2,:,:])
            assert(  np.all( cplx_im.imag < np.finfo(im.dtype).eps*10))
            _log.info("File %s %s is essentially all real " % (filename, comment))
            return True

    except Exception:
        _log.error("Test %s failed for %s " % (test, filename))
        return False
Example #11
def quatAverage(q_in, qsym):
    """
    """
    assert q_in.ndim == 2, 'input must be 2-s hstacked quats'

    # renormalize
    q_in = unitVector(q_in)

    # check to see num of quats is > 1
    if q_in.shape[1] < 3:
        if q_in.shape[1] == 1:
            q_bar = q_in
        else:
            ma, mq = misorientation(q_in[:, 0].reshape(4, 1),
                                    q_in[:, 1].reshape(4, 1), (qsym,))
            q_bar = quatProduct(q_in[:, 0].reshape(4, 1),
                                quatOfExpMap(0.5*ma*unitVector(mq[1:].reshape(3, 1))))
    else:
        # use first quat as initial guess
        phi = 2. * arccos(q_in[0, 0])
        if phi <= finfo(float).eps:
            x0 = zeros(3)
        else:
            n = unitVector(q_in[1:, 0].reshape(3, 1))
            x0 = phi*n.flatten()
        results = leastsq(quatAverage_obj, x0, args=(q_in, qsym))
        phi = sqrt(sum(results[0]*results[0]))
        if phi <= finfo(float).eps:
            q_bar = c_[1., 0., 0., 0.].T
        else:
            n     = results[0] / phi
            q_bar = hstack([cos(0.5*phi), sin(0.5*phi)*n]).reshape(4, 1)
    return q_bar
Example #12
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(GMMHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)

        for state, g in enumerate(self.gmms):
            lgmm_posteriors = np.log(g.eval(obs)[1] + np.finfo(float).eps)
            lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
                                      + np.finfo(float).eps)
            gmm_posteriors = np.exp(lgmm_posteriors)
            tmp_gmm = GMM(g.n_components, cvtype=g.cvtype)
            tmp_gmm.n_features = g.n_features
            tmp_gmm.covars = _distribute_covar_matrix_to_match_cvtype(
                                np.eye(g.n_features), g.cvtype, g.n_components)
            norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)

            if np.any(np.isnan(tmp_gmm.covars)):
                raise ValueError

            stats['norm'][state] += norm
            if 'm' in params:
                stats['means'][state] += tmp_gmm.means * norm[:, np.newaxis]
            if 'c' in params:
                if tmp_gmm.cvtype == 'tied':
                    stats['covars'][state] += tmp_gmm._covars * norm.sum()
                else:
                    cvnorm = np.copy(norm)
                    shape = np.ones(tmp_gmm._covars.ndim)
                    shape[0] = np.shape(tmp_gmm._covars)[0]
                    cvnorm.shape = shape
                    stats['covars'][state] += tmp_gmm._covars * cvnorm
Example #13
    def _hessian_main(self, params):
        params_infl = params[:self.k_inflate]
        params_main = params[self.k_inflate:]

        y = self.endog
        w = self.model_infl.predict(params_infl)
        w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
        score = self.score(params)
        zero_idx = np.nonzero(y == 0)[0]
        nonzero_idx = np.nonzero(y)[0]

        mu = self.model_main.predict(params_main)

        hess_arr = np.zeros((self.k_exog, self.k_exog))

        coeff = (1 + w[zero_idx] * (np.exp(mu[zero_idx]) - 1))

        #d2l/dp2
        for i in range(self.k_exog):
            for j in range(i, -1, -1):
                hess_arr[i, j] = ((
                    self.exog[zero_idx, i] * self.exog[zero_idx, j] *
                    mu[zero_idx] * (w[zero_idx] - 1) * (1 / coeff -
                    w[zero_idx] * mu[zero_idx] * np.exp(mu[zero_idx]) /
                    coeff**2)).sum() - (mu[nonzero_idx] * self.exog[nonzero_idx, i] *
                    self.exog[nonzero_idx, j]).sum())

        return hess_arr
Example #14
            def __init__(self, n, n_folds=3, shuffle=False, random_state=None):
                if abs(n - int(n)) >= np.finfo('f').eps:
                    raise ValueError("n must be an integer")
                self.n = int(n)

                if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
                    raise ValueError("n_folds must be an integer")
                self.n_folds = n_folds = int(n_folds)

                if n_folds <= 1:
                    raise ValueError(
                        "k-fold cross validation requires at least one"
                        " train / test split by setting n_folds=2 or more,"
                        " got n_folds={0}.".format(n_folds))
                if n_folds > self.n:
                    raise ValueError(
                        ("Cannot have number of folds n_folds={0} greater"
                         " than the number of samples: {1}.").format(n_folds, n))

                if not isinstance(shuffle, bool):
                    raise TypeError("shuffle must be True or False;"
                                    " got {0}".format(shuffle))
                self.shuffle = shuffle
                self.random_state = random_state

                self.idxs = np.arange(n)
                if shuffle:
                    rng = check_random_state(self.random_state)
                    rng.shuffle(self.idxs)
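The float32-eps comparison above is a tolerant integrality test; the same idea as a standalone sketch:

import numpy as np

def looks_integral(x):
    # mirrors the check above: fractional part below float32 epsilon
    return abs(x - int(x)) < np.finfo('f').eps

print(looks_integral(5.0), looks_integral(5.5))  # True False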
Example #15
def test_ulp():
    assert_equal(ulp(), np.finfo(np.float64).eps)
    assert_equal(ulp(1.0), np.finfo(np.float64).eps)
    assert_equal(ulp(np.float32(1.0)), np.finfo(np.float32).eps)
    assert_equal(ulp(np.float32(1.999)), np.finfo(np.float32).eps)
    # Integers always return 1
    assert_equal(ulp(1), 1)
    assert_equal(ulp(2**63-1), 1)
    # negative / positive same
    assert_equal(ulp(-1), 1)
    assert_equal(ulp(7.999), ulp(4.0))
    assert_equal(ulp(-7.999), ulp(4.0))
    assert_equal(ulp(np.float64(2**54-2)), 2)
    assert_equal(ulp(np.float64(2**54)), 4)
    # Infs, NaNs return nan
    assert_true(np.isnan(ulp(np.inf)))
    assert_true(np.isnan(ulp(-np.inf)))
    assert_true(np.isnan(ulp(np.nan)))
    # 0 gives subnormal smallest
    subn64 = np.float64(2**(-1022-52))
    subn32 = np.float32(2**(-126-23))
    assert_equal(ulp(0.0), subn64)
    assert_equal(ulp(np.float64(0)), subn64)
    assert_equal(ulp(np.float32(0)), subn32)
    # as do multiples of subnormal smallest
    assert_equal(ulp(subn64 * np.float64(2**52)), subn64)
    assert_equal(ulp(subn64 * np.float64(2**53)), subn64*2)
    assert_equal(ulp(subn32 * np.float32(2**23)), subn32)
    assert_equal(ulp(subn32 * np.float32(2**24)), subn32*2)
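NumPy exposes the same quantity directly as np.spacing (the gap to the next representable float), which the expectations above can be checked against:

import numpy as np

print(np.spacing(1.0) == np.finfo(np.float64).eps)               # True
print(np.spacing(np.float32(1.0)) == np.finfo(np.float32).eps)   # True
print(np.spacing(0.0))  # smallest subnormal, ~4.94e-324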
Example #16
def _cmeans0(data, u_old, c, m):
    """
    Single step in generic fuzzy c-means clustering algorithm. Modified from
    Ross, Fuzzy Logic w/Engineering Applications (2010) p.352-353, equations
    10.28 - 10.35.

    Parameters inherited from cmeans()

    This algorithm is a ripe target for Cython.

    """
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(float).eps)

    um = u_old ** m

    # Calculate cluster centers
    data = data.T
    cntr = um.dot(data) / (np.ones((data.shape[1],
                                    1)).dot(np.atleast_2d(um.sum(axis=1))).T)

    d = _distance(data, cntr)
    d = np.fmax(d, np.finfo(float).eps)

    jm = (um * d ** 2).sum()

    u = d ** (- 2. / (m - 1))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))

    return cntr, u, jm, d
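One way to exercise a single step (a sketch: this `_distance` is a stand-in for the module's Euclidean helper, which the snippet does not include):

import numpy as np
from scipy.spatial.distance import cdist

def _distance(data, centers):
    # stand-in: (c, N) matrix of Euclidean distances, as _cmeans0 expects
    return cdist(centers, data)

rng = np.random.default_rng(0)
data = rng.random((2, 100))   # 2 features, 100 samples (features x N layout)
u0 = rng.random((3, 100))     # initial memberships for c=3 clusters
cntr, u, jm, d = _cmeans0(data, u0, c=3, m=2.0)
print(cntr.shape, u.shape)    # (3, 2) (3, 100)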
Example #17
File: linalg.py Project: bjzu/MF
def elop(X, Y, op):
    """
    Compute element-wise operation of matrix :param:`X` and matrix :param:`Y`.
    
    :param X: First input matrix.
    :type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia or :class:`numpy.matrix`
    :param Y: Second input matrix.
    :type Y: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia or :class:`numpy.matrix`
    :param op: Operation to be performed. 
    :type op: `func` 
    """
    try:
        zp1 = op(0, 1) if sp.isspmatrix(X) else op(1, 0)
        zp2 = op(0, 0) 
        zp = zp1 != 0 or zp2 != 0
    except:
        zp = 0
    if sp.isspmatrix(X) or sp.isspmatrix(Y):
        return _op_spmatrix(X, Y, op) if not zp else _op_matrix(X, Y, op)
    else:
        try:
            X[X == 0] = np.finfo(X.dtype).eps
            Y[Y == 0] = np.finfo(Y.dtype).eps
        except ValueError:
            return op(np.asmatrix(X), np.asmatrix(Y))
        return op(np.asmatrix(X), np.asmatrix(Y))
Example #18
	def motion_update(self, X_t, O1, O2):
		O_d = O2 - O1
		t = np.sqrt(np.sum(np.power(O_d[0:2],2)))
		r1 = np.arctan2(O_d[1], O_d[0]) - O1[2]
		r2 = -r1 + O_d[2]
		sig_t = np.finfo(float).eps + self.a[2] * np.absolute(t)
		sig_r1 = np.finfo(float).eps + self.a[0] * np.absolute(r1)
		sig_r2 = np.finfo(float).eps + self.a[1] * np.absolute(r2)
		# sig_t = np.finfo(float).eps + np.sqrt(self.a[2] * np.absolute(t) + self.a[3] * np.absolute(r1+r2))
		# sig_r1 = np.finfo(float).eps + np.sqrt(self.a[0] * np.absolute(r1) + self.a[1] * np.absolute(t))
		# sig_r2 = np.finfo(float).eps + np.sqrt(self.a[0] * np.absolute(r2) + self.a[1] * np.absolute(t))
		# print('heeee')		
		# print(sig_r1)
		# print(sig_r2)		
		h_t = np.reshape(t + np.random.normal(0,sig_t,len(X_t)), (len(X_t),1))
		h_r1 = r1 + np.random.normal(0,sig_r1,len(X_t))
		h_r2 = r2 + np.random.normal(0,sig_r2,len(X_t))
		th = np.reshape(X_t[:,2] + h_r1, (len(X_t),1))
		pos = X_t[:,:2] + np.concatenate((np.cos(th), np.sin(th)), axis=1) * h_t
		ang = np.reshape(X_t[:,2]+h_r1+h_r2, (len(X_t),1))
		ang = (ang + (- 2*np.pi)*(ang > np.pi) + (2*np.pi)*(ang < -np.pi))
		X_upd = np.concatenate((pos,ang), axis=1)
		map_c = np.ceil(pos/10).astype(int)
		count = 0
		for i in range(len(X_upd)):
			if tuple(map_c[i]) in self.unocc_dict:  # dict.has_key is Python 2 only
				X_upd[count] = X_upd[i]
				count = count + 1
		# print(count)
		return X_upd[:count,:]
Example #19
def _cmeans_predict0(test_data, cntr, u_old, c, m):
    """
    Single step in fuzzy c-means prediction algorithm. Clustering algorithm
    modified from Ross, Fuzzy Logic w/Engineering Applications (2010)
    p.352-353, equations 10.28 - 10.35, but this method to generate fuzzy
    predictions was independently derived by Josh Warner.

    Parameters inherited from cmeans()

    Very similar to initial clustering, except `cntr` is not updated, thus
    the new test data are forced into known (trained) clusters.

    """
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(float).eps)

    um = u_old ** m
    test_data = test_data.T

    # For prediction, we do not recalculate cluster centers. The test_data is
    # forced to conform to the prior clustering.

    d = _distance(test_data, cntr)
    d = np.fmax(d, np.finfo(float).eps)

    jm = (um * d ** 2).sum()

    u = d ** (- 2. / (m - 1))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))

    return u, jm, d
Example #20
    def test_complex128_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
                          xi, x + y*1j, nulp)
        self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
                          xi, y + x*1j, nulp)
        # The test condition needs to be at least a factor of sqrt(2) smaller
        # because the real and imaginary parts both change
        y = x + x*eps*nulp
        self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
                          xi, y + y*1j, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
                          xi, x + y*1j, nulp)
        self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
                          xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp
        self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
                          xi, y + y*1j, nulp)
Example #21
    def test_complex64_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
                          xi, x + y*1j, nulp)
        self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
                          xi, y + x*1j, nulp)
        y = x + x*eps*nulp
        self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
                          xi, y + y*1j, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
                          xi, x + y*1j, nulp)
        self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
                          xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp
        self.assertRaises(AssertionError, assert_array_almost_equal_nulp,
                          xi, y + y*1j, nulp)
Example #22
    def _wave_fit(self, tseries, candidate_start_points, candidate_peak_volumes):
        '''
        This method has the fitting strategy to minimize the cost.
        ''' 
        
        period = self.period
        threshold = self.threshold
        residual_metric = self.residual_metric
        score_func = self.score_func
        fit_audience = self.fit_audience

        curr_score = np.finfo('d').max
        best_score = np.finfo('d').max
        best_params = None

        params = None
        for i in range(len(candidate_start_points)):
            sp = candidate_start_points[i]
            pv = candidate_peak_volumes[i]
            
            params = _fit_one(tseries, period, residual_metric, sp, pv, \
                    fit_audience, params, candidate_start_points[0])
            model = phoenix_r_with_period(params, tseries.shape[0])
            num_params = 5 * (i + 1) + 2
            curr_score = self.score_func(model, tseries, num_params, params)

            if (curr_score <= best_score):
                best_params = params
                best_score = curr_score
            else:
                increased_score = (curr_score - best_score) / best_score
                if increased_score > threshold:
                    break

        return best_score, best_params
Example #23
def system_diagnostic():
    """ return various helpful/informative information about the
    current system. For instance versions of python & available packages.

    Mostly undocumented function...
    """

    # There is probably a more clever way to do the following via introspection?

    import platform
    import os
    import poppy
    import numpy
    from .version import version
    try:
        import ttk
        ttk_version = ttk.__version__
    except ImportError:
        ttk_version = 'not found'

    try:
        import wx
        wx_version = wx.__version__
    except ImportError:
        wx_version = 'not found'

    try:
        import pyfftw
        pyfftw_version = pyfftw.version
    except ImportError:
        pyfftw_version = 'not found'

    try:
        import pysynphot
        pysynphot_version = pysynphot.__version__
    except ImportError:
        pysynphot_version = 'not found'


    try:
        import astropy
        astropy_version = astropy.__version__
    except ImportError:
        astropy_version = 'not found'

    result = DIAGNOSTIC_REPORT.format(
        os=platform.platform(),
        numpy=numpy.__version__,
        python=sys.version.replace("\n", " "),
        poppy=poppy.__version__,
        webbpsf=version,
        tkinter=ttk_version,
        wxpython=wx_version,
        pyfftw=pyfftw_version,
        pysyn=pysynphot_version,
        astropy=astropy_version,
        finfo_float=numpy.finfo(numpy.float64),
        finfo_complex=numpy.finfo(numpy.complex128),
    )
    return result
Example #24
    def __init__(self):
        self.gravity = 9.8
        self.masscart = 1.0
        self.masspole = 0.1
        self.total_mass = (self.masspole + self.masscart)
        self.length = 0.5 # actually half the pole's length
        self.polemass_length = (self.masspole * self.length)
        self.force_mag = 10.0
        self.tau = 0.02  # seconds between state updates

        # Angle at which to fail the episode
        self.theta_threshold_radians = 12 * 2 * math.pi / 360
        self.x_threshold = 2.4

        # Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds
        high = np.array([
            self.x_threshold * 2,
            np.finfo(np.float32).max,
            self.theta_threshold_radians * 2,
            np.finfo(np.float32).max])

        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Box(-high, high)

        self.seed()
        self.viewer = None
        self.state = None

        self.steps_beyond_done = None
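Here np.finfo(np.float32).max marks the two velocity components as effectively unbounded; a sketch of just the space construction (assumes OpenAI Gym):

import math
import numpy as np
from gym import spaces

theta_threshold_radians = 12 * 2 * math.pi / 360
x_threshold = 2.4
high = np.array([x_threshold * 2,
                 np.finfo(np.float32).max,
                 theta_threshold_radians * 2,
                 np.finfo(np.float32).max], dtype=np.float32)
observation_space = spaces.Box(-high, high, dtype=np.float32)
print(observation_space.high[1])  # 3.4028235e+38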
Example #25
def almost_equal(self, other,
                 rtol=2**4 * np.finfo(np.zeros(1).dtype).eps,
                 atol=0.):
    # assert atol == 0., 'Not supported'
    assert type(other) == type(self)
    return self._impl.almost_equal(other._impl,
                                   rtol if rtol is not None else 2**4 * np.finfo(np.zeros(1).dtype).eps)
Example #26
  def _testZeroDensity(self, alpha):
    """Zero isn't in the support of the gamma distribution.

    But quantized floating point math has its limits.
    TODO(bjp): Implement log-gamma sampler for small-shape distributions.

    Args:
      alpha: float shape value to test
    """
    try:
      from scipy import stats  # pylint: disable=g-import-not-at-top
    except ImportError as e:
      tf_logging.warn("Cannot test zero density proportions: %s" % e)
      return
    allowable_zeros = {
        dtypes.float16: stats.gamma(alpha).cdf(np.finfo(np.float16).tiny),
        dtypes.float32: stats.gamma(alpha).cdf(np.finfo(np.float32).tiny),
        dtypes.float64: stats.gamma(alpha).cdf(np.finfo(np.float64).tiny)
    }
    failures = []
    for use_gpu in [False, True]:
      for dt in dtypes.float16, dtypes.float32, dtypes.float64:
        sampler = self._Sampler(
            10000, alpha, 1.0, dt, use_gpu=use_gpu, seed=12345)
        x = sampler()
        allowable = allowable_zeros[dt] * x.size
        allowable = allowable * 2 if allowable < 10 else allowable * 1.05
        if np.sum(x <= 0) > allowable:
          failures += [(use_gpu, dt)]
      self.assertEqual([], failures)
Example #27
    def get_cost(self, weights, data, display=False):
        # print 'getting cost...'
        N = float(len(data))
        reg = (self.lmbda / (2.0 * N)) * np.sum(weights ** 2)
        # reg = (self.lmbda / self.N) * np.sum(np.abs(weights))
        # self.set_network_weights(weights)
        layers = self.convert_weights_to_layers(weights)
        cost = 0.0
        for d, l in data[:]:
            z = d
            for idx, layer in enumerate(layers):
                if idx == len(layers) - 1:
                    # this is a output layer
                    prediction = layer.get_z(z)
                    prediction[prediction >= 1.0] = 1.0 - np.finfo(float).eps  # to avoid nan showing up
                    prediction[prediction <= 0.0] = 0.0 + np.finfo(float).eps
                    l1p = -l * np.log(prediction)
                    l2p = -(1.0 - l) * np.log((1.0 - prediction))
                    lcost = np.sum(l1p + l2p)
                    cost += lcost * (1.0 / float(N))

                else:

                    z = layer.get_z(z)
        if display:
            print('cost', cost + reg)
        return cost + reg
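The two masked assignments above are an epsilon clip applied before the logs; np.clip expresses the same guard in one call:

import numpy as np

eps = np.finfo(float).eps
prediction = np.array([0.0, 0.5, 1.0])
safe = np.clip(prediction, eps, 1.0 - eps)
print(-np.log(safe), -np.log(1.0 - safe))  # finite everywhere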
Example #28
def get_matrix_2d_ragged(workspace, distribution, histogram2D=False, transpose=False):
    num_hist = workspace.getNumberHistograms()
    delta = numpy.finfo(numpy.float64).max
    min_value = numpy.finfo(numpy.float64).max
    max_value = numpy.finfo(numpy.float64).min
    for i in range(num_hist):
        xtmp = workspace.readX(i)
        if workspace.isHistogramData():
            #input x is edges
            xtmp = mantid.plots.helperfunctions.points_from_boundaries(xtmp)
        else:
            #input x is centers
            pass
        min_value = min(min_value, xtmp.min())
        max_value = max(max_value, xtmp.max())
        diff = xtmp[1:] - xtmp[:-1]
        delta = min(delta, diff.min())
    num_edges = int(numpy.ceil((max_value - min_value)/delta)) + 1
    x_centers = numpy.linspace(min_value, max_value, num=num_edges)
    y = mantid.plots.helperfunctions.boundaries_from_points(workspace.getAxis(1).extractValues())
    z = numpy.empty([num_hist, num_edges], dtype=numpy.float64)
    for i in range(num_hist):
        centers, ztmp, _, _ = mantid.plots.helperfunctions.get_spectrum(workspace, i, distribution=distribution, withDy=False, withDx=False)
        f = interp1d(centers, ztmp, kind='nearest', bounds_error=False, fill_value=numpy.nan)
        z[i] = f(x_centers)
    if histogram2D:
        x = mantid.plots.helperfunctions.boundaries_from_points(x_centers)
    else:
        x = x_centers
    if transpose:
        return y.T,x.T,z.T
    else:
        return x,y,z
Example #29
    def test_exp(self):
        source = np.array([0, 0, 0, 1.0])
        result = quaternion.exp(source)
        expected = np.array([0, 0, 0, np.exp(1)])
        np.testing.assert_almost_equal(result, expected)

        source = quaternion.create_from_eulers([np.pi, 0, 0])
        result = quaternion.exp(source)
        expected = np.array([0.84147098, 0, 0, 0.54030231])
        np.testing.assert_almost_equal(result, expected)

        # Tests from the boost::math::quaternion
        source = np.array([4 * np.arctan(1), 0, 0, 0])
        result = quaternion.exp(source) + [0, 0, 0, 1.0]
        result = np.linalg.norm(result)
        expected = 2 * np.finfo(result.dtype).eps
        np.testing.assert_almost_equal(result, expected)

        source = np.array([0, 4 * np.arctan(1), 0, 0])
        result = quaternion.exp(source) + [0, 0, 0, 1.0]
        result = np.linalg.norm(result)
        expected = 2 * np.finfo(result.dtype).eps
        np.testing.assert_almost_equal(result, expected)

        source = np.array([0, 0, 4 * np.arctan(1), 0])
        result = quaternion.exp(source) + [0, 0, 0, 1.0]
        result = np.linalg.norm(result)
        expected = 2 * np.finfo(result.dtype).eps
        np.testing.assert_almost_equal(result, expected)
Example #30
def fbank(signal, samplerate=16000, winlen=0.025, winstep=0.01,
          nfilt=26, nfft=512, lowfreq=0, highfreq=None, preemph=0.97):
    """Compute Mel-filterbank energy features from an audio signal.
    :param signal: the audio signal from which to compute features. Should be an N*1 array
    :param samplerate: the samplerate of the signal we are working with.
    :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
    :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
    :param nfilt: the number of filters in the filterbank, default 26.
    :param nfft: the FFT size. Default is 512.
    :param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
    :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
    :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
    :returns: 2 values. The first is a numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector. The
        second return value is the energy in each frame (total energy, unwindowed)
    """
    highfreq = highfreq or samplerate / 2
    signal = sigproc.preemphasis(signal, preemph)
    frames = sigproc.framesig(signal, winlen * samplerate, winstep * samplerate)
    pspec = sigproc.powspec(frames, nfft)
    energy = numpy.sum(pspec, 1)  # this stores the total energy in each frame
    energy = numpy.where(energy == 0, numpy.finfo(float).eps, energy)  # if energy is zero, we get problems with log

    fb = get_filterbanks(nfilt, nfft, samplerate, lowfreq, highfreq)
    feat = numpy.dot(pspec, fb.T)  # compute the filterbank energies
    feat = numpy.where(feat == 0, numpy.finfo(float).eps, feat)  # if feat is zero, we get problems with log

    return feat, energy
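This matches the fbank implementation in the python_speech_features package (an assumption, based on the sigproc and get_filterbanks helpers); a usage sketch on synthetic audio:

import numpy as np

rng = np.random.default_rng(0)
signal = rng.standard_normal(16000)              # one second at 16 kHz
feat, energy = fbank(signal, samplerate=16000)   # defaults: 25 ms windows, 26 filters
print(feat.shape, energy.shape)                  # roughly (99, 26) and (99,)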
Example #31
    # "long_name": "cell altitude_bounds",
    "description": "cell interval bounds for altitude",
    "_FillValue": False,
    "comment": "(lower bound, upper bound]",
    "units": "m",
})

file_name = ("EUREC4A_JOANNE_Dropsonde-RD41_" + "Level_3_v" +
             str(joanne.__version__) + ".nc")

save_directory = "/Users/geet/Documents/JOANNE/Data/Level_3/"  # Test_data/" #Level_3/"

comp = dict(zlib=True,
            complevel=4,
            fletcher32=True,
            _FillValue=np.finfo("float32").max)

encoding = {
    var: comp
    for var in to_save_ds.data_vars
    if var not in ["platform_id", "sonde_id", "alt_bnds"]
}
encoding["launch_time"] = {
    "units": "seconds since 2020-01-01",
    "dtype": "int32"
}
encoding["interpolated_time"] = {
    "units": "seconds since 2020-01-01",
    "dtype": "int32",
    "_FillValue": np.iinfo("int32").max,
}
Example #32
def default(self):
    return numpy.finfo(numpy.float64).min
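Returning numpy.finfo(numpy.float64).min works as a "lowest possible score" sentinel, e.g. to seed a running maximum:

import numpy

best = numpy.finfo(numpy.float64).min
for score in (3.2, -1.0, 7.5):
    best = max(best, score)
print(best)  # 7.5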
Example #33
def selectLines(lines, modelAtms, pradks, starParms, varLimit = np.finfo(np.float64).max, enforceMin = False, refDict=None):
    # Select lines where the equivalent width measures are still within the 
    # linear region of the curve of growth, and where the measure abundance 
    # does not vary by more than a (passed) limit.
    # To streamline multiple calls to MOOG for abundance calculations, a
    # reference line dictionary can be passed. The dictionary would be of the 
    # form:
    # {ion:(refLines, refCorrs)}
    FeILines, FeIILines, TiILines, TiIILines = [],[],[],[]

    if refDict is not None:
        try:
            # FeI line selection
            ion = 26.0
            FeILines = selectLinesForElement(lines, ion, modelAtms, pradks,\
                    starParms, varLimit = varLimit, enforceMin = enforceMin,\
                    refLines=refDict[ion][0], refCorrs=refDict[ion][1],\
                    lineWeights=refDict[ion][2])
                
            # FeII abundances for comparison
            ion=26.1
            FeIILines = selectLinesForElement(lines, ion, modelAtms, pradks,\
                    starParms, varLimit = varLimit, enforceMin = enforceMin,\
                    refLines=refDict[ion][0], refCorrs=refDict[ion][1],\
                    lineWeights=refDict[ion][2])
                    
            # TiI lines - We have to accept a wider range of variances with Ti lines
            ion = 22.0
            TiILines =selectLinesForElement(lines, ion, modelAtms, pradks,\
                    starParms, varLimit = varLimit*2, enforceMin = enforceMin,\
                    refLines=refDict[ion][0], refCorrs=refDict[ion][1],\
                    lineWeights=refDict[ion][2])
            
            # TiII
            ion = 22.1
            TiIILines = selectLinesForElement(lines, ion, modelAtms, pradks,\
                    starParms, varLimit = varLimit*2, enforceMin = enforceMin,\
                    refLines=refDict[ion][0], refCorrs=refDict[ion][1],\
                    lineWeights=refDict[ion][2])
                    
        except KeyError:
            # Occurs when MOOG bombs out, or we could not find lines for one
            # of the element/ionization pairs. Just return what we have so far.
            pass
    else:
        try:
            # FeI line selection
            FeILines = selectLinesForElement(lines, 26.0, modelAtms, pradks,\
                    starParms, varLimit = varLimit, enforceMin = enforceMin)
                
            # FeII abundances for comparison
            FeIILines = selectLinesForElement(lines, 26.1, modelAtms, pradks,\
                    starParms, varLimit = varLimit, enforceMin = enforceMin)
                    
            # TiI lines - We have to accept a wider range of variances with Ti lines
            TiILines =selectLinesForElement(lines, 22.0, modelAtms, pradks,\
                    starParms, varLimit = varLimit*2, enforceMin = enforceMin)
            
            # TiII
            TiIILines = selectLinesForElement(lines, 22.1, modelAtms, pradks,\
                    starParms, varLimit = varLimit*2, enforceMin = enforceMin)

        except KeyError:
            # Occurs when MOOG bombs out, or we could not find lines for one
            # of the element/ionization pairs. Just return what we have so far.
            pass

    return FeILines, FeIILines, TiILines, TiIILines
Example #34
import math

import numpy as np

from spatialmath.base import transformsNd as trn
from spatialmath.base import animate

try:  # pragma: no cover
    #print('Using SymPy')
    import sympy as sym

    def _issymbol(x):
        return isinstance(x, sym.Symbol)
except ImportError:

    def _issymbol(x):  # pylint: disable=unused-argument
        return False


_eps = np.finfo(np.float64).eps

# ---------------------------------------------------------------------------------------#


def _cos(theta):
    if _issymbol(theta):
        return sym.cos(theta)
    else:
        return math.cos(theta)


def _sin(theta):
    if _issymbol(theta):
        return sym.sin(theta)
    else:
        return math.sin(theta)
Example #35
    def stoch_solver(self):
        # define a list to store transitions
        expval = []
        state_1 = self.states_x[1]

        # Total population is the sum of all states except birth and death
        total_pop = self.states_x[1:-1].sum()

        # Transition 1 - Birth to Susceptible
        expval.append(total_pop * self.param_br * (1 - self.param_mir) * self.param_dt)

        # Transition 2 - Birth to Maternally Immunized
        expval.append(total_pop * self.param_br * self.param_mir * self.param_dt)

        # Transition 3 - Any State except Birth to Dead (Natural Mortality)
        expval += (self.states_x[1:self.param_num_states - 1] * self.param_dr * self.param_dt).tolist()

        # Transition 4 - Susceptible to Vaccinated[1]
        if self.param_vr != 0:
            expval.append(state_1 * self.param_vr * self.param_dt)

        # Transition 5 - Vaccinated[i] to Vaccinated[i+1] until i+1 == n_vac
        if self.param_n_vac != 0:
            expval += (self.states_x[2:self.param_n_vac + 1] * \
                   (1 - self.param_dr * self.param_dt)).tolist()

        # Transition 6 - Vaccinated[n_vac] to Vaccination_Immunized
        # Transition 7 - Vaccinated[n_vac] to Susceptible
        if self.param_vr != 0:
            state_vac = self.states_x.dot(self.ind_vac).sum()
            expval.append(state_vac * self.param_vir)
            expval.append(state_vac * (1 - self.param_dr * self.param_dt - self.param_vir))

        # Transition 8 - Susceptible to Exposed[1]
        temp1 = self.states_x.dot(self.ind_inf).sum() + self.param_eps_exp * \
                self.states_x.dot(self.ind_exp).sum() + self.param_eps_sev * \
                self.states_x.dot(self.ind_sin).sum() + self.param_eps_sev * \
                self.states_x.dot(self.ind_iso).sum() + self.param_eps_qua * \
                self.states_x.dot(self.ind_qua).sum()

        if self.param_n_exp != 0:
            expval.append(state_1 * temp1 * self.param_beta_exp * self.param_dt / total_pop)

        # Transition 9 - Susceptible to Infected[1]
        expval.append(state_1 * temp1 * self.param_beta_inf * self.param_dt / total_pop)

        # Transition 10 - Exposed[i] to Exposed[i+1] until i+1 == n_exp
        expval += (self.states_x[self.ind_exp1:self.ind_exp1 + self.param_n_exp - 1] * \
                   (1 - self.param_dr * self.param_dt - self.param_qr * self.param_dt)).tolist()

        # Transition 11 - Exposed[n_exp] to Infected[1]
        if self.param_n_exp != 0:
            expval.append(self.states_x[self.ind_expn] * (1 - self.param_dr * self.param_dt))

        # Transition 12 - Exposed[i] to Quarantined[i+1] until i+1 == n_exp
        expval += (self.states_x[self.ind_exp1:self.ind_exp1 + self.param_n_exp - 1] * \
                  (self.param_qr * self.param_dt)).tolist()

        # Transition 13 - Quarantined[i] to Quarantined[i+1] until i+1 == n_exp
        expval += (self.states_x[self.ind_qua1:self.ind_qua1 + self.param_n_exp - 1] * \
                   (1 - self.param_dr * self.param_dt)).tolist()

        # Transition 14 - Quarantined[n_exp] to Isolated[1]
        if self.param_n_exp != 0:
            expval.append(self.states_x[self.ind_quan] * (1 - self.param_dr * self.param_dt))

        # Transition 15 - Infected[i] to Infected[i+1] until i+1 == n_inf
        expval += (self.states_x[self.ind_inf1:self.ind_inf1 + self.param_n_inf - 1] * \
                   (1 - self.param_dr * self.param_dt - self.param_sir * self.param_dt)).tolist()

        # Transition 16 - Isolated[i] to Isolated[i+1] until i+1 == n_inf
        expval += (self.states_x[self.ind_iso1:self.ind_iso1 + self.param_n_inf - 1] * \
                   (1 - self.param_dr * self.param_dt - self.param_sir * self.param_dt)).tolist()

        # Transition 17 - Severe_Infected[i] to Severe_Infected[i+1] until i+1 == n_inf
        expval += (self.states_x[self.ind_sin1:self.ind_sin1 + self.param_n_inf - 1] * \
                   (1 - self.param_dr * self.param_dt)).tolist()

        # Transition 18 - Infected[i] to Severe_Infected[i+1] until i+1 == n_inf
        expval += (self.states_x[self.ind_inf1:self.ind_inf1 + self.param_n_inf - 1] * \
                   (self.param_sir * self.param_dt)).tolist()

        # Transition 19 - Isolated[i] to Severe_Infected[i+1] until i+1 == n_inf
        expval += (self.states_x[self.ind_iso1:self.ind_iso1 + self.param_n_inf - 1] * \
                   (self.param_sir * self.param_dt)).tolist()

        # Transition 20 - Infected[n_inf] to Recovery_Immunized
        expval.append(self.states_x[self.ind_infn] * self.param_gamma_im)

        # Transition 21 - Isolated[n_inf] to Recovery_Immunized
        expval.append(self.states_x[self.ind_ison] * self.param_gamma_im)

        # Transition 22 - Severe_Infected[n_inf] to Recovery Immunized
        # expval.append(self.states_x[self.ind_sinn] * self.param_gamma_im)
        states_sin = self.states_x.dot(self.ind_sin).sum()
        if states_sin < self.param_hosp_capacity:
            expval.append(self.states_x[self.ind_sinn] * \
                          (1 - self.param_gamma_mor1) * self.param_gamma_im)
        else:
            expval.append(self.states_x[self.ind_sinn] * \
                          (1 - self.param_gamma_mor2) * self.param_gamma_im)

        # Transition 23 - Infected[n_inf] to Susceptible
        expval.append(self.states_x[self.ind_infn] * \
                          (1 - self.param_gamma_mor) * (1 - self.param_gamma_im))

        # Transition 24 - Isolated[n_inf] to Susceptible
        expval.append(self.states_x[self.ind_ison] * \
                          (1 - self.param_gamma_mor) * (1 - self.param_gamma_im))

        # Transition 25 - Severe_Infected[n_inf] to Susceptible
        states_sin = self.states_x.dot(self.ind_sin).sum()

        if states_sin < self.param_hosp_capacity:
            expval.append(self.states_x[self.ind_sinn] * \
                          (1 - self.param_gamma_mor1) * (1 - self.param_gamma_im))
        else:
            expval.append(self.states_x[self.ind_sinn] * \
                          (1 - self.param_gamma_mor2) * (1 - self.param_gamma_im))

        # Transition 26 - Infected[n_inf] to Dead
        expval.append(self.states_x[self.ind_infn] * self.param_gamma_mor)

        # Transition 27 - Severe_Infected[n_inf] to Dead
        if states_sin < self.param_hosp_capacity:
            expval.append(self.states_x[self.ind_sinn] *self.param_gamma_mor1)
        else:
            expval.append(self.states_x[self.ind_sinn] *self.param_gamma_mor2)

        # Randomly generate the transition value based on the expected value
        for ev, sind, dind in zip(expval, self.source_ind, self.dest_ind):  # 'ev' avoids shadowing built-in eval
            if 0 < ev < 10:
                temp1 = int(np.ceil(ev * 10 + np.finfo(np.float32).eps))
                temp2 = ev / temp1
                dx = self.dx_generator(temp1, temp2)
            elif ev < 0:
                dx = 0
            else:
                dx = round(ev)

            # Apply the changes for the transitions to the
            # corresponding source and destination states
            temp = self.states_x[sind] - dx

            if sind == 1:
                self.states_x[sind] = temp
                self.states_x[dind] += dx
            elif temp <= 0:
                self.states_x[dind] += self.states_x[sind]
                self.states_x[sind] = 0
            else:
                self.states_x[sind] = temp
                self.states_x[dind] += dx
Example #36
def schwarz_parameters(A,
                       subdomain=None,
                       subdomain_ptr=None,
                       inv_subblock=None,
                       inv_subblock_ptr=None):
    '''
    Helper function for setting up Schwarz relaxation.  This function avoids
    recomputing the subdomains and block inverses many times, e.g., it avoids
    a costly double computation when setting up pre- and post-smoothing with Schwarz.
    
    Parameters
    ----------
    A {csr_matrix}

    Returns
    -------
    A.schwarz_parameters[0] is subdomain
    A.schwarz_parameters[1] is subdomain_ptr
    A.schwarz_parameters[2] is inv_subblock
    A.schwarz_parameters[3] is inv_subblock_ptr
    '''

    # Check if A has a pre-existing set of Schwarz parameters
    if hasattr(A, 'schwarz_parameters'):
        if subdomain is not None and subdomain_ptr is not None:
            # check that the existing parameters correspond to the same subdomains
            if numpy.array(A.schwarz_parameters[0] == subdomain).all() and \
               numpy.array(A.schwarz_parameters[1] == subdomain_ptr).all():
                return A.schwarz_parameters
        else:
            return A.schwarz_parameters

    # Default is to use the overlapping regions defined by A's sparsity pattern
    if subdomain is None or subdomain_ptr is None:
        subdomain_ptr = A.indptr.copy()
        subdomain = A.indices.copy()

    ##
    # Extract each subdomain's block from the matrix
    if inv_subblock is None or inv_subblock_ptr is None:
        inv_subblock_ptr = numpy.zeros(subdomain_ptr.shape,
                                       dtype=A.indices.dtype)
        blocksize = (subdomain_ptr[1:] - subdomain_ptr[:-1])
        inv_subblock_ptr[1:] = numpy.cumsum(blocksize * blocksize)

        ##
        # Extract each block column from A
        inv_subblock = numpy.zeros((inv_subblock_ptr[-1], ), dtype=A.dtype)
        amg_core.extract_subblocks(A.indptr, A.indices, A.data, inv_subblock,
                                   inv_subblock_ptr, subdomain, subdomain_ptr,
                                   int(subdomain_ptr.shape[0] - 1), A.shape[0])
        ##
        # Choose tolerance for which singular values are zero in *gelss below
        t = A.dtype.char
        eps = numpy.finfo(numpy.float64).eps
        feps = numpy.finfo(numpy.single).eps
        geps = numpy.finfo(numpy.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        cond = {
            0: feps * 1e3,
            1: eps * 1e6,
            2: geps * 1e6
        }[_array_precision[t]]

        ##
        # Invert each block column
        my_pinv, = la.get_lapack_funcs(['gelss'], (numpy.ones(
            (1, ), dtype=A.dtype)))
        for i in range(subdomain_ptr.shape[0] - 1):
            m = blocksize[i]
            rhs = numpy.eye(m, m, dtype=A.dtype)
            gelssoutput = my_pinv(
                inv_subblock[inv_subblock_ptr[i]:inv_subblock_ptr[i +
                                                                  1]].reshape(
                                                                      m, m),
                rhs,
                cond=cond,
                overwrite_a=True,
                overwrite_b=True)
            inv_subblock[
                inv_subblock_ptr[i]:inv_subblock_ptr[i + 1]] = numpy.ravel(
                    gelssoutput[1])

    A.schwarz_parameters = (subdomain, subdomain_ptr, inv_subblock,
                            inv_subblock_ptr)
    return A.schwarz_parameters
Example #37
# coding: utf-8

# # Question 2

# ## Importing libraries

# In[ ]:

import numpy as np
import pandas as pd
import math as mt
import matplotlib.pyplot as plt

eps = np.finfo(float).eps
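# A typical use of this eps (an assumption: the excerpt defines it but the
# use falls outside the excerpt): guarding log2(0) when computing entropy.
_p_demo = np.array([0.0, 0.5, 0.5])
_entropy_demo = -np.sum(_p_demo * np.log2(_p_demo + eps))  # ~1.0 instead of nan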

# ## Utility Functions

# Graph Plotter

# In[ ]:


def plotter(label_x, label_y, title, x_axis, y_axis, mark='', colr='blue'):
    plt.figure(num=None, figsize=(6, 4), dpi=175, facecolor='w', edgecolor='k')
    # plotting the points
    plt.plot(x_axis, y_axis, marker=mark, color=colr, label='Error rate')
    # naming the x axis
    plt.xlabel(label_x)
    # naming the y axis
    plt.ylabel(label_y)
Example #38
def isolumCheckerScan(scanDict, screenSize=[1024, 768]):
    #do full field flickering checkerboard
    #length of scan in s
    scanLength = float(scanDict['numCycles'] * scanDict['period'] +
                       scanDict['preScanRest'])
    #parse out vars from scanDict
    IR = scanDict['innerRadius']
    OR = scanDict['outerRadius']
    #    colorA=scanDict['colorA']
    #    colorB=scanDict['colorB']
    #    colorBG=scanDict['colorBackground']
    colorA = numpy.zeros((3, 1))
    colorB = numpy.zeros((3, 1))
    colorBG = numpy.zeros((3, 1))
    foo = scanDict['colorA']
    bar = foo.split(",")
    colorA[0] = float(bar[0])
    colorA[1] = float(bar[1])
    colorA[2] = float(bar[2])
    foo = scanDict['colorB']
    bar = foo.split(",")
    colorB[0] = float(bar[0])
    colorB[1] = float(bar[1])
    colorB[2] = float(bar[2])
    foo = scanDict['colorBackground']
    bar = foo.split(",")
    colorBG[0] = float(bar[0])
    colorBG[1] = float(bar[1])
    colorBG[2] = float(bar[2])
    flickFreq = scanDict['animFreq']
    timeBase = scanDict['timeBase']
    #open subject window
    mySubScreen = int(scanDict['subjectScreen'])
    myOpScreen = int(scanDict['operatorScreen'])
    #    print mySubScreen
    #    print myOpScreen
    winSub = visual.Window(screenSize,
                           monitor=scanDict['monCalFile'],
                           units="deg",
                           screen=mySubScreen,
                           color=[-1.0, -1.0, -1.0],
                           colorSpace='rgb',
                           fullscr=False,
                           allowGUI=False)
    #needs to be flexible--how do I extract the dims from screen?
    #    screenSize=numpy.array([1600,1200])
    #screenSize=numpy.array([1024,768])
    fixPercentage = scanDict['fixFraction']
    fixDuration = 0.25
    respDuration = 1.0
    subjectResponse = numpy.zeros((int(numpy.ceil(scanLength * 60 / 100)), 1))
    subRespArray = numpy.zeros((int(numpy.ceil(scanLength * 60 / 100)), 3))
    subjectResponse[:] = numpy.nan
    #plotResp=numpy.zeros((2,2))
    #    plotRespWrong=numpy.zeros((2,2))
    plotMax = int(numpy.ceil(scanLength * (60.0 / 100.0)))
    plotResp = numpy.zeros((plotMax, 2))
    plotColors = numpy.zeros((plotMax, 3))
    #axesX=numpy.arange(0,scanLength*60/100)
    white = [1.0, 1.0, 1.0]
    gray = [0.0, 0.0, 0.0]
    black = [-1.0, -1.0, -1.0]
    #    plt.ion()
    #    plt.plot(plotResp[0:2],'ro')
    #    plt.ylabel('subject responses')
    #    plt.show()
    #    plt.axis([0,scanLength*0.6,-0.1,1.1])

    operatorWindow = visual.Window([1024, 768],
                                   monitor='testMonitor',
                                   units='deg',
                                   screen=myOpScreen,
                                   color=[0, 0, 0],
                                   colorSpace='rgb')

    #create a shapeStim to show the operator how the subject is doing on the task
    #    opPlotRight=visual.ShapeStim(operatorWindow,units='pix',vertices=plotResp,closeShape=False,pos=(-448,-448),lineWidth=1,lineColor=[-1,1,-1],lineColorSpace='rgb' )
    #    opPlotWrong=visual.ShapeStim(operatorWindow,units='pix',vertices=plotResp,closeShape=False,pos=(-448,-448),lineWidth=1,lineColor=[1,-1,-1],lineColorSpace='rgb' )
    #try another kind of plot
    opPlot = visual.ElementArrayStim(operatorWindow,
                                     units='pix',
                                     xys=plotResp,
                                     sizes=7,
                                     nElements=plotMax,
                                     fieldPos=(-500, -384),
                                     colors=plotColors,
                                     colorSpace='rgb',
                                     sfs=0,
                                     fieldShape='square',
                                     elementMask='circle')
    gridLinesVertices = numpy.zeros((46, 2))
    gridLinesVertices[:, 0] = [
        0, 998, 0, 0, 998, 0, 0, 998, 0, 0, 998, 0, 0, 998, 0, 0, 998, 100,
        100, 100, 200, 200, 200, 300, 300, 300, 400, 400, 400, 500, 500, 500,
        600, 600, 600, 700, 700, 700, 800, 800, 800, 900, 900, 900, 998, 998
    ]
    gridLinesVertices[:, 1] = [
        0, 0, 0, 100, 100, 100, 200, 200, 200, 300, 300, 300, 400, 400, 400,
        500, 500, 500, 0, 500, 500, 0, 500, 500, 0, 500, 500, 0, 500, 500, 0,
        500, 500, 0, 500, 500, 0, 500, 500, 0, 500, 500, 0, 500, 500, 0
    ]
    opPlotGrid = visual.ShapeStim(operatorWindow,
                                  units='pix',
                                  vertices=gridLinesVertices,
                                  closeShape=False,
                                  pos=(-512, -334))
    #    gridLinesStart=numpy.zeros((17,2))
    #    gridLinesEnd=numpy.zeros((17,2))
    #    for i in range(11):
    #        gridLinesStart[i,0]=i*100
    #        gridLinesEnd[i,0]=i*100
    #        gridLinesStart[i,1]=0
    #        gridLinesEnd[i,1]=500
    #    for i in range(11,17):
    #        gridLinesStart[i,0]=0
    #        gridLinesEnd[i,0]=1000
    #        gridLinesStart[i,1]=(i-12)*100
    #        gridLinesEnd[i,1]=(i-12)*100
    #    opPlotGrid00=visual.Line(operatorWindow,units='pix',start=gridLinesStart[:,0],end=gridLinesEnd[:,0],pos=(-448,-448))
    #    opPlotGrid01=visual.Line(operatorWindow,units='pix',start=gridLinesStart[:,1],end=gridLinesEnd[:,1],pos=(-448,-448))
    #    opPlotGrid02=visual.Line(operatorWindow,units='pix',start=gridLinesStart[:,2],end=gridLinesEnd[:,2],pos=(-448,-448))

    #labels for the plot regions
    plotLabel1 = visual.TextStim(
        operatorWindow,
        units='pix',
        pos=(-450, 150),
        alignHoriz='left',
        text='Correct (active--displayed \'X\' and got a button press)',
        color=(-1, 1, -1),
        colorSpace='rgb',
        height=15)
    plotLabel2 = visual.TextStim(
        operatorWindow,
        units='pix',
        pos=(-450, -250),
        alignHoriz='left',
        text='Wrong (\'X\' displayed, NO button press)',
        color=(1, -1, -1),
        colorSpace='rgb',
        height=15)
    plotLabel3 = visual.TextStim(
        operatorWindow,
        units='pix',
        pos=(-450, -150),
        alignHoriz='left',
        text='Wrong-ish (No \'X\', but got a button press!?)',
        color=(1, -1, -1),
        colorSpace='rgb',
        height=15)
    plotLabel4 = visual.TextStim(
        operatorWindow,
        units='pix',
        pos=(-450, 25),
        alignHoriz='left',
        text='Correct (passive--no \'X\', no button press)',
        color=(-1, -1, 1),
        colorSpace='rgb',
        height=15)
    #create a designmatrix for trigger-based counting
    #first create an array--length = total number of Trs
    numTr = int(scanLength / scanDict['Tr'])
    designMatrix = numpy.zeros((numTr, 1))

    #first N Trs are already zero--rest
    #figure out when the stim should be on
    for iStim in range(scanDict['numCycles']):
        restAmt = scanDict['preScanRest'] / scanDict['Tr']
        stimDur = scanDict['period'] / scanDict['Tr']
        firstVal = int(restAmt + iStim * stimDur)
        lastVal = int(firstVal + scanDict['period'] / (2 * scanDict['Tr']))
        designMatrix[firstVal:lastVal] = 1  #stim on for the first half of each cycle
    numpy.savetxt('debug.txt', designMatrix, fmt='%.3i')

    #convert colors to psychopy's scheme
    colorAf = numpy.asarray(colorA, dtype=float)
    colorBf = numpy.asarray(colorB, dtype=float)
    colorBGf = numpy.asarray(colorBG, dtype=float)
    colorAp = 2 * colorAf - 1
    colorBp = 2 * colorBf - 1
    colorBGp = 2 * colorBGf - 1
    #    image1=visual.SimpleImageStim(winSub,image='redblack1.jpg')
    #    image2=visual.SimpleImageStim(winSub,image='redblack2.jpg')

    #    image1=visual.PatchStim(winSub,tex='redblack1a.jpg',mask=None,size=[OR,OR])
    #    image2=visual.PatchStim(winSub,tex='redblack2a.jpg',mask=None,size=[OR,OR])

    #let's try making some numpy arrays of the checkerboards! translated from matlab arrays
    #size of image--hardcode for now, but needs to be 2^n that fits inside smaller screen dimension
    #    twoN=numpy.ones((13))
    #    for n in range(13):
    #        twoN[n]=pow(2.0,n)
    #    twoNsize=numpy.nonzero(twoN>screenSize[1])
    #    #hmm, this somehow made a nested tuple, whatever that is
    #    keep_n=twoNsize[0][0]-1
    #    imageSize=pow(2,keep_n)
    #    halfSize=numpy.int(imageSize/2)
    debugVar = numpy.zeros((int(scanLength * 60), 2))
    #imageSize=1024
    if screenSize[0] < 257:
        imageSize = 256
    elif screenSize[0] < 513:
        imageSize = 512
    elif screenSize[0] < 1025:
        imageSize = 1024
    else:
        imageSize = 2048
    halfSize = imageSize // 2
    #    print screenSize
    #    print imageSize
    #    print halfSize
    #create arrays of x,y, and r,theta
    xIn = numpy.arange(-halfSize, halfSize, 1)
    yIn = numpy.arange(-halfSize, halfSize, 1)
    xIn = xIn.astype(float)  #astype returns a copy; reassign to take effect
    yIn = yIn.astype(float)
    x, y = numpy.meshgrid(xIn, yIn)
    r = numpy.sqrt(x**2 + y**2)
    #avoid divide by zero issues
    y[y == 0] = numpy.finfo(float).eps
    xOverY = x / y
    theta = numpy.arctan(xOverY)
    theta[halfSize, halfSize] = 0  #center pixel (the +1 was a 1-based MATLAB leftover)

    #number of wedges (pairs!!)--eventually to be a var passed in
    nWedges = 8.0
    #number of ring pairs
    nRings = 15.0
    #width of wedges in radians
    wedgeWidth = 2.0 * math.pi / nWedges
    ringWidth = 2.0 / nRings
    #ring function--describes how the ring width increases with eccentricity
    ringFunction = numpy.power(
        r / halfSize, 0.3) + 0.2  #r is float, so the division is float-safe

    #wedgeMask is +0.5 in one half of each wedge pair, -0.5 in the other
    wedgeMask = 0.5 - (numpy.mod(theta, wedgeWidth) > (wedgeWidth / 2.0))
    rmA = numpy.mod(ringFunction, ringWidth) > (ringWidth / 2.0)
    #ringMask alternates between +1 and -1 across ring pairs
    ringMask = 1 - 2.0 * (rmA)

    #the product is +/-0.5; adding 0.5 maps the board to {0, 1} check values
    checkerBoardLogic = wedgeMask * ringMask + 0.5
    #checkerBoardBG=r>
    #initialize an array of 1024x1024x3 for RGB channels
    checkerBoardA = numpy.ones((imageSize, imageSize, 3))
    checkerBoardAR = numpy.ones((imageSize, imageSize))
    checkerBoardAB = numpy.ones((imageSize, imageSize))
    checkerBoardAG = numpy.ones((imageSize, imageSize))
    #set the RGB values based on the colors passed in during launch
    #CBA, logic=1-->colorB, logic=0-->colorA
    #CBB, logic=1-->colorA, logic=0-->colorB
    #color A, column 1
    checkerBoardAR[checkerBoardLogic == 1] = colorAp[0]
    checkerBoardAG[checkerBoardLogic == 1] = colorAp[1]
    checkerBoardAB[checkerBoardLogic == 1] = colorAp[2]
    checkerBoardAR[checkerBoardLogic == 0] = colorBp[0]
    checkerBoardAG[checkerBoardLogic == 0] = colorBp[1]
    checkerBoardAB[checkerBoardLogic == 0] = colorBp[2]
    #now add in the background color around the widest ring
    #    imageMask=numpy.ones((imageSize,imageSize))
    #   imageMask[r>halfSize]=-1
    print(colorBG)
    print(colorBGf)
    print(colorBGp)
    checkerBoardAR[r > halfSize] = colorBGp[0]
    checkerBoardAG[r > halfSize] = colorBGp[1]
    checkerBoardAB[r > halfSize] = colorBGp[2]
    #smoosh the arrays together
    checkerBoardA[:, :, 0] = checkerBoardAR
    checkerBoardA[:, :, 1] = checkerBoardAG
    checkerBoardA[:, :, 2] = checkerBoardAB

    checkerBoardB = numpy.ones((imageSize, imageSize, 3))
    checkerBoardBR = numpy.ones((imageSize, imageSize))
    checkerBoardBB = numpy.ones((imageSize, imageSize))
    checkerBoardBG = numpy.ones((imageSize, imageSize))
    checkerBoardBR[checkerBoardLogic == 1] = colorBp[0]
    checkerBoardBG[checkerBoardLogic == 1] = colorBp[1]
    checkerBoardBB[checkerBoardLogic == 1] = colorBp[2]
    checkerBoardBR[checkerBoardLogic == 0] = colorAp[0]
    checkerBoardBG[checkerBoardLogic == 0] = colorAp[1]
    checkerBoardBB[checkerBoardLogic == 0] = colorAp[2]
    checkerBoardBR[r > halfSize] = colorBGp[0]
    checkerBoardBG[r > halfSize] = colorBGp[1]
    checkerBoardBB[r > halfSize] = colorBGp[2]
    checkerBoardB[:, :, 0] = checkerBoardBR
    checkerBoardB[:, :, 1] = checkerBoardBG
    checkerBoardB[:, :, 2] = checkerBoardBB

    #    numpy.savetxt('chAr.txt',checkerBoardA[:,:,0],fmt='%f')
    #    numpy.savetxt('chAg.txt',checkerBoardA[:,:,1],fmt='%f')
    #    numpy.savetxt('chAb.txt',checkerBoardA[:,:,2],fmt='%f')
    #    numpy.savetxt('chBr.txt',checkerBoardB[:,:,0],fmt='%f')
    #    numpy.savetxt('chBg.txt',checkerBoardB[:,:,1],fmt='%f')
    #    numpy.savetxt('chBb.txt',checkerBoardB[:,:,2],fmt='%f')
    #finally, create the image textures!!
    #oooh, these are fun--tiles the checkerboards!
    #stimA=visual.GratingStim(winSub,tex=checkerBoardA,size=imageSize)
    #stimB=visual.GratingStim(winSub,tex=checkerBoardB,size=imageSize)
    stimA = visual.GratingStim(winSub,
                               tex=checkerBoardA,
                               size=imageSize,
                               sf=1 / imageSize,
                               units='pix',
                               texRes=imageSize)
    stimB = visual.GratingStim(winSub,
                               tex=checkerBoardB,
                               size=imageSize,
                               sf=1 / imageSize,
                               units='pix',
                               texRes=imageSize)

    ReverseFreq = flickFreq  #checkerboard reversal rate in Hz

    #make a fixation cross which will rotate 45 deg on occasion
    fix0 = visual.Circle(winSub,
                         radius=IR / 2.0,
                         edges=32,
                         lineColor=gray,
                         lineColorSpace='rgb',
                         fillColor=gray,
                         fillColorSpace='rgb',
                         autoLog=False)
    fix1 = visual.ShapeStim(winSub,
                            pos=[0.0, 0.0],
                            vertices=((0.0, -0.2), (0.0, 0.2)),
                            lineWidth=3.0,
                            lineColor=black,
                            lineColorSpace='rgb',
                            fillColor=black,
                            fillColorSpace='rgb',
                            autoLog=False)

    fix2 = visual.ShapeStim(winSub,
                            pos=[0.0, 0.0],
                            vertices=((-0.2, 0.0), (0.2, 0.0)),
                            lineWidth=3.0,
                            lineColor=black,
                            lineColorSpace='rgb',
                            fillColor=black,
                            fillColorSpace='rgb',
                            autoLog=False)

    #stim.setOri(t*rotationRate*360.0)
    #stim.setRadialPhase(driftRate,'+')
    #stim.setPos()#something here
    msg1x = visual.TextStim(winSub,
                            pos=[0, +8],
                            text='flickering checkerboard')
    msg1a = visual.TextStim(
        winSub,
        pos=[0, +5],
        text='During the scan, please keep your eyes on the + in the center.',
        height=1)
    msg1b = visual.TextStim(winSub,
                            pos=[0, +2],
                            text='Hit any button any time the + becomes an X.',
                            height=1)
    msg1 = visual.TextStim(winSub,
                           pos=[0, -3],
                           text='Subject: Hit a button when ready.',
                           color=[1, -1, -1],
                           colorSpace='rgb')
    msg1.draw()
    msg1a.draw()
    msg1b.draw()
    msg1x.draw()
    fix0.draw()
    fix1.draw()
    fix2.draw()
    winSub.flip()

    #wait for subject
    thisKey = None
    while thisKey is None:
        thisKey = event.waitKeys(
            keyList=['r', 'g', 'b', 'y', '1', '2', '3', '4', 'q', 'escape'])
    if thisKey[0] in ['q', 'escape']:  #waitKeys returns a list of key names
        core.quit()  #abort
    else:
        event.clearEvents()
#    while len(event.getKeys())==0:
#        core.wait(0.05)
#    event.clearEvents()
#    msg1=visual.TextStim(winSub,pos=[0,+0.1],text='Waiting for magnet....',color=[-1,1,-1],colorSpace='rgb',height=0.1,units='norm')
#    msg1=visual.TextStim(winSub,text='Waiting for magnet....',height=10)
#    msg1=visual.TextStim(operatorWindow,pos=[0,-3],text='Subject: wait.',color=[1,-1,-1],colorSpace='rgb')
#    fix0.draw()
#    fix1.draw()
#    fix2.draw()
#    msg1.draw()
#    winSub.flip()
    msg1a = visual.TextStim(winSub, pos=[0, +5], text='   ', height=1)
    msg1b = visual.TextStim(winSub,
                            pos=[0, +2],
                            text='Waiting for magnet',
                            height=1)
    #msg1c=visual.TextStim(winSub,pos=[0,-3],text='Subject: Hit a key when ready.',color=[1,-1,-1],colorSpace='rgb')
    #msg1c.draw()  #msg1c is commented out above; drawing it would raise a NameError
    msg1a.draw()
    msg1b.draw()
    fix0.draw()
    fix1.draw()
    fix2.draw()
    winSub.flip()

    #wait for trigger
    trig = None
    while trig is None:
        #wait for trigger "keypress"
        trig = event.waitKeys(keyList=['t', '5', 'q', 'escape'])
    if trig[0] in ['q', 'escape']:  #waitKeys returns a list of key names
        core.quit()
    else:
        event.clearEvents()

    #start the timer
    scanTimer = core.Clock()
    startTime = scanTimer.getTime()

    #draw the fixation point
    #    wedge1.draw()
    fix0.draw()
    fix1.draw()
    fix2.draw()
    winSub.flip()
    # and drift it
    timeNow = scanTimer.getTime()
    #row=1
    msg = visual.TextStim(operatorWindow,
                          units='pix',
                          text='t = %.3f' % timeNow,
                          pos=(0.0, 325.0),
                          height=30)
    msg.draw()
    loopCounter = 0
    restLoopCounter = 0
    TrCounter = 0
    if timeBase == 0:
        ttp = TrCounter + 1
        msgTr = visual.TextStim(operatorWindow,
                                units='pix',
                                pos=(0.0, 275.0),
                                text='Tr = %i' % ttp,
                                height=30)
        msgTr.draw()
#    msgPC = visual.TextStim(operatorWindow,units='pix',text = 'percent correct',pos=(0.0,0.0),height=30)
#    msgPC.draw()
#    msgTC = visual.TextStim(operatorWindow,units='pix',text = 'time since correct',pos=(0.0,-75.0),height=30)
#    msgTC.draw()
    plotLabel1.draw()
    plotLabel2.draw()
    plotLabel3.draw()
    plotLabel4.draw()

    fixTimer = core.Clock()
    respTimer = core.Clock()
    flickerTimer = core.Clock()

    fixOri = 0
    numCoins = 0
    event.clearEvents()
    respTimeCheck = respTimer.getTime()  #define before use in the key loop below
    for key in event.getKeys():
        if key in ['q', 'escape']:
            core.quit()
        elif key in ['r', 'g', 'b', 'y', '1', '2', '3', '4'
                     ] and respTimeCheck < respDuration:
            subjectResponse[numCoins] = 1
    if timeBase == 1:
        #time based loop advancement
        respCounter = 0
        #display rest for pre-scan duration
        while timeNow < scanDict['preScanRest']:
            timeNow = scanTimer.getTime()
            #draw fixation
            #every 100 frames, decide if the fixation point should change or not
            if restLoopCounter % 100 == 0 and restLoopCounter > 10:
                #flip a coin to decide
                flipCoin = numpy.random.ranf()
                if flipCoin < fixPercentage:
                    #reset timers/change ori
                    fixOri = 45
                    fixTimer.reset()
                    respTimer.reset()
                    numCoins += 1
                    subjectResponse[numCoins] = 0
                #store info--expected response or not?
                respCounter += 1
                subRespArray[respCounter, 0] = timeNow
                subRespArray[respCounter, 1] = flipCoin < fixPercentage
            fixTimeCheck = fixTimer.getTime()
            respTimeCheck = respTimer.getTime()
            if fixTimeCheck > fixDuration:  #timer expired--reset ori
                fixOri = 0

            fix1.setOri(fixOri)
            fix2.setOri(fixOri)
            fix0.draw()
            fix1.draw()
            fix2.draw()
            msg.setText('t = %.3f' % timeNow)
            msg.draw()
            winSub.flip()
            operatorWindow.flip()
            for key in event.getKeys():
                if key in ['q', 'escape']:
                    core.quit()
                elif key in ['r', 'g', 'b', 'y', '1', '2', '3', '4'
                             ] and respTimeCheck < respDuration:
                    subjectResponse[numCoins] = 1
                    plotResp[numCoins] = 1
                    subRespArray[respCounter, 2] = 1
#                elif key in ['t']:
#increment loop count for each trigger
#update the operator graph
#determine response correctness and append to plot vertices variable
            plotResp[respCounter, 0] = respCounter
            if subRespArray[respCounter, 1] == 1 and subRespArray[respCounter,
                                                                  2] == 1:
                #exp resp and got resp--correct and awake
                plotResp[respCounter, 1] = 500
                plotColors[respCounter, 0] = -1
                plotColors[respCounter, 1] = 1
                plotColors[respCounter, 2] = -1
#                 plotResp=numpy.append(plotResp,[[respCounter,500]],0)
#opPlotRight.setLineColor([-1,1,-1])
            elif subRespArray[respCounter,
                              1] == 1 and subRespArray[respCounter, 2] == 0:
                #exp response, got NONE--wrong!
                plotResp[respCounter, 1] = 100
                plotColors[respCounter, 0] = 1
                plotColors[respCounter, 1] = -1
                plotColors[respCounter, 2] = -1
#                 plotRespWrong=numpy.append(plotRespWrong,[[respCounter,200]],0)
#opPlotRight.setLineColor([1,-1,-1])
            elif subRespArray[respCounter,
                              1] == 0 and subRespArray[respCounter, 2] == 1:
                #exp NONE, got response--wrong, but awake at least
                plotResp[respCounter, 1] = 150
                plotColors[respCounter, 0] = 1
                plotColors[respCounter, 1] = -1
                plotColors[respCounter, 2] = -1
#                plotRespWrong=numpy.append(plotRespWrong,[[respCounter,250]],0)
#opPlotRight.setLineColor([1,-1,-1])
            elif subRespArray[respCounter,
                              1] == 0 and subRespArray[respCounter, 2] == 0:
                #exp none, got NONE--correct, but uninformative
                plotResp[respCounter, 1] = 400
                plotColors[respCounter, 0] = -1
                plotColors[respCounter, 1] = -1
                plotColors[respCounter, 2] = 1
#                plotResp=numpy.append(plotResp,[[respCounter,450]],0)
#opPlotRight.setLineColor([-1,1,-1])
#update the vertices
            plotLabel1.draw()
            plotLabel2.draw()
            plotLabel3.draw()
            plotLabel4.draw()
            opPlotGrid.draw()
            #plot only the last 10 responses--clamp the start index for small n
            plotStart = respCounter - 10
            if plotStart < 0:
                plotStart = 0
            opPlot.setXYs(plotResp[plotStart:respCounter])
            opPlot.setColors(plotColors)
            opPlot.draw()
            restLoopCounter += 1
#            if restLoopCounter%300 and restLoopCounter>5:
#                plt.plot(plotResp[0:numCoins+1],'ro')
#                plt.draw

#pre-scan rest is done.
#prepare for looping through the cycles
        epochTimer = core.Clock()

        #time based looping through stimulus
        while timeNow < startTime + scanLength:  #loop for scan duration
            timeBefore = timeNow
            timeNow = scanTimer.getTime()
            deltaT = timeNow - startTime
            deltaTinc = timeNow - timeBefore

            #every 100 frames, decide if the fixation point should change or not
            if loopCounter % 100 == 0 and loopCounter > 10:
                #flip a coin to decide
                flipCoin = numpy.random.ranf()
                if flipCoin < fixPercentage:
                    #reset timers/change ori
                    fixOri = 45
                    fixTimer.reset()
                    respTimer.reset()
                    numCoins += 1
                    subjectResponse[numCoins] = 0
                    plotResp[numCoins] = 1
                #store info--expected response or not?
                respCounter += 1
                subRespArray[respCounter, 0] = timeNow
                subRespArray[respCounter, 1] = flipCoin < fixPercentage
            fixTimeCheck = fixTimer.getTime()
            respTimeCheck = respTimer.getTime()
            if fixTimeCheck > fixDuration:  #timer expired--reset ori
                fixOri = 0

            fix1.setOri(fixOri)
            fix2.setOri(fixOri)

            # alternate between stimulus and rest, starting with pre-scan duration of rest
            epochTime = epochTimer.getTime()
            #half-period epoch of stimulus; drift the radial phase with the scan clock
            radialPhase = timeNow
            oriAngle = timeNow / 360.0
            if epochTime < scanDict['period'] / 2.0:
                #alternate wedge 1&2 at flicker rate
                flickerTimeCheck = flickerTimer.getTime()
                if flickerTimeCheck < 1 / (2.0 * ReverseFreq):
                    #first half of a period, show wedge 1
                    #image1.draw()
                    stimA.setPhase(radialPhase)
                    stimA.draw()
                elif flickerTimeCheck < 1 / ReverseFreq:
                    #second half of period, show wedge 2
                    #                image2.draw()
                    stimB.setPhase(radialPhase)
                    stimB.draw()
                else:
                    #clocked over, reset timer
                    #could also do some modulus of timing
                    flickerTimer.reset()
                fix0.draw()
                fix1.draw()
                fix2.draw()
            elif epochTime < scanDict['period']:
                #half-period epoch of rest
                fix0.draw()
                fix1.draw()
                fix2.draw()
            else:
                epochTimer.reset()

            msg.setText('t = %.3f' % timeNow)
            msg.draw()
            operatorWindow.flip()
            winSub.flip()
            #row+=1
            #core.wait(3.0/60.0)

            #count number of keypresses since previous frame, break if non-zero
            for key in event.getKeys():
                if key in ['q', 'escape']:
                    core.quit()
                elif key in ['r', 'g', 'b', 'y', '1', '2', '3', '4'
                             ] and respTimeCheck < respDuration:
                    subjectResponse[numCoins] = 1
                    subRespArray[respCounter, 2] = 1
#            if loopCounter%300 and loopCounter>5:
#                plt.plot(plotResp[0:numCoins+1],'ro')
#                plt.draw

#update the operator graph
#determine response correctness and append to plot vertices variable
            plotResp[respCounter, 0] = respCounter
            #print subRespArray[respCounter,1:2]
            if subRespArray[respCounter, 1] == 1 and subRespArray[respCounter,
                                                                  2] == 1:
                #exp resp and got resp--correct and awake
                plotResp[respCounter, 1] = 500
                #print('exp resp, got resp')
                #print plotResp[respCounter,1]
                plotColors[respCounter, 0] = -1
                plotColors[respCounter, 1] = 1
                plotColors[respCounter, 2] = -1
#                 plotResp=numpy.append(plotResp,[[respCounter,500]],0)
#opPlotRight.setLineColor([-1,1,-1])
            elif subRespArray[respCounter,
                              1] == 1 and subRespArray[respCounter, 2] == 0:
                #exp response, got NONE--wrong!
                plotResp[respCounter, 1] = 100
                #print('exp resp, got none')
                #print plotResp[respCounter,1]
                plotColors[respCounter, 0] = 1
                plotColors[respCounter, 1] = -1
                plotColors[respCounter, 2] = -1
#                 plotRespWrong=numpy.append(plotRespWrong,[[respCounter,200]],0)
#opPlotRight.setLineColor([1,-1,-1])
            elif subRespArray[respCounter,
                              1] == 0 and subRespArray[respCounter, 2] == 1:
                #exp NONE, got response--wrong, but awake at least
                plotResp[respCounter, 1] = 150
                #print('exp none, got one')
                #print plotResp[respCounter,1]
                plotColors[respCounter, 0] = 1
                plotColors[respCounter, 1] = -1
                plotColors[respCounter, 2] = -1
#                plotRespWrong=numpy.append(plotRespWrong,[[respCounter,250]],0)
#opPlotRight.setLineColor([1,-1,-1])
            elif subRespArray[respCounter,
                              1] == 0 and subRespArray[respCounter, 2] == 0:
                #exp none, got NONE--correct, but uninformative
                plotResp[respCounter, 1] = 400
                #print('exp none, got none')
                #print plotResp[respCounter,1]
                plotColors[respCounter, 0] = -1
                plotColors[respCounter, 1] = -1
                plotColors[respCounter, 2] = 1
#                plotResp=numpy.append(plotResp,[[respCounter,450]],0)
#opPlotRight.setLineColor([-1,1,-1])
#update the vertices
            plotLabel1.draw()
            plotLabel2.draw()
            plotLabel3.draw()
            plotLabel4.draw()
            opPlotGrid.draw()
            opPlot.setXYs(plotResp)
            opPlot.setColors(plotColors)
            opPlot.draw()
            loopCounter += 1

    else:  #trigger based
        #loop through, presenting stim or rest according to designMatrix
        TrCounter = 0
        loopCounter = 0
        respCounter = 0
        numFlips = 0
        #WORKING HERE
        #trigger based loop advancement
        #wait for trigger until N triggers found
        while TrCounter < numTr:
            #update times (timeBefore keeps the previous frame's timestamp)
            timeBefore = timeNow
            timeNow = scanTimer.getTime()
            deltaT = timeNow - startTime
            deltaTinc = timeNow - timeBefore

            #organize fixation point orientation
            #every 100 frames, decide if the fixation point should change or not
            debugVar[loopCounter, 0] = loopCounter
            if loopCounter % 50 == 0 and loopCounter > 10:
                #flip a coin to decide
                flipCoin = numpy.random.ranf()
                numFlips += 1
                if flipCoin < fixPercentage:
                    #reset timers/change ori
                    fixOri = 45
                    fixTimer.reset()
                    respTimer.reset()
                    numCoins += 1
                    subjectResponse[numCoins] = 0
                #store info--expected response or not?
                respCounter += 1
                subRespArray[respCounter, 0] = timeNow
                subRespArray[respCounter, 1] = flipCoin < fixPercentage
            fixTimeCheck = fixTimer.getTime()
            respTimeCheck = respTimer.getTime()
            if fixTimeCheck > fixDuration:  #timer expired--reset ori
                fixOri = 0

            debugVar[loopCounter, 1] = fixOri
            fix1.setOri(fixOri)
            fix2.setOri(fixOri)

            #draw stim or rest, based on designMatrix
            # alternate between stimulus and rest, starting with pre-scan duration of rest
            if designMatrix[TrCounter] == 1:
                #alternate wedge 1&2 at flicker rate
                flickerTimeCheck = flickerTimer.getTime()
                if flickerTimeCheck < 1 / (2.0 * ReverseFreq):
                    #first half of a period, show wedge 1
                    #image1.draw()
                    stimA.draw()
                elif flickerTimeCheck < 1 / ReverseFreq:
                    #second half of period, show wedge 2
                    #                image2.draw()
                    stimB.draw()
                else:
                    #clocked over, reset timer
                    #could also do some modulus of timing
                    flickerTimer.reset()
                fix0.draw()
                fix1.draw()
                fix2.draw()
            else:
                #rest
                fix0.draw()
                fix1.draw()
                fix2.draw()

            #count number of keypresses since previous frame,
            TrDone = 0
            for key in event.getKeys():
                if key in ['q', 'escape']:
                    core.quit()
                elif key in ['r', 'g', 'b', 'y', '1', '2', '3', '4'
                             ] and respTimeCheck < respDuration:
                    subjectResponse[numCoins] = 1
                    subRespArray[respCounter, 2] = 1
                elif key in ['t'] and TrDone == 0:
                    #increment loop count for each trigger
                    TrCounter += 1
                    TrDone = 1
            #update the operator graph
            #determine response correctness and append to plot vertices variable
            plotResp[respCounter, 0] = respCounter
            if subRespArray[respCounter, 1] == 1 and subRespArray[respCounter,
                                                                  2] == 1:
                #exp resp and got resp--correct and awake
                plotResp[respCounter, 1] = 500
                plotColors[respCounter, 0] = -1
                plotColors[respCounter, 1] = 1
                plotColors[respCounter, 2] = -1
#                 plotResp=numpy.append(plotResp,[[respCounter,500]],0)
#opPlotRight.setLineColor([-1,1,-1])
            elif subRespArray[respCounter,
                              1] == 1 and subRespArray[respCounter, 2] == 0:
                #exp response, got NONE--wrong!
                plotResp[respCounter, 1] = 100
                plotColors[respCounter, 0] = 1
                plotColors[respCounter, 1] = -1
                plotColors[respCounter, 2] = -1
#                 plotRespWrong=numpy.append(plotRespWrong,[[respCounter,200]],0)
#opPlotRight.setLineColor([1,-1,-1])
            elif subRespArray[respCounter,
                              1] == 0 and subRespArray[respCounter, 2] == 1:
                #exp NONE, got response--wrong, but awake at least
                plotResp[respCounter, 1] = 150
                plotColors[respCounter, 0] = 1
                plotColors[respCounter, 1] = -1
                plotColors[respCounter, 2] = -1
#                plotRespWrong=numpy.append(plotRespWrong,[[respCounter,250]],0)
#opPlotRight.setLineColor([1,-1,-1])
            elif subRespArray[respCounter,
                              1] == 0 and subRespArray[respCounter, 2] == 0:
                #exp none, got NONE--correct, but uninformative
                plotResp[respCounter, 1] = 400
                plotColors[respCounter, 0] = -1
                plotColors[respCounter, 1] = -1
                plotColors[respCounter, 2] = 1
#                plotResp=numpy.append(plotResp,[[respCounter,450]],0)
#opPlotRight.setLineColor([-1,1,-1])
#update the vertices
            plotLabel1.draw()
            plotLabel2.draw()
            plotLabel3.draw()
            plotLabel4.draw()
            opPlotGrid.draw()
            opPlot.setXYs(plotResp)
            opPlot.setColors(plotColors)
            opPlot.draw()
            #            opPlotWrong.setVertices(plotRespWrong)
            #            opPlotWrong.draw()
            #update the operator on the last 10 responses
            #            if numCoins>9:
            #                findResp = subjectResponse[~numpy.isnan(subjectResponse)]
            #                calcResp=findResp[findResp==1]
            #                numCorrect=float(calcResp.shape[0])
            #                percentCorrect=float(numCorrect)/(float(findResp.shape[0]))
            #                timeLastCor=subRespArray[-1,0]
            #                timeSinceCor=timeNow-timeLastCor
            #                msgTextPC='Last 10 fixation tasks, percent correct: %.1f' %(percentCorrect)
            ##                msgTextTC='time since last correct response: %f' %(timeSinceCor)
            #                msgPC.setText(msgTextPC)
            ##                msgTC.setText(msgTextTC)
            #                msgPC.draw()
            ##                msgTC.draw()
            msg.setText('t = %.3f' % timeNow)
            ttp = TrCounter + 1
            msgTr.setText('Tr = %i' % ttp)
            msg.draw()
            msgTr.draw()
            operatorWindow.flip()
            winSub.flip()
            #row+=1
            #core.wait(3.0/60.0)

            #update plot once per 3Tr
            #            if TrCounter%3==0 and TrCounter>1:
            #                plt.plot(plotResp[0:numFlips+1],'ro')
            #                plt.draw
            #plt.draw()
            #            if numCoins>2:
            #                #calculate correct percentage
            #                findResp=subjectResponse[~numpy.isnan(subjectResponse)]
            #                calcResp=findResp[findResp==1]
            #                numCorrect=float(calcResp.shape[0])
            #                percentCorrect=float(numCorrect)/(float(numCoins))
            #                timeLastCor=subRespArray[len(calcResp),0]
            #                timeSinceCor=timeNow-timeLastCor
            #                msgText='Subject responses: %f correct' %(percentCorrect,)
            #                msgText2='Time since last correct response: %f s' %(timeSinceCor,)
            #                msg4.setText(msgText)
            #                msg4.draw()
            #                msg5.setText(msgText2)
            #                msg5.draw()
            #                msg.draw()
            #                msgTr.draw()
            #                operatorWindow.flip()
            #                print msgText
            #                print msgText2

            loopCounter += 1

    #core.wait(5.0)
    #outFile = open("debug.txt","w")
    #outFile.write(str(debugVar))
    #outFile.close()
    #numpy.savetxt('debug.txt',debugVar,fmt='%.3f')
    #numpy.savetxt('debug.txt',designMatrix,fmt='%.3i')
    #numpy.savetxt('debugchop.txt',debugVar[:row,],fmt='%.3f')

    #calculate %age of responses that were correct
    #find non-nan
    #np.isnan(a) gives a boolean array (True where a is NaN)
    #np.isnan(a).any(1) gives a col vector of the rows with nans
    #~np.isnan(a).any(1) inverts the logic
    #myarray[~np.isnan(a).any(1)] gives the subset that I want
    findResp = subjectResponse[~numpy.isnan(subjectResponse)]
    calcResp = findResp[findResp == 1]
    numCorrect = float(calcResp.shape[0])
    if numCoins > 0:
        percentCorrect = 100.0 * float(numCorrect) / (float(numCoins))
    else:
        percentCorrect = 100.0

    msgText = 'You got %.0f %% correct!' % (percentCorrect, )
    msg1 = visual.TextStim(winSub, pos=[0, +3], text=msgText)
    msg1.draw()
    winSub.flip()

    #create an output file in a subdirectory
    #check for the subdirectory
    if not os.path.isdir('subjectResponseFiles'):
        #create directory
        os.makedirs('subjectResponseFiles')
    nowTime = datetime.datetime.now()
    outFile = 'isolumResponse%04d%02d%02d_%02d%02d.txt' % (
        nowTime.year, nowTime.month, nowTime.day, nowTime.hour, nowTime.minute)
    outFilePath = os.path.join('subjectResponseFiles', outFile)
    numpy.savetxt(outFilePath, findResp, fmt='%.0f')
    core.wait(2)
    winSub.close()
    operatorWindow.close()
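
#A minimal invocation sketch: the dictionary below lists only the keys that
#isolumCheckerScan actually reads, with made-up values for illustration.
scanDict = {
    'numCycles': 8,                 #stimulus cycles after the pre-scan rest
    'period': 30.0,                 #seconds per cycle (half stim, half rest)
    'preScanRest': 10.0,            #seconds of rest before the first cycle
    'Tr': 2.0,                      #repetition time in seconds
    'innerRadius': 0.25,            #fixation disk radius (deg)
    'outerRadius': 12.0,            #checkerboard extent (deg)
    'colorA': '1,0,0',              #comma-separated RGB components in 0..1
    'colorB': '0,0,0',
    'colorBackground': '0.5,0.5,0.5',
    'animFreq': 8.0,                #pattern reversal rate in Hz
    'timeBase': 1,                  #1 = clock-based timing, 0 = trigger-based
    'fixFraction': 0.25,            #probability the fixation + becomes an X
    'subjectScreen': 1,
    'operatorScreen': 0,
    'monCalFile': 'testMonitor',    #PsychoPy monitor calibration name
}
isolumCheckerScan(scanDict, screenSize=[1024, 768])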
Ejemplo n.º 39
0
def curve_fit_2(f,
                strg,
                xdata,
                ydata,
                p0=None,
                sigma=None,
                absolute_sigma=False,
                check_finite=True,
                bounds=(-np.inf, np.inf),
                method=None,
                **kwargs):
    """
    Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``

    Parameters
    ----------
    f : callable
        The model function, f(x, ...).  It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : An M-length sequence or a (k,M)-shaped array
        for functions with k predictors.
        The independent variable where the data is measured.
    ydata : M-length sequence
        The dependent data --- nominally f(xdata, ...)
    p0 : None, scalar, or N-length sequence, optional
        Initial guess for the parameters.  If None, then the initial
        values will all be 1 (if the number of parameters for the function
        can be determined using introspection, otherwise a ValueError
        is raised).
    sigma : None or M-length sequence, optional
        If not None, the uncertainties in the ydata array. These are used as
        weights in the least-squares problem
        i.e. minimising ``np.sum( ((f(xdata, *popt) - ydata) / sigma)**2 )``
        If None, the uncertainties are assumed to be 1.
    absolute_sigma : bool, optional
        If False, `sigma` denotes relative weights of the data points.
        The returned covariance matrix `pcov` is based on *estimated*
        errors in the data, and is not affected by the overall
        magnitude of the values in `sigma`. Only the relative
        magnitudes of the `sigma` values matter.

        If True, `sigma` describes one standard deviation errors of
        the input data points. The estimated covariance in `pcov` is
        based on these values.
    check_finite : bool, optional
        If True, check that the input arrays do not contain nans or infs,
        and raise a ValueError if they do. Setting this parameter to
        False may silently produce nonsensical results if the input arrays
        do contain nans. Default is True.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each element of the tuple must be either an array with the length equal
        to the number of parameters, or a scalar (in which case the bound is
        taken to be the same for all parameters.) Use ``np.inf`` with an
        appropriate sign to disable bounds on all or some parameters.

        .. versionadded:: 0.17
    method : {'lm', 'trf', 'dogbox'}, optional
        Method to use for optimization.  See `least_squares` for more details.
        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
        provided. The method 'lm' won't work when the number of observations
        is less than the number of variables, use 'trf' or 'dogbox' in this
        case.

        .. versionadded:: 0.17
    kwargs
        Keyword arguments passed to `leastsq` for ``method='lm'`` or
        `least_squares` otherwise.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared error
        of ``f(xdata, *popt) - ydata`` is minimized
    pcov : 2d array
        The estimated covariance of popt. The diagonals provide the variance
        of the parameter estimate. To compute one standard deviation errors
        on the parameters use ``perr = np.sqrt(np.diag(pcov))``.

        How the `sigma` parameter affects the estimated covariance
        depends on `absolute_sigma` argument, as described above.

        If the Jacobian matrix at the solution does not have full rank, the
        'lm' method returns a matrix filled with ``np.inf``, whereas the
        'trf' and 'dogbox' methods use the Moore-Penrose pseudoinverse to
        compute the covariance matrix.

    Raises
    ------
    OptimizeWarning
        if covariance of the parameters can not be estimated.

    ValueError
        if either `ydata` or `xdata` contain NaNs.

    See Also
    --------
    least_squares : Minimize the sum of squares of nonlinear functions.
    stats.linregress : Calculate a linear least squares regression for two sets
                       of measurements.

    Notes
    -----
    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
    through `leastsq`. Note that this algorithm can only deal with
    unconstrained problems.

    Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
    the docstring of `least_squares` for more information.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import curve_fit
    >>> def func(x, a, b, c):
    ...     return a * np.exp(-b * x) + c

    >>> xdata = np.linspace(0, 4, 50)
    >>> y = func(xdata, 2.5, 1.3, 0.5)
    >>> ydata = y + 0.2 * np.random.normal(size=len(xdata))

    >>> popt, pcov = curve_fit(func, xdata, ydata)

    Constrain the optimization to the region of ``0 < a < 3``, ``0 < b < 2``
    and ``0 < c < 1``:

    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 2., 1.]))

    """
    if p0 is None:
        # determine number of parameters by inspecting the function
        from scipy._lib._util import getargspec_no_self as _getargspec
        args, varargs, varkw, defaults = _getargspec(f)
        if len(args) < 2:
            raise ValueError("Unable to determine number of fit parameters.")
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size

    lb, ub = minpack.prepare_bounds(bounds, n)
    if p0 is None:
        p0 = minpack._initialize_feasible(lb, ub)

    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'

    if method == 'lm' and bounded_problem:
        raise ValueError("Method 'lm' only works for unconstrained problems. "
                         "Use 'trf' or 'dogbox' instead.")

    # NaNs can not be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata)
    else:
        ydata = np.asarray(ydata)

    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata)
        else:
            xdata = np.asarray(xdata)

    args = (xdata, ydata, f, strg)
    if sigma is None:
        func = _general_function
    else:
        func = minpack._weighted_general_function
        args += (1.0 / np.asarray(sigma), )

    if method == 'lm':
        # Remove full_output from kwargs, otherwise we're passing it in twice.
        return_full = kwargs.pop('full_output', False)
        res = leastsq(func, p0, args=args, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        cost = np.sum(infodict['fvec']**2)
        if ier not in [1, 2, 3, 4]:
            raise RuntimeError("Optimal parameters not found: " + errmsg)
    else:
        res = False
        try:
            res = least_squares(func,
                                p0,
                                args=args,
                                bounds=bounds,
                                method=method,
                                **kwargs)
        except ValueError:
            res = False
        if res is False:
            return [0], [0], res, 0
        if not res.success:
            #nfev_m=(n+1)+((n*n)/2)+res.nfev
            return res.x, [0], True, res.nfev
            #raise RuntimeError("Optimal parameters not found: " + res.message)

        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x

        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = minpack.svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)
        return_full = False

    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = np.zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(np.inf)
        warn_cov = True
    elif not absolute_sigma:
        if ydata.size > p0.size:
            s_sq = cost / (ydata.size - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(np.inf)
            warn_cov = True

    if warn_cov:
        minpack.warnings.warn(
            'Covariance of the parameters could not be estimated',
            category=minpack.OptimizeWarning)

    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        #nfev_m = (n + 1) + ((n * n) / 2) + res.nfev
        return popt, pcov, res.success, res.nfev
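
#A usage sketch for curve_fit_2 (hypothetical model and data; assumes numpy as
#np and the module-level _general_function helper that curve_fit_2 relies on).
#With bounds given, method 'trf' is selected and the extended four-value
#signature (popt, pcov, success, nfev) is returned:
def decay_model(x, a, b):
    return a * np.exp(-b * x)

xdata = np.linspace(0, 4, 50)
ydata = decay_model(xdata, 2.5, 1.3) + 0.05 * np.random.normal(size=xdata.size)
popt, pcov, success, nfev = curve_fit_2(decay_model, 'decay', xdata, ydata,
                                        p0=[1.0, 1.0], bounds=(0, [5., 3.]))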
Ejemplo n.º 40
0
def estimate_X(counts, init_X, alpha, lengths, bias=None, constraints=None,
               multiscale_factor=1, multiscale_variances=None,
               max_iter=10000000000, factr=10000000., pgtol=1e-05,
               callback=None, alpha_loop=None, reorienter=None,
               mixture_coefs=None, verbose=True):
    """Estimates a 3D structure, given current alpha.

    Infer 3D structure from Hi-C contact counts data for haploid or diploid
    organisms at a given resolution.

    Parameters
    ----------
    counts : list of CountsMatrix subclass instances
        Preprocessed counts data.
    init_X : array_like of float
        Initialization for inference.
    alpha : float, optional
        Biophysical parameter of the transfer function used in converting
        counts to wish distances. If alpha is not specified, it will be
        inferred.
    lengths : array_like of int
        Number of beads per homolog of each chromosome.
    bias : array_like of float, optional
        Biases computed by ICE normalization.
    constraints : Constraints instance, optional
        Object to compute constraints at each iteration.
    multiscale_factor : int, optional
        Factor by which to reduce the resolution. A value of 2 halves the
        resolution. A value of 1 indicates full resolution.
    multiscale_variances : float or array_like of float, optional
        For multiscale optimization at low resolution, the variances of each
        group of full-resolution beads corresponding to a single low-resolution
        bead.
    max_iter : int, optional
        Maximum number of iterations per optimization.
    factr : float, optional
        factr for scipy's L-BFGS-B, alters convergence criteria.
    pgtol : float, optional
        pgtol for scipy's L-BFGS-B, alters convergence criteria.
    callback : pastis.callbacks.Callback object, optional
        Object to perform callback at each iteration and before and after
        optimization.
    alpha_loop : int, optional
        Current iteration of alpha/structure optimization.

    Returns
    -------
    X : array_like of float
        Output of the optimization (typically a 3D structure).
    obj : float
        Final objective value.
    converged : bool
        Whether the optimization successfully converged.
    callback.history : list of dict
        History generated by the callback, containing information about the
        objective function during optimization.
    """

    # Check format of input
    counts = (counts if isinstance(counts, list) else [counts])
    lengths = np.array(lengths)
    if bias is None:
        bias = np.ones((min([min(counts_maps.shape)
                             for counts_maps in counts]),))
    bias = np.array(bias)

    if verbose:
        print('=' * 30, flush=True)
        print("\nRUNNING THE L-BFGS-B CODE\n\n           * * *\n\nMachine"
              " precision = %.4g\n" % np.finfo(np.float).eps, flush=True)

    if callback is not None:
        if reorienter is not None and reorienter.reorient:
            opt_type = 'chrom_reorient'
        else:
            opt_type = 'structure'
        callback.on_training_begin(opt_type=opt_type, alpha_loop=alpha_loop)
        objective_wrapper(
            init_X.flatten(), counts=counts, alpha=alpha, lengths=lengths,
            bias=bias, constraints=constraints, reorienter=reorienter,
            multiscale_factor=multiscale_factor,
            multiscale_variances=multiscale_variances,
            mixture_coefs=mixture_coefs, callback=callback)

    results = optimize.fmin_l_bfgs_b(
        objective_wrapper,
        x0=init_X.flatten(),
        fprime=fprime_wrapper,
        iprint=0,
        maxiter=max_iter,
        pgtol=pgtol,
        factr=factr,
        args=(counts, alpha, lengths, bias, constraints,
              reorienter, multiscale_factor, multiscale_variances,
              mixture_coefs, callback))

    if callback is not None:
        callback.on_training_end()

    X, obj, d = results
    converged = d['warnflag'] == 0

    if verbose:
        if converged:
            print('CONVERGED\n\n', flush=True)
        else:
            print('OPTIMIZATION DID NOT CONVERGE', flush=True)
            print(d['task'].decode('utf8') + '\n\n', flush=True)

    history = callback.history if callback is not None else None
    return X, obj, converged, history
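
#The convergence check above follows scipy's L-BFGS-B convention: the third
#return value is an info dict whose 'warnflag' is 0 on success. A standalone
#sketch of the same pattern on a toy quadratic:
from scipy import optimize

x_opt, obj_val, info = optimize.fmin_l_bfgs_b(
    lambda x: np.sum((x - 1.0) ** 2),    #objective
    x0=np.zeros(3),
    fprime=lambda x: 2.0 * (x - 1.0),    #analytic gradient
    pgtol=1e-05, factr=10000000.)
print('converged:', info['warnflag'] == 0, 'objective:', obj_val)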
Ejemplo n.º 41
0
import numpy as np 
import pandas as pd 
eps = np.finfo(float).eps
inf = np.finfo(float).max
import sys, getopt
import os
import argparse
import csv
import pprint
import collections
import random
from scipy.optimize import minimize
from random import randrange

class NN:
	def __init__(self, learning_rate, learning_rate_tweak, width, num_epoch, input_size, depth = 2, output_size = 1):

		self.r, self.d, self.w, self.epoch, self.input_size = learning_rate, learning_rate_tweak, width, num_epoch, input_size
		self.depth = depth
		self.output_size = output_size

		
		self.network = list()
		self.network.append({'weights':np.random.randn(self.input_size, self.w), 'delta':np.zeros(self.w), 'output':np.zeros((self.input_size, self.w))})
		self.network.append({'weights':np.random.randn(self.w + 1, self.w), 'delta':np.zeros(self.w), 'output':np.zeros((self.w + 1, self.w))})
		self.network.append({'weights':np.random.randn(self.w + 1, self.output_size), 'delta':np.zeros(self.w), 'output':np.zeros((self.w + 1, self.output_size))})
		

		self.cache = list()
		# self.network = list()
		# self.network.append({'weights':np.random.randn(self.input_size, self.w), 'delta':np.zeros(self.w), 'output':np.zeros((self.input_size, self.w))})
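
	#The example is truncated at this point, so the forward pass below is only
	#a sketch consistent with the weight shapes above: each hidden activation
	#vector gets a bias unit appended, matching the (w + 1, ...) matrices.
	def _sigmoid(self, z):
		return 1.0 / (1.0 + np.exp(-z))

	def forward(self, x):
		#x: (input_size,) input vector
		a = self._sigmoid(np.dot(x, self.network[0]['weights']))  #-> (w,)
		a = np.append(a, 1.0)                                     #bias unit
		a = self._sigmoid(np.dot(a, self.network[1]['weights']))  #-> (w,)
		a = np.append(a, 1.0)
		return np.dot(a, self.network[2]['weights'])              #-> (output_size,)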
Ejemplo n.º 42
0
def leastsq(func,
            x0,
            args=(),
            Dfun=None,
            full_output=0,
            col_deriv=0,
            ftol=1.49012e-8,
            xtol=1.49012e-8,
            gtol=0.0,
            maxfev=0,
            epsfcn=None,
            factor=100,
            diag=None):
    """
    Minimize the sum of squares of a set of equations.

    ::

        x = arg min(sum(func(y)**2,axis=0))
                 y

    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        returns M floating point numbers. It must not return NaNs or
        fitting might fail.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple, optional
        Any extra arguments to func are placed in this tuple.
    Dfun : callable, optional
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool, optional
        non-zero to return all optional outputs.
    col_deriv : bool, optional
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float, optional
        Relative error desired in the sum of squares.
    xtol : float, optional
        Relative error desired in the approximate solution.
    gtol : float, optional
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int, optional
        The maximum number of calls to the function. If `Dfun` is provided
        then the default `maxfev` is 100*(N+1) where N is the number of elements
        in x0, otherwise the default `maxfev` is 200*(N+1).
    epsfcn : float, optional
        A variable used in determining a suitable step length for the forward-
        difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be sqrt(epsfcn)*x
        If epsfcn is less than the machine precision, it is assumed that the
        relative errors are of the order of the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as scale factors for the variables.

    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution. None if a
        singular matrix encountered (indicates very flat curvature in
        some direction).  This matrix must be multiplied by the
        residual variance to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys:

        ``nfev``
            The number of function calls
        ``fvec``
            The function evaluated at the output
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
        ``qtf``
            The vector (transpose(q) * fvec).

    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag.  If it is equal to 1, 2, 3 or 4, the solution was
        found.  Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.

    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.

    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::

           func(params) = ydata - f(xdata, params)

    so that the objective function is ::

           min   sum((ydata - f(xdata, params))**2, axis=0)
         params

    """
    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args, )
    shape, dtype = minpack._check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]
    # if n > m:
    #     raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps
    if Dfun is None:
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = minpack._minpack._lmdif(func, x0, args, full_output, ftol,
                                         xtol, gtol, maxfev, epsfcn, factor,
                                         diag)
    else:
        if col_deriv:
            minpack._check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            minpack._check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        retval = minpack._minpack._lmder(func, Dfun, x0, args, full_output,
                                         col_deriv, ftol, xtol, gtol, maxfev,
                                         factor, diag)

    errors = {
        0: ["Improper input parameters.", TypeError],
        1: [
            "Both actual and predicted relative reductions "
            "in the sum of squares\n  are at most %f" % ftol, None
        ],
        2: [
            "The relative error between two consecutive "
            "iterates is at most %f" % xtol, None
        ],
        3: [
            "Both actual and predicted relative reductions in "
            "the sum of squares\n  are at most %f and the "
            "relative error between two consecutive "
            "iterates is at \n  most %f" % (ftol, xtol), None
        ],
        4: [
            "The cosine of the angle between func(x) and any "
            "column of the\n  Jacobian is at most %f in "
            "absolute value" % gtol, None
        ],
        5: [
            "Number of calls to function has reached "
            "maxfev = %d." % maxfev, ValueError
        ],
        6: [
            "ftol=%f is too small, no further reduction "
            "in the sum of squares\n  is possible."
            "" % ftol, ValueError
        ],
        7: [
            "xtol=%f is too small, no further improvement in "
            "the approximate\n  solution is possible." % xtol, ValueError
        ],
        8: [
            "gtol=%f is too small, func(x) is orthogonal to the "
            "columns of\n  the Jacobian to machine "
            "precision." % gtol, ValueError
        ],
        'unknown': ["Unknown error.", TypeError]
    }

    info = retval[-1]  # The FORTRAN return value

    if info not in [1, 2, 3, 4] and not full_output:
        if info in [5, 6, 7, 8]:
            minpack.warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])

    mesg = errors[info][0]
    if full_output:
        cov_x = None
        if info in [1, 2, 3, 4]:
            from numpy.linalg import inv, LinAlgError
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except (LinAlgError, ValueError):
                pass
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
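
A minimal usage sketch for the wrapper above, assuming it keeps SciPy's
leastsq signature (here the real scipy.optimize.leastsq stands in so the
snippet runs; the data and parameter names are illustrative only):

import numpy as np
from scipy.optimize import leastsq  # stand-in with the same signature

rng = np.random.default_rng(0)
xdata = np.linspace(0.0, 4.0, 50)
ydata = 2.5 * np.exp(-1.3 * xdata) + 0.05 * rng.standard_normal(50)

def residuals(params):
    # func(params) = ydata - f(xdata, params), as in the Notes above
    a, b = params
    return ydata - a * np.exp(b * xdata)

popt, ier = leastsq(residuals, x0=[1.0, -1.0])
assert ier in (1, 2, 3, 4)  # the solution was found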
Ejemplo n.º 43
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
from scipy import fftpack, signal

__author__ = [
    'Konstantinos Drossos -- TUT', 'Stylianos Mimilakis -- Fraunhofer IDMT'
]
__docformat__ = 'reStructuredText'
__all__ = ['stft', 'i_stft', 'ideal_ratio_masking']

_eps = np.finfo(np.float32).tiny


def ideal_ratio_masking(mixture_in, magn_spectr_target, magn_spectr_residual):
    """Computation of Ideal Amplitude Ratio Mask. As appears in :\
    H Erdogan, John R. Hershey, Shinji Watanabe, and Jonathan Le Roux,\
    `Phase-sensitive and recognition-boosted speech separation using deep recurrent neural networks,`\
    in ICASSP 2015, Brisbane, April, 2015.

    :param mixture_in: The input mixture
    :type mixture_in: numpy.core.multiarray.ndarray
    :param magn_spectr_target: Magnitude Spectrogram of the target component
    :type magn_spectr_target: numpy.core.multiarray.ndarray
    :param magn_spectr_residual: Magnitude Spectrogram of the residual component
    :type magn_spectr_residual: numpy.core.multiarray.ndarray
    :return: Time-frequency gain values
    :rtype: numpy.core.multiarray.ndarray
    """
    mask = np.divide(magn_spectr_target,
                     (magn_spectr_target + magn_spectr_residual + _eps))
    # NB: the original snippet is truncated here; the denominator above
    # follows the standard ideal ratio mask (target over target-plus-residual,
    # with _eps guarding against division by zero).
    return mask
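
A small sketch of how the mask would be applied, assuming magnitude
spectrograms of equal shape; the arrays below are synthetic placeholders:

import numpy as np

rng = np.random.default_rng(1)
target = np.abs(rng.standard_normal((257, 100))).astype(np.float32)
residual = np.abs(rng.standard_normal((257, 100))).astype(np.float32)
mixture = target + residual

gain = ideal_ratio_masking(mixture, target, residual)  # values in [0, 1]
estimated_target = gain * mixture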
Ejemplo n.º 44
def integrate_angular_velocity(Omega, t0, t1, R0=None, tolerance=1e-12):
    """Compute frame with given angular velocity

    Parameters
    ----------
    Omega : tuple or callable
        Angular velocity from which to compute frame.  Can be
          1) a 2-tuple of float arrays (t, v) giving the angular velocity vector at a series of times,
          2) a function of time that returns the 3-vector angular velocity, or
          3) a function of time and orientation (t, R) that returns the 3-vector angular velocity
        In case 1, the angular velocity will be interpolated to the required times.  Note that accuracy
        is poor in case 1.
    t0 : float
        Initial time
    t1 : float
        Final time
    R0 : quaternion, optional
        Initial frame orientation.  Defaults to 1 (the identity orientation).
    tolerance : float, optional
        Absolute tolerance used in integration.  Defaults to 1e-12.

    Returns
    -------
    t : float array
    R : quaternion array

    """
    import warnings
    from scipy.integrate import ode
    from scipy.interpolate import CubicSpline

    if R0 is None:
        R0 = quaternion.one

    input_is_tabulated = False

    try:
        t_Omega, v = Omega
        Omega = CubicSpline(t_Omega, v)

        def Omega_func(t, R):
            return Omega(t)

        Omega_func(t0, R0)
        input_is_tabulated = True
    except (TypeError, ValueError):

        def Omega_func(t, R):
            return Omega(t, R)

        try:
            Omega_func(t0, R0)
        except TypeError:

            def Omega_func(t, R):
                return Omega(t)

            Omega_func(t0, R0)

    def RHS(t, y):
        R = quaternion.quaternion(*y)
        return (0.5 * quaternion.quaternion(0.0, *Omega_func(t, R)) *
                R).components

    y0 = R0.components

    if input_is_tabulated:
        from scipy.integrate import solve_ivp
        t = t_Omega
        t_span = [t_Omega[0], t_Omega[-1]]
        solution = solve_ivp(RHS,
                             t_span,
                             y0,
                             t_eval=t_Omega,
                             atol=tolerance,
                             rtol=100 * np.finfo(float).eps)
        R = quaternion.from_float_array(solution.y.T)
    else:
        solver = ode(RHS)
        solver.set_initial_value(y0, t0)
        solver.set_integrator('dop853', nsteps=1, atol=tolerance, rtol=0.0)
        solver._integrator.iwork[2] = -1  # suppress Fortran-printed warning
        t = appending_array((int(t1 - t0), ))
        t.append(solver.t)
        R = appending_array((int(t1 - t0), 4))
        R.append(solver.y)
        warnings.filterwarnings("ignore", category=UserWarning)
        t_last = solver.t
        while solver.t < t1:
            solver.integrate(t1, step=True)
            if solver.t > t_last:
                t.append(solver.t)
                R.append(solver.y)
                t_last = solver.t
        warnings.resetwarnings()
        t = t.a
        R = quaternion.as_quat_array(R.a)

    return t, R
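
A usage sketch, assuming the numpy-quaternion package (importable as
`quaternion`) that this module builds on is available; it integrates a
constant 1 rad/s rotation about the z-axis supplied as tabulated data
(case 1 in the docstring):

import numpy as np
import quaternion  # numpy-quaternion package, assumed available

t_in = np.linspace(0.0, 2.0 * np.pi, 1001)
v_in = np.zeros((t_in.size, 3))
v_in[:, 2] = 1.0  # angular velocity vector (0, 0, 1) at every sample

t, R = integrate_angular_velocity((t_in, v_in), t_in[0], t_in[-1])
print(R[-1])  # after a full period the frame is back at identity, up to sign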
Ejemplo n.º 45
File: salt.py Project: gehly/metis
def lgwt(N,a,b):
    '''
    This function returns the locations and weights of nodes to use for
    Gauss-Legendre Quadrature for numerical integration.
    
    Adapted from MATLAB code by Greg von Winckel
    
    Parameters
    ----------
    N : int
        number of nodes
    a : float
        lower limit of integral
    b : float
        upper limit of integral
    
    Returns
    -------
    x_vect : 1D numpy array
        node locations
    w_vect : 1D numpy array
        node weights
    
    '''
    
    xu = np.linspace(-1, 1, N)
    
    # Initial Guess
    y=np.cos((2*np.arange(0,N)+1)*np.pi/(2*(N-1)+2))+(0.27/N)*np.sin(np.pi*xu*(N-1)/(N+1))
    y=y.reshape(len(y),1)
    
    # Legendre-Gauss Vandermonde Matrix
    L=np.zeros((N,N+1))
    
    # Derivative of LGVM
    
    # Compute the zeros of the N+1 Legendre Polynomial
    # using the recursion relation and the Newton-Raphson method
    y0=2.
    eps = np.finfo(float).eps
    
    # Iterate until new points are uniformly within epsilon of old points
    while max(abs(y-y0)) > eps:
        
        L[:,0] = 1.
        
        L[:,1] = y.flatten()
        
        for k in range(1,N):
            
            L1 = (2*(k+1)-1)*y.flatten()
            L2 = L[:,k].flatten()
            L3 = L[:,k-1].flatten()
            
            L[:,k+1] = (np.multiply(L1, L2) - k*L3)/(k+1)

        y2 = np.multiply(y.flatten(), y.flatten())
        Lp1=(N+1)*( L[:,N-1]- np.multiply(y.flatten(), L[:,N].flatten() ))  
        Lp = np.multiply(Lp1, 1./(1-y2))

        y0 = y.copy()
        y = y0 - np.reshape(np.multiply(L[:,N].flatten(), 1./Lp), (len(y0), 1))
        
    # Linear map from [-1,1] to [a,b]
    x_vect = (a*(1-y)+b*(1+y))/2
    x_vect = x_vect.flatten()
    
    # Compute the weights
    y2 = np.multiply(y, y)
    Lp2 = np.multiply(Lp, Lp)
    w_vect = (b-a)/(np.multiply((1-y2.flatten()), Lp2.flatten()))*((N+1)/N)**2.
    
    return x_vect, w_vect
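
A quick check of the nodes and weights: N-point Gauss-Legendre quadrature
is exact for polynomials up to degree 2N-1, so even five nodes integrate
x**2 over [0, 1] to machine precision:

import numpy as np

x, w = lgwt(5, 0.0, 1.0)
print(np.sum(w * x**2))  # ~0.3333333333333333, i.e. 1/3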
Ejemplo n.º 46
def PB_and_fourlayermodel(distance,
                          psi_pb_0,
                          log_K,
                          X,
                          A,
                          T,
                          pos_ele_S1,
                          pos_ele_S2,
                          C_vec_S1,
                          C_vec_S2,
                          a1,
                          a2,
                          s1,
                          s2,
                          idx_aq,
                          Z_aq_vec,
                          temp,
                          epsilon,
                          tolerance=1e-8,
                          max_iterations=80,
                          idx_fix_species=None,
                          scalingRC=True):
    """
    -Implements the fours layer model for two surfaces that interact between them.
    
    Arguments:
        A               stoichiometric matrix. Define as Westall (1980)
        X               vector of primary species. Define as Westall (1980)
        log_K           vector of log Ki. Define as Westall (1980)
        T               vector of totals --> THE POSITION OF T must be the same THAT THE POSITON of X!!!!!!
        pos_ele_S1      vector of the indexes of the electrostatic psi_0, psi_C, psi_A, psi_D. They have the same position at X and at T
        pos_ele_S2      idem but for surface2
        a1              surface area of the solid per mass [m2/g]
        a2              idem but for surface2
        s1
        s2              idem but for surface2
        idx_aq          vector of indexes of aqueous species that are found in C (log_C = log_K + A*log(X))
        Z_aq_vec        is a vector of the valancies of the aqueous species. The order of the values on this vector is given by the order of aqueous species in C (log_C = log_K + A*log(X))
    """
    F = 96485.3328959  # C/mol
    R = 8.314472  # J/(K*mol)
    epsilon_0 = 8.854187871e-12  # Farrads = F/m   - permittivity in vaccuum
    abs_err = tolerance + 1
    counter_iterations = 0
    abs_err = tolerance + 1
    if idx_fix_species is not None:
        X[idx_fix_species] = T[idx_fix_species]
    while abs_err - tolerance > np.finfo(
            float).eps and counter_iterations < max_iterations:
        # First step is to calculate the Residual function
        [Y, T
         ] = residual_function_calculation(log_K, X, A, T, pos_ele_S1,
                                           pos_ele_S2, C_vec_S1, C_vec_S2, a1,
                                           a2, s1, s2, temp, F, R, epsilon_0,
                                           epsilon, idx_aq, Z_aq_vec, psi_pb_0,
                                           distance, idx_fix_species)
        # Second step is to calculate the Jacobian
        J = calculate_jacobian_function(log_K, X, A, pos_ele_S1, pos_ele_S2,
                                        C_vec_S1, C_vec_S2, a1, a2, s1, s2,
                                        temp, F, R, epsilon_0, epsilon, idx_aq,
                                        Z_aq_vec, psi_pb_0, distance,
                                        idx_fix_species)
        # solve
        if scalingRC:
            D1 = diagonal_row(J)
            D2 = diagonal_col(J)

            J_new = np.matmul(D1, np.matmul(J, D2))
            Y_new = np.matmul(D1, Y)
            delta_X_new = linalg.solve(J_new, -Y_new)
            delta_X = np.matmul(D2, delta_X_new)
        else:
            # Calculating the diff, Delta_X
            delta_X = linalg.solve(J, -Y)
        max_1 = 1
        max_2 = np.amax(-2 * np.multiply(delta_X, 1 / X))
        Max_f = np.amax([max_1, max_2])
        Del_mul = 1 / Max_f
        X = X + Del_mul * delta_X

        if idx_fix_species is not None:
            Y[idx_fix_species] = 0
        abs_err = max(abs(Y))
        counter_iterations += 1
        print(Y)
    if counter_iterations >= max_iterations or np.isnan(abs_err):
        raise ValueError('Maximum number of iterations exceeded.')
    C = modified_mass_action_law(log_K, X, A)
    return X, C
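
The helpers diagonal_row and diagonal_col are not shown in this fragment.
A plausible reading, stated here as an assumption, is standard two-sided
equilibration: each diagonal entry is the reciprocal of the largest
absolute entry in the corresponding row or column of the Jacobian. A
minimal sketch of that scaling step:

import numpy as np
from numpy import linalg

def diagonal_row_sketch(J):
    # hypothetical stand-in: scale each row by 1 / max|row entry|
    return np.diag(1.0 / np.max(np.abs(J), axis=1))

def diagonal_col_sketch(J):
    # hypothetical stand-in: scale each column by 1 / max|column entry|
    return np.diag(1.0 / np.max(np.abs(J), axis=0))

J = np.array([[1e6, 2.0], [3.0, 4e-6]])  # badly scaled toy Jacobian
Y = np.array([1.0, 2.0])
D1, D2 = diagonal_row_sketch(J), diagonal_col_sketch(J)
# same solve pattern as the scalingRC branch above
delta_X_new = linalg.solve(D1 @ J @ D2, -np.matmul(D1, Y))
delta_X = np.matmul(D2, delta_X_new)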
Ejemplo n.º 47
def brentq(f,
           a,
           b,
           args=(),
           xtol=2e-14,
           maxiter=200,
           rtol=4 * np.finfo(float).eps):
    """
    Find a root of a function in a bracketing interval using Brent's method
    adapted from Scipy's brentq.

    Uses the classic Brent's method to find a zero of the function `f` on
    the sign changing interval [a , b].

    Parameters
    ----------
    f : callable
        Python function returning a number.  `f` must be continuous.
    a : number
        One end of the bracketing interval [a,b].
    b : number
        The other end of the bracketing interval [a,b].
    args : tuple, optional(default=())
        Extra arguments to be used in the function call.
    xtol : number, optional(default=2e-14)
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be nonnegative.
    rtol : number, optional(default=4*np.finfo(float).eps)
        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
    maxiter : number, optional(default=200)
        Maximum number of iterations.

    Returns
    -------
    results : namedtuple

    """
    if xtol <= 0:
        raise ValueError("xtol is too small (<= 0)")
    if maxiter < 1:
        raise ValueError("maxiter must be greater than 0")

    # Convert to float
    xpre = a * 1.0
    xcur = b * 1.0

    # Conditional checks for intervals in methods involving bisection
    fpre = f(xpre, *args)
    fcur = f(xcur, *args)
    funcalls = 2

    if fpre * fcur > 0:
        raise ValueError("f(a) and f(b) must have different signs")
    root = 0.0
    status = _ECONVERR

    # Root found at either end of [a,b]
    if fpre == 0:
        root = xpre
        status = _ECONVERGED
    if fcur == 0:
        root = xcur
        status = _ECONVERGED

    # Check for sign error and early termination
    if status == _ECONVERGED:
        itr = 0
    else:
        # Perform Brent's method
        for itr in range(maxiter):
            if fpre * fcur < 0:
                xblk = xpre
                fblk = fpre
                spre = scur = xcur - xpre
            if abs(fblk) < abs(fcur):
                xpre = xcur
                xcur = xblk
                xblk = xpre

                fpre = fcur
                fcur = fblk
                fblk = fpre

            delta = (xtol + rtol * abs(xcur)) / 2
            sbis = (xblk - xcur) / 2

            # Root found
            if fcur == 0 or abs(sbis) < delta:
                status = _ECONVERGED
                root = xcur
                itr += 1
                break

            if abs(spre) > delta and abs(fcur) < abs(fpre):
                if xpre == xblk:
                    # interpolate
                    stry = -fcur * (xcur - xpre) / (fcur - fpre)
                else:
                    # extrapolate
                    dpre = (fpre - fcur) / (xpre - xcur)
                    dblk = (fblk - fcur) / (xblk - xcur)
                    stry = -fcur * (fblk * dblk - fpre * dpre) / \
                           (dblk * dpre * (fblk - fpre))

                if 2 * abs(stry) < min(abs(spre), 3 * abs(sbis) - delta):
                    # good short step
                    spre = scur
                    scur = stry
                else:
                    # bisect
                    spre = sbis
                    scur = sbis
            else:
                # bisect
                spre = sbis
                scur = sbis

            xpre = xcur
            fpre = fcur
            if abs(scur) > delta:
                xcur += scur
            else:
                xcur += (delta if sbis > 0 else -delta)
            fcur = f(xcur, *args)
            funcalls += 1

    if status == _ECONVERR:
        raise RuntimeError("Failed to converge")

    # x, funcalls, iterations = root, funcalls, itr
    return root, funcalls, itr
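
The status flags _ECONVERGED and _ECONVERR are defined elsewhere in the
module; the integer values below are assumptions made only so the sketch
runs. Finding the fixed point of cos(x):

import numpy as np

_ECONVERGED, _ECONVERR = 0, -1  # assumed module-level flags

root, funcalls, itr = brentq(lambda x: np.cos(x) - x, 0.0, 1.0)
print(root)  # ~0.7390851332151607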
Ejemplo n.º 48
def _sigmoid_calibration(df, y, sample_weight=None):
    """Probability Calibration with sigmoid method (Platt 2000)

    Parameters
    ----------
    df : ndarray, shape (n_samples,)
        The decision function or predict proba for the samples.

    y : ndarray, shape (n_samples,)
        The targets.

    sample_weight : array-like, shape = [n_samples] or None
        Sample weights. If None, then samples are equally weighted.

    Returns
    -------
    a : float
        The slope.

    b : float
        The intercept.

    References
    ----------
    Platt, "Probabilistic Outputs for Support Vector Machines"
    """
    df = column_or_1d(df)
    y = column_or_1d(y)

    F = df  # F follows Platt's notations
    tiny = np.finfo(float).tiny  # to avoid division by 0 warning

    # Bayesian priors (see Platt end of section 2.2)
    prior0 = float(np.sum(y <= 0))
    prior1 = y.shape[0] - prior0
    T = np.zeros(y.shape)
    T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
    T[y <= 0] = 1. / (prior0 + 2.)
    T1 = 1. - T

    def objective(AB):
        # From Platt (beginning of Section 2.2)
        E = np.exp(AB[0] * F + AB[1])
        P = 1. / (1. + E)
        l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
        if sample_weight is not None:
            return (sample_weight * l).sum()
        else:
            return l.sum()

    def grad(AB):
        # gradient of the objective function
        E = np.exp(AB[0] * F + AB[1])
        P = 1. / (1. + E)
        TEP_minus_T1P = P * (T * E - T1)
        if sample_weight is not None:
            TEP_minus_T1P *= sample_weight
        dA = np.dot(TEP_minus_T1P, F)
        dB = np.sum(TEP_minus_T1P)
        return np.array([dA, dB])

    AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
    AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
    return AB_[0], AB_[1]
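
A usage sketch on synthetic scores, assuming the module's usual imports
(column_or_1d from sklearn.utils, fmin_bfgs from scipy.optimize, log from
math); Platt scaling then maps decision values to probabilities via
1 / (1 + exp(A*f + B)):

import numpy as np

rng = np.random.RandomState(0)
scores = np.concatenate([rng.normal(-2, 1, 100), rng.normal(2, 1, 100)])
labels = np.concatenate([np.zeros(100), np.ones(100)])

A, B = _sigmoid_calibration(scores, labels)
probs = 1.0 / (1.0 + np.exp(A * scores + B))
print(probs[:3], probs[-3:])  # low for the negatives, high for the positives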
Ejemplo n.º 49
class DataFrameModel(QtCore.QAbstractTableModel):
    """data model for use in QTableView, QListView, QComboBox, etc.

    Attributes:
        timestampFormat (unicode): formatting string for conversion of timestamps to QtCore.QDateTime.
            Used in data method.
        sortingAboutToStart (QtCore.pyqtSignal): emitted directly before sorting starts.
        sortingFinished (QtCore.pyqtSignal): emitted, when sorting finished.
        dtypeChanged (Signal(columnName)): passed from related ColumnDtypeModel
            if a columns dtype has changed.
        changingDtypeFailed (Signal(columnName, index, dtype)):
            passed from related ColumnDtypeModel.
            emitted after a column has failed to change its data type.
        dataChanged (Signal):
            Emitted if data has changed, e.g. after loading finished or new
            columns were added or removed.
            It's not the same as layoutChanged.
            Useful to reset delegates in the view.
    """

    _float_precisions = {
        "float16": numpy.finfo(numpy.float16).precision - 2,
        "float32": numpy.finfo(numpy.float32).precision - 1,
        "float64": numpy.finfo(numpy.float64).precision - 1
    }
    """list of int datatypes for easy checking in data() and setData()"""
    _intDtypes = SupportedDtypes.intTypes() + SupportedDtypes.uintTypes()
    """list of float datatypes for easy checking in data() and setData()"""
    _floatDtypes = SupportedDtypes.floatTypes()
    """list of bool datatypes for easy checking in data() and setData()"""
    _boolDtypes = SupportedDtypes.boolTypes()
    """list of datetime datatypes for easy checking in data() and setData()"""
    _dateDtypes = SupportedDtypes.datetimeTypes()

    _timestampFormat = Qt.ISODate

    sortingAboutToStart = Signal()
    sortingFinished = Signal()
    dtypeChanged = Signal(int, object)
    changingDtypeFailed = Signal(object, QtCore.QModelIndex, object)
    dataChanged = Signal()
    dataFrameChanged = Signal()

    def __init__(self, dataFrame=None, copyDataFrame=False, filePath=None):
        """

        Args:
            dataFrame (pandas.core.frame.DataFrame, optional): initializes the model with given DataFrame.
                If none is given an empty DataFrame will be set. defaults to None.
            copyDataFrame (bool, optional): create a copy of dataFrame or use it as is. defaults to False.
                If you use it as is, you can change it from outside otherwise you have to reset the dataFrame
                after external changes.
            filePath (str, optional): stores the original path for tracking.

        """
        super(DataFrameModel, self).__init__()

        self._dataFrame = pandas.DataFrame()

        if dataFrame is not None:
            self.setDataFrame(dataFrame, copyDataFrame=copyDataFrame)

        self.dataChanged.emit()

        self._dataFrameOriginal = None
        self._search = DataSearch("nothing", "")
        self.editable = False
        self._filePath = filePath

    @property
    def filePath(self):
        """
        Access to the internal _filepath property (could be None)
        :return: qtpandas.models.DataFrameModel._filepath
        """
        return self._filePath

    def dataFrame(self):
        """
        getter function to _dataFrame. Holds all data.

        Note:
            It's not implemented with python properties to keep Qt conventions.
            Not sure why??
        """
        return self._dataFrame

    def setDataFrameFromFile(self, filepath, **kwargs):
        """
        Sets the model's dataFrame by reading a file.
        Accepted file formats:
            - .xlsx (sheet1 is read unless specified in kwargs)
            - .csv (comma separated unless specified in kwargs)
            - .txt (any separator)

        :param filepath: (str)
            The path to the file to be read.
        :param kwargs:
            pandas.read_csv(**kwargs) or pandas.read_excel(**kwargs)
        :return: None
        """
        df = superReadFile(filepath, **kwargs)
        self.setDataFrame(df, filePath=filepath)

    def setDataFrame(self, dataFrame, copyDataFrame=False, filePath=None):
        """
        Setter function to _dataFrame. Holds all data.

        Note:
            It's not implemented with python properties to keep Qt conventions.

        Raises:
            TypeError: if dataFrame is not of type pandas.core.frame.DataFrame.

        Args:
            dataFrame (pandas.core.frame.DataFrame): assign dataFrame to _dataFrame. Holds all the data displayed.
            copyDataFrame (bool, optional): create a copy of dataFrame or use it as is. defaults to False.
                If you use it as is, you can change it from outside otherwise you have to reset the dataFrame
                after external changes.

        """
        if not isinstance(dataFrame, pandas.core.frame.DataFrame):
            raise TypeError("not of type pandas.core.frame.DataFrame")

        self.layoutAboutToBeChanged.emit()
        if copyDataFrame:
            self._dataFrame = dataFrame.copy()
        else:
            self._dataFrame = dataFrame

        self._columnDtypeModel = ColumnDtypeModel(dataFrame)
        self._columnDtypeModel.dtypeChanged.connect(self.propagateDtypeChanges)
        self._columnDtypeModel.changeFailed.connect(
            lambda columnName, index, dtype: self.changingDtypeFailed.emit(
                columnName, index, dtype))
        if filePath is not None:
            self._filePath = filePath
        self.layoutChanged.emit()
        self.dataChanged.emit()
        self.dataFrameChanged.emit()

    @Slot(int, object)
    def propagateDtypeChanges(self, column, dtype):
        """
        Emits a dtypeChanged signal with the column and dtype.

        :param column: (str)
        :param dtype: ??
        :return: None
        """
        self.dtypeChanged.emit(column, dtype)

    @property
    def timestampFormat(self):
        """getter to _timestampFormat"""
        return self._timestampFormat

    @timestampFormat.setter
    def timestampFormat(self, timestampFormat):
        """
        Setter to _timestampFormat. Formatting string for conversion of timestamps to QtCore.QDateTime

        Raises:
            AssertionError: if timestampFormat is not of type unicode.

        Args:
            timestampFormat (unicode): assign timestampFormat to _timestampFormat.
                Formatting string for conversion of timestamps to QtCore.QDateTime. Used in data method.

        """
        if not isinstance(timestampFormat, str):
            raise TypeError('not of type unicode')
        #assert isinstance(timestampFormat, unicode) or timestampFormat.__class__.__name__ == "DateFormat", "not of type unicode"
        self._timestampFormat = timestampFormat

    def rename(self, index=None, columns=None, **kwargs):
        """
        Renames the dataframe inplace calling appropriate signals.
        Wraps pandas.DataFrame.rename(*args, **kwargs) - overrides
        the inplace kwarg setting it to True.

        Example use:
        renames = {'colname1':'COLNAME_1', 'colname2':'COL2'}
        DataFrameModel.rename(columns=renames)

        :param args:
            see pandas.DataFrame.rename
        :param kwargs:
            see pandas.DataFrame.rename
        :return:
            None
        """
        kwargs['inplace'] = True
        self.layoutAboutToBeChanged.emit()
        self._dataFrame.rename(index, columns, **kwargs)
        self.layoutChanged.emit()
        self.dataChanged.emit()
        self.dataFrameChanged.emit()

    def applyFunction(self, func):
        """
        Applies a function to the dataFrame with appropriate signals.
        The function must return a dataframe.
        :param func: A function (or partial function) that accepts a dataframe as the first argument.
        :return: None
        :raise:
            AssertionError if the func is not callable.
            AssertionError if the func does not return a DataFrame.
        """
        assert callable(func), "function {} is not callable".format(func)
        self.layoutAboutToBeChanged.emit()
        df = func(self._dataFrame)
        assert isinstance(df, pandas.DataFrame
                          ), "function {} did not return a DataFrame.".format(
                              func.__name__)
        self._dataFrame = df
        self.layoutChanged.emit()
        self.dataChanged.emit()
        self.dataFrameChanged.emit()

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        """
        Return the header depending on section, orientation and Qt::ItemDataRole

        Args:
            section (int): For horizontal headers, the section number corresponds to the column number.
                Similarly, for vertical headers, the section number corresponds to the row number.
            orientation (Qt::Orientations):
            role (Qt::ItemDataRole):

        Returns:
            None if not Qt.DisplayRole
            _dataFrame.columns.tolist()[section] if orientation == Qt.Horizontal
            section if orientation == Qt.Vertical
            None if horizontal orientation and section raises IndexError
        """
        if role != Qt.DisplayRole:
            return None

        if orientation == Qt.Horizontal:
            try:
                label = self._dataFrame.columns.tolist()[section]
                return label
            except (IndexError, ):
                return None
        elif orientation == Qt.Vertical:
            return section

    def data(self, index, role=Qt.DisplayRole):
        """return data depending on index, Qt::ItemDataRole and data type of the column.

        Args:
            index (QtCore.QModelIndex): Index to define column and row you want to return
            role (Qt::ItemDataRole): Define which data you want to return.

        Returns:
            None if index is invalid
            None if role is none of: DisplayRole, EditRole, CheckStateRole, DATAFRAME_ROLE

            if role DisplayRole:
                unmodified _dataFrame value if column dtype is object (string or unicode).
                _dataFrame value as int or long if column dtype is in _intDtypes.
                _dataFrame value as float if column dtype is in _floatDtypes. Rounds to defined precision (look at: _float16_precision, _float32_precision).
                None if column dtype is in _boolDtypes.
                QDateTime if column dtype is numpy.timestamp64[ns]. Uses timestampFormat as conversion template.

            if role EditRole:
                unmodified _dataFrame value if column dtype is object (string or unicode).
                _dataFrame value as int or long if column dtype is in _intDtypes.
                _dataFrame value as float if column dtype is in _floatDtypes. Rounds to defined precision (look at: _float16_precision, _float32_precision).
                _dataFrame value as bool if column dtype is in _boolDtypes.
                QDateTime if column dtype is numpy.timestamp64[ns]. Uses timestampFormat as conversion template.

            if role CheckStateRole:
                Qt.Checked or Qt.Unchecked if dtype is numpy.bool_ otherwise None for all other dtypes.

            if role DATAFRAME_ROLE:
                unmodified _dataFrame value.

            raises TypeError if an unhandled dtype is found in column.
        """

        if not index.isValid():
            return None

        def convertValue(row, col, columnDtype):
            value = None
            if columnDtype == object:
                value = self._dataFrame.loc[row, col]
            elif columnDtype in self._floatDtypes:
                value = round(float(self._dataFrame.loc[row, col]),
                              self._float_precisions[str(columnDtype)])
            elif columnDtype in self._intDtypes:
                value = int(self._dataFrame.loc[row, col])
            elif columnDtype in self._boolDtypes:
                # the value is already stored as a bool in the dataframe,
                # so the cast below is only a safeguard
                value = bool(self._dataFrame.loc[row, col])
            elif columnDtype in self._dateDtypes:
                value = pandas.Timestamp(self._dataFrame.loc[row, col])
                value = QtCore.QDateTime.fromString(str(value),
                                                    self.timestampFormat)
            # else:
            #     raise TypeError("returning unhandled data type")
            return value

        row = self._dataFrame.index[index.row()]
        col = self._dataFrame.columns[index.column()]
        columnDtype = self._dataFrame[col].dtype

        if role == Qt.DisplayRole:
            # return the raw value if you want to show True/False as text
            if columnDtype == numpy.bool_:
                result = self._dataFrame.loc[row, col]
            else:
                result = convertValue(row, col, columnDtype)
        elif role == Qt.EditRole:
            result = convertValue(row, col, columnDtype)
        elif role == Qt.CheckStateRole:
            if columnDtype == numpy.bool_:
                if convertValue(row, col, columnDtype):
                    result = Qt.Checked
                else:
                    result = Qt.Unchecked
            else:
                result = None
        elif role == DATAFRAME_ROLE:
            result = self._dataFrame.loc[row, col]
        else:
            result = None
        return result

    def flags(self, index):
        """Returns the item flags for the given index as ored value, e.x.: Qt.ItemIsUserCheckable | Qt.ItemIsEditable

        If a combobox for bool values should pop up ItemIsEditable have to set for bool columns too.

        Args:
            index (QtCore.QModelIndex): Index to define column and row

        Returns:
            if column dtype is not boolean Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
            if column dtype is boolean Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable
        """
        flags = super(DataFrameModel, self).flags(index)

        if not self.editable:
            return flags

        col = self._dataFrame.columns[index.column()]
        if self._dataFrame[col].dtype == numpy.bool_:
            flags |= Qt.ItemIsUserCheckable
        else:
            # if you want to have a combobox for bool columns set this
            flags |= Qt.ItemIsEditable

        return flags

    def setData(self, index, value, role=Qt.DisplayRole):
        """Set the value to the index position depending on Qt::ItemDataRole and data type of the column

        Args:
            index (QtCore.QModelIndex): Index to define column and row.
            value (object): new value.
            role (Qt::ItemDataRole): Use this role to specify what you want to do.

        Raises:
            TypeError: If the value could not be converted to a known datatype.

        Returns:
            True if value is changed. Calls layoutChanged after update.
            False if value is not different from original value.

        """
        if not index.isValid() or not self.editable:
            return False

        if value != index.data(role):

            self.layoutAboutToBeChanged.emit()

            row = self._dataFrame.index[index.row()]
            col = self._dataFrame.columns[index.column()]
            #print 'before change: ', index.data().toUTC(), self._dataFrame.iloc[row][col]
            columnDtype = self._dataFrame[col].dtype

            if columnDtype == object:
                pass

            elif columnDtype in self._intDtypes:
                dtypeInfo = numpy.iinfo(columnDtype)
                if value < dtypeInfo.min:
                    value = dtypeInfo.min
                elif value > dtypeInfo.max:
                    value = dtypeInfo.max

            elif columnDtype in self._floatDtypes:
                value = numpy.float64(value).astype(columnDtype)

            elif columnDtype in self._boolDtypes:
                value = numpy.bool_(value)

            elif columnDtype in self._dateDtypes:
                # convert the given value to a compatible datetime object.
                # if the conversion could not be done, keep the original
                # value.
                if isinstance(value, QtCore.QDateTime):
                    value = value.toString(self.timestampFormat)
                try:
                    value = pandas.Timestamp(value)
                except Exception:
                    raise Exception(
                        "Can't convert '{0}' into a datetime".format(value))
                    # return False
            else:
                raise TypeError("try to set unhandled data type")

            self._dataFrame.loc[row, col] = value

            #print 'after change: ', value, self._dataFrame.iloc[row][col]
            self.layoutChanged.emit()
            return True
        else:
            return False

    def rowCount(self, index=QtCore.QModelIndex()):
        """returns number of rows

        Args:
            index (QtCore.QModelIndex, optional): Index to define column and row. defaults to empty QModelIndex

        Returns:
            number of rows
        """
        # len(df.index) is faster, so use it:
        # In [12]: %timeit df.shape[0]
        # 1000000 loops, best of 3: 437 ns per loop
        # In [13]: %timeit len(df.index)
        # 10000000 loops, best of 3: 110 ns per loop
        # %timeit df.__len__()
        # 1000000 loops, best of 3: 215 ns per loop
        return len(self._dataFrame.index)

    def columnCount(self, index=QtCore.QModelIndex()):
        """returns number of columns

        Args:
            index (QtCore.QModelIndex, optional): Index to define column and row. defaults to empty QModelIndex

        Returns:
            number of columns
        """
        # speed comparison:
        # In [23]: %timeit len(df.columns)
        # 10000000 loops, best of 3: 108 ns per loop

        # In [24]: %timeit df.shape[1]
        # 1000000 loops, best of 3: 440 ns per loop
        return len(self._dataFrame.columns)

    def sort(self, columnId, order=Qt.AscendingOrder):
        """
        Sorts the model column

        After sorting the data in ascending or descending order, a signal
        `layoutChanged` is emitted.

        :param: columnId (int)
            the index of the column to sort on.
        :param: order (Qt::SortOrder, optional)
            descending(1) or ascending(0). defaults to Qt.AscendingOrder

        """
        self.layoutAboutToBeChanged.emit()
        self.sortingAboutToStart.emit()
        column = self._dataFrame.columns[columnId]
        self._dataFrame.sort_values(column, ascending=not bool(order), inplace=True)
        self.layoutChanged.emit()
        self.sortingFinished.emit()

    def setFilter(self, search):
        """
        Apply a filter and hide rows.

        The filter must be a `DataSearch` object, which evaluates a python
        expression.
        If there was an error while parsing the expression, the data will remain
        unfiltered.

        Args:
            search(qtpandas.DataSearch): data search object to use.

        Raises:
            TypeError: An error is raised, if the given parameter is not a
                `DataSearch` object.

        """
        if not isinstance(search, DataSearch):
            raise TypeError(
                'The given parameter must be a `qtpandas.DataSearch` object')

        self._search = search

        self.layoutAboutToBeChanged.emit()

        if self._dataFrameOriginal is not None:
            self._dataFrame = self._dataFrameOriginal
        self._dataFrameOriginal = self._dataFrame.copy()

        self._search.setDataFrame(self._dataFrame)
        searchIndex, valid = self._search.search()

        if valid:
            self._dataFrame = self._dataFrame[searchIndex]
            self.layoutChanged.emit()
        else:
            self.clearFilter()
            self.layoutChanged.emit()

        self.dataFrameChanged.emit()

    def clearFilter(self):
        """
        Clear all filters.
        """
        if self._dataFrameOriginal is not None:
            self.layoutAboutToBeChanged.emit()
            self._dataFrame = self._dataFrameOriginal
            self._dataFrameOriginal = None
            self.layoutChanged.emit()

    def columnDtypeModel(self):
        """
        Getter for a ColumnDtypeModel.

        :return:
            qtpandas.models.ColumnDtypeModel
        """
        return self._columnDtypeModel

    def enableEditing(self, editable=True):
        """
        Sets the DataFrameModel and columnDtypeModel's
        editable properties.
        :param editable: bool
            defaults to True,
            False disables most editing methods.
        :return:
            None
        """
        self.editable = editable
        self._columnDtypeModel.setEditable(self.editable)

    def dataFrameColumns(self):
        """
        :return: list containing dataframe columns
        """
        return self._dataFrame.columns.tolist()

    def addDataFrameColumn(self, columnName, dtype=str, defaultValue=None):
        """
        Adds a column to the dataframe as long as
        the model's editable property is set to True and the
        dtype is supported.

        :param columnName: str
            name of the column.
        :param dtype: qtpandas.models.SupportedDtypes option
        :param defaultValue: (object)
            to default the column's value to, should be the same as the dtype or None
        :return: (bool)
            True on success, False otherwise.
        """
        if not self.editable or dtype not in SupportedDtypes.allTypes():
            return False

        elements = self.rowCount()
        columnPosition = self.columnCount()

        newColumn = pandas.Series([defaultValue] * elements,
                                  index=self._dataFrame.index,
                                  dtype=dtype)

        self.beginInsertColumns(QtCore.QModelIndex(), columnPosition - 1,
                                columnPosition - 1)
        try:
            self._dataFrame.insert(columnPosition,
                                   columnName,
                                   newColumn,
                                   allow_duplicates=False)
        except ValueError:
            # columnName already exists
            return False

        self.endInsertColumns()

        self.propagateDtypeChanges(columnPosition, newColumn.dtype)

        return True

    def addDataFrameRows(self, count=1):
        """

        Adds rows to the dataframe.

        :param count: (int)
            The number of rows to add to the dataframe.
        :return: (bool)
            True on success, False on failure.

        """
        # don't allow any gaps in the data rows.
        # and always append at the end

        if not self.editable:
            return False

        position = self.rowCount()

        if count < 1:
            return False

        if len(self.dataFrame().columns) == 0:
            # log an error message or warning
            return False

        # Note: This function emits the rowsAboutToBeInserted() signal which
        # connected views (or proxies) must handle before the data is
        # inserted. Otherwise, the views may end up in an invalid state.
        self.beginInsertRows(QtCore.QModelIndex(), position,
                             position + count - 1)

        defaultValues = []
        for dtype in self._dataFrame.dtypes:
            if dtype.type == numpy.dtype('<M8[ns]'):
                val = pandas.Timestamp('')
            elif dtype.type == numpy.dtype(object):
                val = ''
            else:
                val = dtype.type()
            defaultValues.append(val)

        for i in range(count):
            self._dataFrame.loc[position + i] = defaultValues
        self._dataFrame.reset_index()
        self.endInsertRows()
        return True

    def removeDataFrameColumns(self, columns):
        """
        Removes columns from the dataframe.
        :param columns: [(int, str)]
        :return: (bool)
            True on success, False on failure.
        """
        if not self.editable:
            return False

        if columns:
            deleted = 0
            errored = False
            for (position, name) in columns:
                position = position - deleted
                if position < 0:
                    position = 0
                self.beginRemoveColumns(QtCore.QModelIndex(), position,
                                        position)
                try:
                    self._dataFrame.drop(name, axis=1, inplace=True)
                except ValueError:
                    errored = True
                    continue
                self.endRemoveColumns()
                deleted += 1
            self.dataChanged.emit()

            if errored:
                return False
            else:
                return True
        return False

    def removeDataFrameRows(self, rows):
        """
        Removes rows from the dataframe.

        :param rows: (list)
            of row indexes to remove.
        :return: (bool)
            True on success, False on failure.
        """
        if not self.editable:
            return False

        if rows:
            position = min(rows)
            count = len(rows)
            self.beginRemoveRows(QtCore.QModelIndex(), position,
                                 position + count - 1)

            removedAny = False
            for idx, line in self._dataFrame.iterrows():
                if idx in rows:
                    removedAny = True
                    self._dataFrame.drop(idx, inplace=True)

            if not removedAny:
                return False

            self._dataFrame.reset_index(inplace=True, drop=True)

            self.endRemoveRows()
            return True
        return False
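
A minimal usage sketch; it assumes a running Qt application and that the
class is importable from qtpandas.models.DataFrameModel, the module path
its own docstrings refer to:

import pandas
from qtpandas.models.DataFrameModel import DataFrameModel

df = pandas.DataFrame({'a': [1, 2, 3], 'b': [0.5, 1.5, 2.5]})
model = DataFrameModel(df, copyDataFrame=True)
model.enableEditing(True)
model.addDataFrameRows(count=1)  # appends one row of default values
model.addDataFrameColumn('c', dtype=str, defaultValue='')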
Ejemplo n.º 50
import numpy

from pytorch_lightning.utilities.apply_func import move_data_to_device  # noqa: F401
from pytorch_lightning.utilities.distributed import AllGatherGrad  # noqa: F401
from pytorch_lightning.utilities.enums import (  # noqa: F401
    _AcceleratorType, _StrategyType, AMPType, DistributedType,
    GradClipAlgorithmType, LightningEnum, ModelSummaryMode,
)
from pytorch_lightning.utilities.grads import grad_norm  # noqa: F401
from pytorch_lightning.utilities.imports import (  # noqa: F401
    _APEX_AVAILABLE, _BAGUA_AVAILABLE, _DEEPSPEED_AVAILABLE,
    _FAIRSCALE_AVAILABLE, _FAIRSCALE_FULLY_SHARDED_AVAILABLE,
    _FAIRSCALE_OSS_FP16_BROADCAST_AVAILABLE, _GROUP_AVAILABLE,
    _HOROVOD_AVAILABLE, _HPU_AVAILABLE, _HYDRA_AVAILABLE,
    _HYDRA_EXPERIMENTAL_AVAILABLE, _IPU_AVAILABLE, _IS_INTERACTIVE,
    _IS_WINDOWS, _JSONARGPARSE_AVAILABLE, _module_available,
    _OMEGACONF_AVAILABLE, _POPTORCH_AVAILABLE, _RICH_AVAILABLE,
    _TORCH_GREATER_EQUAL_1_8, _TORCH_GREATER_EQUAL_1_9,
    _TORCH_GREATER_EQUAL_1_10, _TORCH_QUANTIZE_AVAILABLE, _TORCHTEXT_AVAILABLE,
    _TORCHVISION_AVAILABLE, _TPU_AVAILABLE, _XLA_AVAILABLE,
)
from pytorch_lightning.utilities.parameter_tying import find_shared_parameters, set_shared_parameters  # noqa: F401
from pytorch_lightning.utilities.parsing import AttributeDict, flatten_dict, is_picklable  # noqa: F401
from pytorch_lightning.utilities.rank_zero import (  # noqa: F401
    rank_zero_deprecation, rank_zero_info, rank_zero_only, rank_zero_warn,
)

FLOAT16_EPSILON = numpy.finfo(numpy.float16).eps
FLOAT32_EPSILON = numpy.finfo(numpy.float32).eps
FLOAT64_EPSILON = numpy.finfo(numpy.float64).eps
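
A short illustration of what these constants mean: eps is the smallest
increment that still changes 1.0 at the given precision, which is why
float16 comparisons need a much looser tolerance than float64 ones:

for dtype, eps in [(numpy.float16, FLOAT16_EPSILON),
                   (numpy.float32, FLOAT32_EPSILON),
                   (numpy.float64, FLOAT64_EPSILON)]:
    one = dtype(1.0)
    assert one + dtype(eps) != one      # eps still changes 1.0
    assert one + dtype(eps) / 2 == one  # half an eps is absorbed
    print(dtype.__name__, eps)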
Ejemplo n.º 51
def evaluate(model,
             generator, 
             iou_threshold,
             obj_thresh,
             nms_thresh,
             net_h=416,
             net_w=416,
             save_path=None):
    """ Evaluate a given dataset using a given model.
    code originally from https://github.com/fizyr/keras-retinanet

    # Arguments
        model           : The model to evaluate.
        generator       : The generator that represents the dataset to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        obj_thresh      : The threshold used to distinguish between object and non-object
        nms_thresh      : The threshold used to determine whether two detections are duplicates
        net_h           : The height of the input image to the model, higher value results in better accuracy
        net_w           : The width of the input image to the model
        save_path       : The path to save images with visualized detections to.
    # Returns
        A dict mapping class names to mAP scores.
    """    
    # gather all detections and annotations
    all_detections     = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
    all_annotations    = [[None for i in range(generator.num_classes())] for j in range(generator.size())]

    for i in range(generator.size()):
        raw_image = [generator.load_image(i)]

        # make the boxes and the labels
        pred_boxes = get_yolo_boxes(model, raw_image, net_h, net_w, generator.get_anchors(), obj_thresh, nms_thresh)[0]

        score = np.array([box.get_score() for box in pred_boxes])
        pred_labels = np.array([box.label for box in pred_boxes])        
        
        if len(pred_boxes) > 0:
            pred_boxes = np.array([[box.xmin, box.ymin, box.xmax, box.ymax, box.get_score()] for box in pred_boxes]) 
        else:
            pred_boxes = np.array([[]])  
        
        # sort the boxes and the labels according to scores
        score_sort = np.argsort(-score)
        pred_labels = pred_labels[score_sort]
        pred_boxes  = pred_boxes[score_sort]
        
        # copy detections to all_detections
        for label in range(generator.num_classes()):
            all_detections[i][label] = pred_boxes[pred_labels == label, :]

        annotations = generator.load_annotation(i)
        
        # copy detections to all_annotations
        for label in range(generator.num_classes()):
            all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()

    # compute mAP by comparing all detections and all annotations
    average_precisions = {}
    
    for label in range(generator.num_classes()):
        false_positives = np.zeros((0,))
        true_positives  = np.zeros((0,))
        scores          = np.zeros((0,))
        num_annotations = 0.0

        for i in range(generator.size()):
            detections           = all_detections[i][label]
            annotations          = all_annotations[i][label]
            num_annotations     += annotations.shape[0]
            detected_annotations = []

            for d in detections:
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives  = np.append(true_positives, 0)
                    continue

                overlaps            = compute_overlap(np.expand_dims(d, axis=0), annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap         = overlaps[0, assigned_annotation]

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives  = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives  = np.append(true_positives, 0)

        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0
            continue

        # sort by score
        indices         = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives  = true_positives[indices]

        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives  = np.cumsum(true_positives)

        # compute recall and precision
        recall    = true_positives / num_annotations
        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)

        # compute average precision
        average_precision  = compute_ap(recall, precision)  
        average_precisions[label] = average_precision

    return average_precisions    
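
compute_ap is not shown in this fragment; the keras-retinanet code this
example credits uses the standard all-point interpolation, which, under
that assumption, looks roughly like the sketch below:

import numpy as np

def compute_ap_sketch(recall, precision):
    # pad with boundary values, then make precision monotonically decreasing
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # sum the area under the precision envelope where recall changes
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])

print(compute_ap_sketch(np.array([0.5, 1.0]), np.array([1.0, 0.5])))  # 0.75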
Ejemplo n.º 52
netL12 = lasagne.layers.Conv2DLayer(netL11BN,
                                    num_filters=10,
                                    filter_size=(5, 5),
                                    pad='same',
                                    name='netL12')
netL12BN = batch_norm(netL12, name='netL12BN')
networkOut = lasagne.layers.Conv2DLayer(
    netL12BN,
    num_filters=1,
    filter_size=(3, 3),
    pad='same',
    nonlinearity=lasagne.nonlinearities.sigmoid,
    name='networkOut')

output = T.add(lasagne.layers.get_output(networkOut, inputs=input_var),
               np.finfo(np.float32).eps)

loss = lasagne.objectives.binary_crossentropy(output, target_var)

loss = loss.mean()

params = lasagne.layers.get_all_params(networkOut, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss,
                                            params,
                                            learning_rate=0.01,
                                            momentum=0.9)
#updates = lasagne.updates.sgd(loss , params , learning_rate = 0.01)
test_output = T.add(
    lasagne.layers.get_output(networkOut, inputs=input_var,
                              deterministic=True),
    np.finfo(np.float32).eps)
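
The eps added to the sigmoid output above guards the log inside binary
cross-entropy: the sigmoid can underflow to exactly 0, and log(0) is -inf,
which poisons the mean loss. A two-line numpy illustration:

import numpy as np

eps = np.finfo(np.float32).eps
print(np.log(np.float32(0.0)))        # -inf (with a warning): breaks the loss
print(np.log(np.float32(0.0) + eps))  # finite: ~ -15.9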
Ejemplo n.º 53
        "build",
        "map_center",
        "frac_r",
        "cntlreg",
        "all_solar",
        "all_wind",
        "nuclear",
        "hydro",
    ])

    build = gdx.to_dataframe("build")
    map_center = gdx.to_dict("map_center")

    carbon = {
        i: gdx.to_dict("frac_r")["elements"][i]
        if gdx.to_dict("frac_r")["elements"][i] > np.finfo(float).tiny else 0
        for i in gdx.to_dict("frac_r")["elements"]
    }

    cap = build["elements"].copy()
    cap.loc[cap[cap["L"] <= np.finfo(float).tiny].index, "L"] = 0

    cap["is_solar"] = cap["k"].isin(gdx.to_dict("all_solar")["elements"])
    cap["is_wind"] = cap["k"].isin(gdx.to_dict("all_wind")["elements"])
    cap["is_nuclear"] = cap["k"].isin(gdx.to_dict("nuclear")["elements"])
    cap["is_hydro"] = cap["k"].isin(gdx.to_dict("hydro")["elements"])

    # rename technologies
    cap["k"] = cap["k"].map(sp.gen_map)

    # groupby
Ejemplo n.º 54
def construct_beam_search_ops(models, beam_size):
    """Builds a graph fragment for beam search over one or more RNNModels.

    Strategy:
        compute the log_probs - same as with sampling
        for sentences that are ended set log_prob(<eos>)=0, log_prob(not eos)=-inf
        add previous cost to log_probs
        run top k -> (idxs, values)
        use values as new costs
        divide idxs by num_classes to get state_idxs
        use gather to get new states
        take the remainder of idxs after num_classes to get new_predicted words
    """

    # Get some parameter settings.  For ensembling, some parameters are required
    # to be consistent across all models but others are not.  In the former
    # case, we assume that consistency has already been checked.  For the
    # parameters that are allowed to vary across models, the first model's
    # settings take precedence.
    decoder = models[0].decoder
    batch_size = tf.shape(decoder.init_state)[0]
    embedding_size = decoder.embedding_size
    translation_maxlen = decoder.translation_maxlen
    target_vocab_size = decoder.target_vocab_size
    high_depth = 0 if decoder.high_gru_stack is None \
                   else len(decoder.high_gru_stack.grus)

    # Initialize loop variables
    i = tf.constant(0)
    init_ys = -tf.ones(dtype=tf.int32, shape=[batch_size])
    init_embs = [
        tf.zeros(dtype=tf.float32, shape=[batch_size, embedding_size])
    ] * len(models)

    f_min = numpy.finfo(numpy.float32).min
    init_cost = [0.] + [f_min] * (
        beam_size - 1)  # force the first top k to come from the first hypothesis only
    init_cost = tf.constant(init_cost, dtype=tf.float32)
    init_cost = tf.tile(init_cost,
                        multiples=[batch_size / beam_size])  # duplications
    ys_array = tf.TensorArray(dtype=tf.int32,
                              size=translation_maxlen,
                              clear_after_read=True,
                              name='y_sampled_array')
    p_array = tf.TensorArray(dtype=tf.int32,
                             size=translation_maxlen,
                             clear_after_read=True,
                             name='parent_idx_array')
    init_base_states = [m.decoder.init_state for m in models]
    init_high_states = [[m.decoder.init_state] * high_depth for m in models]
    init_loop_vars = [
        i, init_base_states, init_high_states, init_ys, init_embs, init_cost,
        ys_array, p_array
    ]

    # Prepare cost matrix for completed sentences -> Prob(EOS) = 1 and Prob(x) = 0
    eos_log_probs = tf.constant([[0.] + ([f_min] * (target_vocab_size - 1))],
                                dtype=tf.float32)
    eos_log_probs = tf.tile(eos_log_probs, multiples=[batch_size, 1])

    def cond(i, prev_base_states, prev_high_states, prev_ys, prev_embs, cost,
             ys_array, p_array):
        return tf.logical_and(tf.less(i, translation_maxlen),
                              tf.reduce_any(tf.not_equal(prev_ys, 0)))

    def body(i, prev_base_states, prev_high_states, prev_ys, prev_embs, cost,
             ys_array, p_array):
        # get predictions from all models and sum the log probs
        sum_log_probs = None
        base_states = [None] * len(models)
        high_states = [None] * len(models)
        for j in range(len(models)):
            d = models[j].decoder
            states1 = d.grustep1.forward(prev_base_states[j], prev_embs[j])
            att_ctx = d.attstep.forward(states1)
            base_states[j] = d.grustep2.forward(states1, att_ctx)
            if d.high_gru_stack is None:
                stack_output = base_states[j]
                high_states[j] = []
            else:
                if d.high_gru_stack.context_state_size == 0:
                    stack_output, high_states[
                        j] = d.high_gru_stack.forward_single(
                            prev_high_states[j], base_states[j])
                else:
                    stack_output, high_states[
                        j] = d.high_gru_stack.forward_single(
                            prev_high_states[j],
                            base_states[j],
                            context=att_ctx)
            logits = d.predictor.get_logits(prev_embs[j],
                                            stack_output,
                                            att_ctx,
                                            multi_step=False)
            log_probs = tf.nn.log_softmax(logits)  # shape (batch, vocab_size)
            if sum_log_probs is None:
                sum_log_probs = log_probs
            else:
                sum_log_probs += log_probs

        # set cost of EOS to zero for completed sentences so that they are in top k
        # Need to make sure only EOS is selected because a completed sentence might
        # kill ongoing sentences
        sum_log_probs = tf.where(tf.equal(prev_ys, 0), eos_log_probs,
                                 sum_log_probs)

        all_costs = sum_log_probs + tf.expand_dims(
            cost, axis=1
        )  # TODO: you might be getting NaNs here since -inf is in log_probs

        all_costs = tf.reshape(all_costs,
                               shape=[-1, target_vocab_size * beam_size])
        values, indices = tf.nn.top_k(
            all_costs, k=beam_size
        )  # the `sorted` option defaults to True; is this needed?
        new_cost = tf.reshape(values, shape=[batch_size])
        offsets = tf.range(start=0,
                           delta=beam_size,
                           limit=batch_size,
                           dtype=tf.int32)
        offsets = tf.expand_dims(offsets, axis=1)
        survivor_idxs = (indices // target_vocab_size) + offsets
        new_ys = indices % target_vocab_size
        survivor_idxs = tf.reshape(survivor_idxs, shape=[batch_size])
        new_ys = tf.reshape(new_ys, shape=[batch_size])
        new_embs = [
            m.decoder.y_emb_layer.forward(new_ys, factor=0) for m in models
        ]
        new_base_states = [
            tf.gather(s, indices=survivor_idxs) for s in base_states
        ]
        new_high_states = [[
            tf.gather(s, indices=survivor_idxs) for s in states
        ] for states in high_states]
        new_cost = tf.where(tf.equal(new_ys, 0), tf.abs(new_cost), new_cost)

        ys_array = ys_array.write(i, value=new_ys)
        p_array = p_array.write(i, value=survivor_idxs)

        return i + 1, new_base_states, new_high_states, new_ys, new_embs, new_cost, ys_array, p_array

    final_loop_vars = tf.while_loop(cond=cond,
                                    body=body,
                                    loop_vars=init_loop_vars,
                                    back_prop=False)
    i, _, _, _, _, cost, ys_array, p_array = final_loop_vars

    indices = tf.range(0, i)
    sampled_ys = ys_array.gather(indices)
    parents = p_array.gather(indices)
    cost = tf.abs(cost)  # convert to negative log-likelihood
    return sampled_ys, parents, cost
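
The index arithmetic described in the docstring ("divide idxs by num_classes to get state_idxs ... take the remainder ... to get new_predicted words") can be checked in isolation. A small numpy sketch with made-up sizes and costs, flattened the same way as `all_costs` above (the hypothesis index changes every `vocab_size` entries):

import numpy as np

beam_size, vocab_size = 3, 5  # illustrative sizes only
all_costs = np.array([-1.0, -9.0, -2.0, -9.0, -9.0,   # hypothesis 0
                      -9.0, -0.5, -9.0, -9.0, -9.0,   # hypothesis 1
                      -9.0, -9.0, -9.0, -0.2, -9.0])  # hypothesis 2

flat_idxs = np.argsort(all_costs)[::-1][:beam_size]  # top-k over beam*vocab
survivor_idxs = flat_idxs // vocab_size  # which hypothesis each survivor extends
new_ys = flat_idxs % vocab_size          # which token each survivor appends

print(flat_idxs, survivor_idxs, new_ys)  # [13  6  0] [2 1 0] [3 1 0]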
Example #55
def add_data(book, outputs, col, name = None, weight = None):
    '''
    Adds data to the workbook. If the scenario is a child scenario, each sheet compares it with its parent.

    Parameters
    ----------
    book (xlsxwriter.Workbook):
        Workbook to add data to
    outputs (dict):
        Dictionary of outputs to add
    col (str):
        Column in outputs to add to the book
    name (str):
        Name of the sheet. If none is specified, the name of the column is used as the sheet name.
    weight (str):
        Optional column in outputs used as a denominator to weight the data.
    '''
    global formats
    global twp_name
    global mun
    global twp
    global ibrc
    global wp

    if not name:
        name = col

    scenarios = list(outputs.keys())
    compare = (len(scenarios) == 2)  # only compare when there are exactly two scenarios

    data = get_data(outputs, col, scenarios, compare, weight)
    if weight:
        den_data = get_data(outputs, weight, scenarios, compare)
        data /= (den_data + np.finfo(float).tiny)

    if compare: #Add columns for comparing data
        data['Difference'] = data[scenarios[1]] - data[scenarios[0]]
        data['% Difference'] = (data['Difference']) / (data[scenarios[0]] + np.finfo(float).tiny)

        year1 = int(open(os.path.join(os.path.split(lumsdir)[0], r'FILES\YEAR.txt'), 'r').read())
        year2 = int(open(os.path.join(lumsdir, r'FILES\YEAR.txt'), 'r').read())
        time_span = year2 - year1

        if time_span != 0:
            data['Annual Growth Rate'] = np.power(data[scenarios[1]]/(data[scenarios[0]] + np.finfo(float).tiny), 1/time_span) - 1
            
    else:
        year = int(open(os.path.join(lumsdir, r'FILES\YEAR.txt'), 'r').read())

    data = data.fillna(0)

    sheet = book.add_worksheet(name)

    #Write headers
    for j in range(len(scenarios)):
        sheet.write_string(0, j+1, scenarios[j], formats['header'])
    if compare:
        sheet.write_string(0, 3, 'Difference', formats['header'])
        sheet.write_string(0, 4, '% Difference', formats['header'])
        sheet.write_string(0, 5, 'Annual Growth Rate', formats['header'])
        sheet.write_string(0, 6, 'Woods & Poole', formats['header'])
        sheet.write_string(0, 7, 'IBRC', formats['header'])
    else:
        sheet.write_string(0, j+2, 'Woods & Poole', formats['header'])
        sheet.write_string(0, j+3, 'IBRC', formats['header'])

    counties = data.index
    for i in range(len(counties)):
        #Write county labels and data
        if counties[i] == 'Total':
            sheet.write_string(i+1, 0, 'Total', formats['total_index'])
            for j in range(len(scenarios)):
                sheet.write_number(i+1, j+1, data[scenarios[j]]['Total'], formats['total_number'])
            if compare:
                sheet.write_number(i+1, 3, data['Difference']['Total'], formats['total_number'])
                try:
                    sheet.write_number(i+1, 4, data['% Difference']['Total'], formats['total_percent'])
                except TypeError:
                    continue
                if time_span != 0:
                    sheet.write_number(i+1, 5, data['Annual Growth Rate']['Total'], formats['total_percent'])
                    
                try:
                    sheet.write_number(i+1, 6, wp[name][year2]['Total'], formats['total_number'])
                except KeyError:
                    pass
                try:
                    sheet.write_number(i+1, 7, ibrc[name][year2]['Total'], formats['total_number'])
                except KeyError:
                    pass
                
            else:
                try:
                    sheet.write_number(i+1, j+2, wp[name][year]['Total'], formats['total_number'])
                except KeyError:
                    pass
                try:
                    sheet.write_number(i+1, j+3, ibrc[name][year]['Total'], formats['total_number'])
                except KeyError:
                    pass
                
        elif counties[i] in [' ', '  ']:
            sheet.write_string(i+1, 0, ' ', formats['index'])

        else:
            sheet.write_string(i+1, 0, counties[i], formats['index'])
            for j in range(len(scenarios)):
                sheet.write_number(i+1, j+1, data[scenarios[j]][counties[i]], formats['number'])
            if compare:
                sheet.write_number(i+1, 3, data['Difference'][counties[i]], formats['number'])
                try:
                    sheet.write_number(i+1, 4, data['% Difference'][counties[i]], formats['percent'])
                except TypeError:
                    continue
                if time_span != 0:
                    sheet.write_number(i+1, 5, data['Annual Growth Rate'][counties[i]], formats['percent'])
                    
                try:
                    sheet.write_number(i+1, 6, wp[name][year2][counties[i]], formats['number'])
                except KeyError:
                    pass
                try:
                    sheet.write_number(i+1, 7, ibrc[name][year2][counties[i]], formats['number'])
                except KeyError:
                    pass
                
            else:
                try:
                    sheet.write_number(i+1, j+2, wp[name][year][counties[i]], formats['number'])
                except KeyError:
                    pass
                try:
                    sheet.write_number(i+1, j+3, ibrc[name][year][counties[i]], formats['number'])
                except KeyError:
                    pass

    #Set column widths
    sheet.set_column(0, 0, 20)
    if compare:
        sheet.set_column(1, 7, 20)
    else:
        sheet.set_column(1, len(scenarios) + 2, 20)

##    sheet.conditional_format('D3:D142', {'type': 'cell',
##                                         'criteria': 'between',
##                                         'minimum': -100000,
##                                         'maximum': -50000,
##                                         'format': formats['warning1']})
##    sheet.conditional_format('D3:D142', {'type': 'cell',
##                                         'criteria': 'between',
##                                         'minimum': -200000,
##                                         'maximum': -100000,
##                                         'format': formats['warning2']})
##    sheet.conditional_format('D3:D142', {'type': 'cell',
##                                         'criteria': 'between',
##                                         'minimum': -500000,
##                                         'maximum': -200000,
##                                         'format': formats['warning3']})
##    sheet.conditional_format('D3:D142', {'type': 'cell',
##                                         'criteria': '<=',
##                                         'value': -500000,
##                                         'format': formats['warning4']})
##    sheet.conditional_format('D3:D142', {'type': 'cell',
##                                         'criteria': 'between',
##                                         'minimum': 50000,
##                                         'maximum': 100000,
##                                         'format': formats['warning1']})
##    sheet.conditional_format('D3:D142', {'type': 'cell',
##                                         'criteria': 'between',
##                                         'minimum': 100000,
##                                         'maximum': 200000,
##                                         'format': formats['warning2']})
##    sheet.conditional_format('D3:D142', {'type': 'cell',
##                                         'criteria': 'between',
##                                         'minimum': 200000,
##                                         'maximum': 500000,
##                                         'format': formats['warning3']})
##    sheet.conditional_format('D3:D142', {'type': 'cell',
##                                         'criteria': '>=',
##                                         'value': 500000,
##                                         'format': formats['warning4']})

    if compare:

        sheet.conditional_format('E3:E142', {'type': 'cell',
                                             'criteria': 'between',
                                             'minimum': -1/2,
                                             'maximum': -1/3,
                                             'format': formats['warning1']})
        sheet.conditional_format('E3:E142', {'type': 'cell',
                                             'criteria': 'between',
                                             'minimum': -2/3,
                                             'maximum': -1/2,
                                             'format': formats['warning2']})
        sheet.conditional_format('E3:E142', {'type': 'cell',
                                             'criteria': 'between',
                                             'minimum': -5/6,
                                             'maximum': -2/3,
                                             'format': formats['warning3']})
        sheet.conditional_format('E3:E142', {'type': 'cell',
                                             'criteria': '<=',
                                             'value': -5/6,
                                             'format': formats['warning4']})
        sheet.conditional_format('E3:E142', {'type': 'cell',
                                             'criteria': 'between',
                                             'minimum': 1/2,
                                             'maximum': 1,
                                             'format': formats['warning1']})
        sheet.conditional_format('E3:E142', {'type': 'cell',
                                             'criteria': 'between',
                                             'minimum': 1,
                                             'maximum': 2,
                                             'format': formats['warning2']})
        sheet.conditional_format('E3:E142', {'type': 'cell',
                                             'criteria': 'between',
                                             'minimum': 2,
                                             'maximum': 5,
                                             'format': formats['warning3']})
        sheet.conditional_format('E3:E142', {'type': 'cell',
                                             'criteria': '>=',
                                             'value': 5,
                                             'format': formats['warning4']})

    
        if time_span != 0:
            sheet.conditional_format('F3:F142', {'type': 'cell',
                                                 'criteria': '<',
                                                 'value': 0,
                                                 'format': formats['warning2']})
            sheet.conditional_format('F3:F142', {'type': 'cell',
                                                 'criteria': '>=',
                                                 'value': 0.05,
                                                 'format': formats['warning1']})
    

    sheet.freeze_panes(2, 0)
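
`np.finfo(float).tiny` appears above as a denominator guard. Adding the smallest positive normal double keeps 0/0 from producing NaN (it becomes 0) and suppresses divide-by-zero warnings, at the cost of huge ratios where the denominator is genuinely zero. A quick sketch with hypothetical values:

import numpy as np

num = np.array([10.0, 0.0, 1.0])
den = np.array([2.0, 0.0, 0.0])

print(num / (den + np.finfo(float).tiny))
# [5.0, 0.0, ~4.5e+307] -- no warnings or NaNs; a zero numerator maps to 0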
Example #56
def evalue(check_point,
           cache_path='./result.pkl',
           class_agnostic=False,
           ovthresh=0.5,
           use_07_metric=False):

    ind_class = {v: k for k, v in cfg.class_to_ind.items()}
    class_result_dic = {k: []
                        for k in cfg.class_to_ind.keys()
                        }  # store every class result

    imagenames = []

    if not os.path.exists(cache_path):

        test_set = PASCAL_VOC(cfg.testset_root_path, 'test')
        dataloader = DataLoader(test_set,
                                batch_size=cfg.batch_size,
                                shuffle=True,
                                num_workers=4)

        device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")

        fasterRCNN = resnet(cfg.backbone,
                            is_training=False,
                            pretrained=False,
                            class_agnostic=class_agnostic)
        fasterRCNN.create_architecture()

        print("load checkpoint %s" % (check_point))

        checkpoint = torch.load(check_point)
        fasterRCNN.load_state_dict(checkpoint['model_state_dict'])

        print('load model successfully!')

        fasterRCNN.eval()
        fasterRCNN.to(device)

        im_data = torch.FloatTensor(1)
        im_info = torch.FloatTensor(1)
        gt_boxes = torch.FloatTensor(1)
        im_data = im_data.cuda()
        im_info = im_info.cuda()
        gt_boxes = gt_boxes.cuda()

        #detect for result
        for batch_data in tqdm(dataloader):
            # batch_data = dataloader.next()
            with torch.no_grad():
                im_data.resize_(batch_data['image'].size()).copy_(
                    batch_data['image'])
                gt_boxes.resize_(batch_data['gt_boxes'].size()).copy_(
                    batch_data['gt_boxes'])
                im_info.resize_(batch_data['im_info'].size()).copy_(
                    batch_data['im_info'])

                image_name = os.path.basename(
                    batch_data['imname'][0]).split('.')[0]
                imagenames.append(image_name)

                rois, cls_prob, bbox_pred, _, _, _, _, _ = fasterRCNN(
                    im_data, gt_boxes)

                scores = cls_prob.data
                boxes = rois.data[:, :, 1:5]

                box_deltas = bbox_pred.data

                if cfg.bbox_normalize_targets_precomputed:
                    box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.bbox_normalize_std).cuda() \
                                 + torch.FloatTensor(cfg.bbox_normalize_means).cuda()
                    box_deltas = box_deltas.view(1, -1, 4)

                pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
                pred_boxes = clip_boxes(pred_boxes, im_info, 1)
                pred_boxes = pred_boxes / batch_data['im_info'][0, 2]

                scores = scores.squeeze()
                pred_boxes = pred_boxes.squeeze()

                for j in range(1, len(cfg.class_to_ind)):
                    inds = torch.nonzero(scores[:, j] > 0).view(-1)
                    if inds.numel() > 0:
                        cls_scores = scores[:, j][inds]
                        _, order = torch.sort(cls_scores, 0, True)

                        if class_agnostic:
                            cls_boxes = pred_boxes[inds, :]
                        else:
                            cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]

                        cls_dets = cls_boxes[order]
                        cls_scores = cls_scores[order]

                        keep = nms(cls_dets, cls_scores,
                                   cfg.test_nms_threshold)
                        cls_dets = cls_dets[keep.view(
                            -1).long()]  # boxes kept for the current class
                        cls_scores = cls_scores[keep.view(-1).long()]

                        for score, bbox in zip(cls_scores, cls_dets):
                            class_result_dic[ind_class[j]].append({
                                'image_name':
                                image_name,
                                'score':
                                score,
                                'bbox': [bbox[0], bbox[1], bbox[2], bbox[3]]
                            })

        print('writing result cache ...')
        with open(cache_path, 'wb') as fp:
            pickle.dump(class_result_dic, fp)
    else:
        with open(
                os.path.join(cfg.testset_root_path, 'ImageSets', 'Main',
                             'test.txt')) as fp:
            for line in fp:
                imagenames.append(line.strip())
        with open(cache_path, 'rb') as fp:
            class_result_dic = pickle.load(fp)

    print('compute mAP...')
    # compute mAP
    recs = {}
    for i, imagename in enumerate(imagenames):
        recs[imagename] = parse_rec(
            os.path.join(cfg.testset_root_path, 'Annotations',
                         imagename + '.xml'))

    # extract gt objects for this class
    mAP = 0
    for classname in cfg.class_to_ind.keys():
        if classname == 'BG':
            continue
        print(classname, end=' ')
        class_recs = {}
        npos = 0
        for imagename in imagenames:
            R = [obj for obj in recs[imagename] if obj['name'] == classname]
            bbox = np.array([x['bbox'] for x in R])
            difficult = np.array([x['difficult'] for x in R]).astype(bool)
            det = [False] * len(R)
            npos = npos + sum(~difficult)
            class_recs[imagename] = {
                'bbox': bbox,
                'difficult': difficult,
                'det': det
            }

        class_result = class_result_dic[classname]
        image_ids = [r['image_name'] for r in class_result]
        confidence = np.array([float(r['score']) for r in class_result])
        BB = np.array([r['bbox'] for r in class_result])

        # sort by confidence
        sorted_ind = np.argsort(-confidence)
        BB = BB[sorted_ind, :]
        image_ids = [image_ids[x] for x in sorted_ind]

        # go down dets and mark TPs and FPs
        nd = len(image_ids)
        tp = np.zeros(nd)
        fp = np.zeros(nd)
        for d in range(nd):
            R = class_recs[image_ids[d]]
            bb = BB[d, :].astype(float)
            ovmax = -np.inf
            BBGT = R['bbox'].astype(float)
            if BBGT.size > 0:
                # compute overlaps
                # intersection
                ixmin = np.maximum(BBGT[:, 0], bb[0])
                iymin = np.maximum(BBGT[:, 1], bb[1])
                ixmax = np.minimum(BBGT[:, 2], bb[2])
                iymax = np.minimum(BBGT[:, 3], bb[3])
                iw = np.maximum(ixmax - ixmin + 1., 0.)
                ih = np.maximum(iymax - iymin + 1., 0.)
                inters = iw * ih

                # union
                uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                       (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                       (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)

                overlaps = inters / uni
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)

            if ovmax > ovthresh:
                if not R['difficult'][jmax]:
                    if not R['det'][jmax]:
                        tp[d] = 1.
                        R['det'][jmax] = 1
                    else:
                        fp[d] = 1.
            else:
                fp[d] = 1.

        # compute precision recall
        fp = np.cumsum(fp)
        tp = np.cumsum(tp)
        rec = tp / float(npos)
        # avoid divide by zero in case the first detection matches a difficult
        # ground truth
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        ap = voc_ap(rec, prec, use_07_metric)
        print(ap)
        mAP += ap
    mAP = mAP / (len(cfg.class_to_ind) - 1)

    print('mAP:', mAP)
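
The eps guard in `prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)` only matters when the first detections match difficult ground truths (neither tp nor fp is incremented, so tp + fp is 0). A toy precision/recall computation with made-up outcomes:

import numpy as np

# 1 = true positive, 0 = false positive, sorted by descending confidence;
# npos is the total number of non-difficult ground-truth boxes.
tp = np.cumsum([1, 0, 1, 1])
fp = np.cumsum([0, 1, 0, 0])
npos = 5

rec = tp / float(npos)
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
print(rec)   # [0.2 0.2 0.4 0.6]
print(prec)  # [1.   0.5   0.6667 0.75]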
Example #57
def lossfun(x, alpha, scale, approximate=False, epsilon=1e-6):
  r"""Implements the general form of the loss.

  This implements the rho(x, \alpha, c) function described in "A General and
  Adaptive Robust Loss Function", Jonathan T. Barron,
  https://arxiv.org/abs/1701.03077.

  Args:
    x: The residual for which the loss is being computed. x can have any shape,
      and alpha and scale will be broadcasted to match x's shape if necessary.
      Must be a tensorflow tensor or numpy array of floats.
    alpha: The shape parameter of the loss (\alpha in the paper), where more
      negative values produce a loss with more robust behavior (outliers "cost"
      less), and more positive values produce a loss with less robust behavior
      (outliers are penalized more heavily). Alpha can be any value in
      [-infinity, infinity], but the gradient of the loss with respect to alpha
      is 0 at -infinity, infinity, 0, and 2. Must be a tensorflow tensor or
      numpy array of floats with the same precision as `x`. Varying alpha allows
      for smooth interpolation between a number of discrete robust losses:
      alpha=-Infinity: Welsch/Leclerc Loss.
      alpha=-2: Geman-McClure loss.
      alpha=0: Cauchy/Lorentzian loss.
      alpha=1: Charbonnier/pseudo-Huber loss.
      alpha=2: L2 loss.
    scale: The scale parameter of the loss. When |x| < scale, the loss is an
      L2-like quadratic bowl, and when |x| > scale the loss function takes on a
      different shape according to alpha. Must be a tensorflow tensor or numpy
      array of single-precision floats.
    approximate: a bool, where if True, this function returns an approximate and
      faster form of the loss, as described in the appendix of the paper. This
      approximation holds well everywhere except as x and alpha approach zero.
    epsilon: A float that determines how inaccurate the "approximate" version of
      the loss will be. Larger values are less accurate but more numerically
      stable. Must be greater than single-precision machine epsilon.

  Returns:
    The losses for each element of x, in the same shape as x. This is returned
    as a TensorFlow graph node of single precision floats.
  """
  # `scale` and `alpha` must have the same type as `x`.
  float_dtype = x.dtype
  tf.debugging.assert_type(scale, float_dtype)
  tf.debugging.assert_type(alpha, float_dtype)
  # `scale` must be > 0.
  assert_ops = [tf.Assert(tf.reduce_all(tf.greater(scale, 0.)), [scale])]
  with tf.control_dependencies(assert_ops):
    # Broadcast `alpha` and `scale` to have the same shape as `x`.
    alpha = tf.broadcast_to(alpha, tf.shape(x))
    scale = tf.broadcast_to(scale, tf.shape(x))

    if approximate:
      # `epsilon` must be greater than single-precision machine epsilon.
      assert epsilon > np.finfo(np.float32).eps
      # Compute an approximate form of the loss which is faster, but inaccurate
      # when x and alpha are near zero.
      b = tf.abs(alpha - tf.cast(2., float_dtype)) + epsilon
      d = tf.where(
          tf.greater_equal(alpha, 0.), alpha + epsilon, alpha - epsilon)
      loss = (b / d) * (tf.pow(tf.square(x / scale) / b + 1., 0.5 * d) - 1.)
    else:
      # Compute the exact loss.

      # This will be used repeatedly.
      squared_scaled_x = tf.square(x / scale)

      # The loss when alpha == 2.
      loss_two = 0.5 * squared_scaled_x
      # The loss when alpha == 0.
      loss_zero = util.log1p_safe(0.5 * squared_scaled_x)
      # The loss when alpha == -infinity.
      loss_neginf = -tf.math.expm1(-0.5 * squared_scaled_x)
      # The loss when alpha == +infinity.
      loss_posinf = util.expm1_safe(0.5 * squared_scaled_x)

      # The loss when not in one of the above special cases.
      machine_epsilon = tf.cast(np.finfo(np.float32).eps, float_dtype)
      # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
      beta_safe = tf.maximum(machine_epsilon, tf.abs(alpha - 2.))
      # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
      alpha_safe = tf.where(
          tf.greater_equal(alpha, 0.), tf.ones_like(alpha),
          -tf.ones_like(alpha)) * tf.maximum(machine_epsilon, tf.abs(alpha))
      loss_otherwise = (beta_safe / alpha_safe) * (
          tf.pow(squared_scaled_x / beta_safe + 1., 0.5 * alpha) - 1.)

      # Select which of the cases of the loss to return.
      loss = tf.where(
          tf.equal(alpha, -tf.cast(float('inf'), float_dtype)), loss_neginf,
          tf.where(
              tf.equal(alpha, 0.), loss_zero,
              tf.where(
                  tf.equal(alpha, 2.), loss_two,
                  tf.where(
                      tf.equal(alpha, tf.cast(float('inf'), float_dtype)),
                      loss_posinf, loss_otherwise))))

    return loss
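
As a sanity check on the special cases listed in the docstring, the general form used in `loss_otherwise` above, rho(x, alpha, c) = (|alpha-2|/alpha) * (((x/c)^2/|alpha-2| + 1)^(alpha/2) - 1), can be evaluated directly in numpy away from the singularities handled specially in the graph (alpha == 0, alpha == 2, and ±infinity). This sketch is independent of the TF implementation:

import numpy as np

def general_loss(x, alpha, c):
    b = abs(alpha - 2.0)
    return (b / alpha) * (((x / c) ** 2 / b + 1.0) ** (alpha / 2.0) - 1.0)

x = np.linspace(-3.0, 3.0, 7)

# alpha = 1 matches the Charbonnier / pseudo-Huber loss sqrt((x/c)^2 + 1) - 1
assert np.allclose(general_loss(x, 1.0, 1.0), np.sqrt(x ** 2 + 1.0) - 1.0)

# alpha -> 2 approaches the L2 loss 0.5 * (x/c)^2
assert np.allclose(general_loss(x, 2.0 - 1e-9, 1.0), 0.5 * x ** 2, atol=1e-6)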
Example #58
def evolution_strength_of_connection(A, B='ones', epsilon=4.0, k=2,
                                     proj_type="l2", block_flag=False,
                                     symmetrize_measure=True):
    """
    Construct strength of connection matrix using an Evolution-based measure

    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
        Sparse NxN matrix
    B : {string, array}
        If B='ones', then the near nullspace vector used is all ones.  If B is
        an (NxK) array, then B is taken to be the near nullspace vectors.
    epsilon : scalar
        Drop tolerance
    k : integer
        ODE num time steps, step size is assumed to be 1/rho(DinvA)
    proj_type : {'l2','D_A'}
        Define norm for constrained min prob, i.e. define projection
    block_flag : {boolean}
        If True, use a block D inverse as preconditioner for A during
        weighted-Jacobi

    Returns
    -------
    Atilde : {csr_matrix}
        Sparse matrix of strength values

    References
    ----------
    .. [1] Olson, L. N., Schroder, J., Tuminaro, R. S.,
       "A New Perspective on Strength Measures in Algebraic Multigrid",
       submitted, June, 2008.

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import evolution_strength_of_connection
    >>> n=3
    >>> stencil =  np.array([[-1.0,-1.0,-1.0],
    ...                        [-1.0, 8.0,-1.0],
    ...                        [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = evolution_strength_of_connection(A,  np.ones((A.shape[0],1)))
    """
    # local imports for evolution_strength_of_connection
    from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
    from pyamg.util.linalg import approximate_spectral_radius

    # ====================================================================
    # Check inputs
    if epsilon < 1.0:
        raise ValueError("expected epsilon > 1.0")
    if k <= 0:
        raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
    if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
        raise TypeError("expected csr_matrix or bsr_matrix")

    # ====================================================================
    # Format A and B correctly.
    # B must be in mat format, this isn't a deep copy
    if B == 'ones':
        Bmat = np.mat(np.ones((A.shape[0], 1), dtype=A.dtype))
    else:
        Bmat = np.mat(B)

    # Pre-process A.  We need A in CSR, to be devoid of explicit 0's and have
    # sorted indices
    if (not sparse.isspmatrix_csr(A)):
        csrflag = False
        numPDEs = A.blocksize[0]
        D = A.diagonal()
        # Calculate Dinv*A
        if block_flag:
            Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
            Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
                                     np.arange(Dinv.shape[0] + 1)),
                                     shape=A.shape)
            Dinv_A = (Dinv * A).tocsr()
        else:
            Dinv = np.zeros_like(D)
            mask = (D != 0.0)
            Dinv[mask] = 1.0 / D[mask]
            Dinv[D == 0] = 1.0
            Dinv_A = scale_rows(A, Dinv, copy=True)
        A = A.tocsr()
    else:
        csrflag = True
        numPDEs = 1
        D = A.diagonal()
        Dinv = np.zeros_like(D)
        mask = (D != 0.0)
        Dinv[mask] = 1.0 / D[mask]
        Dinv[D == 0] = 1.0
        Dinv_A = scale_rows(A, Dinv, copy=True)

    A.eliminate_zeros()
    A.sort_indices()

    # Handle preliminaries for the algorithm
    dimen = A.shape[1]
    NullDim = Bmat.shape[1]

    # Get spectral radius of Dinv*A, this will be used to scale the time step
    # size for the ODE
    rho_DinvA = approximate_spectral_radius(Dinv_A)

    # Calculate D_A for later use in the minimization problem
    if proj_type == "D_A":
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
    else:
        D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)

    # Calculate (I - delta_t Dinv A)^k
    #      In order to later access columns, we calculate the transpose in
    #      CSR format so that columns will be accessed efficiently
    # Calculate the number of time steps that can be done by squaring, and
    # the number of time steps that must be done incrementally
    nsquare = int(np.log2(k))
    ninc = k - 2**nsquare

    # Calculate one time step
    I = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
    Atilde = (I - (1.0/rho_DinvA)*Dinv_A)
    Atilde = Atilde.T.tocsr()

    # Construct a sparsity mask for Atilde that will restrict Atilde^T to the
    # nonzero pattern of A, with the added constraint that row i of Atilde^T
    # retains only the nonzeros that are also in the same PDE as i.
    mask = A.copy()

    # Restrict to same PDE
    if numPDEs > 1:
        row_length = np.diff(mask.indptr)
        my_pde = np.mod(range(dimen), numPDEs)
        my_pde = np.repeat(my_pde, row_length)
        mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
        del row_length, my_pde
        mask.eliminate_zeros()

    # If the total number of time steps is a power of two, then there is
    # a very efficient computational short-cut.  Otherwise, we support
    # other numbers of time steps, through an inefficient algorithm.
    if ninc > 0:
        warn("The most efficient time stepping for the Evolution Strength\
             Method is done in powers of two.\nYou have chosen " + str(k) +
             " time steps.")

        # Calculate (Atilde^nsquare)^T = (Atilde^T)^nsquare
        for i in range(nsquare):
            Atilde = Atilde*Atilde

        JacobiStep = (I - (1.0/rho_DinvA)*Dinv_A).T.tocsr()
        for i in range(ninc):
            Atilde = Atilde*JacobiStep
        del JacobiStep

        # Apply mask to Atilde, zeros in mask have already been eliminated at
        # start of routine.
        mask.data[:] = 1.0
        Atilde = Atilde.multiply(mask)
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    elif nsquare == 0:
        if numPDEs > 1:
            # Apply mask to Atilde, zeros in mask have already been eliminated
            # at start of routine.
            mask.data[:] = 1.0
            Atilde = Atilde.multiply(mask)
            Atilde.eliminate_zeros()
            Atilde.sort_indices()

    else:
        # Use computational short-cut for case (ninc == 0) and (nsquare > 0)
        # Calculate Atilde^k only at the sparsity pattern of mask.
        for i in range(nsquare-1):
            Atilde = Atilde*Atilde

        # Call incomplete mat-mat mult
        AtildeCSC = Atilde.tocsc()
        AtildeCSC.sort_indices()
        mask.sort_indices()
        Atilde.sort_indices()
        amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
                                         Atilde.data, AtildeCSC.indptr,
                                         AtildeCSC.indices, AtildeCSC.data,
                                         mask.indptr, mask.indices, mask.data,
                                         dimen)

        del AtildeCSC, Atilde
        Atilde = mask
        Atilde.eliminate_zeros()
        Atilde.sort_indices()

    del Dinv, Dinv_A, mask

    # Calculate strength based on constrained min problem of
    # min( z - B*x ), such that
    # (B*x)|_i = z|_i, i.e. they are equal at point i
    # z = (I - (t/k) Dinv A)^k delta_i
    #
    # Strength is defined as the relative point-wise approx. error between
    # B*x and z.  We don't use the full z in this problem, only that part of
    # z that is in the sparsity pattern of A.
    #
    # Can use either the D-norm, and inner product, or l2-norm and inner-prod
    # to solve the constrained min problem.  Using D gives scale invariance.
    #
    # This is a quadratic minimization problem with a linear constraint, so
    # we can build a linear system and solve it to find the critical point,
    # i.e. minimum.
    #
    # We exploit a known shortcut for the case of NullDim = 1.  The shortcut is
    # mathematically equivalent to the longer constrained min. problem

    if NullDim == 1:
        # Use shortcut to solve constrained min problem if B is only a vector
        # Strength(i,j) = | 1 - (z(i)/b(j))/(z(j)/b(i)) |
        # These ratios can be calculated by diagonal row and column scalings

        # Create necessary vectors for scaling Atilde
        #   It's not clear what to do where B == 0.  This is an
        #   easy programming solution that may make sense.
        Bmat_forscaling = np.ravel(Bmat)
        Bmat_forscaling[Bmat_forscaling == 0] = 1.0
        DAtilde = Atilde.diagonal()
        DAtildeDivB = np.ravel(DAtilde) / Bmat_forscaling

        # Calculate best approximation, z_tilde, in span(B)
        #   Importantly, scale_rows and scale_columns leave zero entries
        #   in the matrix.  For previous implementations this was useful
        #   because we assume data and Atilde.data are the same length below
        data = Atilde.data.copy()
        Atilde.data[:] = 1.0
        Atilde = scale_rows(Atilde, DAtildeDivB)
        Atilde = scale_columns(Atilde, np.ravel(Bmat_forscaling))

        # If angle in the complex plane between z and z_tilde is
        # greater than 90 degrees, then weak.  We can just look at the
        # dot product to determine if angle is greater than 90 degrees.
        angle = np.real(Atilde.data) * np.real(data) +\
            np.imag(Atilde.data) * np.imag(data)
        angle = angle < 0.0
        angle = np.array(angle, dtype=bool)

        # Calculate Approximation ratio
        Atilde.data = Atilde.data/data

        # If approximation ratio is less than tol, then weak connection
        weak_ratio = (np.abs(Atilde.data) < 1e-4)

        # Calculate Approximation error
        Atilde.data = abs(1.0 - Atilde.data)

        # Set small ratios and large angles to weak
        Atilde.data[weak_ratio] = 0.0
        Atilde.data[angle] = 0.0

        # Set near perfect connections to 1e-4
        Atilde.eliminate_zeros()
        Atilde.data[Atilde.data < np.sqrt(np.finfo(float).eps)] = 1e-4

        del data, weak_ratio, angle

    else:
        # For use in computing local B_i^H*B, precompute the element-wise
        # multiply of each column of B with each other column.  We also scale
        # by 2.0 to account for BDB's eventual use in a constrained
        # minimization problem
        BDBCols = int(np.sum(range(NullDim + 1)))
        BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
        counter = 0
        for i in range(NullDim):
            for j in range(i, NullDim):
                BDB[:, counter] = 2.0 *\
                    (np.conjugate(np.ravel(np.asarray(B[:, i]))) *
                        np.ravel(np.asarray(D_A * B[:, j])))
                counter = counter + 1

        # Choose tolerance for dropping "numerically zero" values later
        t = Atilde.dtype.char
        eps = np.finfo(float).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        tol = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]

        # Use constrained min problem to define strength
        amg_core.evolution_strength_helper(Atilde.data,
                                           Atilde.indptr,
                                           Atilde.indices,
                                           Atilde.shape[0],
                                           np.ravel(np.asarray(B)),
                                           np.ravel(np.asarray(
                                               (D_A * np.conjugate(B)).T)),
                                           np.ravel(np.asarray(BDB)),
                                           BDBCols, NullDim, tol)

        Atilde.eliminate_zeros()

    # All of the strength values are real by this point, so ditch the complex
    # part
    Atilde.data = np.array(np.real(Atilde.data), dtype=float)

    # Apply drop tolerance
    if epsilon != np.inf:
        amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
                                       Atilde.indices, Atilde.data)
        Atilde.eliminate_zeros()

    # Symmetrize
    if symmetrize_measure:
        Atilde = 0.5*(Atilde + Atilde.T)

    # Set diagonal to 1.0, as each point is strongly connected to itself.
    I = sparse.eye(dimen, dimen, format="csr")
    I.data -= Atilde.diagonal()
    Atilde = Atilde + I

    # If converted BSR to CSR, convert back and return amalgamated matrix,
    #   i.e. the sparsity structure of the blocks of Atilde
    if not csrflag:
        Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))

        n_blocks = Atilde.indices.shape[0]
        blocksize = Atilde.blocksize[0]*Atilde.blocksize[1]
        CSRdata = np.zeros((n_blocks,))
        amg_core.min_blocks(n_blocks, blocksize,
                            np.ravel(np.asarray(Atilde.data)), CSRdata)
        # Atilde = sparse.csr_matrix((data, row, col), shape=(*,*))
        Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
                                   shape=(Atilde.shape[0] // numPDEs,
                                          Atilde.shape[1] // numPDEs))

    # Standardized strength values require small values be weak and large
    # values be strong.  So, we invert the algebraic distances computed here
    Atilde.data = 1.0/Atilde.data

    # Scale C by the largest magnitude entry in each row
    Atilde = scale_rows_by_largest_entry(Atilde)

    return Atilde
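
The dtype-dependent drop tolerance above (feps*1e3 for single, eps*1e6 for double, geps*1e6 for extended precision) keys off the matrix's dtype character code. A minimal sketch of the same lookup:

import numpy as np
import scipy.sparse as sparse

A = sparse.eye(4, format='csr', dtype=np.float32)
_array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
tol = {0: np.finfo(np.single).eps * 1e3,
       1: np.finfo(float).eps * 1e6,
       2: np.finfo(np.longdouble).eps * 1e6}[_array_precision[A.dtype.char]]
print(tol)  # ~1.2e-4 for float32 ('f')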
Example #59
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
from joblib import Parallel, delayed, effective_n_jobs

from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array, gen_batches,
                     gen_even_slices)
from ..utils.fixes import logsumexp
from ..utils.validation import check_non_negative
from ..utils.validation import check_is_fitted

from ._online_lda import (mean_change, _dirichlet_expectation_1d,
                          _dirichlet_expectation_2d)

EPS = np.finfo(float).eps


def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
                             max_iters, mean_change_tol, cal_sstats,
                             random_state):
    """E-step: update document-topic distribution.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Document word matrix.

    exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of expectation of log topic word distribution.
        In the literature, this is `exp(E[log(beta)])`.
Example #60
def QuarticSolverVec(a,b,c,d,e):
    """
     function [x1, x2, x3, x4]=QuarticSolverVec(a,b,c,d,e)
     v.0.2 - Python Port
     - Added condition in size sorting to avoid floating point errors.
     - Removed early loop abortion when stuck in loop (Inefficient)
     - Improved numerical stability of analytical solution
     - Added code for the case of S==0
     ============================================
     v.0.1 - Nearly identical to QuarticSolver v. 0.4, the first successful vectorized implementation.
             Changed logic of ChosenSet to accommodate simultaneous convergence of sets 1 & 2.
           - Note the period of nearly-convergent solutions can be other
             than four (related to text on step 4 after table 3). Examples:
             period of 5: [a,b,c,d,e]=[0.111964240308252 -0.88497524334712 -0.197876116344933 -1.07336408259262 -0.373248675102065];
             period of 6: [a,b,c,d,e]=[-1.380904438798326 0.904866918945240 -0.280749330818231 0.990034312758900 1.413106456228119];
             period of 22: [a,b,c,d,e]=[0.903755513939902 0.490545114637739 -1.389679906455410 -0.875910689438623 -0.290630547104907];
             Therefore the condition was changed from epsilon1(iiter)==0 to epsilon1(iiter)<8*eps (and similarly for epsilon2)
           - Special case criterion of the analytical formula was changed to
             ind=abs(4*Delta0**3./Delta1**2)<2*eps;  (instead of exact zero)
           - vectorized
     ============================================
     - Solves for the x1-x4 roots of the quartic equation y(x)=ax^4+bx^3+cx^2+dx+e.
       Multiple equations can be solved simultaneously by entering same-sized column vectors on all inputs.
     - Note the code immediately translates the input parameters ["a","b","c","d","e"] to the reference paper parameters [1,a,b,c,d] for consistency,
       and the code probably performs best when "a"=1.
    
    Parameters
    ----------
    a,b,c,d,e : ``1-D arrays``
        Quartic polynomial coefficients
    
    Returns
    ------
    - x1-x4 : ``2-D array``
        Concatenated array of the polynomial roots. The function always returns four (possibly complex) values. Multiple roots, if they exist, are given multiple times. An error results in four NaN values.
        No convergence may result in four inf values (still?)
    
    Reference: 
    Peter Strobach (2010), Journal of Computational and Applied Mathematics 234
        http://www.sciencedirect.com/science/article/pii/S0377042710002128
    """
#    MaxIter=16;
    MaxIter=50;
    eps = np.finfo(float).eps
    #INPUT CONTROL
    #Note: not all input control is implemented.
    # all-column vectors only
#    if size(a,1)~=size(b,1) or size(a,1)~=size(c,1) or size(a,1)~=size(d,1) or size(a,1)~=size(e,1) or ...
#       size(a,2)~=1 or size(b,2)~=1 or size(c,2)~=1 or size(d,2)~=1 or size(e,2)~=1:
#        fprintf('ERROR: illegal input parameter sizes.\n');
#        x1=inf; x2=inf; x3=inf; x4=inf;    
#        return
    
    # translate input variables to the paper's
    if np.any(a==0):
       print('ERROR: a==0. Not a quartic equation.\n')
       x1=np.nan; x2=np.nan; x3=np.nan; x4=np.nan;
       return x1,x2,x3,x4
    else:
        input_a=a;
        input_b=b;
        input_c=c;
        input_d=d;
        input_e=e;
        a=input_b/input_a;
        b=input_c/input_a;
        c=input_d/input_a;
        d=input_e/input_a;
    
    # PRE-ALLOCATE MEMORY
    # ChosenSet is used to track which input set already has a solution (=non-zero value)
    ChosenSet=np.zeros_like(a);
    x1 = np.empty_like(a,complex)
    x1[:] = np.nan
    x2 = x1.copy(); x3 = x1.copy(); x4 = x1.copy()
    x11 = x1.copy(); x12 = x1.copy(); x21 = x1.copy(); x22 = x1.copy()
    alpha01 = x1.copy(); alpha02 = x1.copy(); beta01 = x1.copy(); beta02 = x1.copy()
    gamma01 = x1.copy(); gamma02 = x1.copy(); delta01 = x1.copy(); delta02 = x1.copy()
    e11 = x1.copy(); e12 = x1.copy(); e13 = x1.copy(); e14 = x1.copy()
    e21 = x1.copy(); e22 = x1.copy(); e23 = x1.copy(); e24 = x1.copy()
    alpha1 = x1.copy(); alpha2 = x1.copy(); beta1 = x1.copy(); beta2 = x1.copy()
    gamma1 = x1.copy(); gamma2 = x1.copy(); delta1 = x1.copy(); delta2 = x1.copy()
    alpha = x1.copy(); beta = x1.copy(); gamma = x1.copy(); delta = x1.copy()
    # check multiple roots -cases 2 & 3. indexed by ChosenSet=-2
    test_alpha=0.5*a;
    test_beta=0.5*(b-test_alpha**2);
    test_epsilon=np.stack((c-2*test_alpha*test_beta, d-test_beta**2)).T;
    ind=np.all(test_epsilon==0,1);
    if np.any(ind):
        x1[ind], x2[ind]=SolveQuadratic(np.ones_like(test_alpha[ind]),test_alpha[ind],test_beta[ind]);
        x3[ind]=x1[ind]; x4[ind]=x2[ind];
        ChosenSet[ind]=-2;
    
    # check multiple roots -case 4. indexed by ChosenSet=-4
    i=ChosenSet==0;
    x11[i], x12[i]=SolveQuadratic(np.ones(np.sum(i)),a[i]/2,b[i]/6);
    x21[i]=-a[i]-3*x11[i];    
    test_epsilon[i,:2]=np.stack((c[i]+x11[i]**2*(x11[i]+3*x21[i]), d[i]-x11[i]**3*x21[i])).T;
    ind[i]=np.all(test_epsilon[i]==0,1);
    if np.any(ind[i]):
        x1[ind[i]]=x11[ind[i]]; x2[ind[i]]=x11[ind[i]]; x3[ind[i]]=x11[ind[i]]; x4[ind[i]]=x12[ind[i]];
        ChosenSet[ind[i]]=-4;
    x22[i]=-a[i]-3*x12[i];
    test_epsilon[i,:2]=np.stack((c[i]+x12[i]**2*(x12[i]+3*x22[i]), d[i]-x12[i]**3*x22[i])).T;
    ind[i]=np.all(test_epsilon[i]==0,1);
    if np.any(ind[i]):
        x1[ind[i]]=x21[ind[i]]; x2[ind[i]]=x21[ind[i]]; x3[ind[i]]=x21[ind[i]]; x4[ind[i]]=x22[ind[i]];
        ChosenSet[ind[i]]=-4;
    # General solution
    # initialize
    epsilon1=np.empty((np.size(a),MaxIter))
    epsilon1[:]=np.inf
    epsilon2=epsilon1.copy();
    
    i=ChosenSet==0;
    fi=np.nonzero(i)[0];
    x=np.empty((fi.size,4),complex)
    ii = np.arange(fi.size)
    #Calculate analytical root values
    x[:,0], x[:,1], x[:,2], x[:,3]=AnalyticalSolution(np.ones(np.sum(i)),a[i],b[i],c[i],d[i],eps);
    #Sort the roots in order of their size
    ind=np.argsort(abs(x))[:,::-1]; #'descend'
    x1[i]=x.flatten()[4*ii+ind[:,0]];
    x2[i]=x.flatten()[4*ii+ind[:,1]];
    x3[i]=x.flatten()[4*ii+ind[:,2]];
    x4[i]=x.flatten()[4*ii+ind[:,3]];
    #Avoiding floating point errors.
    #The value chosen is somewhat arbitrary. See Appendix C for details.
    ind = abs(x1)-abs(x4)<8*10**-12;
    x2[ind] = np.conj(x1[ind])
    x3[ind] = -x1[ind]
    x4[ind] = -x2[ind]
    #Initializing parameter values
    alpha01[i]=-np.real(x1[i]+x2[i]);
    beta01[i]=np.real(x1[i]*x2[i]);
    alpha02[i]=-np.real(x2[i]+x3[i]);
    beta02[i]=np.real(x2[i]*x3[i]);
    gamma01[i], delta01[i]=FastGammaDelta(alpha01[i],beta01[i],a[i],b[i],c[i],d[i]);
    gamma02[i], delta02[i]=FastGammaDelta(alpha02[i],beta02[i],a[i],b[i],c[i],d[i]);
    
    alpha1[i]=alpha01[i]; beta1[i]=beta01[i]; gamma1[i]=gamma01[i]; delta1[i]=delta01[i];
    alpha2[i]=alpha02[i]; beta2[i]=beta02[i]; gamma2[i]=gamma02[i]; delta2[i]=delta02[i];
    
    #Backward Optimizer Outer Loop
    e11[i]=a[i]-alpha1[i]-gamma1[i];
    e12[i]=b[i]-beta1[i]-alpha1[i]*gamma1[i]-delta1[i];
    e13[i]=c[i]-beta1[i]*gamma1[i]-alpha1[i]*delta1[i];
    e14[i]=d[i]-beta1[i]*delta1[i];
    
    e21[i]=a[i]-alpha2[i]-gamma2[i];
    e22[i]=b[i]-beta2[i]-alpha2[i]*gamma2[i]-delta2[i];
    e23[i]=c[i]-beta2[i]*gamma2[i]-alpha2[i]*delta2[i];
    e24[i]=d[i]-beta2[i]*delta2[i];
    iiter=0;
    while iiter<MaxIter and np.any(ChosenSet[i]==0):
        i=np.nonzero(ChosenSet==0)[0];
        
        alpha1[i], beta1[i], gamma1[i], delta1[i], e11[i], e12[i], e13[i], e14[i], epsilon1[i,iiter]=BackwardOptimizer_InnerLoop(a[i],b[i],c[i],d[i],alpha1[i],beta1[i],gamma1[i],delta1[i],e11[i],e12[i],e13[i],e14[i]);
        alpha2[i], beta2[i], gamma2[i], delta2[i], e21[i], e22[i], e23[i], e24[i], epsilon2[i,iiter]=BackwardOptimizer_InnerLoop(a[i],b[i],c[i],d[i],alpha2[i],beta2[i],gamma2[i],delta2[i],e21[i],e22[i],e23[i],e24[i]);
    
        j = np.ones_like(a[i])
        j[(epsilon2[i,iiter]<epsilon1[i,iiter]).flatten()] = 2
        BestEps = np.nanmin(np.stack([epsilon1[i,iiter].flatten(), epsilon2[i,iiter].flatten()]),0);
        ind=BestEps<8*eps;
        ChosenSet[i[ind]]=j[ind];
        ind=np.logical_not(ind);
#        if iiter>0 and np.any(ind):
#            ii=i[ind];
#            LimitCycleReached = np.empty((ii.size,2),bool)
#            LimitCycleReached[:,0] = np.any(epsilon1[ii,:iiter]==epsilon1[ii,iiter],0)
#            LimitCycleReached[:,1] = np.any(epsilon2[ii,:iiter]==epsilon2[ii,iiter],0)
##            LimitCycleReached=[any(bsxfun(@eq,epsilon1(i(ind),max(1,iiter-4):max(1,iiter-1)),epsilon1(i(ind),iiter)),2) any(bsxfun(@eq,epsilon2(i(ind),max(1,iiter-4):max(1,iiter-1)),epsilon2(i(ind),iiter)),2)];
#            ChosenSet[ii[np.logical_and(LimitCycleReached[:,0] , np.logical_not(LimitCycleReached[:,1]))]]=1;
#            ChosenSet[ii[np.logical_and(LimitCycleReached[:,1] , np.logical_not(LimitCycleReached[:,0]))]]=2;
##            ChosenSet(ii(~LimitCycleReached(:,1) & LimitCycleReached(:,2)))=2;
##            ind=find(ind);
#            cond = np.logical_and(LimitCycleReached[:,1],LimitCycleReached[:,0])
#            ChosenSet[ii[cond]]=j[ind][cond]
##            ChosenSet(ii(LimitCycleReached(:,1) & LimitCycleReached(:,2)))=j(ind(LimitCycleReached(:,1) & LimitCycleReached(:,2)));
        iiter=iiter+1;
        
    #Checking which of the chains is relevant
    i=np.nonzero(ChosenSet==0)[0];
    ind=epsilon1[i,-1]<epsilon2[i,-1];
#    ind=np.logical_and(epsilon1[i,-1]<epsilon2[i,-1],np.logical_not(np.isnan(epsilon2[i,-1])));
    ChosenSet[i[ind]]=1;
    ChosenSet[i[np.logical_not(ind)]]=2;
    
    # Output
    i=ChosenSet==1;
    alpha[i]=alpha1[i];
    beta[i]=beta1[i];
    gamma[i]=gamma1[i];
    delta[i]=delta1[i];
    
    i=ChosenSet==2;
    alpha[i]=alpha2[i];
    beta[i]=beta2[i];
    gamma[i]=gamma2[i];
    delta[i]=delta2[i];
    
    i=ChosenSet>0;
    x1[i], x2[i]=SolveQuadratic(np.ones(np.sum(i)),alpha[i],beta[i]);
    x3[i], x4[i]=SolveQuadratic(np.ones(np.sum(i)),gamma[i],delta[i]);

    return np.array([x1,x2,x3,x4])
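
Usage sketch. The helper routines (SolveQuadratic, AnalyticalSolution, FastGammaDelta, BackwardOptimizer_InnerLoop) are not included in this snippet, so numpy's companion-matrix solver is used here as a reference for what the returned roots should look like on a simple test polynomial:

import numpy as np

# (x-1)(x-2)(x-3)(x-4) = x^4 - 10x^3 + 35x^2 - 50x + 24
coeffs = [1.0, -10.0, 35.0, -50.0, 24.0]
print(np.sort(np.roots(coeffs)))  # [1. 2. 3. 4.]

# With the helpers available, the vectorized call would be:
# a, b, c, d, e = (np.array([v]) for v in coeffs)
# roots = QuarticSolverVec(a, b, c, d, e)  # shape (4, 1), possibly complex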