def getAlphaNIEXS(lensParam,X):
	einRad = lensParam[0]
	f = lensParam[1]
	fp = np.sqrt(1 - f*f)
	bc = lensParam[2]
	t = lensParam[3]
	xshear = (lensParam[4] + 1j*lensParam[5])
	
	#### The rotated coordinates	
	Xr = X * np.exp(-1j*t)
	x1 = Xr.real
	x2 = Xr.imag

	#### The impact parameter b
	b = x1 + 1j*f*x2
	#### The b-squared term
	bsq = np.absolute(b)
	bsq = np.multiply(bsq,bsq)
	#### The differentiation of bsq with respect to x
	bsqx = x1 + 1j*f*f*x2

	#### The deflection angle
	alpha = einRad*(np.sqrt(f)/fp)*( np.arctanh( fp * np.sqrt(bsq+(bc*bc)) / bsqx) - np.arctanh(fp*bc/(f*Xr)) )
	alpha = np.conj(alpha)*np.exp(1j*t) - xshear*np.conj(X)
	
	#### The line above can emit RuntimeWarnings such as
	#### "divide by zero encountered in divide" and
	#### "invalid value encountered in divide";
	#### these can be safely ignored (or silenced with np.errstate, as in the sketch below)
	return Xr, alpha
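
A minimal usage sketch, not from the original source: the lens parameters and grid below are made-up illustration values, and np.errstate is one standard way to silence the RuntimeWarnings noted in the comment above.

import numpy as np

# Hypothetical parameter values: [einRad, f, bc, t, shear1, shear2]
lensParam = [1.0, 0.8, 0.1, 0.3, 0.01, 0.02]
# A small complex grid of image-plane positions
X = np.linspace(-2, 2, 5)[:, None] + 1j * np.linspace(-2, 2, 5)[None, :]

# Suppress the "divide by zero" / "invalid value" RuntimeWarnings mentioned above
with np.errstate(divide='ignore', invalid='ignore'):
    Xr, alpha = getAlphaNIEXS(lensParam, X)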
Example #2
    def kappa(self, r):
        from numpy import arctanh, arctan, arctan2, log, sin, cos, pi, logspace
        x = self.b / self.rs

        if x < 1.:
            norm = x**2 / (4 * arctanh(
                ((1 - x) / (1 + x))**0.5) / (1 - x**2)**0.5 + 2 * log(x / 2))
        elif x == 1.:
            norm = 1. / (2. + 2 * log(0.5))
        else:
            norm = x**2 / (4 * arctan(
                ((x - 1) / (x + 1))**0.5) / (x**2 - 1)**0.5 + 2 * log(x / 2))

        x = r / self.rs
        A = x * 0.
        C = x < 1.
        X = x[C].copy()
        A[C] = (1. - 2 * arctanh(
            ((1. - X) / (1. + X))**0.5) / (1 - X**2)**0.5) / (X**2 - 1.)
        C = x == 1.
        A[C] = 1. / 3
        C = x > 1.
        X = x[C].copy()
        A[C] = (1. - 2 * arctan(
            ((X - 1.) / (1. + X))**0.5) / (X**2 - 1.)**0.5) / (X**2 - 1.)
        return norm * A
Example #3
    def shear(self, r=None, zs=None):
        rs = self.rs()
        k = self.rhoC()*self.deltaC()
        sigmaC = self.sigmaC(zs)
        x = r/rs

        if isinstance(r,np.ndarray):
            f = np.piecewise(x, [x>1., x<1., x==1.], [lambda x: (rs*k)*((8*np.arctan(((x-1)/(1+x))**0.5)/(x**2*(x**2-1)**0.5)) \
                    + (4*np.log(x/2.)/x**2) \
                    - (2./(x**2-1)) \
                    + (4*np.arctan(((x-1)/(1+x))**0.5)/((x**2-1)**1.5))), \
                    lambda x: (rs*k)*((8*np.arctanh(((1-x)/(1+x))**0.5)/(x**2*(1-x**2)**0.5)) \
                    + (4*np.log(x/2.)/x**2) \
                    - (2./(x**2-1)) \
                    + (4*np.arctanh(((1-x)/(1+x))**0.5)/((x**2-1)*(1-x**2)**0.5))), \
                    lambda x: (rs*k)*(10./3+4.*np.log(0.5))])
        else:
            if (x<1.):
                f = (rs*k)*((8*np.arctanh(((1-x)/(1+x))**0.5)/(x**2*(1-x**2)**0.5)) \
                    + (4*np.log(x/2.)/x**2) \
                    - (2./(x**2-1)) \
                    + (4*np.arctanh(((1-x)/(1+x))**0.5)/((x**2-1)*(1-x**2)**0.5)))
            elif (x>1.):
                f = (rs*k)*((8*np.arctan(((x-1)/(1+x))**0.5)/(x**2*(x**2-1)**0.5)) \
                    + (4*np.log(x/2.)/x**2) \
                    - (2./(x**2-1)) \
                    + (4*np.arctan(((x-1)/(1+x))**0.5)/((x**2-1)**1.5)))
            else:
                f = (rs*k)*(10./3+4.*np.log(0.5))

        #print f/sigmaC
        #print isinstance(f,np.ndarray)
        return f/sigmaC
Example #4
    def deflections(self, xin, yin):
        from numpy import arctanh, arctan, arctan2, log, sin, cos

        #x,y = self.align_coords(xin,yin)
        x = xin - self.x
        y = yin - self.y
        b, rs = self.b, self.rs
        X = b / rs
        if X < 1.:
            amp = X**2 / (8 * arctanh(
                ((1 - X) / (1 + X))**0.5) / (1 - X**2)**0.5 + 4 * log(X / 2.))
        elif X == 1:
            amp = 0.25 / (1. + log(0.5))
        else:
            amp = X**2 / (8 * arctan(
                ((X - 1) / (1 + X))**0.5) / (X**2 - 1)**0.5 + 4 * log(X / 2.))

        r2 = (x**2 + y**2) / rs**2
        r = r2**0.5
        F = r * 0.
        F[r < 1.] = arctanh((1 - r2[r < 1.])**0.5) / (1 - r2[r < 1.])**0.5
        F[r == 1.] = 1.
        F[r > 1.] = arctan((r2[r > 1.] - 1.)**0.5) / (r2[r > 1.] - 1)**0.5

        dr = 4 * amp * rs * (log(r / 2) + F) / r
        A = arctan2(y, x)
        return dr * cos(A), dr * sin(A)
Example #5
def transfrom(N, E):
    N = np.float(N)
    xi = N/(A1*k0)
    eta = (E-E0)/(A1*k0)
    print xi, eta
    xip1 = h1*np.sin(2*xi)*np.cosh(2*eta)
    xip2 = h2*np.sin(4*xi)*np.cosh(4*eta)
    xip3 = h3*np.sin(6*xi)*np.cosh(6*eta)
    xip4 = h4*np.sin(8*xi)*np.cosh(8*eta)
    print xip1, xip2, xip3, xip4
    etap1 = h1*np.cos(2*xi)*np.sinh(2*eta)
    etap2 = h2*np.cos(4*xi)*np.sinh(4*eta)
    etap3 = h3*np.cos(6*xi)*np.sinh(6*eta)
    etap4 = h4*np.cos(8*xi)*np.sinh(8*eta)
    print etap1, etap2, etap3, etap4
    xip  = xi-xip1-xip2-xip3-xip4
    etap = eta-etap1-etap2-etap3-etap4
    print xip, etap
    beta = np.arcsin(sech(etap)*np.sin(xip))
    l = np.arcsin(np.tanh(etap)/np.cos(beta))
    print beta, l
    Q = np.arcsinh(np.tan(beta))
    print Q
    #e = np.e # ??
    e = np.sqrt(0.006694380023)
    Qp = Q + e*np.arctanh(e*np.tanh(Q))

    for i in xrange(3):
        Qp = Q + e*np.arctanh(e*np.tanh(Qp))

    print Qp

    rhoo = np.arctan(np.sinh(Qp))
    lamb = lamb0 + l
    return rhoo*180.0/np.pi, lamb*180.0/np.pi
Example #6
def generateHeatMaps(GroupDF,goodsubj,GroupTrain=[]):

    numberOfICs=10
    columnNames=[]
    for rsnNumber in range(numberOfICs):
            columnNames.append('RSN%d' % rsnNumber)


    heatmapDF=GroupDF[GroupDF.Subject_ID.isin(goodsubj)].groupby(['Subject_ID','FB','TR']).mean()
    hmDiff=np.zeros((10,10,len(unique(GroupDF[GroupDF.Subject_ID.isin(goodsubj)]['Subject_ID']))))
    hmFB=hmDiff.copy()
    hmNFB=hmDiff.copy()
    if len(GroupTrain)>0:
        heatmapTrainDF=GroupTrain[GroupTrain.Subject_ID.isin(goodsubj)].groupby(['Subject_ID','TR']).mean()
        hmTrain=hmDiff.copy()
        hmFB_Train=hmDiff.copy()
        hmNFB_Train=hmDiff.copy()

    for indx,subj in enumerate(unique(GroupDF[GroupDF.Subject_ID.isin(goodsubj)]['Subject_ID'])):
        hmFB[:,:,indx]=heatmapDF.loc[subj,'FEEDBACK'][columnNames].corr()
        hmNFB[:,:,indx]=heatmapDF.loc[subj,'NOFEEDBACK'][columnNames].corr()
        hmDiff[:,:,indx]=np.arctanh(heatmapDF.loc[subj,'FEEDBACK'][columnNames].corr())*np.sqrt(405)-np.arctanh(heatmapDF.loc[subj,'NOFEEDBACK'][columnNames].corr())*np.sqrt(405)
        if len(GroupTrain)>0:
            hmTrain[:,:,indx]=heatmapTrainDF.loc[subj][columnNames].corr()
            hmFB_Train[:,:,indx]=np.arctanh(heatmapDF.loc[subj,'FEEDBACK'][columnNames].corr())*np.sqrt(405)-np.arctanh(heatmapTrainDF.loc[subj][columnNames].corr())*np.sqrt(175)
            hmNFB_Train[:,:,indx]=np.arctanh(heatmapDF.loc[subj,'NOFEEDBACK'][columnNames].corr())*np.sqrt(405)-np.arctanh(heatmapTrainDF.loc[subj][columnNames].corr())*np.sqrt(175)

    if len(GroupTrain)>0:
        return hmFB,hmNFB,hmDiff,hmTrain,hmFB_Train,hmNFB_Train
    else:
        return hmFB,hmNFB,hmDiff
Example #7
def slRSA_m_1Ss(ds, model, omit, partial_dsm = None, radius=3, cmetric='pearson'):
    '''one subject

    Executes slRSA on a single subject and returns a tuple of arrays of 1-p's [0] and Fisher Z-transformed r's [1]

    ds: pymvpa dsets for 1 subj
    model: model DSM to be correlated with neural DSMs per searchlight center
    partial_dsm: model DSM to be partialled out of model-neural DSM correlation
    omit: list of targets omitted from pymvpa datasets
    radius: sl radius, default 3
    cmetric: default 'pearson'; other option is 'spearman'
    '''        

    if __debug__:
        debug.active += ["SLC"]

    for om in omit:
        ds = ds[ds.sa.targets != om] # cut out omits
        print('Target |%s| omitted from analysis' % (om))
    ds = mean_group_sample(['targets'])(ds) #make UT ds
    print('Mean group sample computed at size:',ds.shape,'...with UT:',ds.UT)

    print('Beginning slRSA analysis...')
    if partial_dsm == None: tdcm = rsa.TargetDissimilarityCorrelationMeasure(squareform(model), comparison_metric=cmetric)
    elif partial_dsm != None: tdcm = rsa.TargetDissimilarityCorrelationMeasure(squareform(model), comparison_metric=cmetric, partial_dsm = squareform(partial_dsm))
    sl = sphere_searchlight(tdcm,radius=radius)
    slmap = sl(ds)
    if partial_dsm == None:
        print('slRSA complete with map of shape:',slmap.shape,'...p max/min:',slmap.samples[0].max(),slmap.samples[0].min(),'...r max/min',slmap.samples[1].max(),slmap.samples[1].min())
        return 1-slmap.samples[1],np.arctanh(slmap.samples[0])
    else:
        print('slRSA complete with map of shape:',slmap.shape,'...r max/min:',slmap.samples[0].max(),slmap.samples[0].min())
        return 1-slmap.samples[1],np.arctanh(slmap.samples[0])
Example #8
 def _surfdens(self,R,z,phi=0.,t=0.):
     """
     NAME:
        _surfdens
     PURPOSE:
        evaluate the surface density for this potential
     INPUT:
        R - Galactocentric cylindrical radius
        z - vertical height
        phi - azimuth
        t - time
     OUTPUT:
        the surface density
     HISTORY:
        2018-08-19 - Written - Bovy (UofT)
     """
     r= numpy.sqrt(R**2.+z**2.)
     x= r/self.a
     Rpa= numpy.sqrt(R**2.+self.a**2.)
     Rma= numpy.sqrt(R**2.-self.a**2.+0j)
     if Rma == 0:
         za= z/self.a
         return self.a**2./2.*((2.-2.*numpy.sqrt(za**2.+1)
                                +numpy.sqrt(2.)*za\
                                    *numpy.arctan(za/numpy.sqrt(2.)))/z
                               +numpy.sqrt(2*za**2.+2.)\
                                *numpy.arctanh(za/numpy.sqrt(2.*(za**2.+1)))
                               /numpy.sqrt(self.a**2.+z**2.))
     else:
         return self.a**2.*(numpy.arctan(z/x/Rma)/Rma
                            +numpy.arctanh(z/x/Rpa)/Rpa
                            -numpy.arctan(z/Rma)/Rma
                            +numpy.arctan(z/Rpa)/Rpa).real
Example #9
def coherr(C,J1,J2,p=0.05,Nsp1=None,Nsp2=None):
    """
    Function to compute lower and upper confidence intervals on
    coherency (absolute value of coherence).

    C:            coherence (real or complex)
    J1,J2:        tapered fourier transforms
    p:            the target P value (default 0.05)
    Nsp1:         number of spikes in J1, used for finite size correction.
    Nsp2:         number of spikes in J2, used for finite size correction.
                  Default is None, for no correction

    Outputs:
    CI:           confidence interval for C, N x 2 array, (lower, upper)
    phi_std:      standard deviation of phi, N array
    """
    from numpy import iscomplexobj, absolute, fix, zeros, setdiff1d, real, sqrt,\
         arctanh, tanh
    from scipy.stats import t

    J1 = _combine_trials(J1)
    J2 = _combine_trials(J2)
    N,K = J1.shape
    assert J1.shape==J2.shape, "J1 and J2 must have the same dimensions."
    assert N == C.size, "S and J lengths don't match"
    if iscomplexobj(C): C = absolute(C)

    pp = 1 - p/2
    dof = 2*K
    dof1 = dof if Nsp1 is None else fix(2.*Nsp1*dof/(2.*Nsp1+dof))
    dof2 = dof if Nsp2 is None else fix(2.*Nsp2*dof/(2.*Nsp2+dof))
    dof = min(dof1,dof2)

    Cerr = zeros((N,2))
    tcrit = t(dof-1).ppf(pp).tolist()
    atanhCxyk = zeros((N,K))
    phasefactorxyk = zeros((N,K),dtype='complex128')

    for k in xrange(K):
        indxk = setdiff1d(range(K),[k])
        J1k = J1[:,indxk]
        J2k = J2[:,indxk]
        eJ1k = real(J1k * J1k.conj()).sum(1)
        eJ2k = real(J2k * J2k.conj()).sum(1)
        eJ12k = (J1k.conj() * J2k).sum(1)
        Cxyk = eJ12k/sqrt(eJ1k*eJ2k)
        absCxyk = absolute(Cxyk)
        atanhCxyk[:,k] = sqrt(2*K-2)*arctanh(absCxyk)
        phasefactorxyk[:,k] = Cxyk / absCxyk

    atanhC = sqrt(2*K-2)*arctanh(C);
    sigma12 = sqrt(K-1)* atanhCxyk.std(1)

    Cu = atanhC + tcrit * sigma12
    Cl = atanhC - tcrit * sigma12
    Cerr[:,0] = tanh(Cl / sqrt(2*K-2))
    Cerr[:,1] = tanh(Cu / sqrt(2*K-2))
    phistd = (2*K-2) * (1 - absolute(phasefactorxyk.mean(1)))
    return Cerr, phistd
Example #10
 def function_A(vega, a, b, t):
     """Helper function A(t) defined in thesis."""
     kappa = 2 * b ** (-2) / vega
     val = (1 - sqrt( 1 + kappa * (1 - exp( -vega * t )))) \
         / kappa + 1 / sqrt( 1 + kappa) \
         * (arctanh( sqrt( 1 + kappa * ( 1 - exp( -vega * t)))/ \
         sqrt(1 + kappa)) - arctanh(1 / sqrt( 1 + kappa))) 
     return val
Example #11
def test_arctanh():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.arctanh(a), numpy.arctanh(b))
    c = afnumpy.random.random((2,3))
    d = numpy.array(a)
    fassert(afnumpy.arctanh(a, out=c), numpy.arctanh(b, out=d))
    fassert(c, d)
Example #12
def get_ttest_map(map_name):
    subjs = map(lambda n: 'sub%03d' % n, SUBJECTS)
    vols = load_maps(map_name, subjs)
    # se = 1.0/np.sqrt(1199-6)
    popmean = np.arctanh(0.03)
    ttest = stats.ttest_1samp(np.arctanh(vols), popmean, axis=0)[0]
    ttest[ttest < 4] = np.NaN
    return get_volume(ttest, 4, 15)
Example #13
def g1g2_to_e1e2(g1, g2):
    """
    convert reduced shear g1,g2 to standard ellipticity
    parameters e1,e2

    uses eta representation but could also use
        e1 = 2*g1/(1 + g1**2 + g2**2)
        e2 = 2*g2/(1 + g1**2 + g2**2)

    parameters
    ----------
    g1,g2: scalars
        Reduced shear space shapes

    outputs
    -------
    e1,e2: tuple of scalars
        shapes in (ixx-iyy)/(ixx+iyy) style space
    """
    g=numpy.sqrt(g1*g1 + g2*g2)

    if isinstance(g1, numpy.ndarray):
        w,=numpy.where(g >= 1.0)
        if w.size != 0:
            raise GMixRangeError("some g were out of bounds")

        eta = 2*numpy.arctanh(g)
        e = numpy.tanh(eta)

        numpy.clip(e, 0.0, 0.99999999, e)

        e1=numpy.zeros(g.size)
        e2=numpy.zeros(g.size)
        w,=numpy.where(g != 0.0)
        if w.size > 0:
            fac = e[w]/g[w]

            e1[w] = fac*g1[w]
            e2[w] = fac*g2[w]

    else:
        if g >= 1.:
            raise GMixRangeError("g out of bounds: %s" % g)
        if g == 0.0:
            return (0.0, 0.0)

        eta = 2*numpy.arctanh(g)
        e = numpy.tanh(eta)
        if e >= 1.:
            # round off?
            e = 0.99999999

        fac = e/g

        e1 = fac*g1
        e2 = fac*g2

    return e1,e2
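
As a sanity check on the docstring's note about the direct formula, here is a small sketch (not from the original source) confirming that the eta-based conversion above agrees with e1 = 2*g1/(1 + g1**2 + g2**2) for an arbitrary scalar example.

import numpy

g1, g2 = 0.2, -0.1
e1, e2 = g1g2_to_e1e2(g1, g2)
# Direct formula quoted in the docstring above
e1_direct = 2*g1/(1 + g1**2 + g2**2)
e2_direct = 2*g2/(1 + g1**2 + g2**2)
assert numpy.allclose([e1, e2], [e1_direct, e2_direct])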
Example #14
def e1e2_to_g1g2(e1, e2):
    """
    convert e1,e2 to reduced shear style ellipticity

    parameters
    ----------
    e1,e2: tuple of scalars
        shapes in (ixx-iyy)/(ixx+iyy) style space

    outputs
    -------
    g1,g2: scalars
        Reduced shear space shapes

    """

    e = numpy.sqrt(e1*e1 + e2*e2)
    if isinstance(e1, numpy.ndarray):
        w,=numpy.where(e >= 1.0)
        if w.size != 0:
            raise GMixRangeError("some e were out of bounds")

        eta=numpy.arctanh(e)
        g = numpy.tanh(0.5*eta)

        numpy.clip(g, 0.0, 0.99999999, g)

        g1=numpy.zeros(g.size)
        g2=numpy.zeros(g.size)
        w,=numpy.where(e != 0.0)
        if w.size > 0:
            fac = g[w]/e[w]

            g1[w] = fac*e1[w]
            g2[w] = fac*e2[w]

    else:
        if e >= 1.:
            raise GMixRangeError("e out of bounds: %s" % e)
        if e == 0.0:
            g1,g2=0.0,0.0

        else:

            eta=numpy.arctanh(e)
            g = numpy.tanh(0.5*eta)

            if g >= 1.:
                # round off?
                g = 0.99999999


            fac = g/e

            g1 = fac*e1
            g2 = fac*e2

    return g1,g2
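
A quick round-trip sketch, not from the original source: converting reduced shear to e1,e2 with g1g2_to_e1e2 (Example #13 above) and back with e1e2_to_g1g2 should recover the inputs to floating-point precision.

import numpy

g1 = numpy.array([0.0, 0.1, 0.3, 0.6])
g2 = numpy.array([0.0, -0.2, 0.25, -0.1])
e1, e2 = g1g2_to_e1e2(g1, g2)
g1_back, g2_back = e1e2_to_g1g2(e1, e2)
assert numpy.allclose(g1, g1_back) and numpy.allclose(g2, g2_back)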
Example #15
    def make_from_examples(cls, X, low, high, directed = True):
        #for every pair of examples (i,j)
        #make a feature that takes on the value low at i
        #and value high at j
        #if directed, order of (i,j) matters, otherwise it does not


        m,n =  X.shape

        if directed:
            h = m **2 - m
        else:
            h = m * (m-1)/2
        W = N.zeros((n,h))
        b = N.zeros(h)
        idx = 0

        inv_low = N.arctanh(low)
        if N.abs(N.tanh(inv_low)-low) > 1e-6:
            assert False
        #

        inv_high = N.arctanh(high)

        for i in xrange(X.shape[0]):
            if directed:
                r = xrange(m)
            else:
                r = xrange(i+1,m)

            for j in r:
                if i == j:
                    continue

                diff = X[j,:] - X[i,:]
                direction = diff / N.sqrt(N.square(diff).sum())
                pi = N.dot(X[i,:],direction)
                pj = N.dot(X[j,:],direction)

                wmag =  (inv_high - inv_low) / (pj - pi)

                b[idx] = (pj*inv_low - pi*inv_high) / (pj - pi)
                W[:,idx] = wmag * direction

                #check it
                ival = N.tanh(N.dot(W[:,idx],X[i,:])+b[idx])
                jval = N.tanh(N.dot(W[:,idx],X[j,:])+b[idx])


                assert abs(ival-low) < 1e-6
                assert abs(jval-high) < 1e-6

                idx += 1

        assert idx == h


        return TanhFeatureExtractor(W,b)
Example #16
def diffcorrcoeftest(rvalue1, rvalue2, N1, N2):
    ''' Tests for a difference between two r values by applying Fisher's transformation and comparing the resulting z values with a normal (z) test. Returns z and the two-tailed p. '''
    r_z1=np.arctanh(rvalue1) #equivalent to 0.5 * np.log((1 + rvalue1)/(1 - rvalue1))
    r_z2=np.arctanh(rvalue2)
    se_diff_r = np.sqrt(1.0/(N1 - 3.0) + 1.0/(N2 - 3.0))
    diff = r_z1 - r_z2
    z = abs(diff / se_diff_r)
    p = (1 - scipy.stats.norm.cdf(z))*2
    return round(z,3), round(p,3)
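
A hypothetical usage sketch (the r and n values below are illustrative, not from the source): comparing a correlation of 0.62 from 50 samples against 0.40 from 60 samples.

import numpy as np
import scipy.stats

z, p = diffcorrcoeftest(0.62, 0.40, 50, 60)
print(z, p)  # z statistic and two-tailed p for the difference between the two r values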
Example #17
def atanh(x):
    """
    Inverse hyperbolic tangent
    """
    if isinstance(x, UncertainFunction):
        mcpts = np.arctanh(x._mcpts)
        return UncertainFunction(mcpts)
    else:
        return np.arctanh(x)
Example #18
def fisherTransformation(r1, r2, n1, n2):
    z1 = numpy.arctanh(r1)
    z2 = numpy.arctanh(r2)
    
    se = numpy.sqrt(1/(float(n1)-3) + 1/(float(n2)-3))
    
    z = (z1-z2)/se
    
    return z
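
fisherTransformation returns only the z statistic; a minimal follow-up sketch (not from the original source, with illustrative values) converts it into a two-tailed p-value with scipy.

import numpy
from scipy.stats import norm

z = fisherTransformation(0.55, 0.35, 80, 75)
p = 2 * (1 - norm.cdf(abs(z)))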
Example #19
    def test_arctanh(self):
        import math
        from numpy import arctanh

        for v in [0.99, 0.5, 0, -0.5, -0.99]:
            assert math.atanh(v) == arctanh(v)
        for v in [2.0, -2.0]:
            assert math.isnan(arctanh(v))
        for v in [1.0, -1.0]:
            assert arctanh(v) == math.copysign(float("inf"), v)
Example #20
    def compute_S(x):
        ''' compute the nonlinearity of the belief prop system
            S(x) is obtained by applying arctanh to x at i in S_ '''

        global S_, q

        y = np.ones(q)
        for i in range(q):
            # accumulate the product over the nonzero entries of row i of S_
            for j in np.nonzero(S_[i, :])[0]:
                y[i] *= np.arctanh(x[j] / 2)
        y = 2 * np.arctanh(y)
        return y
Example #21
def func_hernquist(x):
	res = x*0
	idx1 = x>0
	idx2 = x<1
	idx = idx1&idx2
	res[idx] = 1.0/np.sqrt(1-x[idx]*x[idx])*np.arctanh(np.sqrt(1-x[idx]*x[idx]))
	idx = x == 1
	res[idx] = 1.0
	idx = x>1
	res[idx] = 1.0/np.sqrt(x[idx]*x[idx]-1.0)*np.arctanh(np.sqrt(x[idx]*x[idx]-1))
	return res
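
A brief usage sketch, not from the original source: func_hernquist expects a float array (it indexes with boolean masks). Note that its x > 1 branch applies arctanh to sqrt(x*x - 1), which exceeds 1 once x > sqrt(2) and then evaluates to nan.

import numpy as np

x = np.array([0.25, 0.5, 1.0, 1.2])
print(func_hernquist(x))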
Example #22
def corr_ci(r, n, confidence=0.95):
    """ Compute confidence interval for Spearman or Pearson correlation coefficients based on Fisher transformation
    https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient#Using_the_Fisher_transformation
    :param r: correlation coefficient
    :param n: sample size
    :param confidence: confidence level (default 0.95)
    :return: low and high
    """
    delta = stats.norm.ppf(1.0 - (1 - confidence) / 2) / np.sqrt(n - 3)
    lower = np.tanh(np.arctanh(r) - delta)
    upper = np.tanh(np.arctanh(r) + delta)
    return lower, upper
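
A short usage sketch with illustrative values (not from the source): the 95% confidence interval for an observed correlation of r = 0.5 from n = 30 samples.

import numpy as np
from scipy import stats

low, high = corr_ci(0.5, 30)
print(low, high)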
Example #23
def g1g2_to_eta1eta2(g1, g2):
    """
    convert reduced shear g1,g2 to eta style ellipticity

    parameters
    ----------
    g1,g2: scalars
        Reduced shear space shapes

    outputs
    -------
    eta1,eta2: tuple of scalars
        eta space shapes
    """


    if isinstance(g1, numpy.ndarray):

        g=numpy.sqrt(g1*g1 + g2*g2)
        w,=numpy.where(g >= 1.0)
        if w.size != 0:
            raise GMixRangeError("some g were out of bounds")

        eta1=numpy.zeros(g.size)
        eta2=eta1.copy()

        w,=numpy.where(g > 0.0)
        if w.size > 0:

            eta = 2*numpy.arctanh(g[w])
            fac = eta/g[w]

            eta1[w] = fac*g1[w]
            eta2[w] = fac*g2[w]

    else:
        g=numpy.sqrt(g1*g1 + g2*g2)

        if g >= 1.:
            raise GMixRangeError("g out of bounds: %s converting to eta" % g)

        if g == 0.0:
            eta1, eta2=0.0, 0.0
        else:

            eta = 2*numpy.arctanh(g)

            fac = eta/g

            eta1 = fac*g1
            eta2 = fac*g2

    return eta1,eta2
Example #24
def compute_best_J(im):
    # A more precise computation of sisj could be done...
    l_x = len(im)
    X, Y = np.ogrid[:l_x, :l_x]
    mask = ((X - l_x/2)**2 + (Y - l_x/2)**2 <= (l_x/2)**2)
    grad = ndimage.morphological_gradient(im, footprint=np.ones((3, 3)))
    sisj_average = 1 - (grad[mask] > 0).mean()
    J1 = np.arctanh(sisj_average)
    grad2 = np.abs(np.diff(im, axis=0))[:, :-1] + np.abs(np.diff(im, axis=1))[:-1]
    sisj_average = 1 - 2*(grad2[mask[:-1, :-1]] > 0).mean()
    J2 = np.arctanh(sisj_average)
    return J1, J2
Example #25
def spearmanr_ci_95(r,n):
    """Calculate the 95% confidence interval for a spearman r coefficient

    See http://stats.stackexchange.com/a/18904
    """
    if n < 5:
        print('Cannot compute CI for Spearman R with less than five samples')
        return (np.nan,np.nan)
    delta = 1.96/np.sqrt(n-3)
    lower = np.tanh(np.arctanh(r)-delta)
    upper = np.tanh(np.arctanh(r)+delta)
    return (lower,upper)
Example #26
def getcaustics(b_I,q):
  ## Do not change the next line or face consequences
  t=np.arange(0.0,pi/2.0+pi/100.0,pi/200.0);

  den=(1+q**2)-(1-q**2)*np.cos(2*t)  
  r=sqrt(2.)*b_I/np.sqrt(den)
  x=r*np.cos(t);
  y=r*np.sin(t);

  ## Generate parametric function for the tangential caustic
  xi=np.sqrt(2.*(1-q**2)/den);
  u=np.cos(t)*r;
  v=np.sin(t)*r;
  if(q!=1.0):
      u=u-b_I*np.arctan(xi*np.cos(t))/sqrt(1-q**2);
      v=v-b_I*np.arctanh(xi*np.sin(t))/sqrt(1-q**2);

  ## Generate parametric function for the radial caustic
  up=b_I*0.0;
  vp=b_I*0.0;
  if(q!=1.0):
      up=-b_I*np.arctan(xi*np.cos(t))/sqrt(1-q**2);
      vp=-b_I*np.arctanh(xi*np.sin(t))/sqrt(1-q**2);

  ## Set up splines for the tangential and radial caustic
  r1=np.sqrt(u**2+v**2);
  r2=np.sqrt(up**2+vp**2);
  
  t1=[0.]*u.size; 
  t2=[0.]*up.size; 
  for ii in range(u.size):
      if(u[ii]==0.):
          t1[ii]=np.pi/2.;
      else:
          t1[ii]=arctan(np.abs(v[ii]/u[ii]));
      if(t[ii]>pi/2.0):
          t1[ii]=pi-t1[ii];

  for ii in range(up.size):
      if(up[ii]==0.):
          t2[ii]=np.pi/2.;
      else:
          t2[ii]=arctan(np.abs(vp[ii]/up[ii]));
      if(t[ii]>pi/2.0):
          t2[ii]=pi-t2[ii];
  c1=interp1d(t1,r1,kind='cubic');
  c2=interp1d(t2,r2,kind='cubic');

  ## Calculate maximum radial distance between the caustics and the origin
  maxr=np.max([r1,r2]);

  return c1,c2,maxr;
Example #27
def make_ttest(reg1, reg2):
    masker = NiftiMasker(nib.load(MASK_FILE), standardize=False)
    masker.fit()

    subjects = [1, 2, 3, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]

    a = np.arctanh(join_all_subjects(reg1, subjects, masker))
    b = np.arctanh(join_all_subjects(reg2, subjects, masker))
    t, prob = ttest_rel(a, b)

    tt = masker.inverse_transform(t)
    pp = masker.inverse_transform(prob)
    return tt, pp
Example #28
def arctanh(x, out=None):
    """
    Raises a ValueError if input cannot be rescaled to a dimensionless
    quantity.
    """
    if not isinstance(x, Quantity):
        return np.arctanh(x, out)

    return Quantity(
        np.arctanh(x.rescale(dimensionless).magnitude, out),
        dimensionless,
        copy=False
    )
Example #29
def show_map():
    sub_nums = map(int, raw_input('enter subject numbers --> ').split())
    if len(sub_nums) == 0:
        sub_nums = SUBJECTS
    subjs = map(lambda n: 'sub%03d' % n, sub_nums)

    maps = sorted(REGRESSORS)
    for i, m in enumerate(maps):
        print '%d. %s' % (i, m)

    map_nums = map(int, raw_input('Choose map --> ').split())
    if len(map_nums) == 1:
        map_name = maps[map_nums[0]]
        vols = load_maps(map_name, subjs)
        volumes = dict(zip(subjs, vols))
        # vol_objects = dict(map(lambda x: (map_name+' '+x[0], get_volume(x[1], 0.1, 0.4)), volumes.items()))
        vol_objects = dict()
        if len(vols) > 1:
            vols = np.arctanh(vols)
            se = 1.0/np.sqrt(1199-6)  # ~0.03
            popmean = np.arctanh(0.03)
            ttest = stats.ttest_1samp(vols, popmean, axis=0)[0]
            ttest[ttest < 4] = np.NaN
            vol_objects.update(**{map_name + ' ttest': get_volume(ttest, 4, 15)})
            std = np.tanh(np.std(vols, axis=0))
            mean = np.tanh(mean_vol(vols))
            vols = np.tanh(vols)
            vol_objects.update(**{map_name + ' std': get_volume(std, 0.05, 0.15)})
            vol_objects.update(**{map_name + ' mean': get_volume(mean, 0.1, 0.4)})
        ds = cortex.dataset.Dataset(**vol_objects)
    if len(map_nums) == 2:
        mname1 = maps[map_nums[0]]
        mname2 = maps[map_nums[1]]
        maps1 = load_maps(mname1, subjs)
        maps2 = load_maps(mname2, subjs)
        diff_maps = [compute_diff_map(x[0], x[1]) for x in zip(maps1, maps2)]
        vol = np.tanh(compute_diff_map(mean_vol(np.arctanh(maps1)), mean_vol(np.arctanh(maps2))))

        d = get_volume(vol, -0.1, 0.1, cmap='coldwarm')
        diff_vols = [get_volume(vol, -0.1, 0.1, cmap='coldwarm') for vol in diff_maps]

        # vol_objects = dict(zip(subjs, diff_vols))
        vol_objects = dict()
        vol_objects[mname1+'-'+mname2] = d

        ds = cortex.dataset.Dataset(**vol_objects)

    web = cortex.webgl.show(ds, types=('inflated',), port=8081, open_browser=True)
    raw_input()
Example #30
def check_equation_20(q,gamma=3.):
	'''
		Kinematic factor difference between analytic and numerical for
		gamma = 3
	'''
	Q = q/qpot_from_q(q)
	F=.5*quad(lambda t: np.sin(t)**3*(np.sin(t)**2+np.cos(t)**2./Q/Q)**(-gamma/2.),0.,np.pi)[0]/quad(lambda t: np.sin(t)*np.cos(t)**2*(np.sin(t)**2+np.cos(t)**2./Q/Q)**(-gamma/2.),0.,np.pi)[0]
	F2 = binney_tremaine_virial_ratio(q)
	Qst = 1./Q/Q
	if(gamma==3.):
		if(Qst>1.):
			Q = np.sqrt(Qst-1.)
			G = .5*(Qst*Q-np.sqrt(Qst)*np.arcsinh(Q))/(np.sqrt(Qst)*np.arcsinh(Q)-Q)
		else:
			Q = np.sqrt(1.-Qst)
			G = .5*(Qst*Q*Q-np.sqrt(Qst)*Q*np.arccos(np.sqrt(Qst)))/(np.sqrt(Qst)*Q*np.arccos(np.sqrt(Qst))-Q*Q)
	if(gamma==2.):
		if(Qst>1.):
			Q = np.sqrt(Qst-1.)
			T = np.arctan(Q)
			G = .5*(Qst*T-Q)/(Q-T)
		else:
			Q = np.sqrt(1.-Qst)
			T = np.arctanh(Q)
			G = .5*(Qst*T-Q)/(Q-T)
	if(gamma==4.):
		if(Qst>1.):
			Q = np.sqrt(Qst-1.)
			G = (Qst*(Qst-2.)*np.arctan(Q)+Qst*Q)/(Qst*np.arctan(Q)-Q)/2.
		else:
			Q = np.sqrt(1.-Qst)
			G = (Qst*(Qst-2.)*np.arctanh(Q)+Qst*Q)/(Qst*np.arctanh(Q)-Q)/2.
	if(gamma==3.):
		q = np.linspace(0.2,4.)
		FF = np.zeros(len(q))
		FF2 = np.zeros(len(q))
		for n,i in enumerate(q):
			Q = i/qpot_from_q(i)
			FF[n]=.5*quad(lambda t: np.sin(t)**3*(np.sin(t)**2+np.cos(t)**2./Q/Q)**(-gamma/2.),0.,np.pi)[0]/quad(lambda t: np.sin(t)*np.cos(t)**2*(np.sin(t)**2+np.cos(t)**2./Q/Q)**(-gamma/2.),0.,np.pi)[0]
			FF2[n] = binney_tremaine_virial_ratio(i)
		plt.plot(q,np.log10(FF))
		plt.plot(q,np.log10(FF2))
		plt.savefig('tmp.pdf')
		plt.clf()
	if(gamma==3.):
		return F-G,F2-G
	else:
		return F-G
Example #31
def extractnetstats(ID,
                    network,
                    thr,
                    conn_model,
                    est_path,
                    mask,
                    out_file=None):
    from pynets import thresholding, utils

    pruning = True

    ##Load and threshold matrix
    in_mat = np.array(np.genfromtxt(est_path))
    in_mat = thresholding.autofix(in_mat)

    ##Normalize connectivity matrix (weights between 0-1)
    in_mat = thresholding.normalize(in_mat)

    ##Apply the inverse hyperbolic tangent to the matrix if non-sparse (i.e. Fisher r-to-z transform)
    if conn_model == 'corr':
        in_mat = np.arctanh(in_mat)
        in_mat[np.isnan(in_mat)] = 0
        in_mat[np.isinf(in_mat)] = 1

    ##Get dir_path
    dir_path = os.path.dirname(os.path.realpath(est_path))

    ##Load numpy matrix as networkx graph
    G_pre = nx.from_numpy_matrix(in_mat)

    ##Prune irrelevant nodes (i.e. nodes that are fully disconnected from the graph and/or whose betweenness centrality is more than 3 standard deviations below the mean)
    if pruning == True:
        [G_pruned, _, _] = most_important(G_pre)
    else:
        G_pruned = G_pre

    ##Make directed if sparse
    if conn_model != 'corr' and conn_model != 'cov' and conn_model != 'tangent':
        G_di = nx.DiGraph(G_pruned)
        G_dir = G_di.to_directed()
        G = G_pruned
    else:
        G = G_pruned

    ##Get corresponding matrix
    in_mat = nx.to_numpy_array(G)

    ##Print graph summary
    print('\n\nThreshold: ' + str(thr))
    print('Source File: ' + str(est_path))
    info_list = list(nx.info(G).split('\n'))[2:]
    for i in info_list:
        print(i)

    try:
        G_dir
        print('Analyzing DIRECTED graph when applicable...')
    except:
        print('Graph is UNDIRECTED')

    if conn_model == 'corr' or conn_model == 'cov' or conn_model == 'tangent':
        if nx.is_connected(G) == True:
            num_conn_comp = nx.number_connected_components(G)
            print('Graph is CONNECTED with ' + str(num_conn_comp) +
                  ' connected component(s)')
        else:
            print('Graph is DISCONNECTED')
    print('\n')

    ##Create Length matrix
    mat_len = thresholding.weight_conversion(in_mat, 'lengths')
    ##Load numpy matrix as networkx graph
    G_len = nx.from_numpy_matrix(mat_len)

    ##Save G as gephi file
    if mask:
        if network:
            nx.write_graphml(
                G, dir_path + '/' + ID + '_' + network + '_' +
                str(os.path.basename(mask).split('.')[0]) + '.graphml')
        else:
            nx.write_graphml(
                G, dir_path + '/' + ID + '_' +
                str(os.path.basename(mask).split('.')[0]) + '.graphml')
    else:
        if network:
            nx.write_graphml(G,
                             dir_path + '/' + ID + '_' + network + '.graphml')
        else:
            nx.write_graphml(G, dir_path + '/' + ID + '.graphml')

    ###############################################################
    ########### Calculate graph metrics from graph G ##############
    ###############################################################
    from networkx.algorithms import degree_assortativity_coefficient, average_clustering, average_shortest_path_length, degree_pearson_correlation_coefficient, graph_number_of_cliques, transitivity, betweenness_centrality, eigenvector_centrality, communicability_betweenness_centrality, clustering, degree_centrality
    from pynets.netstats import average_local_efficiency, global_efficiency, local_efficiency, modularity_louvain_dir, smallworldness
    ##For non-nodal scalar metrics from custom functions, add the name of the function to metric_list and add the function  (with a G-only input) to the netstats module.
    metric_list = [
        global_efficiency, average_local_efficiency, smallworldness,
        degree_assortativity_coefficient, average_clustering,
        average_shortest_path_length, degree_pearson_correlation_coefficient,
        graph_number_of_cliques, transitivity
    ]

    ##Custom Weight Parameter
    #custom_weight = 0.25
    custom_weight = None

    ##Iteratively run functions from above metric list that generate single scalar output
    num_mets = len(metric_list)
    net_met_arr = np.zeros([num_mets, 2], dtype='object')
    j = 0
    for i in metric_list:
        met_name = str(i).split('<function ')[1].split(' at')[0]
        net_met = met_name
        try:
            if i is 'average_shortest_path_length':
                try:
                    try:
                        net_met_val = float(i(G_dir))
                        print('Calculating from directed graph...')
                    except:
                        net_met_val = float(i(G))
                except:
                    ##case where G is not fully connected
                    net_met_val = float(
                        average_shortest_path_length_for_all(G))
            if custom_weight is not None and i is 'degree_assortativity_coefficient' or i is 'global_efficiency' or i is 'average_local_efficiency' or i is 'average_clustering':
                custom_weight_param = 'weight = ' + str(custom_weight)
                try:
                    net_met_val = float(i(G_dir, custom_weight_param))
                    print('Calculating from directed graph...')
                except:
                    net_met_val = float(i(G, custom_weight_param))
            else:
                try:
                    net_met_val = float(i(G_dir))
                    print('Calculating from directed graph...')
                except:
                    net_met_val = float(i(G))
        except:
            net_met_val = np.nan
        net_met_arr[j, 0] = net_met
        net_met_arr[j, 1] = net_met_val
        print(net_met)
        print(str(net_met_val))
        print('\n')
        j = j + 1
    net_met_val_list = list(net_met_arr[:, 1])

    ##Run miscellaneous functions that generate multiple outputs
    ##Calculate modularity using the Louvain algorithm
    [community_aff, modularity] = modularity_louvain_dir(in_mat)

    ##Calculate core-periphery subdivision
    [Coreness_vec, Coreness_q] = core_periphery_dir(in_mat)

    ##Local Efficiency
    try:
        try:
            le_vector = local_efficiency(G_dir)
        except:
            le_vector = local_efficiency(G)
        print('\nExtracting Local Efficiency vector for all network nodes...')
        le_vals = list(le_vector.values())
        le_nodes = list(le_vector.keys())
        num_nodes = len(le_nodes)
        le_arr = np.zeros([num_nodes + 1, 2], dtype='object')
        j = 0
        for i in range(num_nodes):
            le_arr[j, 0] = str(le_nodes[j]) + '_local_efficiency'
            #print('\n' + str(le_nodes[j]) + '_local_efficiency')
            try:
                le_arr[j, 1] = le_vals[j]
            except:
                le_arr[j, 1] = np.nan
            #print(str(le_vals[j]))
            j = j + 1
        le_arr[num_nodes, 0] = 'MEAN_local_efficiency'
        nonzero_arr_le = np.delete(le_arr[:, 1], [0])
        le_arr[num_nodes, 1] = np.mean(nonzero_arr_le)
        print('Mean Local Efficiency across nodes: ' +
              str(le_arr[num_nodes, 1]))
        print('\n')
    except:
        pass

    ##Local Clustering
    try:
        cl_vector = clustering(G)
        print('\nExtracting Local Clustering vector for all network nodes...')
        cl_vals = list(cl_vector.values())
        cl_nodes = list(cl_vector.keys())
        num_nodes = len(cl_nodes)
        cl_arr = np.zeros([num_nodes + 1, 2], dtype='object')
        j = 0
        for i in range(num_nodes):
            cl_arr[j, 0] = str(cl_nodes[j]) + '_local_clustering'
            #print('\n' + str(cl_nodes[j]) + '_local_clustering')
            try:
                cl_arr[j, 1] = cl_vals[j]
            except:
                cl_arr[j, 1] = np.nan
            #print(str(cl_vals[j]))
            j = j + 1
        cl_arr[num_nodes, 0] = 'MEAN_local_clustering'
        nonzero_arr_cl = np.delete(cl_arr[:, 1], [0])
        cl_arr[num_nodes, 1] = np.mean(nonzero_arr_cl)
        print('Mean Local Clustering across nodes: ' +
              str(cl_arr[num_nodes, 1]))
        print('\n')
    except:
        pass

    ##Degree centrality
    try:
        try:
            dc_vector = degree_centrality(G_dir)
        except:
            dc_vector = degree_centrality(G)
        print('\nExtracting Degree Centrality vector for all network nodes...')
        dc_vals = list(dc_vector.values())
        dc_nodes = list(dc_vector.keys())
        num_nodes = len(dc_nodes)
        dc_arr = np.zeros([num_nodes + 1, 2], dtype='object')
        j = 0
        for i in range(num_nodes):
            dc_arr[j, 0] = str(dc_nodes[j]) + '_degree_centrality'
            #print('\n' + str(dc_nodes[j]) + '_degree_centrality')
            try:
                dc_arr[j, 1] = dc_vals[j]
            except:
                dc_arr[j, 1] = np.nan
            #print(str(cl_vals[j]))
            j = j + 1
        dc_arr[num_nodes, 0] = 'MEAN_degree_centrality'
        nonzero_arr_dc = np.delete(dc_arr[:, 1], [0])
        dc_arr[num_nodes, 1] = np.mean(nonzero_arr_dc)
        print('Mean Degree Centrality across nodes: ' +
              str(dc_arr[num_nodes, 1]))
        print('\n')
    except:
        pass

    ##Betweenness Centrality
    try:
        bc_vector = betweenness_centrality(G_len, normalized=True)
        print(
            '\nExtracting Betweeness Centrality vector for all network nodes...'
        )
        bc_vals = list(bc_vector.values())
        bc_nodes = list(bc_vector.keys())
        num_nodes = len(bc_nodes)
        bc_arr = np.zeros([num_nodes + 1, 2], dtype='object')
        j = 0
        for i in range(num_nodes):
            bc_arr[j, 0] = str(bc_nodes[j]) + '_betweenness_centrality'
            #print('\n' + str(bc_nodes[j]) + '_betw_cent')
            try:
                bc_arr[j, 1] = bc_vals[j]
            except:
                bc_arr[j, 1] = np.nan
            #print(str(bc_vals[j]))
            j = j + 1
        bc_arr[num_nodes, 0] = 'MEAN_betw_cent'
        nonzero_arr_betw_cent = np.delete(bc_arr[:, 1], [0])
        bc_arr[num_nodes, 1] = np.mean(nonzero_arr_betw_cent)
        print('Mean Betweenness Centrality across nodes: ' +
              str(bc_arr[num_nodes, 1]))
        print('\n')
    except:
        pass

    ##Eigenvector Centrality
    try:
        try:
            ec_vector = eigenvector_centrality(G_dir, max_iter=1000)
        except:
            ec_vector = eigenvector_centrality(G, max_iter=1000)
        print(
            '\nExtracting Eigenvector Centrality vector for all network nodes...'
        )
        ec_vals = list(ec_vector.values())
        ec_nodes = list(ec_vector.keys())
        num_nodes = len(ec_nodes)
        ec_arr = np.zeros([num_nodes + 1, 2], dtype='object')
        j = 0
        for i in range(num_nodes):
            ec_arr[j, 0] = str(ec_nodes[j]) + '_eigenvector_centrality'
            #print('\n' + str(ec_nodes[j]) + '_eig_cent')
            try:
                ec_arr[j, 1] = ec_vals[j]
            except:
                ec_arr[j, 1] = np.nan
            #print(str(ec_vals[j]))
            j = j + 1
        ec_arr[num_nodes, 0] = 'MEAN_eig_cent'
        nonzero_arr_eig_cent = np.delete(ec_arr[:, 1], [0])
        ec_arr[num_nodes, 1] = np.mean(nonzero_arr_eig_cent)
        print('Mean Eigenvector Centrality across nodes: ' +
              str(ec_arr[num_nodes, 1]))
        print('\n')
    except:
        pass

    ##Communicability Centrality
    try:
        cc_vector = communicability_betweenness_centrality(G, normalized=True)
        print(
            '\nExtracting Communicability Centrality vector for all network nodes...'
        )
        cc_vals = list(cc_vector.values())
        cc_nodes = list(cc_vector.keys())
        num_nodes = len(cc_nodes)
        cc_arr = np.zeros([num_nodes + 1, 2], dtype='object')
        j = 0
        for i in range(num_nodes):
            cc_arr[j, 0] = str(cc_nodes[j]) + '_communicability_centrality'
            #print('\n' + str(cc_nodes[j]) + '_comm_cent')
            try:
                cc_arr[j, 1] = cc_vals[j]
            except:
                cc_arr[j, 1] = np.nan
            #print(str(cc_vals[j]))
            j = j + 1
        cc_arr[num_nodes, 0] = 'MEAN_comm_cent'
        nonzero_arr_comm_cent = np.delete(cc_arr[:, 1], [0])
        cc_arr[num_nodes, 1] = np.mean(nonzero_arr_comm_cent)
        print('Mean Communicability Centrality across nodes: ' +
              str(cc_arr[num_nodes, 1]))
        print('\n')
    except:
        pass

    ##Rich club coefficient
    try:
        rc_vector = rich_club_coefficient(G, normalized=True)
        print(
            '\nExtracting Rich Club Coefficient vector for all network nodes...'
        )
        rc_vals = list(rc_vector.values())
        rc_edges = list(rc_vector.keys())
        num_edges = len(rc_edges)
        rc_arr = np.zeros([num_edges + 1, 2], dtype='object')
        j = 0
        for i in range(num_edges):
            rc_arr[j, 0] = str(rc_edges[j]) + '_rich_club'
            #print('\n' + str(rc_edges[j]) + '_rich_club')
            try:
                rc_arr[j, 1] = rc_vals[j]
            except:
                rc_arr[j, 1] = np.nan
            #print(str(rc_vals[j]))
            j = j + 1
        ##Add mean
        rc_arr[num_edges, 0] = 'MEAN_rich_club'
        nonzero_arr_rich_club = np.delete(rc_arr[:, 1], [0])
        rc_arr[num_edges, 1] = np.mean(nonzero_arr_rich_club)
        print('Mean Rich Club Coefficient across edges: ' +
              str(rc_arr[num_edges, 1]))
        print('\n')
    except:
        pass

    ##Create a list of metric names for scalar metrics
    metric_list_names = []
    net_met_val_list_final = net_met_val_list
    for i in net_met_arr[:, 0]:
        metric_list_names.append(i)

    ##Add modularity measure
    try:
        metric_list_names.append('Modularity')
        net_met_val_list_final.append(modularity)
    except:
        pass

    ##Add Core/Periphery measure
    try:
        metric_list_names.append('Coreness')
        net_met_val_list_final.append(Coreness_q)
    except:
        pass

    ##Add local efficiency measures
    try:
        for i in le_arr[:, 0]:
            metric_list_names.append(i)
        net_met_val_list_final = net_met_val_list_final + list(le_arr[:, 1])
    except:
        pass

    ##Add local clustering measures
    try:
        for i in cl_arr[:, 0]:
            metric_list_names.append(i)
        net_met_val_list_final = net_met_val_list_final + list(cl_arr[:, 1])
    except:
        pass

    ##Add centrality measures
    try:
        for i in dc_arr[:, 0]:
            metric_list_names.append(i)
        net_met_val_list_final = net_met_val_list_final + list(dc_arr[:, 1])
    except:
        pass
    try:
        for i in bc_arr[:, 0]:
            metric_list_names.append(i)
        net_met_val_list_final = net_met_val_list_final + list(bc_arr[:, 1])
    except:
        pass
    try:
        for i in ec_arr[:, 0]:
            metric_list_names.append(i)
        net_met_val_list_final = net_met_val_list_final + list(ec_arr[:, 1])
    except:
        pass
    try:
        for i in cc_arr[:, 0]:
            metric_list_names.append(i)
        net_met_val_list_final = net_met_val_list_final + list(cc_arr[:, 1])
    except:
        pass

    ##Add rich club measure
    try:
        for i in rc_arr[:, 0]:
            metric_list_names.append(i)
        net_met_val_list_final = net_met_val_list_final + list(rc_arr[:, 1])
    except:
        pass

    ##Save metric names as pickle
    try:
        import cPickle
    except ImportError:
        import _pickle as cPickle

    if mask != None:
        if network != None:
            met_list_picke_path = os.path.dirname(os.path.abspath(
                est_path)) + '/net_metric_list_' + network + '_' + str(
                    os.path.basename(mask).split('.')[0])
        else:
            met_list_picke_path = os.path.dirname(
                os.path.abspath(est_path)) + '/net_metric_list_' + str(
                    os.path.basename(mask).split('.')[0])
    else:
        if network != None:
            met_list_picke_path = os.path.dirname(
                os.path.abspath(est_path)) + '/net_metric_list_' + network
        else:
            met_list_picke_path = os.path.dirname(
                os.path.abspath(est_path)) + '/net_metric_list'
    cPickle.dump(metric_list_names, open(met_list_picke_path, 'wb'))

    ##And save results to csv
    out_path = utils.create_csv_path(ID, network, conn_model, thr, mask,
                                     dir_path)
    np.savetxt(out_path, net_met_val_list_final)

    return (out_path)
Example #32
def selcomps(seldict,
             mmix,
             mask,
             ref_img,
             manacc,
             n_echos,
             t2s,
             s0,
             olevel=2,
             oversion=99,
             filecsdata=True,
             savecsdiag=True,
             strict_mode=False):
    """
    Labels ICA components to keep or remove from denoised data

    The selection process uses pre-calculated parameters for each ICA component
    inputted into this function in `seldict` such as
    Kappa (a T2* weighting metric), Rho (an S0 weighting metric), and variance
    explained. Additional selection metrics are calculated within this function
    and then used to classify each component into one of four groups.

    Parameters
    ----------
    seldict : :obj:`dict`
        As output from `fitmodels_direct`
    mmix : (C x T) array_like
        Mixing matrix for converting input data to component space, where `C`
        is components and `T` is the number of volumes in the original data
    mask : (S,) array_like
        Boolean mask array
    ref_img : :obj:`str` or img_like
        Reference image to dictate how outputs are saved to disk
    manacc : :obj:`list`
        Comma-separated list of indices of manually accepted components
    n_echos : :obj:`int`
        Number of echos in original data
    t2s : (S,) array_like
        Estimated T2* map
    s0 : (S,) array_like
        S0 map
    olevel : :obj:`int`, optional
        Default: 2
    oversion : :obj:`int`, optional
        Default: 99
    filecsdata: :obj:`bool`, optional
        Default: True
    savecsdiag: :obj:`bool`, optional
        Default: True
    strict_mode: :obj:`bool`, optional
        Default: False

    Returns
    -------
    acc : :obj:`list`
        Indices of accepted (BOLD) components in `mmix`
    rej : :obj:`list`
        Indices of rejected (non-BOLD) components in `mmix`
    midk : :obj:`list`
        Indices of mid-K (questionable) components in `mmix`
        These components are typically removed from the data during denoising
    ign : :obj:`list`
        Indices of ignored components in `mmix`
        Ignored components are considered to have too low variance to matter.
        They are not processed through the accept vs reject decision tree and
        are NOT removed during the denoising process

    Notes
    -----
    The selection algorithm used in this function is from work by prantikk.
    It is from the selcomps function in select_model_fft20e.py in
    version 3.2 of MEICA at:
    https://github.com/ME-ICA/me-ica/blob/b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model_fft20e.py
    Many of the early publications using and evaluating the MEICA method used a
    different selection algorithm by prantikk. The final 2.5 version of that
    algorithm is in the selcomps function in select_model.py at:
    https://github.com/ME-ICA/me-ica/blob/b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model.py

    In both algorithms, the ICA component selection process uses multiple
    metrics that include: kappa, rho, variance explained, component spatial
    weighting maps, noise and spatial frequency metrics, and measures of
    spatial overlap across metrics. The precise calculations may vary between
    algorithms. The most notable difference is that the v2.5 algorithm is a
    fixed decision tree where all selections were made based on whether
    combinations of metrics crossed various thresholds. In the v3.5 algorithm,
    clustering and support vector machines are also used to classify components
    based on how similar the metrics of one component are to those of other
    components.
    """
    if mmix.ndim != 2:
        raise ValueError('Parameter mmix should be 2d, not {0}d'.format(
            mmix.ndim))
    elif t2s.ndim != 1:  # FIT not necessarily supported
        raise ValueError('Parameter t2s should be 1d, not {0}d'.format(
            t2s.ndim))
    elif s0.ndim != 1:  # FIT not necessarily supported
        raise ValueError('Parameter s0 should be 1d, not {0}d'.format(s0.ndim))
    elif not (t2s.shape[0] == s0.shape[0] == mask.shape[0]):
        raise ValueError('First dimensions (number of samples) of t2s ({0}), '
                         's0 ({1}), and mask ({2}) do not '
                         'match'.format(t2s.shape[0], s0.shape[0],
                                        mask.shape[0]))
    """
    handwerkerd and others are working to "hypercomment" this function to
    help everyone understand it sufficiently, with the goal of eventually
    modularizing the algorithm. This is still a work in progress: later
    sections are not fully commented, some points of uncertainty are noted, and
    the summary of the full algorithm is not yet complete.

    There are sections of this code that calculate metrics that are used in
    the decision tree for the selection process and other sections that
    are part of the decision tree. Certain comments are prefaced with METRIC
    and variable names to make clear which are metrics, and others are prefaced
    with SELECTION to make clear which are for applying metrics. METRICs tend
    to be summary values that contain a single number per component.

    Note there are some variables that are calculated in one section of the code
    that are later transformed into another metric that is actually part of a
    selection criterion. This running list is an attempt to summarize
    intermediate metrics vs the metrics that are actually used in decision
    steps. For applied metrics that are made up of intermediate metrics defined
    in earlier sections of the code, the constituent metrics are noted. More
    metrics will be added to the applied metrics section as the commenting of
    this function continues.

    Intermediate Metrics:  seldict['F_S0_clmaps'] seldict['F_R2_clmaps']
        seldict['Br_clmaps_S0'] seldict['Br_clmaps_R2'] seldict['Z_maps']
        dice_tbl countnoise
        counts_FR2_Z tt_table mmix_kurt mmix_std
        spr fproj_arr_val fdist
        Rtz, Dz

    Applied Metrics:
        seldict['Rhos']
        seldict['Kappas']
        seldict['varex']
        countsigFS0
        countsigFR2
        fz (a combination of multiple z-scored metrics: tt_table,
            seldict['varex'], seldict['Kappa'], seldict['Rho'], countnoise,
            mmix_kurt, fdist)
        tt_table[:,0]
        spz (z score of spr)
        KRcut
    """
    """
    If seldict exists, save it into a pickle file called compseldata.pklbz
    that can be loaded directly into python for future analyses
    If seldict=None, load it from the pre-saved pickle file to use for the
    rest of this function
    """
    if filecsdata:
        import bz2
        if seldict is not None:
            LGR.info('Saving component selection data')
            with bz2.BZ2File('compseldata.pklbz', 'wb') as csstate_f:
                pickle.dump(seldict, csstate_f)
        else:
            try:
                with bz2.BZ2File('compseldata.pklbz', 'rb') as csstate_f:
                    seldict = pickle.load(csstate_f)
            except FileNotFoundError:
                LGR.warning('Failed to load component selection data')
                return None
    """
    List of components
    all_comps and acc_comps start out as an ordered list of the component numbers
    all_comps is constant throughout the function.
    acc_comps changes through his function as components are assigned to other
    categories (i.e. components that are classified as rejected are removed
    from acc_comps)
    """
    midk = []
    ign = []
    all_comps = np.arange(len(seldict['Kappas']))
    acc_comps = np.arange(len(seldict['Kappas']))
    """
    If user has specified components to accept manually, just assign those
    components to the accepted and rejected comp lists and end the function
    """
    if manacc:
        acc = sorted([int(vv) for vv in manacc.split(',')])
        midk = []
        rej = sorted(np.setdiff1d(all_comps, acc))
        ign = []
        return acc, rej, midk, ign  # Add string for ign
    """
    METRICS: countsigFS0 countsigFR2
    F_S0_clmaps & F_R2_clmaps are the thresholded & binarized clustered maps of
    significant fits for the separate S0 and R2 cross-echo models per component.
    Since the values are 0 or 1, the countsig variables are a count of the
    significant voxels per component.
    The cluster size is a function of the # of voxels in the mask.
    The cluster threshold is based on the # of echos acquired
    """
    countsigFS0 = seldict['F_S0_clmaps'].sum(0)
    countsigFR2 = seldict['F_R2_clmaps'].sum(0)
    countnoise = np.zeros(len(all_comps))
    """
    Make table of dice values
    METRICS: dice_tbl
    dice_FR2, dice_FS0 are calculated for each component and the concatenated
    values are in dice_tbl
    Br_clmaps_R2 and Br_clmaps_S0 are binarized clustered Z_maps.
    The volume being clustered is the rank order indices of the absolute value
    of the beta values for the fit between the optimally combined time series
    and the mixing matrix (i.e. the lowest beta value is 1 and the highest is
    the # of voxels).
    The cluster size is a function of the # of voxels in the mask.
    The cluster threshold are the voxels with beta ranks greater than
    countsigFS0 or countsigFR2 (i.e. roughly the same number of voxels will be
    in the countsig clusters as the ICA beta map clusters)
    These dice values are the Dice-Sorenson index for the Br_clmap_?? and the
    F_??_clmap.
    If handwerkerd understands this correctly, if the voxels with the
    above-threshold F stats are clustered in the same voxels with the highest beta
    values, then the dice coefficient will be 1. If the thresholded F or betas
    aren't spatially clustered (i.e. the component map is less spatially smooth),
    or the clusters are in different locations (i.e. voxels with high betas
    are also noisier so they have lower F values), then the dice coefficients
    will be lower.
    """
    dice_tbl = np.zeros([all_comps.shape[0], 2])
    for comp_num in all_comps:
        dice_FR2 = utils.dice(
            utils.unmask(seldict['Br_clmaps_R2'][:, comp_num], mask)[t2s != 0],
            seldict['F_R2_clmaps'][:, comp_num])
        dice_FS0 = utils.dice(
            utils.unmask(seldict['Br_clmaps_S0'][:, comp_num], mask)[t2s != 0],
            seldict['F_S0_clmaps'][:, comp_num])
        dice_tbl[comp_num, :] = [dice_FR2, dice_FS0]  # step 3a here and above
    dice_tbl[np.isnan(dice_tbl)] = 0
    """
    Make table of noise gain
    METRICS: countnoise, counts_FR2_Z, tt_table
    (This is a bit confusing & is handwerkerd's attempt at making sense of this)
    seldict['Z_maps'] is the Fisher Z normalized beta fits for the optimally
    combined time series and the mixing matrix. Z_clmaps is a binarized cluster
    of Z_maps with the cluster size based on the # of voxels and the cluster
    threshold of 1.95. utils.andb is a sum of the True values in arrays so
    comp_noise_sel is true for voxels where the Z values are greater than 1.95
    but not part of a cluster of Z values that are greater than 1.95.
    Spatially unclustered voxels with high Z values could be considerd noisy.
    countnoise is the # of voxels per component where comp_noise_sel is true.

    counts_FR2_Z is the number of voxels with Z values above the threshold
    that are in clusters (signal) and the number outside of clusters (noise)

    tt_table is a bit confusing. For each component, the first index is
    some type of normalized, log10, signal/noise t statistic and the second is
    the p value for the signal/noise t statistic (for the R2 model).
    In general, these should be bigger t or have lower p values when most of
    the Z values above threshold are inside clusters.
    Because of the log10, values below 1 are negative, which is later used as
    a threshold. It doesn't seem like the p values are ever used.
    """
    tt_table = np.zeros([len(all_comps), 4])
    counts_FR2_Z = np.zeros([len(all_comps), 2])
    for comp_num in all_comps:
        comp_noise_sel = utils.andb([
            np.abs(seldict['Z_maps'][:, comp_num]) > 1.95,
            seldict['Z_clmaps'][:, comp_num] == 0
        ]) == 2
        countnoise[comp_num] = np.array(comp_noise_sel, dtype=np.int).sum()
        noise_FR2_Z_mask = utils.unmask(comp_noise_sel, mask)[t2s != 0]
        noise_FR2_Z = np.log10(
            np.unique(seldict['F_R2_maps'][noise_FR2_Z_mask, comp_num]))
        signal_FR2_Z_mask = utils.unmask(seldict['Z_clmaps'][:, comp_num],
                                         mask)[t2s != 0] == 1
        signal_FR2_Z = np.log10(
            np.unique(seldict['F_R2_maps'][signal_FR2_Z_mask, comp_num]))
        counts_FR2_Z[comp_num, :] = [len(signal_FR2_Z), len(noise_FR2_Z)]
        ttest = stats.ttest_ind(signal_FR2_Z, noise_FR2_Z, equal_var=True)
        # avoid DivideByZero RuntimeWarning
        if signal_FR2_Z.size > 0 and noise_FR2_Z.size > 0:
            mwu = stats.norm.ppf(
                stats.mannwhitneyu(signal_FR2_Z, noise_FR2_Z)[1])
        else:
            mwu = -np.inf
        tt_table[comp_num, 0] = np.abs(mwu) * ttest[0] / np.abs(ttest[0])
        tt_table[comp_num, 1] = ttest[1]
    tt_table[np.isnan(tt_table)] = 0
    tt_table[np.isinf(tt_table[:, 0]),
             0] = np.percentile(tt_table[~np.isinf(tt_table[:, 0]), 0], 98)
    """
    Time series derivative kurtosis
    METRICS: mmix_kurt and mmix_std
    Take the derivative of the time series for each component in the ICA
    mixing matrix and calculate the kurtosis & standard deviation.
    handwerkerd thinks these metrics are later used to calculate measures
    of time series spikiness and drift in the component time series.
    """
    mmix_dt = (mmix[:-1, :] - mmix[1:, :])
    mmix_kurt = stats.kurtosis(mmix_dt)
    mmix_std = np.std(mmix_dt, axis=0)
    """
    SELECTION #1 (prantikk labeled "Step 1")
    Reject anything that is obviously an artifact
    Obvious artifacts are components with Rho>Kappa or with more clustered,
    significant voxels for the S0 model than the R2 model
    """
    LGR.debug('Rejecting gross artifacts based on Rho/Kappa values and S0/R2 '
              'counts')
    rej = acc_comps[utils.andb(
        [seldict['Rhos'] > seldict['Kappas'], countsigFS0 > countsigFR2]) > 0]
    acc_comps = np.setdiff1d(acc_comps, rej)
    """
    prantikk labeled "Step 2"
    Compute 3-D spatial FFT of Beta maps to detect high-spatial
    frequency artifacts

    METRIC spr, fproj_arr_val, fdist
    PSC is the mean centered beta map for each ICA component
    The FFT is sequentially calculated across each dimension of PSC & the max
    value is removed (probably the 0Hz bin). The maximum remaining frequency
    magnitude along the z dimension is calculated, leaving a 2D matrix.
    spr contains a count of the number of frequency bins in the 2D matrix where
    the frequency magnitude is greater than 1/4 of the maximum magnitude in the
    matrix.
    spr is later z-normed across components into spz and this is actually used
    as a selection metric.
    handwerkerd interpretation: spr is bigger the more values of the fft are
    >1/4 the max. Thus, if you assume the highest mag bins are low frequency, &
    all components have roughly the same low freq power (i.e. a brain-shaped
    blob), then spr will be bigger the more high frequency bins have magnitudes
    that are more than 1/4 of the low frequency bins.

    fproj_arr_val is a flattened 1D vector of the 2D max-projection FFT
    of each component. This seems to be used later in an SVM, trained on
    these values for rejected components, to classify some of the remaining
    components as midk.
    Note: fproj_arr is created here and is a ranked list of FFT values, but is
    not used anywhere in the code. Was fproj_arr_val supposed to contain this
    ranking?

    fdist isn't completely clear to handwerkerd yet but it looks like the fit of
    the fft of the spatial map to a Gaussian distribution. If so, then the
    worse the fit, the more high frequency power would be in the component.
    """
    LGR.debug(
        'Computing 3D spatial FFT of beta maps to detect high-spatial frequency artifacts'
    )
    # spatial information is important so for NIFTI we convert back to 3D space
    dim1 = np.prod(check_niimg(ref_img).shape[:2])
    fproj_arr = np.zeros([dim1, len(all_comps)])
    fproj_arr_val = np.zeros([dim1, len(all_comps)])
    spr = []
    fdist = []
    for comp_num in all_comps:
        # convert data back to 3D array
        tproj = utils.new_nii_like(
            ref_img,
            utils.unmask(seldict['PSC'], mask)[:, comp_num]).get_data()
        fproj = np.fft.fftshift(np.abs(np.fft.rfftn(tproj)))
        fproj_z = fproj.max(axis=-1)
        fproj[fproj == fproj.max()] = 0
        spr.append(np.array(fproj_z > fproj_z.max() / 4, dtype=int).sum())
        fproj_arr[:, comp_num] = stats.rankdata(fproj_z.flatten())
        fproj_arr_val[:, comp_num] = fproj_z.flatten()
        fprojr = np.array([fproj, fproj[:, :, ::-1]]).max(0)
        fdist.append(
            np.max([
                utils.fitgaussian(fproj.max(jj))[3:].max()
                for jj in range(fprojr.ndim)
            ]))
    if type(fdist) is not np.ndarray:
        fdist = np.array(fdist)
    spr = np.array(spr)
    # import ipdb; ipdb.set_trace()
    """
    prantikk labelled Step 3
    Create feature space of component properties
    METRIC fz, spz, Rtz, Dz

    fz is a matrix of multiple other metrics described above and calculated
    in this section. All of these have one number per component and most
    are z-scored across components.
    Attempted explanations in order:
    Tz: The z-scored t statistics of the spatial noisiness metric in tt_table
    Vz: The z-scored the natural log of the non-normalized variance explained
        of each component
    Ktz: The z-scored natural log of the Kappa values
    (the '/ 2' does not seem necessary because it will be removed by z-scoring)
    KRr: The z-scored ratio of the natural log of Kappa / nat log of Rho
    (unclear why sometimes using stats.zscore and other times writing the eq out)
    cnz: The z-scored measure of a noisy voxel count where the noisy voxels are
         the voxels with large beta estimates, but aren't part of clusters
    Rz: z-scored rho values (why isn't this log scaled, like kappa in Ktz?)
    mmix_kurt: Probably a rough measure of the spikiness of each component's
        time series in the ICA mixing matrix
    fdist_z: z-score of fdist, which is probably a measure of high freq info
        in the spatial FFT of the components (with lower being more high freq?)

    NOT in fz:
    spz: Z-scored measure probably of how much high freq is in the data. Larger
        values mean more bins of the FFT have over 1/4 the power of the maximum
        bin (read about spr above for more info)
    Rtz: Z-scored natural log of the Rho values
    Dz: Z-scored Fisher Z transformed dice values of the overlap between
        clusters for the F stats and clusters of the ICA spatial beta maps with
        roughly the same number of voxels as in the clustered F maps.
        Dz saves this for the R2 model, there are also Dice coefs for the S0
        model in dice_tbl
    """
    LGR.debug('Creating feature space of component properties')
    fdist_pre = fdist.copy()
    fdist_pre[fdist > np.median(fdist) * 3] = np.median(fdist) * 3
    fdist_z = (fdist_pre - np.median(fdist_pre)) / fdist_pre.std()  # not z
    spz = stats.zscore(spr)
    Tz = stats.zscore(tt_table[:, 0])
    varex_log = np.log(seldict['varex'])
    Vz = stats.zscore(varex_log)
    Rz = stats.zscore(seldict['Rhos'])
    Ktz = stats.zscore(np.log(seldict['Kappas']) / 2)
    #  Rtz = stats.zscore(np.log(seldict['Rhos']) / 2)
    KRr = stats.zscore(np.log(seldict['Kappas']) / np.log(seldict['Rhos']))
    cnz = stats.zscore(countnoise)
    Dz = stats.zscore(np.arctanh(dice_tbl[:, 0] + 0.001))
    fz = np.array([Tz, Vz, Ktz, KRr, cnz, Rz, mmix_kurt, fdist_z])
    """
    METRICS Kcut, Rcut, KRcut, KRcutguesses, Khighelbowval
    Step 3: Make initial guess of where BOLD components are and use DBSCAN
    to exclude noise components and find a sample set of 'good' components
    """
    LGR.debug('Making initial guess of BOLD components')
    # The F thresholds for the echo fit (based on the # of echos) for p<0.05,
    #    p<0.025, and p<0.01 (Confirm this is accurate since the function
    #    contains a lookup table rather than a calculation)
    F05, F025, F01 = utils.getfbounds(n_echos)
    # epsmap is [index,level of overlap with dicemask,
    # number of high Rho components]
    epsmap = []
    Rhos_sorted = np.array(sorted(seldict['Rhos']))[::-1]
    """
    Make an initial guess as to number of good components based on
     consensus of control points across Rhos and Kappas
    For terminology later, typically getelbow _aggr > _mod > _cons
      though this might not be universally true. A more "inclusive" threshold
      has a lower kappa since that means more components are above that thresh
      and are likely to be accepted. For Rho, a more "inclusive" threshold is
      higher since that means fewer components will be rejected based on rho.
    KRcut seems weird to handwerkerd. I see that the thresholds are slightly
     shifted for kappa & rho later in the code, but why would we ever want to
     set a common threshold reference point for both? These are two different
     elbows on two different data sets.
    """
    KRcutguesses = [
        getelbow_mod(seldict['Rhos']),
        getelbow_cons(seldict['Rhos']),
        getelbow_aggr(seldict['Rhos']),
        getelbow_mod(seldict['Kappas']),
        getelbow_cons(seldict['Kappas']),
        getelbow_aggr(seldict['Kappas'])
    ]
    KRcut = np.median(KRcutguesses)
    """
    Also a bit weird to handwerkerd. This is the 75th percentile of the Kappa F
    stats of the components at the 3 elbow selection criteria plus the
    F stats for 3 significance thresholds based on the # of echos.
    This is some type of way to get a significance criterion for a component
    fit, but it's unclear why this specific criterion is useful.
    """
    Khighelbowval = stats.scoreatpercentile([
        getelbow_mod(seldict['Kappas'], return_val=True),
        getelbow_cons(seldict['Kappas'], return_val=True),
        getelbow_aggr(seldict['Kappas'], return_val=True)
    ] + list(utils.getfbounds(n_echos)),
                                            75,
                                            interpolation_method='lower')
    """
    Default to the most inclusive kappa threshold (_cons) unless:
    1. That threshold is more than twice the median of Kappa & Rho thresholds
    2. and the moderate elbow is more inclusive than a p=0.01
    handwerkerd: This actually seems like a way to avoid using the theoretically
       most liberal threshold only when there was a bad estimate and _mod
       is more inclusive. My one concern is that it's an odd way to test that
       the _mod elbow is any better. Why not at least see if _mod < _cons?
    prantikk's orig comment for this section is:
      "only use exclusive when inclusive is extremely inclusive - double KRcut"
    """
    cond1 = getelbow_cons(seldict['Kappas']) > KRcut * 2
    cond2 = getelbow_mod(seldict['Kappas'], return_val=True) < F01
    if cond1 and cond2:
        Kcut = getelbow_mod(seldict['Kappas'], return_val=True)
    else:
        Kcut = getelbow_cons(seldict['Kappas'], return_val=True)
    """
    handwerkerd: The goal seems to be to maximize the rejected components
       based on the rho cut by defaulting to a lower Rcut value. Again, if
       that is the goal, why not just test if _mod < _cons?
    prantikk's orig comment for this section is:
        only use inclusive when exclusive is extremely exclusive - half KRcut
        (remember for Rho inclusive is higher, so want both Kappa and Rho
        to default to lower)
    """
    if getelbow_cons(seldict['Rhos']) > KRcut * 2:
        Rcut = getelbow_mod(seldict['Rhos'], return_val=True)
    # for above, consider something like:
    # min([getelbow_mod(Rhos,True),sorted(Rhos)[::-1][KRguess] ])
    else:
        Rcut = getelbow_cons(seldict['Rhos'], return_val=True)

    # Rcut should never be higher than Kcut (handwerkerd: not sure why)
    if Rcut > Kcut:
        Kcut = Rcut

    # KRelbow has a 2 for components that are above the Kappa accept threshold
    # and below the rho reject threshold
    KRelbow = utils.andb([seldict['Kappas'] > Kcut, seldict['Rhos'] < Rcut])
    """
    Make guess of Kundu et al 2011 plus remove high frequencies,
    generally high variance, and high variance given low Kappa.
    The first index of tt_table is a t statistic of what handwerkerd thinks
      is a spatial noise metric. Since log10 of these values is taken, the >0
      threshold means the metric is >1. tt_lim seems to be a fairly aggressive
      percentile that is then divided by 3.
    """
    tt_lim = stats.scoreatpercentile(
        tt_table[tt_table[:, 0] > 0, 0], 75, interpolation_method='lower') / 3
    """
    KRguess is a list of components to potentially accept. It starts with a
      list of components that cross the Kcut and Rcut threshold and weren't
      previously rejected for other reasons. From that list, it removes more
      components based on several additional criteria:
      1. tt_table less than the tt_lim threshold (spatial noisiness metric)
      2. spz (a z-scored probably high spatial freq metric) >1
      3. Vz (a z-scored variance explained metric) >2
      4. If both (seems to be if a component has a relatively high variance
          the acceptance threshold for Kappa values is doubled):
         A. The variance explained is greater than half the KRcut highest
             variance component
         B. Kappa is less than twice Kcut
    """
    temp = all_comps[utils.andb([
        seldict['varex'] > 0.5 * sorted(seldict['varex'])[::-1][int(KRcut)],
        seldict['Kappas'] < 2 * Kcut
    ]) == 2]
    KRguess = np.setdiff1d(
        np.setdiff1d(all_comps[KRelbow == 2], rej),
        np.union1d(
            all_comps[tt_table[:, 0] < tt_lim],
            np.union1d(np.union1d(all_comps[spz > 1], all_comps[Vz > 2]),
                       temp)))
    guessmask = np.zeros(len(all_comps))
    guessmask[KRguess] = 1
    """
    Throw lower-risk bad components out based on 3 criteria all being true:
      1. tt_table (a spatial noisiness metric) <0
      2. A component's variance explained is greater than the median variance
         explained
      3. The component index is greater than the KRcut index (since the
         components are sorted by kappa, this is another kappa thresholding)
    """
    rejB = acc_comps[utils.andb([
        tt_table[acc_comps, 0] < 0, seldict['varex'][acc_comps] > np.median(
            seldict['varex']), acc_comps > KRcut
    ]) == 3]
    rej = np.union1d(rej, rejB)
    # adjust acc_comps again to only contain the remaining non-rejected components
    acc_comps = np.setdiff1d(acc_comps, rej)
    """
    This is where handwerkerd has paused in hypercommenting the function.
    """
    LGR.debug('Using DBSCAN to find optimal set of "good" BOLD components')
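    # The sweep below tries eps = 0.005, 0.010, ..., 100.0 (20000 values) and
    # keeps only the eps settings that yield a plausible clustering
    # (conditions cond1-cond4 below).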
    for ii in range(20000):
        eps = .005 + ii * .005
        db = DBSCAN(eps=eps, min_samples=3).fit(fz.T)

        # it would be great to have descriptive names here
        # DBSCAN found at least three non-noisy clusters
        cond1 = db.labels_.max() > 1
        # DBSCAN didn't detect more classes than the total # of components / 6
        cond2 = db.labels_.max() < len(all_comps) / 6
        # TODO: confirm if 0 is a special label for DBSCAN
        # my intuition here is that we're confirming DBSCAN labelled previously
        # rejected components as noise (i.e., no overlap between `rej` and
        # labelled DBSCAN components)
        cond3 = np.intersect1d(rej, all_comps[db.labels_ == 0]).shape[0] == 0
        # DBSCAN labelled less than half of the total components as noisy
        cond4 = np.array(db.labels_ == -1, dtype=int).sum() / float(
            len(all_comps)) < .5

        if cond1 and cond2 and cond3 and cond4:
            epsmap.append([
                ii,
                utils.dice(guessmask, db.labels_ == 0),
                np.intersect1d(
                    all_comps[db.labels_ == 0],
                    all_comps[seldict['Rhos'] > getelbow_mod(
                        Rhos_sorted, return_val=True)]).shape[0]
            ])
        db = None

    epsmap = np.array(epsmap)
    LGR.debug('Found DBSCAN solutions for {}/20000 eps resolutions'.format(
        len(epsmap)))
    group0 = []
    dbscanfailed = False
    if len(epsmap) != 0:
        # Select index that maximizes Dice with guessmask but first
        # minimizes number of higher Rho components
        ii = int(
            epsmap[np.argmax(epsmap[epsmap[:, 2] == np.min(epsmap[:, 2]),
                                    1], 0), 0])
        LGR.debug('Component selection tuning: {:.05f}'.format(
            epsmap[:, 1].max()))
        db = DBSCAN(eps=.005 + ii * .005, min_samples=3).fit(fz.T)
        acc_comps = all_comps[db.labels_ == 0]
        acc_comps = np.setdiff1d(acc_comps, rej)
        acc_comps = np.setdiff1d(
            acc_comps, acc_comps[acc_comps > len(all_comps) - len(rej)])
        group0 = acc_comps.copy()
        group_n1 = all_comps[db.labels_ == -1]
        to_clf = np.setdiff1d(all_comps, np.union1d(acc_comps, rej))

    if len(group0) == 0 or len(group0) < len(KRguess) * .5:
        dbscanfailed = True
        LGR.debug('DBSCAN guess failed; using elbow guess method instead')
        temp = all_comps[utils.andb([
            seldict['varex'] > 0.5 *
            sorted(seldict['varex'])[::-1][int(KRcut)], seldict['Kappas'] < 2 *
            Kcut
        ]) == 2]
        acc_comps = np.setdiff1d(
            np.setdiff1d(all_comps[KRelbow == 2], rej),
            np.union1d(
                all_comps[tt_table[:, 0] < tt_lim],
                np.union1d(np.union1d(all_comps[spz > 1], all_comps[Vz > 2]),
                           temp)))
        group0 = acc_comps.copy()
        group_n1 = []
        to_clf = np.setdiff1d(all_comps, np.union1d(group0, rej))

    if len(group0) < 2 or (len(group0) < 4
                           and float(len(rej)) / len(group0) > 3):
        LGR.warning('Extremely limited reliable BOLD signal space! '
                    'Not filtering components beyond BOLD/non-BOLD guesses.')
        midkfailed = True
        min_acc = np.array([])
        if len(group0) != 0:
            # For extremes, building in a 20% tolerance
            toacc_hi = np.setdiff1d(
                all_comps[utils.andb([
                    fdist <= np.max(fdist[group0]), seldict['Rhos'] < F025,
                    Vz > -2
                ]) == 3], np.union1d(group0, rej))
            min_acc = np.union1d(group0, toacc_hi)
            to_clf = np.setdiff1d(all_comps, np.union1d(min_acc, rej))
        else:
            toacc_hi = []
            min_acc = []
        diagstep_keys = [
            'Rejected components', 'Kappa-Rho cut point', 'Kappa cut point',
            'Rho cut point', 'DBSCAN failed to converge',
            'Mid-Kappa failed (limited BOLD signal)', 'Kappa-Rho guess',
            'min_acc', 'toacc_hi'
        ]
        diagstep_vals = [
            list(rej), KRcut, Kcut, Rcut, dbscanfailed, midkfailed,
            list(KRguess),
            list(min_acc),
            list(toacc_hi)
        ]
        with open('csstepdata.json', 'w') as ofh:
            json.dump(dict(zip(diagstep_keys, diagstep_vals)),
                      ofh,
                      indent=4,
                      sort_keys=True,
                      default=str)
        return list(sorted(min_acc)), list(sorted(rej)), [], list(
            sorted(to_clf))

    # Find additional components to reject based on Dice - doing this here
    # since Dice is a little unstable, need to reference group0
    rej_supp = []
    dice_rej = False
    if not dbscanfailed and len(rej) + len(group0) < 0.75 * len(all_comps):
        dice_rej = True
        temp = all_comps[dice_tbl[all_comps, 0] <= dice_tbl[all_comps, 1]]
        rej_supp = np.setdiff1d(np.setdiff1d(np.union1d(rej, temp), group0),
                                group_n1)
        rej = np.union1d(rej, rej_supp)

    # Temporal features
    # larger is worse - spike
    mmix_kurt_z = (mmix_kurt -
                   mmix_kurt[group0].mean()) / mmix_kurt[group0].std()
    # smaller is worse - drift
    mmix_std_z = -1 * (
        (mmix_std - mmix_std[group0].mean()) / mmix_std[group0].std())
    mmix_kurt_z_max = np.max([mmix_kurt_z, mmix_std_z], 0)
    """
    Step 2: Classify midk and ignore using separate SVMs for
    different variance regimes
    # To render hyperplane:
    min_x = np.min(spz2); max_x = np.max(spz2)
    # plotting separating hyperplane
        ww = clf_.coef_[0]
        aa = -ww[0] / ww[1]
        # make sure the next line is long enough
        xx = np.linspace(min_x - 2, max_x + 2)
        yy = aa * xx - (clf_.intercept_[0]) / ww[1]
        plt.plot(xx, yy, '-')
    """
    LGR.debug('Attempting to classify midk components')
    # Tried getting rid of accepting based on SVM altogether,
    # now using only rejecting
    toacc_hi = np.setdiff1d(
        all_comps[utils.andb([
            fdist <= np.max(fdist[group0]), seldict['Rhos'] < F025, Vz > -2
        ]) == 3], np.union1d(group0, rej))
    temp = utils.andb([
        spz < 1, Rz < 0, mmix_kurt_z_max < 5, Dz > -1, Tz > -1, Vz < 0,
        seldict['Kappas'] >= F025,
        fdist < 3 * np.percentile(fdist[group0], 98)
    ]) == 8
    toacc_lo = np.intersect1d(to_clf, all_comps[temp])
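    # Editor's note: the SVM below is trained on the flattened spatial-FFT
    # projections of the already-classified components (label 0 for the
    # accepted group0 components, 1 for the rejected ones) and then predicts
    # labels for the unclassified to_clf components.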
    midk_clf, clf_ = do_svm(fproj_arr_val[:, np.union1d(group0, rej)].T,
                            [0] * len(group0) + [1] * len(rej),
                            fproj_arr_val[:, to_clf].T,
                            svmtype=2)
    midk = np.setdiff1d(
        to_clf[utils.andb([
            midk_clf == 1,
            seldict['varex'][to_clf] > np.median(seldict['varex'][group0])
        ]) == 2], np.union1d(toacc_hi, toacc_lo))

    # only use the SVM to augment toacc_hi if toacc_hi isn't already
    # conflicting with the SVM choice
    if len(
            np.intersect1d(
                to_clf[utils.andb([midk_clf == 1, Vz[to_clf] > 0]) == 2],
                toacc_hi)) == 0:
        svm_acc_fail = True
        toacc_hi = np.union1d(toacc_hi, to_clf[midk_clf == 0])
    else:
        svm_acc_fail = False
    """
    Step 3: Compute variance associated with low T2* areas
    (e.g. draining veins and other low T2* areas)
    # To write out veinmask
    veinout = np.zeros(t2s.shape)
    veinout[t2s!=0] = veinmaskf
    utils.filewrite(veinout, 'veinmaskf', ref_img)
    veinBout = utils.unmask(veinmaskB, mask)
    utils.filewrite(veinBout, 'veins50', ref_img)
    """
    LGR.debug('Computing variance associated with low T2* areas (e.g., '
              'draining veins)')
    tsoc_B_Zcl = np.zeros(seldict['tsoc_B'].shape)
    tsoc_B_Zcl[seldict['Z_clmaps'] != 0] = np.abs(
        seldict['tsoc_B'])[seldict['Z_clmaps'] != 0]
    sig_B = [
        stats.scoreatpercentile(tsoc_B_Zcl[tsoc_B_Zcl[:, ii] != 0, ii], 25)
        if len(tsoc_B_Zcl[tsoc_B_Zcl[:, ii] != 0, ii]) != 0 else 0
        for ii in all_comps
    ]
    sig_B = np.abs(seldict['tsoc_B']) > np.tile(
        sig_B, [seldict['tsoc_B'].shape[0], 1])

    veinmask = utils.andb([
        t2s < stats.scoreatpercentile(
            t2s[t2s != 0], 15, interpolation_method='lower'), t2s != 0
    ]) == 2
    veinmaskf = veinmask[mask]
    veinR = np.array(sig_B[veinmaskf].sum(0),
                     dtype=float) / sig_B[~veinmaskf].sum(0)
    veinR[np.isnan(veinR)] = 0

    veinc = np.union1d(rej, midk)
    rej_veinRZ = ((veinR - veinR[veinc].mean()) / veinR[veinc].std())[veinc]
    rej_veinRZ[rej_veinRZ < 0] = 0
    rej_veinRZ[countsigFR2[veinc] > np.array(veinmaskf, dtype=int).sum()] = 0
    t2s_lim = [
        stats.scoreatpercentile(t2s[t2s != 0],
                                50,
                                interpolation_method='lower'),
        stats.scoreatpercentile(
            t2s[t2s != 0], 80, interpolation_method='lower') / 2
    ]
    phys_var_zs = []
    for t2sl_i in range(len(t2s_lim)):
        t2sl = t2s_lim[t2sl_i]
        veinW = sig_B[:, veinc] * np.tile(rej_veinRZ, [sig_B.shape[0], 1])
        veincand = utils.unmask(
            utils.andb([
                s0[t2s != 0] < np.median(s0[t2s != 0]), t2s[t2s != 0] < t2sl
            ]) >= 1, t2s != 0)[mask]
        veinW[~veincand] = 0
        invein = veinW.sum(
            axis=1)[(utils.unmask(veinmaskf, mask) *
                     utils.unmask(veinW.sum(axis=1) > 1, mask))[mask]]
        minW = 10 * (np.log10(invein).mean()) - 1 * 10**(
            np.log10(invein).std())
        veinmaskB = veinW.sum(axis=1) > minW
        tsoc_Bp = seldict['tsoc_B'].copy()
        tsoc_Bp[tsoc_Bp < 0] = 0
        vvex = np.array([
            (tsoc_Bp[veinmaskB, ii]**2.).sum() / (tsoc_Bp[:, ii]**2.).sum()
            for ii in all_comps
        ])
        group0_res = np.intersect1d(KRguess, group0)
        phys_var_zs.append(
            (vvex - vvex[group0_res].mean()) / vvex[group0_res].std())
        veinBout = utils.unmask(veinmaskB, mask)
        utils.filewrite(veinBout.astype(float), 'veins_l%i' % t2sl_i, ref_img)

    # Mask to sample veins
    phys_var_z = np.array(phys_var_zs).max(0)
    Vz2 = (varex_log - varex_log[group0].mean()) / varex_log[group0].std()
    """
    Step 4: Learn joint TE-dependence spatial and temporal models to move
    remaining artifacts to ignore class
    """
    LGR.debug(
        'Learning joint TE-dependence spatial/temporal models to ignore remaining artifacts'
    )

    to_ign = []

    minK_ign = np.max([F05, getelbow_cons(seldict['Kappas'], return_val=True)])
    newcest = len(group0) + len(
        toacc_hi[seldict['Kappas'][toacc_hi] > minK_ign])
    phys_art = np.setdiff1d(
        all_comps[utils.andb([phys_var_z > 3.5, seldict['Kappas'] < minK_ign])
                  == 2], group0)
    rank_diff = stats.rankdata(phys_var_z) - stats.rankdata(seldict['Kappas'])
    phys_art = np.union1d(
        np.setdiff1d(
            all_comps[utils.andb(
                [phys_var_z > 2, rank_diff > newcest / 2, Vz2 > -1]) == 3],
            group0), phys_art)
    # Want to replace field_art with an acf/SVM based approach
    # instead of a kurtosis/filter one
    field_art = np.setdiff1d(
        all_comps[utils.andb(
            [mmix_kurt_z_max > 5, seldict['Kappas'] < minK_ign]) == 2], group0)
    temp = (stats.rankdata(mmix_kurt_z_max) -
            stats.rankdata(seldict['Kappas'])) > newcest / 2
    field_art = np.union1d(
        np.setdiff1d(
            all_comps[utils.andb([
                mmix_kurt_z_max > 2, temp, Vz2 > 1, seldict['Kappas'] < F01
            ]) == 4], group0), field_art)
    temp = seldict['Rhos'] > np.percentile(seldict['Rhos'][group0], 75)
    field_art = np.union1d(
        np.setdiff1d(
            all_comps[utils.andb([mmix_kurt_z_max > 3, Vz2 > 3, temp]) == 3],
            group0), field_art)
    field_art = np.union1d(
        np.setdiff1d(
            all_comps[utils.andb([mmix_kurt_z_max > 5, Vz2 > 5]) == 2],
            group0), field_art)
    misc_art = np.setdiff1d(
        all_comps[utils.andb([(stats.rankdata(Vz) -
                               stats.rankdata(Ktz)) > newcest /
                              2, seldict['Kappas'] < Khighelbowval]) == 2],
        group0)
    ign_cand = np.unique(list(field_art) + list(phys_art) + list(misc_art))
    midkrej = np.union1d(midk, rej)
    to_ign = np.setdiff1d(list(ign_cand), midkrej)
    toacc = np.union1d(toacc_hi, toacc_lo)
    acc_comps = np.setdiff1d(np.union1d(acc_comps, toacc),
                             np.union1d(to_ign, midkrej))
    ign = np.setdiff1d(all_comps, list(acc_comps) + list(midk) + list(rej))
    orphan = np.setdiff1d(
        all_comps,
        list(acc_comps) + list(to_ign) + list(midk) + list(rej))

    # Last ditch effort to save some transient components
    if not strict_mode:
        Vz3 = (varex_log -
               varex_log[acc_comps].mean()) / varex_log[acc_comps].std()
        temp = utils.andb([
            seldict['Kappas'] > F05, seldict['Rhos'] < F025,
            seldict['Kappas'] > seldict['Rhos'], Vz3 <= -1, Vz3 > -3,
            mmix_kurt_z_max < 2.5
        ])
        acc_comps = np.union1d(acc_comps,
                               np.intersect1d(orphan, all_comps[temp == 6]))
        ign = np.setdiff1d(all_comps, list(acc_comps) + list(midk) + list(rej))
        orphan = np.setdiff1d(
            all_comps,
            list(acc_comps) + list(to_ign) + list(midk) + list(rej))

    if savecsdiag:
        diagstep_keys = [
            'Rejected components', 'Kappa-Rho cut point', 'Kappa cut',
            'Rho cut', 'DBSCAN failed to converge', 'Kappa-Rho guess',
            'Dice rejected', 'rej_supp', 'to_clf', 'Mid-kappa components',
            'svm_acc_fail', 'toacc_hi', 'toacc_lo', 'Field artifacts',
            'Physiological artifacts', 'Miscellaneous artifacts', 'acc_comps',
            'Ignored components'
        ]
        diagstep_vals = [
            list(rej),
            KRcut.item(),
            Kcut.item(),
            Rcut.item(), dbscanfailed,
            list(KRguess), dice_rej,
            list(rej_supp),
            list(to_clf),
            list(midk), svm_acc_fail,
            list(toacc_hi),
            list(toacc_lo),
            list(field_art),
            list(phys_art),
            list(misc_art),
            list(acc_comps),
            list(ign)
        ]

        with open('csstepdata.json', 'w') as ofh:
            json.dump(dict(zip(diagstep_keys, diagstep_vals)),
                      ofh,
                      indent=4,
                      sort_keys=True,
                      default=str)
        allfz = np.array([Tz, Vz, Ktz, KRr, cnz, Rz, mmix_kurt, fdist_z])
        np.savetxt('csdata.txt', allfz)

    return list(sorted(acc_comps)), list(sorted(rej)), list(
        sorted(midk)), list(sorted(ign))
Example #33
0
  def testComplexOps(self):
    for dtype in self.complex_types:

      self._assertOpOutputMatchesExpected(
          math_ops.acosh,
          np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
          expected=np.arccosh(
              np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.asinh,
          np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
          expected=np.arcsinh(
              np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.atanh,
          np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
          expected=np.arctanh(
              np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.cosh,
          np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype),
          expected=np.cosh(np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.sinh,
          np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
          expected=np.sinh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.exp,
          np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
          expected=np.exp(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.expm1,
          np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
          expected=np.expm1(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)),
          rtol=1e-6,
          atol=1e-6)

      # For real part close to zero, or imaginary part close to a multiple of
      # pi.

      self._assertOpOutputMatchesExpected(
          math_ops.expm1,
          np.array([[1e-11 + 1j, -1e-11 - 1j, 1. + 1e-11j,
                     -1. - 1e-11j, 1e-13j + 1e-13j]], dtype=dtype),
          # TODO(srvasude): Use numpy as the source of truth after we depend on
          # latest numpy with this pull request:
          # https://github.com/numpy/numpy/pull/15110.
          # The numbers below were generated by scipy.special.expm1.
          expected=np.array([[
              -4.59697694e-01+8.41470985e-01j,
              -4.59697694e-01-8.41470985e-01j,
              1.71828183e+00+2.71828183e-11j,
              -6.32120559e-01-3.67879441e-12j,
              -2.00000000e-26+2.00000000e-13j]], dtype=dtype),
          rtol=1e-09,
          atol=1e-20)

      self._assertOpOutputMatchesExpected(
          math_ops.reciprocal,
          np.array([[1, 2j, 2 + 3j]], dtype=dtype),
          expected=1.0 / np.array([[1, 2j, 2 + 3j]], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          math_ops.log,
          np.array([[5j, 3 - 2j]], dtype=dtype),
          expected=np.log(np.array([[5j, 3 - 2j]], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.sin,
          np.array([[5j, 3 - 2j]], dtype=dtype),
          expected=np.sin(np.array([[5j, 3 - 2j]], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.cos,
          np.array([[5j, 3 - 2j]], dtype=dtype),
          expected=np.cos(np.array([[5j, 3 - 2j]], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.log1p,
          np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype),
          expected=np.log1p(
              np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype)),
          rtol=1e-4,
          atol=1e-6)

      val = np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)
      self._assertOpOutputMatchesExpected(
          math_ops.rsqrt, val, expected=1 / np.sqrt(val))

      self._assertOpOutputMatchesExpected(
          math_ops.sigmoid, val, expected=1 / (1 + np.exp(-val)))

      self._assertOpOutputMatchesExpected(
          math_ops.sqrt, val, expected=np.sqrt(val))

      self._assertOpOutputMatchesExpected(
          math_ops.tanh,
          np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
          expected=np.tanh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.tan,
          np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
          expected=np.tan(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))

      ctypes = {np.complex64: np.float32, np.complex128: np.float64}
      self._assertOpOutputMatchesExpected(
          math_ops.abs,
          np.array([[3 - 4j, -1j, np.inf]], dtype=dtype),
          expected=np.array([[5, 1, np.inf]], dtype=ctypes[dtype]))

      self._assertOpOutputMatchesExpected(
          math_ops.negative,
          np.array([[-1 + 2j, -3j]], dtype=dtype),
          expected=np.array([[1 - 2j, 3j]], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          math_ops.square,
          np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype),
          expected=np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype)**2)

      self._assertOpOutputMatchesExpected(
          array_ops.zeros_like,
          np.array([[4j, 3 - 2j], [2, -1j]], dtype=dtype),
          expected=np.array([[0, 0], [0, 0]], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          array_ops.ones_like,
          np.array([[-4j, 3 + 2j], [2, -1j]], dtype=dtype),
          expected=np.array([[1, 1], [1, 1]], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          math_ops.angle,
          np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
          expected=np.angle(np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.conj,
          np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
          expected=np.array([1 - 3j, -4 - 7j, 2.7, 3j], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          math_ops.imag,
          np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
          expected=np.array([3, 7, 0, -3], dtype=ctypes[dtype]))

      self._assertOpOutputMatchesExpected(
          math_ops.real,
          np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
          expected=np.array([1, -4, 2.7, 0], dtype=ctypes[dtype]))
Example #34
0
def fyi(x):
    return np.arctanh(x)
Example #35
0
def unscale(serie):
    return np.arctanh(serie)
Example #36
0
class Atanh(Activation):
    op = 'Atanh'
    version = 'opset4'
    operation = staticmethod(lambda x: np.arctanh(x))
Example #37
0
def _eval_model(model_id, perm=None):
    # set vars from model
    mm = _global_meld[model_id]
    _R = mm._R

    # Calculate R
    R = []

    ind_b = np.arange(len(mm._groups))

    # loop over group vars
    ind = {}
    for i, k in enumerate(mm._groups[ind_b]):
        # grab the A and M
        A = mm._A[k]
        M = mm._M[k]

        # gen a perm for that subj
        if perm is None:
            ind[k] = np.arange(len(A))
        else:
            ind[k] = perm[k]

        if perm is None and mm._R is not None:
            # reuse R we already calculated
            R.append(mm._R[ind_b[i]])
        else:
            # calc the correlation
            #R.append(np.dot(A.T,M[ind[k]].copy()))
            #R.append(blockwise_dot(M[:][ind[k]].T, A).T)
            R.append(blockwise_dot(M[ind[k]].T, A).T)

    # turn R into array
    R_nocat = np.array(R)

    # zero invariant features
    feat_mask = np.isnan(R)
    R_nocat[feat_mask] = 0.0

    # turn to Z
    # make sure we are not 1/-1
    R_nocat[R_nocat > .9999] = .9999
    R_nocat[R_nocat < -.9999] = -.9999
    R_nocat = np.arctanh(R_nocat)
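    # Editor's note: np.arctanh diverges at +/-1, so the correlations are
    # clipped to +/-0.9999 above before the Fisher r-to-Z transform.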

    if mm._do_tfce:
        # run TFCE
        Ztemp = R_to_tfce(R_nocat,
                          mm._dep_mask,
                          connectivity=mm._connectivity,
                          shape=mm._feat_shape,
                          dt=mm._dt,
                          E=mm._E,
                          H=mm._H)
    else:
        # just use the current Z without TFCE
        Ztemp = R_nocat

    # pick only stable features
    # NOTE: Ztemp is no longer R, it's either TFCE or Z
    Rtbr = pick_stable_features(Ztemp, nboot=mm._feat_nboot)

    # actually use the TFCE for SVD
    R_nocat = Ztemp

    # apply the thresh
    stable_ind = Rtbr < mm._feat_thresh
    stable_ind = stable_ind.reshape((stable_ind.shape[0], -1))

    # zero out non-stable
    feat_mask[:, ~stable_ind] = True
    R_nocat[:, ~stable_ind] = 0.0

    # save R before concatenating if not a permutation
    if perm is None and _R is None:
        _R = R_nocat.copy()

    # concatenate R for SVD
    # NOTE: It's really Z or TFCE now
    R = np.concatenate([R_nocat[i] for i in range(len(R_nocat))])

    # perform svd
    U, s, Vh = np.linalg.svd(R, full_matrices=False)

    # fix near zero vals from SVD
    Vh[np.abs(Vh) < (.00000001 * mm._dt)] = 0.0
    s[np.abs(s) < .00000001] = 0.0

    # calc prop of variance accounted for
    if mm._ss is None:
        # _ss = np.sqrt((s*s).sum())
        _ss = s.sum()
    else:
        _ss = mm._ss
    # ss /= ss.sum()
    ss = s

    # set up lmer
    O = None
    lmer = None
    if mm._mer is None:
        O = [mm._O[i].copy() for i in ind_b]

        lmer = LMER(mm._formula_str,
                    np.concatenate(O),
                    factors=mm._factors,
                    resid_formula_str=mm._resid_formula_str,
                    **mm._lmer_opts)
        mer = None
    else:
        mer = mm._mer

    # loop over LVs performing LMER
    res = []
    for i in range(len(Vh)):
        if ss[i] <= 0.0:
            # print 'skipped ',str(i)
            continue

        # flatten then weigh features via dot product
        Dw = np.concatenate([  #np.dot(mm._D[k][ind[k]].copy(),Vh[i])
            #blockwise_dot(mm._D[k][:][ind[k]], Vh[i])
            blockwise_dot(mm._D[k][ind[k]], Vh[i])
            for g, k in enumerate(mm._groups[ind_b])
        ])

        # run the main model
        if mer is None:
            # run the model for the first time and save it
            res.append(lmer.run(vals=Dw))
            mer = lmer._ms
        else:
            # use the saved model and just refit it for speed
            mer = r['refit'](mer, FloatVector(Dw))
            df = r['data.frame'](r_coef(r['summary'](mer)))
            rows = list(r['row.names'](df))
            new_tvals = np.rec.fromarrays([[tv]
                                           for tv in tuple(df.rx2('t.value'))],
                                          names=','.join(rows))
            new_ll = float(r['logLik'](mer)[0])
            res.append((new_tvals, np.array([new_ll])))

    if len(res) == 0:
        # must make dummy data
        if lmer is None:
            O = [mm._O[i].copy() for i in ind_b]
            # if boot is not None:
            #     # replace the group
            #     for i, k in enumerate(mm._groups):
            #         O[i][mm._re_group] = k

            lmer = LMER(mm._formula_str,
                        np.concatenate(O),
                        factors=mm._factors,
                        resid_formula_str=mm._resid_formula_str,
                        **mm._lmer_opts)

        Dw = np.random.randn(len(np.concatenate(O)))
        temp_t, temp_ll = lmer.run(vals=Dw)

        for n in temp_t.dtype.names:
            temp_t[n] = 0.0
        temp_ll[0] = 0.0
        res.append((temp_t, temp_ll))

        # must make ss, too
        ss = np.array([1.0])
        # print "perm fail"

    # pull out data from all the components
    tvals, log_likes = zip(*res)
    tvals = np.concatenate(tvals)
    log_likes = np.concatenate(log_likes)

    # recombine and scale the tvals across components
    ts = np.rec.fromarrays(
        [
            np.dot(tvals[k], ss[ss > 0.0] / _ss)  # /(ss>0.).sum()
            for k in tvals.dtype.names
        ],
        names=','.join(tvals.dtype.names))

    # scale tvals across features
    tfs = []
    for k in tvals.dtype.names:
        # tfs.append(np.dot(tvals[k],
        #                   np.dot(diagsvd(ss[ss > 0],
        #                                  len(ss[ss > 0]),
        #                                  len(ss[ss > 0])),
        #                          Vh[ss > 0, ...])))  # /(ss>0).sum())
        tfs.append(
            blockwise_dot(
                tvals[k],
                blockwise_dot(
                    diagsvd(ss[ss > 0], len(ss[ss > 0]), len(ss[ss > 0])),
                    Vh[ss > 0, ...])))
    tfs = np.rec.fromarrays(tfs, names=','.join(tvals.dtype.names))

    # decide what to return
    if perm is None:
        # return tvals, tfs, and R for actual non-permuted data
        out = (ts, tfs, _R, feat_mask, _ss, mer)
    else:
        # return the tvals for the terms
        out = (ts, tfs, ~feat_mask[0])

    return out
Example #38
0
    def attack_batch(self, imgs, labs):
        """
        Run the attack on a batch of images and labels.
        """
        # print("imgs, labs in attack_batch", imgs, labs) #shape=(1, 28, 28, 1), dtype=float32) [array([0., 0., 0., 0., 0., 0., 1., 0., 0., 0.])]

        batch_size = self.batch_size

        def compare(x, y):
            if not isinstance(x, (float, int, np.int64)):
                x = x.numpy()
                x = np.copy(x)
                if self.TARGETED:
                    x[y] -= self.CONFIDENCE
                else:
                    x[y] += self.CONFIDENCE
                x = np.argmax(x)
            if self.TARGETED:
                return x == y
            else:
                return x != y

        # @tf.function
        def train_step(modifier, timg, tlab, const):
            with tf.GradientTape() as tape:
                newimg = tf.tanh(modifier + timg) * self.boxmul + self.boxplus
                # newimg = np.random.rand(1, 28, 28, 1)
                output = model.predict(newimg)
                output = tf.cast(output, dtype=tf.float32)
                l2dist = tf.reduce_sum(
                    tf.square(newimg -
                              (tf.tanh(timg) * self.boxmul + self.boxplus)),
                    [1, 2, 3])
                real = tf.math.reduce_sum((tlab) * output, 1)
                other = tf.math.reduce_max(
                    (1 - tlab) * output - (tlab * 10000), 1)
                if self.TARGETED:
                    # if targeted, optimize for making the other class most likely
                    loss1 = tf.maximum(0.0, other - real + self.CONFIDENCE)
                else:
                    # if untargeted, optimize for making this class least likely.
                    loss1 = tf.maximum(0.0, real - other + self.CONFIDENCE)

                loss2 = tf.reduce_sum(l2dist)
                loss1 = tf.reduce_sum(const * loss1)

                loss = loss1 + loss2
            optimizer = optimizers.Adam(self.LEARNING_RATE)
            loss_metric = tf.keras.metrics.Mean(name='train_loss')
            # optimizer.minimize(self.loss, var_list=[modifier])
            grads = tape.gradient(loss, [modifier])
            optimizer.apply_gradients(zip(grads, [modifier]))
            loss_metric.update_state(loss)
            return loss, l2dist, output, newimg, loss1, loss2

        # convert to tanh-space
        imgs = np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999)
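        # Editor's note: this appears to be the Carlini-Wagner style change of
        # variables: pixels are mapped into tanh-space so that
        # tanh(modifier + timg) * boxmul + boxplus always stays inside the
        # valid pixel box; the 0.999999 factor keeps arctanh away from +/-1.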
        # print(np.shape(imgs))
        lower_bound = np.zeros(batch_size)
        CONST = np.ones(batch_size) * self.initial_const
        upper_bound = np.ones(batch_size) * 1e10

        # the best l2, score, and image attack
        o_bestl2 = [1e10] * batch_size
        o_bestscore = [-1] * batch_size
        o_bestattack = [np.zeros(imgs[0].shape)] * batch_size
        print(np.shape(o_bestattack),
              "np.shape(o_bestattack)")  # (1, 28, 28, 1)

        for outer_step in range(self.BINARY_SEARCH_STEPS):
            batch = tf.Variable(imgs[:batch_size], dtype=tf.float32)
            batchlab = tf.Variable(labs[:batch_size], dtype=tf.float32)
            # print("*******batchlab***********", batchlab)  # shape=(1, 10)
            bestl2 = [1e10] * batch_size
            bestscore = [-1] * batch_size
            if self.repeat == True and outer_step == self.BINARY_SEARCH_STEPS - 1:
                CONST = upper_bound

            modifier = tf.Variable(np.zeros((1, 28, 28, 1), dtype=np.float32))
            const = tf.Variable(CONST, dtype=tf.float32)
            prev = np.inf
            for iteration in range(self.MAX_ITERATIONS):
                # perform the attack

                l, l2s, scores, nimg, loss1, loss2 = train_step(
                    modifier, batch, batchlab, const)
                if np.all(scores >= -.0001) and np.all(scores <= 1.0001):
                    if np.allclose(np.sum(scores, axis=1), 1.0, atol=1e-3):
                        if not self.I_KNOW_WHAT_I_AM_DOING_AND_WANT_TO_OVERRIDE_THE_PRESOFTMAX_CHECK:
                            raise Exception(
                                "The output of model.predict should return the pre-softmax layer. It looks like you are returning the probability vector (post-softmax). If you are sure you want to do that, set attack.I_KNOW_WHAT_I_AM_DOING_AND_WANT_TO_OVERRIDE_THE_PRESOFTMAX_CHECK = True"
                            )

                if iteration % (self.MAX_ITERATIONS // 10) == 0:
                    print(iteration, l, loss1, loss2)
                # check if we should abort search if we're getting nowhere.
                if self.ABORT_EARLY and iteration % (self.MAX_ITERATIONS //
                                                     10) == 0:
                    if l > prev * .9999:
                        break
                    prev = l
                # adjust the best result found so far
                for e, (l2, sc, ii) in enumerate(zip(l2s, scores, nimg)):
                    # print("batchlab", np.argmax(batchlab[e]))
                    # print("(sc, np.argmax(batchlab))", sc, np.argmax(sc))
                    # print("l2 and bestl2[e]", l2, bestl2[e])
                    # print("compare(sc, tf.argmax(batchlab))",
                    #       compare(sc, tf.argmax(batchlab[e])))
                    if l2 < bestl2[e] and compare(sc, np.argmax(batchlab[e])):
                        bestl2[e] = l2
                        bestscore[e] = np.argmax(sc)
                    if l2 < o_bestl2[e] and compare(sc, np.argmax(
                            batchlab[e])):
                        o_bestl2[e] = l2
                        o_bestscore[e] = np.argmax(sc)
                        o_bestattack[e] = ii

                # adjust the constant as needed
            for e in range(batch_size):
                print("bestscore[e]", bestscore[e])
                if compare(bestscore[e], np.argmax(
                        batchlab[e])) and bestscore[e] != -1:
                    # success, divide const by two
                    upper_bound[e] = min(upper_bound[e], CONST[e])
                    if upper_bound[e] < 1e9:
                        CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
                else:
                    # failure, either multiply by 10 if no solution found yet
                    #          or do binary search with the known upper bound
                    lower_bound[e] = max(lower_bound[e], CONST[e])
                    if upper_bound[e] < 1e9:
                        CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
                    else:
                        CONST[e] *= 10
        o_bestl2 = np.array(o_bestl2)
        return o_bestattack
Example #39
0
        between_subj_data = []

        for subj in subj_overlap:
            print('%s: %s %s Getting corr data' % (pu.ctime(), sess, subj))
            within_subj_data = []

            for roi in rois:
                cfc_file = h5py.File(pac_first_level, 'r')
                rval_path = subj + '/' + sess + '/' + roi + '/' + 'r_vals'

                dset = cfc_file.get(rval_path).value
                within_subj_data.append(dset[:, :])

                cfc_file.close()

            between_subj_data.append(np.arctanh(np.asarray(within_subj_data)))
            del within_subj_data

        between_subj_array = np.asarray(between_subj_data)
        del between_subj_data

        print('%s: %s Reorganizing data by ROI' % (pu.ctime(), sess))
        roi_dict = {}
        for r, roi in enumerate(rois):
            band_dict = {}

            for s, slow in enumerate(slow_bands):
                key_name = slow + ' with supraslow bands'
                data_to_grab = between_subj_array[:, r, s, :]

                band_dict[key_name] = np.ndarray.squeeze(data_to_grab)
Example #40
0
def return_conf_cor_mat(ts_mat, weight_vect, conf_interval_prob=0.01,
                        normalize=False):

    """
    Compute correlation matrices over a time series and weight vector:
    either an R-value matrix (cor_mat)
    or a Z matrix (Z_cor_mat, after R-to-Z transformation).
    It also returns possibly thresholded matrices (conf_cor_mat and
    Z_conf_cor_mat) according to a confidence interval probability
    conf_interval_prob.
    """
    if ts_mat.shape[1] == len(weight_vect):
        print("Transposing data_matrix shape {} -> {}".format(
            ts_mat.shape, np.transpose(ts_mat).shape))
        ts_mat = np.transpose(ts_mat)

    assert ts_mat.shape[0] == len(weight_vect), \
        ("Error, incompatible regressor length {} {}".format(ts_mat.shape[0],
                                                             len(weight_vect)))

    if normalize:  # pragma: no cover
        print("Normalising data before computing Correlation")
        ts_mat = stats.zscore(ts_mat, axis=0, nan_policy="omit")

    keep = weight_vect > 0.0
    w = weight_vect[keep]
    ts_mat = ts_mat[keep, :]

    # confidence interval for variance computation
    norm = stats.norm.ppf(1-conf_interval_prob/2)
    deg_freedom = w.sum()/w.max()-3
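    # Editor's note: the thresholding further down keeps an entry when
    # |arctanh(r)| exceeds norm.ppf(1 - conf_interval_prob/2) / sqrt(deg_freedom),
    # i.e. it treats the Fisher Z values as approximately normal with
    # standard error 1 / sqrt(deg_freedom).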

    s, n = ts_mat.shape

    Z_cor_mat = np.zeros((n, n), dtype=float)
    Z_conf_cor_mat = np.zeros((n, n), dtype=float)
    cor_mat = np.zeros((n, n), dtype=float)
    conf_cor_mat = np.zeros((n, n), dtype=float)

    ts_mat2 = ts_mat*np.sqrt(w)[:, np.newaxis]

    for i, j in it.combinations(list(range(n)), 2):

        keep_val = ~(np.isnan(ts_mat2[:, i]) | np.isnan(ts_mat2[:, j]))

        s1 = ts_mat2[keep_val, i]
        s2 = ts_mat2[keep_val, j]

        cor_mat[i, j] = (s1*s2).sum()/np.sqrt((s1*s1).sum() * (s2*s2).sum())
        Z_cor_mat[i, j] = np.arctanh(cor_mat[i, j])

        assert not np.isnan(Z_cor_mat[i, j]), \
            ("Error Z_cor_mat {} {} should not be NAN value".format(i, j))

        assert not np.isinf(Z_cor_mat[i, j]), \
            ("Error Z_cor_mat {} {} should not be infinite value".format(i, j))

    pos_Z = (np.sign(Z_cor_mat) == +1.0)
    neg_Z = (np.sign(Z_cor_mat) == -1.0)
    signif_pos = (Z_cor_mat > norm/np.sqrt(deg_freedom)) & pos_Z
    signif_neg = (Z_cor_mat < -norm/np.sqrt(deg_freedom)) & neg_Z

    Z_conf_cor_mat[signif_pos] = Z_cor_mat[signif_pos]
    Z_conf_cor_mat[signif_neg] = Z_cor_mat[signif_neg]

    conf_cor_mat[signif_pos] = cor_mat[signif_pos]
    conf_cor_mat[signif_neg] = cor_mat[signif_neg]

    return cor_mat, Z_cor_mat, conf_cor_mat, Z_conf_cor_mat
Example #41
0
def sie_grad(x, y, par):
    """
    NAME: sie_grad

    PURPOSE: compute the deflection of an SIE potential

    USAGE: (xg, yg) = sie_grad(x, y, par)

    ARGUMENTS:
      x, y: vectors or images of coordinates;
            should be matching numpy ndarrays
      par: vector of parameters with 1 to 5 elements, defined as follows:
        par[0]: lens strength, or 'Einstein radius'
        par[1]: (optional) x-center (default = 0.0)
        par[2]: (optional) y-center (default = 0.0)
        par[3]: (optional) axis ratio (default=1.0)
        par[4]: (optional) major axis Position Angle
                in degrees c.c.w. of x axis. (default = 0.0)

    RETURNS: tuple (xg, yg) of gradients at the positions (x, y)

    NOTES: This routine implements an 'intermediate-axis' convention.
      Analytic forms for the SIE potential can be found in:
        Kassiola & Kovner 1993, ApJ, 417, 450
        Kormann et al. 1994, A&A, 284, 285
        Keeton & Kochanek 1998, ApJ, 495, 157
      The parameter-order convention in this routine differs from that
      of a previous IDL routine of the same name by ASB.

    WRITTEN: Adam S. Bolton, U of Utah, 2009
    """
    # Set parameters:
    b = N.abs(par[0])  # can't be negative!!!
    xzero = 0. if (len(par) < 2) else par[1]
    yzero = 0. if (len(par) < 3) else par[2]
    q = 1. if (len(par) < 4) else N.abs(par[3])
    phiq = 0. if (len(par) < 5) else par[4]
    eps = 0.001  # for sqrt(1/q - q) < eps, a limit expression is used.
    # Handle q > 1 gracefully:
    if (q > 1.):
        q = 1.0 / q
        phiq = phiq + 90.0
    # Go into shifted coordinates of the potential:
    phirad = N.deg2rad(phiq)
    xsie = (x - xzero) * N.cos(phirad) + (y - yzero) * N.sin(phirad)
    ysie = (y - yzero) * N.cos(phirad) - (x - xzero) * N.sin(phirad)
    # Compute potential gradient in the transformed system:
    r_ell = N.sqrt(q * xsie**2 + ysie**2 / q)
    qfact = N.sqrt(1. / q - q)
    # (r_ell == 0) terms prevent divide-by-zero problems
    if (qfact >= eps):
        xtg = (b / qfact) * N.arctan(qfact * xsie / (r_ell + (r_ell == 0)))
        ytg = (b / qfact) * N.arctanh(qfact * ysie / (r_ell + (r_ell == 0)))
    else:
        xtg = b * xsie / (r_ell + (r_ell == 0))
        ytg = b * ysie / (r_ell + (r_ell == 0))
    # Transform back to un-rotated system:
    xg = xtg * N.cos(phirad) - ytg * N.sin(phirad)
    yg = ytg * N.cos(phirad) + xtg * N.sin(phirad)
    # Return value:
    return (xg, yg)
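# Editor's usage sketch (hypothetical, not part of the original example;
# assumes numpy has been imported as N, as in the function body above):
#   xx, yy = N.meshgrid(N.linspace(-2., 2., 5), N.linspace(-2., 2., 5))
#   xg, yg = sie_grad(xx, yy, [1.5, 0., 0., 0.8, 30.])
#   x_src, y_src = xx - xg, yy - yg  # ray-trace image plane back to the source plane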
Example #42
0
import numpy as np
import matplotlib.pyplot as plt

maxes = [0.981439, 0.643811, 0.36434, 0.136199]

bluber = np.tanh(maxes)
plt.plot(np.tanh(maxes), np.tanh(maxes), "ro")
plt.rc('text', usetex=True)
plt.ylabel(r'$tanh(\beta J)$')
plt.xlabel(r'$tanh(\beta J)$')
x = np.linspace(1, 0, 100)
y = np.tanh(np.arctanh(x))**2
f = lambda x: np.tanh(np.arctanh(x))**2
interpol = np.tanh(maxes)
params = np.polyfit(interpol, interpol, 1)

plt.plot(x, y, "b")
plt.plot(x, params[0] * x + params[1], "b")

#plot connections
line1 = []
print(line1)
for i in range(0, len(bluber)):
    plt.plot([bluber[i], bluber[i]], [bluber[i], f(bluber[i])], "r-.")
    plt.plot([bluber[i], f(bluber[i])], [f(bluber[i]), f(bluber[i])], "r-.")
plt.show()
Example #43
0
 def arctanh(x):
     return np.arctanh(x)
Example #44
0
    def test_squashed_gaussian(self):
        """Tests the SquashedGaussia ActionDistribution (tf-eager only)."""
        with eager_mode():
            input_space = Box(-1.0, 1.0, shape=(200, 10))
            low, high = -2.0, 1.0

            # Batch of size=n and deterministic.
            inputs = input_space.sample()
            means, _ = np.split(inputs, 2, axis=-1)
            squashed_distribution = SquashedGaussian(inputs, {},
                                                     low=low,
                                                     high=high)
            expected = ((np.tanh(means) + 1.0) / 2.0) * (high - low) + low
            # Sample n times, expect always mean value (deterministic draw).
            out = squashed_distribution.deterministic_sample()
            check(out, expected)

            # Batch of size=n and non-deterministic -> expect roughly the mean.
            inputs = input_space.sample()
            means, log_stds = np.split(inputs, 2, axis=-1)
            squashed_distribution = SquashedGaussian(inputs, {},
                                                     low=low,
                                                     high=high)
            expected = ((np.tanh(means) + 1.0) / 2.0) * (high - low) + low
            values = squashed_distribution.sample()
            self.assertTrue(np.max(values) < high)
            self.assertTrue(np.min(values) > low)

            check(np.mean(values), expected.mean(), decimals=1)

            # Test log-likelihood outputs.
            sampled_action_logp = squashed_distribution.sampled_action_logp()
            # Convert to parameters for distr.
            stds = np.exp(
                np.clip(log_stds, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT))
            # Unsquash values, then get log-llh from regular gaussian.
            unsquashed_values = np.arctanh((values - low) /
                                           (high - low) * 2.0 - 1.0)
            log_prob_unsquashed = \
                np.sum(np.log(norm.pdf(unsquashed_values, means, stds)), -1)
            log_prob = log_prob_unsquashed - \
                np.sum(np.log(1 - np.tanh(unsquashed_values) ** 2),
                       axis=-1)
            check(np.mean(sampled_action_logp), np.mean(log_prob), rtol=0.01)

            # NN output.
            means = np.array([[0.1, 0.2, 0.3, 0.4, 50.0],
                              [-0.1, -0.2, -0.3, -0.4, -1.0]])
            log_stds = np.array([[0.8, -0.2, 0.3, -1.0, 2.0],
                                 [0.7, -0.3, 0.4, -0.9, 2.0]])
            squashed_distribution = SquashedGaussian(np.concatenate(
                [means, log_stds], axis=-1), {},
                                                     low=low,
                                                     high=high)
            # Convert to parameters for distr.
            stds = np.exp(log_stds)
            # Values to get log-likelihoods for.
            values = np.array([[0.9, 0.2, 0.4, -0.1, -1.05],
                               [-0.9, -0.2, 0.4, -0.1, -1.05]])

            # Unsquash values, then get log-llh from regular gaussian.
            unsquashed_values = np.arctanh((values - low) /
                                           (high - low) * 2.0 - 1.0)
            log_prob_unsquashed = \
                np.sum(np.log(norm.pdf(unsquashed_values, means, stds)), -1)
            log_prob = log_prob_unsquashed - \
                np.sum(np.log(1 - np.tanh(unsquashed_values) ** 2),
                       axis=-1)

            out = squashed_distribution.logp(values)
            check(out, log_prob)
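The unsquashing and log-likelihood correction used in the test above is the standard change of variables for a tanh-squashed Gaussian: if a = tanh(u) with u Gaussian, then log p(a) = log p(u) - log(1 - tanh(u)^2). A tiny self-contained sketch (plain NumPy/SciPy, not the SquashedGaussian class itself) illustrates it:

import numpy as np
from scipy.stats import norm

# Standalone illustration of the tanh change of variables, not RLlib code.
rng = np.random.default_rng(0)
mean, std = 0.3, 0.5
u = rng.normal(mean, std, size=100_000)
a = np.tanh(u)                                        # squashed samples
u_back = np.arctanh(np.clip(a, -1 + 1e-7, 1 - 1e-7))  # unsquash
log_p = norm.logpdf(u_back, mean, std) - np.log(1 - np.tanh(u_back) ** 2)
print(log_p.mean())   # Monte-Carlo estimate of E[log p(a)], i.e. minus the entropy of a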
Example #45
0
 def corr_cint(r, n):
     # 95% confidence interval for a Pearson correlation r from n pairs,
     # using the Fisher z-transform (standard error of z is 1/sqrt(n - 3))
     z = np.arctanh(r)
     sigma = (1 / (n - 3)**0.5)
     cint = z + np.array([-1, 1]) * sigma * st.norm.ppf((1 + 0.95) / 2)
     return np.tanh(cint)
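A hedged usage note for corr_cint above (assuming st is scipy.stats, as the call to st.norm.ppf suggests): for a sample correlation of 0.5 measured on 30 pairs it returns roughly [0.17, 0.73].

import numpy as np
import scipy.stats as st

print(corr_cint(0.5, 30))   # approximately [0.17  0.73]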
Example #46
0
top_patch = blocks.create_patch_nb_faces(name='top', nb_faces=1)
top_patch[0] = [5, 4]

right_patch = blocks.create_patch_nb_faces(name='right', nb_faces=2)
right_patch[0] = [1, 3]
right_patch[1] = [3, 5]

#blocks.partition_blocks(nb_partitions = 4, direction = 0)

mesh = domain.create_component('Mesh', 'cf3.mesh.Mesh')
blocks.create_mesh(mesh.uri())

coordmap = {}
b = 0.9544
xi = np.linspace(-h, h, y_segs * 2 + 1)
y_graded = h / b * np.tanh(xi * np.arctanh(b))

coords = mesh.geometry.coordinates
for i in range(len(coords)):
    y_key = int(coords[i][1])
    coords[i][1] = y_graded[y_key]

# Make the boundary global, to allow wall distance and periodics to work correctly
make_boundary_global = domain.create_component(
    'MakeBoundaryGlobal', 'cf3.mesh.actions.MakeBoundaryGlobal')
make_boundary_global.mesh = mesh
make_boundary_global.execute()

link_horizontal = domain.create_component(
    'LinkHorizontal', 'cf3.mesh.actions.LinkPeriodicNodes')
link_horizontal.mesh = mesh
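The y_graded line above applies a hyperbolic-tangent stretching so mesh nodes cluster near the channel walls. h and y_segs are defined earlier in the original script and are not shown here, so the values below are assumptions used only to visualise the spacing.

import numpy as np

# Standalone sketch of the tanh grading; h = 1.0 and y_segs = 8 are assumed.
h, y_segs, b = 1.0, 8, 0.9544
xi = np.linspace(-h, h, y_segs * 2 + 1)
y_graded = h / b * np.tanh(xi * np.arctanh(b))
print(np.round(np.diff(y_graded), 3))   # cell sizes shrink toward the walls at y = +/- h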
Example #47
0
import numpy as np
from pylab import *

x = np.linspace(-np.pi, np.pi, 2001)
sines = np.sin(x)
hist(np.arctanh(np.clip(sines, -0.999, 0.999)), bins=40)
show()
Example #48
0
  def attack_batch(self, imgs, labs):
    """
    Run the attack on a batch of instances and labels.
    """

    def compare(x, y):
      if not isinstance(x, (float, int, np.int64)):
        x = np.copy(x)
        if self.TARGETED:
          x[y] -= self.CONFIDENCE
        else:
          x[y] += self.CONFIDENCE
        x = np.argmax(x)
      if self.TARGETED:
        return x == y
      else:
        return x != y

    batch_size = self.batch_size

    oimgs = np.clip(imgs, self.clip_min, self.clip_max)

    # re-scale instances to be within range [0, 1]
    imgs = (imgs - self.clip_min) / (self.clip_max - self.clip_min)
    imgs = np.clip(imgs, 0, 1)
    # now convert to [-1, 1]
    imgs = (imgs * 2) - 1
    # convert to tanh-space
    imgs = np.arctanh(imgs * .999999)

    # set the lower and upper bounds accordingly
    lower_bound = np.zeros(batch_size)
    CONST = np.ones(batch_size) * self.initial_const
    upper_bound = np.ones(batch_size) * 1e10

    # placeholders for the best l2, score, and instance attack found so far
    o_bestl2 = [1e10] * batch_size
    o_bestscore = [-1] * batch_size
    o_bestattack = np.copy(oimgs)

    for outer_step in range(self.BINARY_SEARCH_STEPS):
      # completely reset adam's internal state.
      self.sess.run(self.init)
      batch = imgs[:batch_size]
      batchlab = labs[:batch_size]

      bestl2 = [1e10] * batch_size
      bestscore = [-1] * batch_size
      _logger.debug("  Binary search step %s of %s",
                    outer_step, self.BINARY_SEARCH_STEPS)

      # The last iteration (if we run many steps) repeats the search once.
      if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:
        CONST = upper_bound

      # set the variables so that we don't have to send them over again
      self.sess.run(
          self.setup, {
              self.assign_timg: batch,
              self.assign_tlab: batchlab,
              self.assign_const: CONST
          })

      prev = 1e6
      for iteration in range(self.MAX_ITERATIONS):
        # perform the attack
        _, l, l2s, scores, nimg = self.sess.run([
            self.train, self.loss, self.l2dist, self.output,
            self.newimg
        ])

        if iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
          _logger.debug(("    Iteration {} of {}: loss={:.3g} " +
                         "l2={:.3g} f={:.3g}").format(
                             iteration, self.MAX_ITERATIONS, l,
                             np.mean(l2s), np.mean(scores)))

        # check if we should abort search if we're getting nowhere.
        if self.ABORT_EARLY and \
           iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
          if l > prev * .9999:
            msg = "    Failed to make progress; stop early"
            _logger.debug(msg)
            break
          prev = l

        # adjust the best result found so far
        for e, (l2, sc, ii) in enumerate(zip(l2s, scores, nimg)):
          lab = np.argmax(batchlab[e])
          if l2 < bestl2[e] and compare(sc, lab):
            bestl2[e] = l2
            bestscore[e] = np.argmax(sc)
          if l2 < o_bestl2[e] and compare(sc, lab):
            o_bestl2[e] = l2
            o_bestscore[e] = np.argmax(sc)
            o_bestattack[e] = ii

      # adjust the constant as needed
      for e in range(batch_size):
        if compare(bestscore[e], np.argmax(batchlab[e])) and \
           bestscore[e] != -1:
          # success, divide const by two
          upper_bound[e] = min(upper_bound[e], CONST[e])
          if upper_bound[e] < 1e9:
            CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
        else:
          # failure, either multiply by 10 if no solution found yet
          #          or do binary search with the known upper bound
          lower_bound[e] = max(lower_bound[e], CONST[e])
          if upper_bound[e] < 1e9:
            CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
          else:
            CONST[e] *= 10
      _logger.debug("  Successfully generated adversarial examples " +
                    "on {} of {} instances.".format(
                        sum(upper_bound < 1e9), batch_size))
      o_bestl2 = np.array(o_bestl2)
      mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
      _logger.debug("   Mean successful distortion: {:.4g}".format(mean))

    # return the best solution found
    o_bestl2 = np.array(o_bestl2)
    return o_bestattack
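The attack above optimises in tanh space: images are rescaled from [clip_min, clip_max] to [-1, 1] and mapped through arctanh, so the optimisation variable is unbounded while tanh of it always lands back inside the valid pixel box. A standalone round-trip sketch (illustration only, not CleverHans code):

import numpy as np

# Box -> tanh-space -> box round trip, mirroring the conversion above.
clip_min, clip_max = 0.0, 255.0
imgs = np.array([0.0, 31.0, 127.0, 255.0])
scaled = (imgs - clip_min) / (clip_max - clip_min)   # to [0, 1]
w = np.arctanh((scaled * 2 - 1) * .999999)           # unconstrained variable
recovered = (np.tanh(w) + 1) / 2 * (clip_max - clip_min) + clip_min
print(np.allclose(recovered, imgs, atol=1e-2))       # True up to the .999999 squeeze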
Example #49
0
  def testComplexOps(self):
    for dtype in self.complex_types:

      self._assertOpOutputMatchesExpected(
          math_ops.acosh,
          np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
          expected=np.arccosh(
              np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.asinh,
          np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
          expected=np.arcsinh(
              np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.atanh,
          np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype),
          expected=np.arctanh(
              np.array([0.1, 0.2j, 0.3 - 0.1j, 0.4 + 0.5j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.cosh,
          np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype),
          expected=np.cosh(np.array([1j, 2 - 3j, 3, 4 + 2j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.sinh,
          np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
          expected=np.sinh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.exp,
          np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
          expected=np.exp(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.expm1,
          np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype),
          expected=np.expm1(np.array([[-1 + 2j, 3j, 2 - 3j]], dtype=dtype)),
          rtol=1e-6,
          atol=1e-6)

      self._assertOpOutputMatchesExpected(
          math_ops.reciprocal,
          np.array([[1, 2j, 2 + 3j]], dtype=dtype),
          expected=1.0 / np.array([[1, 2j, 2 + 3j]], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          math_ops.log,
          np.array([[5j, 3 - 2j]], dtype=dtype),
          expected=np.log(np.array([[5j, 3 - 2j]], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.sin,
          np.array([[5j, 3 - 2j]], dtype=dtype),
          expected=np.sin(np.array([[5j, 3 - 2j]], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.cos,
          np.array([[5j, 3 - 2j]], dtype=dtype),
          expected=np.cos(np.array([[5j, 3 - 2j]], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.log1p,
          np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype),
          expected=np.log1p(
              np.array([[1e-14, 1e-15j, 0.6 - 0.3j]], dtype=dtype)),
          rtol=1e-4,
          atol=1e-6)

      val = np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)
      self._assertOpOutputMatchesExpected(
          math_ops.rsqrt, val, expected=1 / np.sqrt(val))

      self._assertOpOutputMatchesExpected(
          math_ops.sigmoid, val, expected=1 / (1 + np.exp(-val)))

      self._assertOpOutputMatchesExpected(
          math_ops.sqrt, val, expected=np.sqrt(val))

      self._assertOpOutputMatchesExpected(
          math_ops.tanh,
          np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
          expected=np.tanh(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.tan,
          np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype),
          expected=np.tan(np.array([1, 2j, 2 - 3j, 4 + 5j], dtype=dtype)))

      ctypes = {np.complex64: np.float32}
      self._assertOpOutputMatchesExpected(
          math_ops.abs,
          np.array([[3 - 4j, -1j, np.inf]], dtype=dtype),
          expected=np.array([[5, 1, np.inf]], dtype=ctypes[dtype]))

      self._assertOpOutputMatchesExpected(
          math_ops.negative,
          np.array([[-1 + 2j, -3j]], dtype=dtype),
          expected=np.array([[1 - 2j, 3j]], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          math_ops.square,
          np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype),
          expected=np.array([[-2 - 3j, 3 + 4j, 5j]], dtype=dtype)**2)

      self._assertOpOutputMatchesExpected(
          array_ops.zeros_like,
          np.array([[4j, 3 - 2j], [2, -1j]], dtype=dtype),
          expected=np.array([[0, 0], [0, 0]], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          array_ops.ones_like,
          np.array([[-4j, 3 + 2j], [2, -1j]], dtype=dtype),
          expected=np.array([[1, 1], [1, 1]], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          math_ops.angle,
          np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
          expected=np.angle(np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype)))

      self._assertOpOutputMatchesExpected(
          math_ops.conj,
          np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
          expected=np.array([1 - 3j, -4 - 7j, 2.7, 3j], dtype=dtype))

      self._assertOpOutputMatchesExpected(
          math_ops.imag,
          np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
          expected=np.array([3, 7, 0, -3], dtype=ctypes[dtype]))

      self._assertOpOutputMatchesExpected(
          math_ops.real,
          np.array([1 + 3j, -4 + 7j, 2.7, -3j], dtype=dtype),
          expected=np.array([1, -4, 2.7, 0], dtype=ctypes[dtype]))
Example #50
0
    def check_loss_of_precision(self, dtype):
        """Check loss of precision in complex arc* functions"""

        # Check against known-good functions

        info = np.finfo(dtype)
        real_dtype = dtype(0.).real.dtype
        eps = info.eps

        def check(x, rtol):
            x = x.astype(real_dtype)

            z = x.astype(dtype)
            d = np.absolute(np.arcsinh(x) / np.arcsinh(z).real - 1)
            assert_(np.all(d < rtol),
                    (np.argmax(d), x[np.argmax(d)], d.max(), 'arcsinh'))

            z = (1j * x).astype(dtype)
            d = np.absolute(np.arcsinh(x) / np.arcsin(z).imag - 1)
            assert_(np.all(d < rtol),
                    (np.argmax(d), x[np.argmax(d)], d.max(), 'arcsin'))

            z = x.astype(dtype)
            d = np.absolute(np.arctanh(x) / np.arctanh(z).real - 1)
            assert_(np.all(d < rtol),
                    (np.argmax(d), x[np.argmax(d)], d.max(), 'arctanh'))

            z = (1j * x).astype(dtype)
            d = np.absolute(np.arctanh(x) / np.arctan(z).imag - 1)
            assert_(np.all(d < rtol),
                    (np.argmax(d), x[np.argmax(d)], d.max(), 'arctan'))

        # The switchover was chosen as 1e-3; hence there can be up to
        # ~eps/1e-3 of relative cancellation error before it

        x_series = np.logspace(-20, -3.001, 200)
        x_basic = np.logspace(-2.999, 0, 10, endpoint=False)

        if dtype is np.longcomplex:
            # It's not guaranteed that the system-provided arc functions
            # are accurate down to a few epsilons. (Eg. on Linux 64-bit)
            # So, give more leeway for long complex tests here:
            check(x_series, 50 * eps)
        else:
            check(x_series, 2 * eps)
        check(x_basic, 2 * eps / 1e-3)

        # Check a few points

        z = np.array([1e-5 * (1 + 1j)], dtype=dtype)
        p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
        d = np.absolute(1 - np.arctanh(z) / p)
        assert_(np.all(d < 1e-15))

        p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
        d = np.absolute(1 - np.arcsinh(z) / p)
        assert_(np.all(d < 1e-15))

        p = 9.999999999333333333e-6j + 1.000000000066666666e-5
        d = np.absolute(1 - np.arctan(z) / p)
        assert_(np.all(d < 1e-15))

        p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
        d = np.absolute(1 - np.arcsin(z) / p)
        assert_(np.all(d < 1e-15))

        # Check continuity across switchover points

        def check(func, z0, d=1):
            z0 = np.asarray(z0, dtype=dtype)
            zp = z0 + abs(z0) * d * eps * 2
            zm = z0 - abs(z0) * d * eps * 2
            assert_(np.all(zp != zm), (zp, zm))

            # NB: the cancellation error at the switchover is at least eps
            good = (abs(func(zp) - func(zm)) < 2 * eps)
            assert_(np.all(good), (func, z0[~good]))

        for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan):
            pts = [
                rp + 1j * ip for rp in (-1e-3, 0, 1e-3)
                for ip in (-1e-3, 0, 1e-3) if rp != 0 or ip != 0
            ]
            check(func, pts, 1)
            check(func, pts, 1j)
            check(func, pts, 1 + 1j)
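The known-good comparisons in the test above rely on the identities arcsin(i*x) = i*arcsinh(x) and arctan(i*x) = i*arctanh(x) for real x; a short direct check:

import numpy as np

# The identities behind the real-vs-complex comparisons above.
x = np.logspace(-10, -1, 5)
print(np.allclose(np.arcsin(1j * x).imag, np.arcsinh(x)))   # True
print(np.allclose(np.arctan(1j * x).imag, np.arctanh(x)))   # True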
Example #51
0
 def _fisherZ(self, pc):
     # Fisher z-transform of a correlation coefficient
     return (np.arctanh(pc))
Example #52
0
    def attack_batch(self, imgs, labs):
        """
        Run the attack on a batch of images and labels.
        """
        def compare(x_vl, y_1h):
            x_v = np.argmax(x_vl - self.CONFIDENCE * y_1h[None, :], axis=1)
            y = np.argmax(y_1h)
            if self.TARGETED:
                return np.all(x_v == y)
            else:
                return np.all(x_v != y)

        batch_size = self.batch_size

        # convert to tanh-space
        imgs = np.arctanh((imgs - 127.5) / 127.5001)

        # set the lower and upper bounds accordingly
        lower_bound = np.zeros(batch_size)
        CONST = np.ones(batch_size) * self.initial_const
        upper_bound = np.ones(batch_size) * 1e10

        # the best l2, score, and image attack
        o_bestl2 = [1e10] * batch_size
        o_bestscore = [False] * batch_size
        o_bestattack = [np.zeros(imgs[0].shape)] * batch_size

        for outer_step in range(self.BINARY_SEARCH_STEPS):
            # %%% print(o_bestl2)
            print(CONST)
            # completely reset adam's internal state.
            self.sess.run(self.init)
            batch = imgs[:batch_size]
            batchlab = labs[:batch_size]

            bestl2 = [1e10] * batch_size
            bestscore = [False] * batch_size

            # The last iteration (if we run many steps) repeats the search once.
            if self.repeat == True and outer_step == self.BINARY_SEARCH_STEPS - 1:
                CONST = upper_bound

            # set the variables so that we don't have to send them over again
            self.sess.run(
                self.setup, {
                    self.assign_timg: batch,
                    self.assign_tlab: batchlab,
                    self.assign_const: CONST
                })

            prev = 1e6
            for iteration in range(self.MAX_ITERATIONS):
                # perform the attack
                _, l, l2s, scores_nvl, nimg = self.sess.run([
                    self.train, self.loss, self.l2dist, self.output_nvl,
                    self.newimg
                ])

                # print out the losses every 1%
                if iteration % (self.MAX_ITERATIONS // 100) == 0:  # %%%
                    print(iteration,
                          self.sess.run((self.loss, self.loss1, self.loss2)))

                # check if we should abort search if we're getting nowhere.
                if self.ABORT_EARLY and iteration % (self.MAX_ITERATIONS //
                                                     10) == 0:
                    if l > prev * .9999:
                        print('aborting early at iteration', iteration)
                        break
                    prev = l

                # adjust the best result found so far
                for e, (l2, sc_vl, ii) in enumerate(zip(l2s, scores_nvl,
                                                        nimg)):
                    if l2 < bestl2[e] and compare(sc_vl, batchlab[e]):
                        bestl2[e] = l2
                        bestscore[e] = True
                    if l2 < o_bestl2[e] and compare(sc_vl, batchlab[e]):
                        o_bestl2[e] = l2
                        o_bestscore[e] = True
                        o_bestattack[e] = ii

            # adjust the constant as needed
            for e in range(batch_size):
                if bestscore[e]:
                    # success, divide const by two
                    upper_bound[e] = min(upper_bound[e], CONST[e])
                    if upper_bound[e] < 1e9:
                        CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
                else:
                    # failure, either multiply by 10 if no solution found yet
                    #          or do binary search with the known upper bound
                    lower_bound[e] = max(lower_bound[e], CONST[e])
                    if upper_bound[e] < 1e9:
                        CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
                    else:
                        CONST[e] *= 10

        # return the best solution found
        o_bestl2 = np.array(o_bestl2)
        return o_bestattack
Example #53
0
def CartToPartCoor3D(x, y, z):
    # Cartesian (x, y, z) -> particle coordinates (pt, eta, phi);
    # eta = arctanh(z / r) is the pseudorapidity of the direction (x, y, z)
    pt, phi = CartToRad2D(x, y)
    eta = np.arctanh(z / np.sqrt(z**2 + pt**2))
    return pt, eta, phi
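CartToPartCoor3D uses arctanh(z / r) for the pseudorapidity, which is equivalent to the more familiar eta = -ln(tan(theta/2)) with theta the polar angle; a quick numeric check (CartToRad2D itself is not defined in this snippet):

import numpy as np

# Pseudorapidity two ways: arctanh(z / r) vs -ln(tan(theta / 2)).
x, y, z = 1.0, 2.0, 3.0
r = np.sqrt(x**2 + y**2 + z**2)
theta = np.arccos(z / r)
print(np.isclose(np.arctanh(z / r), -np.log(np.tan(theta / 2))))   # True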
Example #54
0
    def attack_batch(self, imgs, labs):
        """
        Run the attack on a batch of images and labels.
        """

        def compare(x, y):
            if not isinstance(x, (float, int, np.int64)):
                x = np.copy(x)
                if self.TARGETED:
                    x[y] -= self.CONFIDENCE
                else:
                    x[y] += self.CONFIDENCE
                x = np.argmax(x)
            if self.TARGETED:
                return x == y
            else:
                return x != y

        batch_size = self.batch_size

        # convert to tanh-space
        imgs = np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999)

        # set the lower and upper bounds accordingly
        lower_bound = np.zeros(batch_size)
        CONST = np.ones(batch_size) * self.initial_const
        upper_bound = np.ones(batch_size) * 1e10

        # the best l2, score, and image attack
        o_bestl2 = [1e10] * batch_size
        o_bestscore = [-1] * batch_size
        o_bestattack = [np.zeros(imgs[0].shape)] * batch_size

        for outer_step in range(self.BINARY_SEARCH_STEPS):
            print(o_bestl2)
            # completely reset adam's internal state.
            self.sess.run(self.init)
            batch = imgs[:batch_size]
            batchlab = labs[:batch_size]

            bestl2 = [1e10] * batch_size
            bestscore = [-1] * batch_size

            # The last iteration (if we run many steps) repeats the search once.
            if self.repeat == True and outer_step == self.BINARY_SEARCH_STEPS - 1:
                CONST = upper_bound

            # set the variables so that we don't have to send them over again
            self.sess.run(self.setup, {self.assign_timg: batch,
                                       self.assign_tlab: batchlab,
                                       self.assign_const: CONST,
                                       self.assign_newimg: batch})

            prev = np.inf
            for iteration in range(self.MAX_ITERATIONS):
                # perform the attack
                _, l, l2s, scores, nimg = self.sess.run([self.train, self.loss,
                                                         self.l2dist, self.output,
                                                         self.newimg])

                if np.all(scores >= -.0001) and np.all(scores <= 1.0001):
                    if np.allclose(np.sum(scores, axis=1), 1.0, atol=1e-3):
                        if not self.I_KNOW_WHAT_I_AM_DOING_AND_WANT_TO_OVERRIDE_THE_PRESOFTMAX_CHECK:
                            raise Exception(
                                "The output of model.predict should return the pre-softmax layer. It looks like you are returning the probability vector (post-softmax). If you are sure you want to do that, set attack.I_KNOW_WHAT_I_AM_DOING_AND_WANT_TO_OVERRIDE_THE_PRESOFTMAX_CHECK = True")

                # print out the losses every 10%
                if iteration % (self.MAX_ITERATIONS // 10) == 0:
                    print(iteration, self.sess.run((self.loss, self.loss1, self.loss2)))

                # check if we should abort search if we're getting nowhere.
                if self.ABORT_EARLY and iteration % (self.MAX_ITERATIONS // 10) == 0:
                    if l > prev * .9999:
                        break
                    prev = l

                # adjust the best result found so far
                for e, (l2, sc, ii) in enumerate(zip(l2s, scores, nimg)):
                    if l2 < bestl2[e] and compare(sc, np.argmax(batchlab[e])):
                        bestl2[e] = l2
                        bestscore[e] = np.argmax(sc)
                    if l2 < o_bestl2[e] and compare(sc, np.argmax(batchlab[e])):
                        o_bestl2[e] = l2
                        o_bestscore[e] = np.argmax(sc)
                        o_bestattack[e] = ii

            # adjust the constant as needed
            for e in range(batch_size):
                if compare(bestscore[e], np.argmax(batchlab[e])) and bestscore[e] != -1:
                    # success, divide const by two
                    upper_bound[e] = min(upper_bound[e], CONST[e])
                    if upper_bound[e] < 1e9:
                        CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
                else:
                    # failure, either multiply by 10 if no solution found yet
                    #          or do binary search with the known upper bound
                    lower_bound[e] = max(lower_bound[e], CONST[e])
                    if upper_bound[e] < 1e9:
                        CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
                    else:
                        CONST[e] *= 10

        # return the best solution found
        o_bestl2 = np.array(o_bestl2)
        return o_bestattack
Example #55
0
def parasite_drag_fuselage(conditions, configuration, fuselage):
    """ SUAVE.Methods.parasite_drag_fuselage(conditions,configuration,fuselage)
        computes the parasite drag associated with a fuselage 
        
        Inputs:

        Outputs:

        Assumptions:

        
    """

    # unpack inputs
    form_factor = configuration.fuselage_parasite_drag_form_factor

    freestream = conditions.freestream

    Sref = fuselage.areas.front_projected
    Swet = fuselage.areas.wetted

    l_fus = fuselage.lengths.cabin
    d_fus = fuselage.width
    l_nose = fuselage.lengths.nose
    l_tail = fuselage.lengths.tail

    # conditions
    Mc = freestream.mach_number
    roc = freestream.density
    muc = freestream.viscosity
    Tc = freestream.temperature
    pc = freestream.pressure

    # reynolds number
    V = Mc * compute_speed_of_sound(Tc, pc)
    Re_fus = roc * V * l_fus / muc

    # skin friction coefficient
    cf_fus, k_comp, k_reyn = compressible_turbulent_flat_plate(Re_fus, Mc, Tc)

    # form factor for cylindrical bodies
    d_d = float(d_fus) / float(l_fus)
    D = np.array([[0.0]] * len(Mc))
    a = np.array([[0.0]] * len(Mc))
    du_max_u = np.array([[0.0]] * len(Mc))
    k_fus = np.array([[0.0]] * len(Mc))

    D[Mc < 0.95] = np.sqrt(1 - (1 - Mc[Mc < 0.95]**2) * d_d**2)
    a[Mc < 0.95] = 2 * (1 - Mc[Mc < 0.95]**2) * (d_d**2) * (
        np.arctanh(D[Mc < 0.95]) - D[Mc < 0.95]) / (D[Mc < 0.95]**3)
    du_max_u[Mc < 0.95] = a[Mc < 0.95] / ((2 - a[Mc < 0.95]) *
                                          (1 - Mc[Mc < 0.95]**2)**0.5)

    D[Mc >= 0.95] = np.sqrt(1 - d_d**2)
    a[Mc >= 0.95] = 2 * (d_d**2) * (np.arctanh(D[Mc >= 0.95]) -
                                    D[Mc >= 0.95]) / (D[Mc >= 0.95]**3)
    du_max_u[Mc >= 0.95] = a[Mc >= 0.95] / ((2 - a[Mc >= 0.95]))

    k_fus = (1 + form_factor * du_max_u)**2

    #for i in range(len(Mc)):
    #if Mc[i] < 0.95:
    #D[i] = np.sqrt(1 - (1-Mc[i]**2) * d_d**2)
    #a[i]        = 2 * (1-Mc[i]**2) * (d_d**2) *(np.arctanh(D[i])-D[i]) / (D[i]**3)
    #du_max_u[i] = a[i] / ( (2-a[i]) * (1-Mc[i]**2)**0.5 )
    #else:
    #D[i] = np.sqrt(1 - d_d**2)
    #a[i]        = 2  * (d_d**2) *(np.arctanh(D[i])-D[i]) / (D[i]**3)
    #du_max_u[i] = a[i] / ( (2-a[i]) )
    #k_fus[i]    = (1 + cf_fus[i]*du_max_u[i])**2

    # --------------------------------------------------------
    # find the final result

    fuselage_parasite_drag = k_fus * cf_fus * Swet / Sref
    # --------------------------------------------------------

    # dump data to conditions
    fuselage_result = Result(
        wetted_area=Swet,
        reference_area=Sref,
        parasite_drag_coefficient=fuselage_parasite_drag,
        skin_friction_coefficient=cf_fus,
        compressibility_factor=k_comp,
        reynolds_factor=k_reyn,
        form_factor=k_fus,
    )
    try:
        conditions.aerodynamics.drag_breakdown.parasite[
            fuselage.tag] = fuselage_result
    except:
        print("Drag Polar Mode fuse parasite")

    return fuselage_parasite_drag
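The D / a / du_max_u expressions above implement a compressible form-factor correction for a slender body; plugging in sample numbers (the Mach number, fineness ratio and form_factor below are arbitrary illustrations, not SUAVE defaults) gives the expected slightly-above-one factor:

import numpy as np

# Sample evaluation of the subsonic branch; all numbers are assumptions.
Mc, d_d, form_factor = 0.4, 0.1, 1.1
D = np.sqrt(1 - (1 - Mc**2) * d_d**2)
a = 2 * (1 - Mc**2) * d_d**2 * (np.arctanh(D) - D) / D**3
du_max_u = a / ((2 - a) * np.sqrt(1 - Mc**2))
k_fus = (1 + form_factor * du_max_u)**2
print(round(float(k_fus), 3))   # roughly 1.04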
Example #56
0
        np.argmin(_convert_to_tensor(input),
                  axis=0 if axis is None else int(axis)).astype(
                      utils.numpy_dtype(output_type))))

asin = utils.copy_docstring('tf.math.asin', lambda x, name=None: np.arcsin(x))

asinh = utils.copy_docstring('tf.math.asinh',
                             lambda x, name=None: np.arcsinh(x))

atan = utils.copy_docstring('tf.math.atan', lambda x, name=None: np.arctan(x))

atan2 = utils.copy_docstring('tf.math.atan2',
                             lambda y, x, name=None: np.arctan2(y, x))

atanh = utils.copy_docstring('tf.math.atanh',
                             lambda x, name=None: np.arctanh(x))

bessel_i0 = utils.copy_docstring('tf.math.bessel_i0',
                                 lambda x, name=None: scipy_special.i0(x))

bessel_i0e = utils.copy_docstring('tf.math.bessel_i0e',
                                  lambda x, name=None: scipy_special.i0e(x))

bessel_i1 = utils.copy_docstring('tf.math.bessel_i1',
                                 lambda x, name=None: scipy_special.i1(x))

bessel_i1e = utils.copy_docstring('tf.math.bessel_i1e',
                                  lambda x, name=None: scipy_special.i1e(x))

betainc = utils.copy_docstring(
    'tf.math.betainc',
Example #57
0
    def generate(self, x_val, **kwargs):
        """
        Generate adversarial samples and return them in a Numpy array.
        :param x_val:
        :param y_val: If self.targeted is true, then y_val represents the target labels. If self.targeted is false,
                      then targets are the original class labels.
        :return: A Numpy array holding the adversarial examples.
        """

        # Parse and save attack-specific parameters
        params_cpy = dict(kwargs)
        y_val = params_cpy.pop('y_val', None)

        # Assert that, if attack is targeted, y_val is provided:
        assert not (self.targeted and y_val is None)

        # No labels provided, use model prediction as correct class
        if y_val is None:
            y_val = self.sess.run(
                tf.argmax(self.classifier.model(self._x), axis=1),
                {self._x: x_val})
            y_val = to_categorical(
                y_val,
                self.classifier.model.get_output_shape_at(-1)[-1])

        # images to be attacked:
        x_adv = x_val.copy()

        # transform images to tanh space:
        x_adv = np.clip(x_adv, self.clip_min, self.clip_max)
        x_adv = (x_adv - self.clip_min) / (self.clip_max - self.clip_min)
        x_adv = np.arctanh(((x_adv * 2) - 1) * self._tanh_smoother)

        # Progress bar
        progress_bar = Progbar(target=len(x_val), verbose=self.verbose)

        for j, (x, target) in enumerate(zip(x_adv, y_val)):

            # Assign the external inputs to the loss function:
            self.sess.run(self._assign_image_tanh, {self._image_tanh: x})
            self.sess.run(self._assign_target, {self._target: target})

            # Initialize perturbation in tanh space:
            self.sess.run(self._init_perturbation_tanh)

            # Initialize binary search:
            c = self.initial_const
            c_lower_bound = 0
            c_double = True

            # Initialize placeholders for best l2 distance and attack found so far
            best_l2dist = sys.float_info.max
            best_adv_image = x

            for _ in range(self.binary_search_steps):

                attack_success = False
                loss_prev = sys.float_info.max

                # Initialize the optimizer:
                self.sess.run(self._init_optimizer)

                # Assign constant c of the loss function:
                self.sess.run(self._assign_c, {self._c: np.array([c])})

                for _ in range(self.max_iterations):
                    # perform one update of the optimizer:
                    _ = self.sess.run(self._minimize_loss)

                    # collect current loss and l2 distance:
                    loss, l2dist = self.sess.run([self._loss, self._l2dist])

                    # check whether last attack was successful:
                    # attack success criterion: first term of the loss function is <= 0
                    last_attack_success = loss[0] - l2dist <= 0
                    attack_success = attack_success or last_attack_success

                    if last_attack_success and l2dist < best_l2dist:
                        best_l2dist = l2dist
                        best_adv_image = self.sess.run(self._adv_image)

                    # check simple stopping criterion:
                    if loss[0] > loss_prev:
                        break
                    loss_prev = loss[0]

                # update binary search:
                if attack_success:
                    c_double = False
                    c = (c_lower_bound + c) / 2
                else:
                    c_old = c
                    if c_double:
                        c = 2 * c
                    else:
                        c = c + (c - c_lower_bound) / 2
                    c_lower_bound = c_old

                # Abort binary search if c exceeds upper bound:
                if c > self._c_upper_bound:
                    break

            x_adv[j] = best_adv_image
            progress_bar.update(current=j,
                                values=[
                                    ("perturbation",
                                     abs(np.linalg.norm(x_adv[j] - x_val[j])))
                                ])

        return x_adv
Example #58
0
def futhark_atanh64(x):
    return np.arctanh(x)
Example #59
0
    def attack_batch(self, imgs, labs):
        """
        Run the attack on a batch of images and labels.
        """
        def compare(x, y):
            if not isinstance(x, (float, int, np.int64)):
                x = np.copy(x)
                if self.TARGETED:
                    x[y] -= self.CONFIDENCE
                else:
                    x[y] += self.CONFIDENCE
                x = np.argmax(x)
            if self.TARGETED:
                return x == y
            else:
                return x != y

        batch_size = self.batch_size

        # convert to tanh-space
        imgs = np.arctanh((imgs - self.boxplus) / self.boxmul * 0.999999)

        # set the lower and upper bounds accordingly
        lower_bound = np.zeros(batch_size)
        CONST = np.ones(batch_size) * self.initial_const
        CONST2 = np.ones(batch_size) * self.initial_const
        upper_bound = np.ones(batch_size) * 1e10

        # the best l2, score, and image attack
        o_bestl2 = [1e10] * batch_size
        o_bestscore = [-1] * batch_size
        o_bestattack = [np.zeros(imgs[0].shape)] * batch_size

        for outer_step in range(self.BINARY_SEARCH_STEPS):
            print(o_bestl2)
            # completely reset adam's internal state.
            self.sess.run(self.init)
            batch = imgs[:batch_size]
            batchlab = labs[:batch_size]

            bestl2 = [1e10] * batch_size
            bestscore = [-1] * batch_size

            # The last iteration (if we run many steps) repeats the search once.
            if self.repeat == True and outer_step == self.BINARY_SEARCH_STEPS - 1:
                CONST = upper_bound

            # set the variables so that we don't have to send them over again
            self.sess.run(
                self.setup, {
                    self.assign_timg: batch,
                    self.assign_tlab: batchlab,
                    self.assign_const: CONST,
                    self.assign_const2: CONST2 * 1000
                })

            prev = 1e6
            for iteration in range(self.MAX_ITERATIONS):

                # perform the attack
                _, l, l2s, scores, det, nimg = self.sess.run([
                    self.train, self.loss, self.l2dist, self.output,
                    self.detected, self.newimg
                ])

                # print out the losses every 10%
                if iteration % (self.MAX_ITERATIONS // 10) == 0:
                    print(
                        iteration,
                        self.sess.run(
                            (self.loss, self.loss1, self.loss2, self.loss3)))

                # check if we should abort search if we're getting nowhere.
                if self.ABORT_EARLY and iteration % (self.MAX_ITERATIONS //
                                                     10) == 0:
                    if l > prev * .9999:
                        break
                    prev = l

                #print('detection rate',det)
                # adjust the best result found so far
                if type(det) == np.float32:
                    det = [0] * batch_size
                for e, (l2, sc, ii,
                        detected) in enumerate(zip(l2s, scores, nimg, det)):
                    #print('have scores',sc)
                    if l2 < bestl2[e] and all(
                        [compare(scc, np.argmax(batchlab[e]))
                         for scc in sc]) and detected == 0:
                        bestl2[e] = l2
                        bestscore[e] = np.argmax(sc)
                    if l2 < o_bestl2[e] and all(
                        [compare(scc, np.argmax(batchlab[e]))
                         for scc in sc]) and detected == 0:
                        o_bestl2[e] = l2
                        o_bestscore[e] = np.argmax(sc)
                        o_bestattack[e] = ii

            # adjust the constant as needed
            for e in range(batch_size):
                if compare(bestscore[e], np.argmax(
                        batchlab[e])) and bestscore[e] != -1:
                    # success, divide const by two
                    upper_bound[e] = min(upper_bound[e], CONST[e])
                    if upper_bound[e] < 1e9:
                        CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
                else:
                    # failure, either multiply by 10 if no solution found yet
                    #          or do binary search with the known upper bound
                    lower_bound[e] = max(lower_bound[e], CONST[e])
                    if upper_bound[e] < 1e9:
                        CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
                    else:
                        CONST[e] *= 10

        # return the best solution found
        o_bestl2 = np.array(o_bestl2)
        return o_bestattack
Example #60
0
def gamma_approx():
    gamma_approx = 1
    for k in range(2, 20):
        gamma_approx += 1 / k + np.log(1 - 1 / k)
    gamma_approx += 38 * np.arctanh(1 / 39) - 1 + euler_maclaurin_terms(20)
    return gamma_approx
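gamma_approx above builds on the identity H_n - ln(n) = 1 + sum_{k=2..n} (1/k + ln(1 - 1/k)), whose limit as n grows is the Euler-Mascheroni constant; the remaining arctanh term and euler_maclaurin_terms(20) (the helper is not shown here) correct for truncating the sum at k = 19. A plain partial-sum check against np.euler_gamma:

import numpy as np

# Slowly-converging partial sum of gamma = 1 + sum_{k>=2} (1/k + ln(1 - 1/k));
# illustration only, independent of the euler_maclaurin_terms helper above.
k = np.arange(2, 200000)
print(1.0 + np.sum(1.0 / k + np.log1p(-1.0 / k)), np.euler_gamma)   # both ~0.57722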