Example no. 1
def align_magnetism(m, vectors):
    """ Rotates a matrix, to align its components with the direction
  of the magnetism """
    if len(m) != 2 * len(vectors):  # stop if they don't have
        # compatible dimensions
        raise ValueError("m and vectors have incompatible dimensions")
    # pauli matrices
    from scipy.sparse import csc_matrix, bmat

    sx = csc_matrix([[0.0, 1.0], [1.0, 0.0]])
    sy = csc_matrix([[0.0, -1j], [1j, 0.0]])
    sz = csc_matrix([[1.0, 0.0], [0.0, -1.0]])
    n = len(m) // 2  # number of sites (integer division)
    R = [[None for i in range(n)] for j in range(n)]  # rotation matrix
    from scipy.linalg import expm  # exponenciate matrix

    for (i, v) in zip(range(n), vectors):  # loop over sites
        vv = np.sqrt(v.dot(v))  # norm of v
        if vv > 0.000001:  # if nonzero scale
            u = v / vv
        else:  # if zero put to zero
            u = np.array([0.0, 0.0, 0.0])
        #    rot = u[0]*sx + u[1]*sy + u[2]*sz
        uxy = np.sqrt(u[0] ** 2 + u[1] ** 2)  # component in xy plane
        phi = np.arctan2(u[1], u[0])
        theta = np.arctan2(uxy, u[2])
        r1 = phi * sz / 2.0  # rotate along z
        r2 = theta * sy / 2.0  # rotate along y
        # a factor 2 is taken out due to 1/2 of S
        rot = expm(1j * r2) * expm(1j * r1)
        R[i][i] = rot  # save term
    R = bmat(R)  # convert to full sparse matrix
    mout = R * csc_matrix(m) * R.H  # rotate matrix
    return mout.todense()  # return dense matrix
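The key step above is the per-site SU(2) rotation expm(1j*theta*sy/2) * expm(1j*phi*sz/2) built from the polar angles of the magnetization direction. A minimal dense sketch for a single site (NumPy/SciPy only; the direction below is made up):

import numpy as np
from scipy.linalg import expm

sy = np.array([[0.0, -1j], [1j, 0.0]])   # Pauli y
sz = np.array([[1.0, 0.0], [0.0, -1.0]])  # Pauli z

v = np.array([1.0, 0.0, 1.0])             # example magnetization direction
u = v / np.sqrt(v.dot(v))                  # unit vector
phi = np.arctan2(u[1], u[0])               # azimuthal angle
theta = np.arctan2(np.sqrt(u[0]**2 + u[1]**2), u[2])  # polar angle
rot = expm(1j * theta * sy / 2.0) @ expm(1j * phi * sz / 2.0)
print(np.round(rot, 3))                    # 2x2 unitary acting on one spinor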
Example no. 2
def correlation_matrix_quadrature(a1, a2, rho=None):
    """
    Calculate the quadrature correlation matrix with given field operators
    :math:`a_1` and :math:`a_2`. If a density matrix is given the expectation
    values are calculated, otherwise a matrix with operators is returned.

    Parameters
    ----------

    a1 : :class:`qutip.qobj.Qobj`
        Field operator for mode 1.

    a2 : :class:`qutip.qobj.Qobj`
        Field operator for mode 2.

    rho : :class:`qutip.qobj.Qobj`
        Density matrix for which to calculate the covariance matrix.

    Returns
    -------

    corr_mat: *array* of complex numbers or :class:`qutip.qobj.Qobj`
        A 2-dimensional *array* of covariance values for the field quadratures,
        or, if rho=None, a matrix of operators.

    """
    x1 = (a1 + a1.dag()) / np.sqrt(2)
    p1 = -1j * (a1 - a1.dag()) / np.sqrt(2)
    x2 = (a2 + a2.dag()) / np.sqrt(2)
    p2 = -1j * (a2 - a2.dag()) / np.sqrt(2)

    basis = [x1, p1, x2, p2]

    return correlation_matrix(basis, rho)
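Per the docstring, when a density matrix is given the expectation values are calculated over the quadrature basis [x1, p1, x2, p2]. A usage sketch that computes the same table of expectation values directly with QuTiP primitives (the Fock cutoff N and the vacuum state are made-up inputs):

import numpy as np
from qutip import destroy, qeye, tensor, basis, ket2dm, expect

N = 10                                           # Fock-space cutoff per mode (made up)
a1 = tensor(destroy(N), qeye(N))                 # annihilation operator, mode 1
a2 = tensor(qeye(N), destroy(N))                 # annihilation operator, mode 2
rho = ket2dm(tensor(basis(N, 0), basis(N, 0)))   # two-mode vacuum

x1 = (a1 + a1.dag()) / np.sqrt(2)                # same quadratures as above
p1 = -1j * (a1 - a1.dag()) / np.sqrt(2)
x2 = (a2 + a2.dag()) / np.sqrt(2)
p2 = -1j * (a2 - a2.dag()) / np.sqrt(2)
ops = [x1, p1, x2, p2]

corr = np.array([[expect(A * B, rho) for B in ops] for A in ops])
print(np.round(corr, 2))                         # vacuum: 0.5 on the diagonal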
Example no. 3
 def affine_grid(self,Hz,rhoz,Lam):
     """
     Get data on regular spatial grid
     """
     #First find dimensionless density params
     Om0 = 8*pi*rhoz[0]/(3*Hz[0]**2)
     OL0 = Lam/(3*Hz[0]**2)
     Ok0 = 1-Om0-OL0
     #Get t0
     t0 = self.get_age(Om0,Ok0,OL0,Hz[0])
     #Set affine parameter vals        
     dvo = uvs(self.z,1/(self.uz**2*Hz),k=3,s=0.0)
     vzo = dvo.antiderivative()
     vz = vzo(self.z)
     vz[0] = 0.0
     #Compute grid sizes that give numerical error of err
     NJ = int(ceil(vz[-1]/sqrt(self.err) + 1))
     NI = int(ceil(3.0*(NJ - 1)*(t0 - self.tmin)/vz[-1] + 1))
     #Get functions on regular grid
     v = linspace(0,vz[-1],NJ)
     delv = (v[-1] - v[0])/(NJ-1)
     if delv > sqrt(self.err):
         print('delv > sqrt(err)')
     Ho = uvs(vz,Hz,s=0.0,k=3)
     H = Ho(v)
     rhoo = uvs(vz,rhoz,s=0.0,k=3)
     rho = rhoo(v)
     uo = uvs(vz,self.uz,s=0.0,k=3)
     u = uo(v)
     u[0] = 1.0
     return v,vzo,H,rho,u,NJ,NI,delv,Om0,OL0,Ok0,t0
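The first three lines above are the Friedmann closure relation in geometrised units (G = c = 1): Om0 = 8*pi*rho0/(3*H0^2), OL0 = Lam/(3*H0^2), Ok0 = 1 - Om0 - OL0. A tiny self-contained sketch with made-up sample values:

from numpy import pi

H0, rho0, Lam = 0.23, 2.0e-3, 1.0e-2   # hypothetical sample values
Om0 = 8 * pi * rho0 / (3 * H0**2)       # matter density parameter
OL0 = Lam / (3 * H0**2)                 # cosmological-constant parameter
Ok0 = 1 - Om0 - OL0                     # curvature parameter (closure)
print(Om0, OL0, Ok0)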
Example no. 4
    def get_tracedata(self, format = 'AmpPha', single=False):
        '''
        Get the data of the current trace

        Input:
            format (string) : 'AmpPha': Amp in dB and Phase, 'RealImag',

        Output:
            'AmpPha': Amplitude and Phase
        '''
        #data = self._visainstrument.ask_for_values(':FORMAT REAL,32;*CLS;CALC1:DATA:NSW? SDAT,1;*OPC',format=1)      
        data = self._visainstrument.ask_for_values('FORM:DATA REAL; FORM:BORD SWAPPED; CALC%i:SEL:DATA:SDAT?'%(self._ci), format = visa.double)      
        data_size = numpy.size(data)
        datareal = numpy.array(data[0:data_size:2])
        dataimag = numpy.array(data[1:data_size:2])
          
        if format.upper() == 'REALIMAG':
          if self._zerospan:
            return numpy.mean(datareal), numpy.mean(dataimag)
          else:
            return datareal, dataimag
        elif format.upper() == 'AMPPHA':
          if self._zerospan:
            datareal = numpy.mean(datareal)
            dataimag = numpy.mean(dataimag)
            dataamp = numpy.sqrt(datareal*datareal+dataimag*dataimag)
            datapha = numpy.arctan2(dataimag, datareal)
            return dataamp, datapha
          else:
            dataamp = numpy.sqrt(datareal*datareal+dataimag*dataimag)
            datapha = numpy.arctan2(dataimag,datareal)
            return dataamp, datapha
        else:
          raise ValueError('get_tracedata(): Format must be AmpPha or RealImag') 
Example no. 5
 def __init__(self, class_dim, word_dim, hidden_dim, sen_len, batch_size, truncate=-1):
     # Assign instance variables
     self.class_dim = class_dim
     self.word_dim = word_dim
     self.hidden_dim = hidden_dim
     self.sen_len = sen_len
     self.batch_size = batch_size
     self.truncate = truncate
     params = {}
     # Initialize the network parameters
     params["E"] = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (word_dim, hidden_dim))          #Ebdding Matirx
     params["W"] = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (4, hidden_dim, hidden_dim * 4)) #W[0-1].dot(x), W[2-3].(i,f,o,c)
     params["B"] = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (2, hidden_dim * 4))             #B[0-1] for W[0-1]
     params["lrW"] = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (2, hidden_dim, class_dim))         #LR W and b
     params["lrb"] = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (class_dim))
     
     # Assign parameter names
     self.param_names = {"orign":["E", "W", "B", "lrW", "lrb"], 
                        "cache":["mE", "mW", "mB", "mlrW", "mlrb"]}
     # Theano: Created shared variables
     self.params = {}
     # Model's shared variables
     for _n in self.param_names["orign"]:
         self.params[_n] = theano.shared(value=params[_n].astype(theano.config.floatX), name=_n)
     # Shared variables for RMSProp
     for _n in self.param_names["cache"]:
         self.params[_n] = theano.shared(value=np.zeros(params[_n[1:]].shape).astype(theano.config.floatX), name=_n)
     # Build model graph
     self.__theano_build__()
Example no. 6
    def __init__(self, x, y, in_size, out_size, prefix='lr_'):

        self.W = theano.shared(
            value=np.random.uniform(
                low=-np.sqrt(6. / (in_size + out_size)),
                high=np.sqrt(6. / (in_size + out_size)),
                size=(in_size, out_size)
            ).astype(theano.config.floatX),
            name='W',
            borrow=True
        )

        self.b = theano.shared(
            value=np.random.uniform(
                low=-np.sqrt(6. / (in_size + out_size)),
                high=np.sqrt(6. / (in_size + out_size)),
                size=(out_size,)
            ).astype(theano.config.floatX),
            name='b',
            borrow=True
        )

        self.y_given_x = T.nnet.softmax(T.dot(x, self.W) + self.b)

        self.y_d = T.argmax(self.y_given_x, axis=1)

        self.loss = -T.mean(T.log(self.y_given_x)[T.arange(y.shape[0]), y])

        self.error = T.mean(T.neq(self.y_d, y))

        self.params = {prefix+'W': self.W, prefix+'b': self.b}
Example no. 7
    def __init__(self, n_in, n_out, W_init=None, b_init=None,
                 activation=T.tanh):
        self.activation = activation
        if W_init is None:
            rng = numpy.random.RandomState(1234)
            W_values = numpy.asarray(rng.uniform(
                    low=-numpy.sqrt(6. / (n_in + n_out)),
                    high=numpy.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype=theano.config.floatX
            )
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4

            W_init = theano.shared(value=W_values, name='W', borrow=True)

        if b_init is None:
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
            b_init = theano.shared(value=b_values, name='b', borrow=True)

        self.W = W_init
        self.b = b_init
        # parameters of the model
        self.params = [self.W, self.b]
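Examples 6, 7 and 23 all use the same "Glorot/Xavier" uniform initialisation, W ~ U[-sqrt(6/(n_in+n_out)), +sqrt(6/(n_in+n_out))], with the range widened by a factor 4 for sigmoid units. A framework-free NumPy sketch of that rule (function name and sizes below are illustrative only):

import numpy as np

def glorot_uniform(n_in, n_out, sigmoid=False, seed=1234):
    """Uniform init keeping activation/gradient variance roughly constant across layers."""
    rng = np.random.RandomState(seed)
    limit = np.sqrt(6.0 / (n_in + n_out))
    W = rng.uniform(-limit, limit, size=(n_in, n_out))
    return 4.0 * W if sigmoid else W   # sigmoid layers use a 4x wider range

W = glorot_uniform(784, 256)
print(W.shape, W.min(), W.max())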
Example no. 8
    def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False): #pylint: disable=W0613
        self.pdtype = make_pdtype(ac_space)
        with tf.variable_scope("model", reuse=reuse):
            X, processed_x = observation_input(ob_space, nbatch)
            activ = tf.tanh
            processed_x = tf.layers.flatten(processed_x)
            pi_h1 = activ(fc(processed_x, 'pi_fc1', nh=64, init_scale=np.sqrt(2)))
            pi_h2 = activ(fc(pi_h1, 'pi_fc2', nh=64, init_scale=np.sqrt(2)))
            vf_h1 = activ(fc(processed_x, 'vf_fc1', nh=64, init_scale=np.sqrt(2)))
            vf_h2 = activ(fc(vf_h1, 'vf_fc2', nh=64, init_scale=np.sqrt(2)))
            vf = fc(vf_h2, 'vf', 1)[:,0]

            self.pd, self.pi = self.pdtype.pdfromlatent(pi_h2, init_scale=0.01)


        a0 = self.pd.sample()
        neglogp0 = self.pd.neglogp(a0)
        self.initial_state = None

        def step(ob, *_args, **_kwargs):
            a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
            return a, v, self.initial_state, neglogp

        def value(ob, *_args, **_kwargs):
            return sess.run(vf, {X:ob})

        self.X = X
        self.vf = vf
        self.step = step
        self.value = value
Example no. 9
def fix_poly(polygon):
    ret = np.array([ [0,0],[0,0],[0,0],[0,0] ],np.float32)
    min_ = np.sqrt(polygon[0][0][0]**2 + polygon[0][0][1]**2)
    minc = 0
    for i in range(1,4):
        if np.sqrt(polygon[i][0][0]**2 + polygon[i][0][1]**2) < min_:
            min_ = np.sqrt(polygon[i][0][0]**2 + polygon[i][0][1]**2)
            minc = i

    #found top left vertex, rotate until it's on the top left
    for i in range(minc):
        polygon = np.roll(polygon,-1,axis=0)

    #if needed, "invert" the order.
    dist1 = dist_line(polygon[0],polygon[2],polygon[1])
    dist3 = dist_line(polygon[0],polygon[2],polygon[3])
    if dist3 > dist1:
        x = polygon[3][0][0]
        y = polygon[3][0][1]
        polygon[3][0][0] = polygon[1][0][0]
        polygon[3][0][1] = polygon[1][0][1]
        polygon[1][0][0] = x
        polygon[1][0][1] = y
    ret[0] = polygon[0][0]
    ret[1] = polygon[1][0]
    ret[2] = polygon[2][0]
    ret[3] = polygon[3][0]
    return ret
Example no. 10
def discrepancy(observed, simulated, expected):
    """Calculates Freeman-Tukey statistics (Freeman and Tukey 1950) as
    a measure of discrepancy between observed and r replicates of simulated data. This
    is a convenient method for assessing goodness-of-fit (see Brooks et al. 2000).
    
    D(x|\theta) = \sum_j (\sqrt{x_j} - \sqrt{e_j})^2
    
    :Parameters:
      observed : Iterable of observed values (length n)
      simulated : Iterable of simulated values (length rxn)
      expected : Iterable of expected values (length rxn)
    
    :Returns:
      D_obs : Discrepancy of observed values
      D_sim : Discrepancy of simulated values
    
    """
    try:
        simulated = simulated.astype(float)
    except AttributeError:
        simulated = simulated.trace().astype(float)
    try:
        expected = expected.astype(float)
    except AttributeError:
        expected = expected.trace().astype(float)
    
    D_obs = np.sum([(np.sqrt(observed)-np.sqrt(e))**2 for e in expected], 1)
    D_sim = np.sum([(np.sqrt(s)-np.sqrt(e))**2 for s,e in zip(simulated, expected)], 1)
    
    # Print p-value
    count = sum(s>o for o,s in zip(D_obs,D_sim))
    print_('Bayesian p-value: p=%.3f' % (1.*count/len(D_obs)))
    
    return D_obs, D_sim
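The statistic in the docstring, D = sum_j (sqrt(x_j) - sqrt(e_j))^2, is simple to compute on its own. A minimal numeric sketch with made-up counts (plain NumPy, no MCMC trace handling):

import numpy as np

observed = np.array([12, 7, 30, 5])
expected = np.array([10.0, 9.0, 28.0, 6.0])
D = np.sum((np.sqrt(observed) - np.sqrt(expected))**2)
print(D)   # single discrepancy value; smaller means a better fit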
Example no. 11
 def GetCorV(self, inAmpField):
     lplcCo=inAmpField.lplcCo
     try:
         self.CorV=1./ np.sqrt(1./(self.AppV**2) - lplcCo)
     except:
         self.CorV=1./ np.sqrt(1./(self.AppV[1:-1, 1:-1]**2) - lplcCo)
     return
Example no. 12
def get_nxnyr_cd():
    box = cfg.pms['shape']
    boxMpc = np.array([cfg.pms['xyMpc'],cfg.pms['xyMpc'],cfg.pms['zMpc']])
    lx = boxMpc[0]/2.; ly = boxMpc[1]/2.; lz = boxMpc[2]
    z,d = get_z_d(cfg.pms['zi'],cfg.pms['zf'])
    
    # front of box -- don't use bc grid will extend
    #                 outside the standard box
    # nx_max = lx / np.sqrt(lx*lx+d[0]*d[0]) # nx_min = - nx_max
    # ny_max = ly / np.sqrt(ly*ly+d[0]*d[0]) # ny_min = - ny_max
    # r_max = np.sqrt(lx*lx+ly*ly+(d[0]+lz)*(d[0]+lz)) # r_min = d[0]

    # back of box -- throws away half the box but whatever
    df = d[0]+lz
    nx_max = lx / np.sqrt(lx*lx+df*df) # nx_min = - nx_max
    ny_max = ly / np.sqrt(ly*ly+df*df) # ny_min = - ny_max
    r_max = np.sqrt(lx*lx+ly*ly+df*df) # r_min = d[0]

    print(nx_max, ny_max)

    nxcd = np.linspace(-nx_max,nx_max,box[0])
    nycd = np.linspace(-ny_max,ny_max,box[1])
    print(2*nx_max/box[0], 2*ny_max/box[1])
    rcd = np.linspace(d[0],r_max,box[2])
    return nxcd,nycd,rcd
Example no. 13
def logarithmic_negativity(V):
    """
    Calculate the logarithmic negativity given the symmetrized covariance
    matrix, see :func:`qutip.continous_variables.covariance_matrix`. Note that
    the two-mode field state that is described by `V` must be Gaussian for this
    function to be applicable.

    Parameters
    ----------

    V : *2d array*
        The covariance matrix.

    Returns
    -------

    N: *float*, the logarithmic negativity for the two-mode Gaussian state
    that is described by the Wigner covariance matrix V.

    """

    A = V[0:2, 0:2]
    B = V[2:4, 2:4]
    C = V[0:2, 2:4]

    sigma = np.linalg.det(A) + np.linalg.det(B) - 2 * np.linalg.det(C)
    nu_ = sigma / 2 - np.sqrt(sigma ** 2 - 4 * np.linalg.det(V)) / 2
    if nu_ < 0.0:
        return 0.0
    nu = np.sqrt(nu_)
    lognu = -np.log(2 * nu)
    logneg = max(0, lognu)

    return logneg
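A quick consistency sketch: for a two-mode squeezed vacuum with squeezing r, written in the convention where the vacuum quadrature variance is 1/2 (matching x = (a + a†)/sqrt(2) used in the quadrature example above), the function should return a logarithmic negativity of 2*r. The value of r below is made up:

import numpy as np

r = 0.7
c, s = np.cosh(2 * r), np.sinh(2 * r)
Z = np.diag([1.0, -1.0])
I = np.eye(2)
V = 0.5 * np.block([[c * I, s * Z],
                    [s * Z, c * I]])
print(logarithmic_negativity(V), 2 * r)   # both ~1.4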
Example no. 14
def screen_potential(r, v, charge):
    """Split long-range potential into short-ranged contributions.

    The potential v is a long-ranged potential with the asymptotic form Z/r
    corresponding to the given charge.
    
    Return a potential vscreened and charge distribution rhocomp such that

      v(r) = vscreened(r) + vHartree[rhocomp](r).

    The returned quantities are truncated to a reasonable cutoff radius.
    """
    vr = v * r + charge
    
    err = 0.0
    i = len(vr)
    while err < 1e-5:
        # Things can be a bit sensitive to the threshold.  The O.pz-mt setup
        # gets 20-30 Bohr long compensation charges if it's 1e-6.
        i -= 1
        err = abs(vr[i])
    i += 1
    
    icut = np.searchsorted(r, r[i] * 1.1)
    rcut = r[icut]
    rshort = r[:icut]

    a = rcut / 5.0 # XXX why is this so important?
    vcomp = charge * erf(rshort / (np.sqrt(2.0) * a)) / rshort
    # XXX divide by r
    rhocomp = charge * (np.sqrt(2.0 * np.pi) * a)**(-3) * \
        np.exp(-0.5 * (rshort / a)**2)
    vscreened = v[:icut] + vcomp
    return vscreened, rhocomp
Example no. 15
def test_ogamma():
    """Tests the effects of changing the temperature of the CMB"""

    # Tested against Ned Wright's advanced cosmology calculator,
    # Sep 7 2012.  The accuracy of our comparison is limited by
    # how many digits it outputs, which limits our test to about
    # 0.2% accuracy.  The NWACC does not allow one
    # to change the number of neutrino species, fixing that at 3.
    # Also, inspection of the NWACC code shows it uses inaccurate
    # constants at the 0.2% level (specifically, a_B),
    # so we shouldn't expect to match it that well. The integral is
    # also done rather crudely.  Therefore, we should not expect
    # the NWACC to be accurate to better than about 0.5%, which is
    # unfortunate, but reflects a problem with it rather than this code.
    # More accurate tests below using Mathematica
    z = np.array([1.0, 10.0, 500.0, 1000.0])
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
    assert np.allclose(cosmo.angular_diameter_distance(z).value,
                       [1651.9, 858.2, 26.855, 13.642], rtol=5e-4)
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
    assert np.allclose(cosmo.angular_diameter_distance(z).value,
                       [1651.8, 857.9, 26.767, 13.582], rtol=5e-4)
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
    assert np.allclose(cosmo.angular_diameter_distance(z).value,
                       [1651.4, 856.6, 26.489, 13.405], rtol=5e-4)

    # Next compare with doing the integral numerically in Mathematica,
    # which allows more precision in the test.  It is at least as
    # good as 0.01%, possibly better
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
    assert np.allclose(cosmo.angular_diameter_distance(z).value,
                       [1651.91, 858.205, 26.8586, 13.6469], rtol=1e-5)
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
    assert np.allclose(cosmo.angular_diameter_distance(z).value,
                       [1651.76, 857.817, 26.7688, 13.5841], rtol=1e-5)
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
    assert np.allclose(cosmo.angular_diameter_distance(z).value,
                       [1651.21, 856.411, 26.4845, 13.4028], rtol=1e-5)

    # Just to be really sure, we also do a version where the integral
    # is analytic, which is a Ode = 0 flat universe.  In this case
    # Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
    # Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
    Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0 ** 3 * 2.725 ** 4 / 1.87837e-26
    Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04
    Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2
    Om0 = 1.0 - Or0
    hubdis = 299792.458 / 70.0
    cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)
    targvals = 2.0 * hubdis * \
        (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
    assert np.allclose(cosmo.comoving_distance(z).value, targvals, rtol=1e-5)

    # Try Tcmb0 = 4
    Or0 *= (4.0 / 2.725) ** 4
    Om0 = 1.0 - Or0
    cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)
    targvals = 2.0 * hubdis * \
        (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
    assert np.allclose(cosmo.comoving_distance(z).value, targvals, rtol=1e-5)
Example no. 16
    def reg_score_function(X, y, mean, scale, shape, skewness):
        """ GAS Skew t Regression Update term using gradient only - native Python function

        Parameters
        ----------
        X : float
            datapoint for the right hand side variable
    
        y : float
            datapoint for the time series

        mean : float
            location parameter for the Skew t distribution

        scale : float
            scale parameter for the Skew t distribution

        shape : float
            tail thickness parameter for the Skew t distribution

        skewness : float
            skewness parameter for the Skew t distribution

        Returns
        ----------
        - Score of the Skew t family
        """
        m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
        mean = mean + (skewness - (1.0/skewness))*scale*m1
        if (y-mean)>=0:
            return ((shape+1)/shape)*((y-mean)*X)/(np.power(skewness*scale,2) + (np.power(y-mean,2)/shape))
        else:
            return ((shape+1)/shape)*((y-mean)*X)/(np.power(scale,2) + (np.power(skewness*(y-mean),2)/shape))
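The recentring used in all of the Skew t methods here is m1 = sqrt(shape)*Gamma((shape-1)/2) / (sqrt(pi)*Gamma(shape/2)), with the location shifted by (skewness - 1/skewness)*scale*m1. A small sketch of just that term, assuming `sp` in the snippets stands for scipy.special (parameter values below are made up):

import numpy as np
from scipy import special as sp

shape, scale, skewness = 6.0, 1.5, 0.8          # made-up parameter values
m1 = (np.sqrt(shape) * sp.gamma((shape - 1.0) / 2.0)) / (np.sqrt(np.pi) * sp.gamma(shape / 2.0))
shift = (skewness - 1.0 / skewness) * scale * m1  # offset between location parameter and mean
print(m1, shift)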
Example no. 17
    def second_order_score(y, mean, scale, shape, skewness):
        """ GAS Skew t Update term potentially using second-order information - native Python function

        Parameters
        ----------
        y : float
            datapoint for the time series

        mean : float
            location parameter for the Skew t distribution

        scale : float
            scale parameter for the Skew t distribution

        shape : float
            tail thickness parameter for the Skew t distribution

        skewness : float
            skewness parameter for the Skew t distribution

        Returns
        ----------
        - Adjusted score of the Skew t family
        """
        m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
        mean = mean + (skewness - (1.0/skewness))*scale*m1
        if (y-mean)>=0:
            return ((shape+1)/shape)*(y-mean)/(np.power(skewness*scale,2) + (np.power(y-mean,2)/shape))
        else:
            return ((shape+1)/shape)*(y-mean)/(np.power(scale,2) + (np.power(skewness*(y-mean),2)/shape))
Example no. 18
    def markov_blanket(y, mean, scale, shape, skewness):
        """ Markov blanket for each likelihood term

        Parameters
        ----------
        y : np.ndarray
            univariate time series

        mean : np.ndarray
            array of location parameters for the Skew t distribution

        scale : float
            scale parameter for the Skew t distribution

        shape : float
            tail thickness parameter for the Skew t distribution

        skewness : float
            skewness parameter for the Skew t distribution

        Returns
        ----------
        - Markov blanket of the Skew t family
        """
        m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
        mean = mean + (skewness - (1.0/skewness))*scale*m1
        return Skewt.logpdf_internal(x=y, df=shape, loc=mean, gamma=skewness, scale=scale)
Example no. 19
def svdUpdate(U, S, V, a, b):
    """
    Update SVD of an (m x n) matrix `X = U * S * V^T` so that
    `[X + a * b^T] = U' * S' * V'^T`
    and return `U'`, `S'`, `V'`.
    
    `a` and `b` are (m, 1) and (n, 1) rank-1 matrices, so that svdUpdate can simulate 
    incremental addition of one new document and/or term to an already existing 
    decomposition.
    """
    rank = U.shape[1]
    m = U.T * a
    p = a - U * m
    Ra = numpy.sqrt(p.T * p)
    assert float(Ra) > 1e-10
    P = (1.0 / float(Ra)) * p
    n = V.T * b
    q = b - V * n
    Rb = numpy.sqrt(q.T * q)
    assert float(Rb) > 1e-10
    Q = (1.0 / float(Rb)) * q

    K = numpy.matrix(numpy.diag(list(numpy.diag(S)) + [0.0])) + numpy.bmat("m ; Ra") * numpy.bmat(" n; Rb").T
    u, s, vt = numpy.linalg.svd(K, full_matrices=False)
    tUp = numpy.matrix(u[:, :rank])
    tVp = numpy.matrix(vt.T[:, :rank])
    tSp = numpy.matrix(numpy.diag(s[:rank]))
    Up = numpy.bmat("U P") * tUp
    Vp = numpy.bmat("V Q") * tVp
    Sp = tSp
    return Up, Sp, Vp
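A usage sketch for svdUpdate (same numpy.matrix interface as above): start from an exactly rank-k matrix, apply a rank-1 update, and check that the result matches the best rank-k approximation of X + a*b^T obtained from a fresh SVD. Sizes and the seed below are arbitrary:

import numpy

numpy.random.seed(0)
m, n, k = 8, 6, 3
X = numpy.matrix(numpy.random.rand(m, k)) * numpy.matrix(numpy.random.rand(k, n))  # exactly rank k
u, s, vt = numpy.linalg.svd(X, full_matrices=False)
U = numpy.matrix(u[:, :k])
S = numpy.matrix(numpy.diag(s[:k]))
V = numpy.matrix(vt.T[:, :k])

a = numpy.matrix(numpy.random.rand(m, 1))
b = numpy.matrix(numpy.random.rand(n, 1))
Up, Sp, Vp = svdUpdate(U, S, V, a, b)

u2, s2, vt2 = numpy.linalg.svd(X + a * b.T, full_matrices=False)
best_k = numpy.matrix(u2[:, :k]) * numpy.matrix(numpy.diag(s2[:k])) * numpy.matrix(vt2[:k, :])
print(numpy.allclose(Up * Sp * Vp.T, best_k))   # True (up to floating point)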
Example no. 20
    def neg_loglikelihood(y, mean, scale, shape, skewness):
        """ Negative loglikelihood function

        Parameters
        ----------
        y : np.ndarray
            univariate time series

        mean : np.ndarray
            array of location parameters for the Skew t distribution

        scale : float
            scale parameter for the Skew t distribution

        shape : float
            tail thickness parameter for the Skew t distribution

        skewness : float
            skewness parameter for the Skew t distribution

        Returns
        ----------
        - Negative loglikelihood of the Skew t family
        """
        m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
        mean = mean + (skewness - (1.0/skewness))*scale*m1
        return -np.sum(Skewt.logpdf_internal(x=y, df=shape, loc=mean, gamma=skewness, scale=scale))
Example no. 21
def test_ParameterizedAberration():
    # verify that we can reproduce the same behavior as ZernikeAberration
    # using ParameterizedAberration
    NWAVES = 0.5
    WAVELENGTH = 1e-6
    RADIUS = 1.0

    pupil = optics.CircularAperture(radius=RADIUS)

    zern_wave = poppy_core.Wavefront(npix=NPIX, diam=DIAM, wavelength=1e-6)
    zernike_wfe = wfe.ZernikeWFE(
        coefficients=[0, 0, 2e-7, NWAVES * WAVELENGTH / (2 * np.sqrt(3)), 0, 3e-8],
        radius=RADIUS
    )
    zern_wave *= pupil
    zern_wave *= zernike_wfe

    parameterized_distortion = wfe.ParameterizedWFE(
        coefficients=[0, 0, 2e-7, NWAVES * WAVELENGTH / (2 * np.sqrt(3)), 0, 3e-8],
        basis_factory=zernike.zernike_basis,
        radius=RADIUS
    )

    pd_wave = poppy_core.Wavefront(npix=NPIX, diam=3.0, wavelength=1e-6)
    pd_wave *= pupil
    pd_wave *= parameterized_distortion

    np.testing.assert_allclose(pd_wave.phase, zern_wave.phase,
                               err_msg="ParameterizedAberration disagrees with ZernikeAberration")
Example no. 22
    def CA(self):
#        return NPortZ(self).CA
        z0 = self.z0
        A = np.mat(self.A)
        T = np.matrix([[np.sqrt(z0), -(A[0,1]+A[0,0]*z0)/np.sqrt(z0)],
                        [-1/np.sqrt(z0), -(A[1,1]+A[1,0]*z0)/np.sqrt(z0)]])
        return np.array(T * np.mat(self.CS) * T.H)
Example no. 23
    def __init__(self, rng, input, n_in, n_out, W=None, b=None,
                 activation=T.tanh):
        self.input = input[0]

        # initialize weights into this layer
        if W is None:
            W_values = np.asarray(
                rng.uniform(
                    size=(n_in, n_out),
                    low=-np.sqrt(6. / (n_in + n_out)),
                    high=np.sqrt(6. / (n_in + n_out)),
                ),
                dtype=theano.config.floatX
            )
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4

            W = theano.shared(value=W_values, name='W', borrow=True)

        # initialize bias term weights into this layer
        if b is None:
            b_values = np.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b', borrow=True)

        self.W = W
        self.b = b

        lin_output = T.dot(self.input, self.W) + self.b
        self.output = (
            lin_output if activation is None
            else activation(lin_output)
        )

        self.params = [self.W, self.b]
Example no. 24
  def testStudentLogPDFMultidimensional(self):
    with self.test_session():
      batch_size = 6
      df = constant_op.constant([[1.5, 7.2]] * batch_size)
      mu = constant_op.constant([[3., -3.]] * batch_size)
      sigma = constant_op.constant([[-math.sqrt(10.), math.sqrt(15.)]] *
                                   batch_size)
      df_v = np.array([1.5, 7.2])
      mu_v = np.array([3., -3.])
      sigma_v = np.array([np.sqrt(10.), np.sqrt(15.)])
      t = np.array([[-2.5, 2.5, 4., 0., -1., 2.]], dtype=np.float32).T
      student = student_t.StudentT(df, loc=mu, scale=sigma)
      log_pdf = student.log_prob(t)
      log_pdf_values = self.evaluate(log_pdf)
      self.assertEqual(log_pdf.get_shape(), (6, 2))
      pdf = student.prob(t)
      pdf_values = self.evaluate(pdf)
      self.assertEqual(pdf.get_shape(), (6, 2))

      if not stats:
        return
      expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
      expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
      self.assertAllClose(expected_log_pdf, log_pdf_values)
      self.assertAllClose(np.log(expected_pdf), log_pdf_values)
      self.assertAllClose(expected_pdf, pdf_values)
      self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
Example no. 25
def getEff(s, cut, comp='joint', reco=True):

    eff, sig, relerr = {},{},{}
    a = np.log10(s['MC_energy'])
    Ebins = getEbins()
    Emids = getMids(Ebins)
    erangeDict = getErange()

    c0 = cut
    if comp != 'joint':
        compcut = s['comp'] == comp
        c0 = cut * compcut

    # Set radii for finding effective area
    rDict = {}
    keys = ['low', 'mid', 'high']
    for key in keys:
        rDict[key] = np.array([600, 800, 1100, 1700, 2600, 2900])
    rDict['low'][1] = 600
    Ebreaks = np.array([4, 5, 6, 7, 8, 9])
    rgrp = np.digitize(Emids, Ebreaks) - 1

    for key in keys:

        # Get efficiency and sigma
        simcut = np.array([sim in erangeDict[key] for sim in s['sim']])
        k = np.histogram(a[c0*simcut], bins=Ebins)[0]
        #k = Nfinder(a, c0*simcut)
        n = s['MC'][comp][key].astype('float')
        eff[key], sig[key], relerr[key] = np.zeros((3, len(k)))
        with np.errstate(divide='ignore', invalid='ignore'):
            eff[key] = k / n
            var = (k+1)*(k+2)/((n+2)*(n+3)) - (k+1)**2/((n+2)**2)
        sig[key] = np.sqrt(var)

        # Multiply by throw area
        r = np.array([rDict[key][i] for i in rgrp])
        eff[key] *= np.pi*(r**2)
        sig[key] *= np.pi*(r**2)

        # Deal with parts of the arrays with no information
        for i in range(len(eff[key])):
            if n[i] == 0:
                eff[key][i] = 0
                sig[key][i] = np.inf

    # Combine low, mid, and high energy datasets
    eff_tot = (np.sum([eff[key]/sig[key] for key in keys], axis=0) /
            np.sum([1/sig[key] for key in keys], axis=0))
    sig_tot = np.sqrt(1 / np.sum([1/sig[key]**2 for key in keys], axis=0))
    with np.errstate(divide='ignore'):
        relerr  = sig_tot / eff_tot

    # UGH : find better way to do this
    if reco:
        eff_tot = eff_tot[20:]
        sig_tot = sig_tot[20:]
        relerr  = relerr[20:]

    return eff_tot, sig_tot, relerr
Example no. 26
def EN_CID(y):
    """
    CID measure from Batista, G. E. A. P. A., Keogh, E. J., Tataw, O. M. & de
    Souza, V. M. A. CID: an efficient complexity-invariant distance for time
    series. Data Min Knowl. Disc. 28, 634-669 (2014).
    
    Arguments
    ---------

    y: a nitime time-series object, or numpy vector

    """

    # Make the input a row vector of numbers:
    y = makeRowVector(vectorize(y))

    # Prepare the output dictionary
    out = {}
    
    # Original definition (in Table 2 of paper cited above)
    out['CE1'] = np.sqrt(np.mean(np.power(np.diff(y),2)))  # sum -> mean to deal with non-equal time-series lengths

    # Definition corresponding to the line segment example in Fig. 9 of the paper
    # cited above (using Pythagoras's theorem):
    out['CE2'] = np.mean(np.sqrt(1 + np.power(np.diff(y),2)))

    return out
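The two estimators above reduce to one-liners on a plain NumPy array; a sketch with a made-up series (no makeRowVector/vectorize helpers):

import numpy as np

y = np.sin(np.linspace(0, 4 * np.pi, 100)) + 0.1 * np.random.RandomState(0).randn(100)
CE1 = np.sqrt(np.mean(np.diff(y) ** 2))        # RMS of successive differences
CE2 = np.mean(np.sqrt(1 + np.diff(y) ** 2))    # mean segment length (Pythagoras)
print(CE1, CE2)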
Example no. 27
 def testStudentSampleMultiDimensional(self):
   with self.test_session():
     batch_size = 7
     df = constant_op.constant([[3., 7.]] * batch_size)
     mu = constant_op.constant([[3., -3.]] * batch_size)
     sigma = constant_op.constant([[math.sqrt(10.), math.sqrt(15.)]] *
                                  batch_size)
     df_v = [3., 7.]
     mu_v = [3., -3.]
     sigma_v = [np.sqrt(10.), np.sqrt(15.)]
     n = constant_op.constant(200000)
     student = student_t.StudentT(df=df, loc=mu, scale=sigma)
     samples = student.sample(n, seed=123456)
     sample_values = self.evaluate(samples)
     self.assertEqual(samples.get_shape(), (200000, batch_size, 2))
     self.assertAllClose(
         sample_values[:, 0, 0].mean(), mu_v[0], rtol=1e-2, atol=0)
     self.assertAllClose(
         sample_values[:, 0, 0].var(),
         sigma_v[0]**2 * df_v[0] / (df_v[0] - 2),
         rtol=1e-1,
         atol=0)
     self._checkKLApprox(df_v[0], mu_v[0], sigma_v[0], sample_values[:, 0, 0])
     self.assertAllClose(
         sample_values[:, 0, 1].mean(), mu_v[1], rtol=1e-2, atol=0)
     self.assertAllClose(
         sample_values[:, 0, 1].var(),
         sigma_v[1]**2 * df_v[1] / (df_v[1] - 2),
         rtol=1e-1,
         atol=0)
     self._checkKLApprox(df_v[1], mu_v[1], sigma_v[1], sample_values[:, 0, 1])
Example no. 28
def test_decimate():
    """Test decimation of digitizer headshapes with too many points."""
    # load headshape and convert to meters
    hsp_mm = _get_ico_surface(5)['rr'] * 100
    hsp_m = hsp_mm / 1000.

    # save headshape to a file in mm in temporary directory
    tempdir = _TempDir()
    sphere_hsp_path = op.join(tempdir, 'test_sphere.txt')
    np.savetxt(sphere_hsp_path, hsp_mm)

    # read in raw data using spherical hsp, and extract new hsp
    with warnings.catch_warnings(record=True) as w:
        raw = read_raw_kit(sqd_path, mrk_path, elp_txt_path, sphere_hsp_path)
    assert_true(any('more than' in str(ww.message) for ww in w))
    # collect headshape from raw (should now be in m)
    hsp_dec = np.array([dig['r'] for dig in raw.info['dig']])[8:]

    # with 10242 points and _decimate_points set to resolution of 5 mm, hsp_dec
    # should be a bit over 5000 points. If not, something is wrong or
    # decimation resolution has been purposefully changed
    assert_true(len(hsp_dec) > 5000)

    # should have similar size, distance from center
    dist = np.sqrt(np.sum((hsp_m - np.mean(hsp_m, axis=0))**2, axis=1))
    dist_dec = np.sqrt(np.sum((hsp_dec - np.mean(hsp_dec, axis=0))**2, axis=1))
    hsp_rad = np.mean(dist)
    hsp_dec_rad = np.mean(dist_dec)
    assert_almost_equal(hsp_rad, hsp_dec_rad, places=3)
Example no. 29
    def DM(self, z):
        """Transverse Comoving Distance (Mpc)

        Parameters
        ----------
        z : float
            redshift
        
        Returns
        -------
        y : float
            The transverse comoving distance in Mpc, given by Hogg eqn 16
            
        Examples
        --------
        >>> cosmo = Cosmology()
        >>> cosmo.DM(1.0)
        3303.8288058874678
        """
        # Compute the transverse comoving distance in Mpc (Eqn 16)
        if self.OmegaK > 0.0:
            return self.DH / np.sqrt(self.OmegaK) * \
                    np.sinh(np.sqrt(self.OmegaK)*self.DC(z)/self.DH)
        elif self.OmegaK == 0.0:
            return self.DC(z)
        elif self.OmegaK < 0.0:
            return self.DH / np.sqrt(np.abs(self.OmegaK)) * \
                    np.sin(np.sqrt(np.abs(self.OmegaK))*self.DC(z)/self.DH)
Example no. 30
def delta(phase, inc, ecc=0, omega=0):
    """
    Compute the distance center-to-center between planet and host star.
    ___

    INPUT:

    phase: orbital phase in radian
    inc: inclination of the system in radian

    OPTIONAL INPUT:

    ecc: orbital eccentricity
    omega: argument of periastron in radian

    //
    OUTPUT:

    distance center-to-center, double-float number.
    ___


    """
    phase = 2*np.pi*phase
    if ecc == 0 and omega == 0:
        delta = np.sqrt(1-(np.cos(phase)**2)*(np.sin(inc)**2))
    else:
        delta = (1.-ecc**2.)/(1.-ecc*np.sin(phase-omega))* np.sqrt((1.-(np.cos(phase))**2.*(np.sin(inc))**2))

    return delta
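A quick sanity check of the circular-orbit branch above (ecc = 0, omega = 0): for an edge-on orbit (inc = pi/2), mid-transit (phase 0) gives a centre-to-centre distance of 0 and quarter phase gives 1, in units of the orbital separation:

import numpy as np

print(delta(0.0, np.pi / 2))    # ~0.0 (mid-transit)
print(delta(0.25, np.pi / 2))   # ~1.0 (maximum elongation)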
Example no. 31
# test features
feats = ad.csv_feats_dict
ft = feats['colors'] + feats['other'][0:2] + feats['mags']  # features to use

# filter for large,small classes, high class_probability
nLarge, nSmall, cprob = 10000, 50, 0.99
d = ad.filter_dfcsv(dfcsv, feats=ft, nLarge=nLarge, nSmall=nSmall,
                    cprob=cprob)  # filter dfcsv
lclasses = d.loc[d.numinType > nLarge, 'newType'].unique()
sclasses = d.loc[d.numinType < nSmall, 'newType'].unique()

# dataframe for results
idxs = pd.MultiIndex.from_product(
    [list(sclasses), list(lclasses)], names=['small_class', 'large_class'])
simdf = pd.DataFrame(data=None, index=idxs, columns=['dist', 'dist_std'])

# calc distance between classes
#   using Euclidean distance between feature means
davg = d[ft + ['newType']].groupby('newType').mean()
dstd = d[ft + ['newType']].groupby('newType').std()
for sc in sclasses:
    for lc in lclasses:
        dist, dist_std = 0, 0
        for f in ft:
            dist = dist + (davg.loc[sc, f] - davg.loc[lc, f])**2
            dist_std = dist_std + (davg.loc[sc, f] / dstd.loc[sc, f] -
                                   davg.loc[lc, f] / dstd.loc[lc, f])**2

        simdf.loc[(sc, lc), 'dist'] = np.sqrt(dist)
        simdf.loc[(sc, lc), 'dist_std'] = np.sqrt(dist_std)
Example no. 32
def plotResults(data):
    means = np.mean(data, axis=0)
    std_errors = np.std(data, axis=0) / np.sqrt(len(data))
    plt.errorbar(range(len(means)), means, yerr=std_errors)
    plt.ylim(0, 100)
Example no. 33
def constrained_oasisAR2(y,
                         g,
                         sn,
                         optimize_b=True,
                         b_nonneg=True,
                         optimize_g=0,
                         decimate=5,
                         shift=100,
                         window=None,
                         tol=1e-9,
                         max_iter=1,
                         penalty=1):
    """ Infer the most likely discretized spike train underlying an AR(2)
    fluorescence trace. Solves the noise constrained sparse non-negative
    deconvolution problem min (s)_1 subject to (c-y)^2 = sn^2 T and
    s_t = c_t-g1 c_{t-1}-g2 c_{t-2} >= 0
    Args:
        y : array of float
            One dimensional array containing the fluorescence intensities
            (with baseline already subtracted) with one entry per time-bin.

        g : (float, float)
            Parameters of the AR(2) process that models the fluorescence
            impulse response.

        sn : float
            Standard deviation of the noise distribution.

        optimize_b : bool, optional, default True
            Optimize baseline if True else it is set to 0, see y.

        b_nonneg: bool, optional, default True
            Enforce strictly non-negative baseline if True.

        optimize_g : int, optional, default 0
            Number of large, isolated events to consider for optimizing g.
            No optimization if optimize_g=0.

        decimate : int, optional, default 5
            Decimation factor for estimating hyper-parameters faster on
            decimated data.

        shift : int, optional, default 100
            Number of frames by which to shift window from one run of NNLS to
            the next.

        window : int, optional, default None (200 or larger dependent on g)
            Window size.

        tol : float, optional, default 1e-9
            Tolerance parameter.

        max_iter : int, optional, default 1
            Maximal number of iterations.

        penalty : int, optional, default 1
            Sparsity penalty. 1: min |s|_1,  0: min |s|_0
    Returns:
        c : array of float
            The inferred denoised fluorescence signal at each time-bin.

        s : array of float
            Discretized deconvolved neural activity (spikes).

        b : float
            Fluorescence baseline value.

        (g1, g2) : tuple of float
            Parameters of the AR(2) process that models the fluorescence
            impulse response.

        lam : float
            Sparsity penalty parameter lambda of dual problem.
    References:
        Friedrich J and Paninski L, NIPS 2016
        Friedrich J, Zhou P, and Paninski L, arXiv 2016
    """

    T = len(y)
    d = (g[0] + np.sqrt(g[0] * g[0] + 4 * g[1])) / 2
    r = (g[0] - np.sqrt(g[0] * g[0] + 4 * g[1])) / 2
    if window is None:
        window = int(min(T, max(200, -5 / np.log(d))))

    if not optimize_g:
        g11 = (np.exp(np.log(d) * np.arange(1, T + 1)) * np.arange(1, T + 1)) if d == r else \
            (np.exp(np.log(d) * np.arange(1, T + 1)) -
             np.exp(np.log(r) * np.arange(1, T + 1))) / (d - r)
        g12 = np.append(0, g[1] * g11[:-1])
        g11g11 = np.cumsum(g11 * g11)
        g11g12 = np.cumsum(g11 * g12)
        Sg11 = np.cumsum(g11)
        f_lam = 1 - g[0] - g[1]
    elif decimate == 0:  # need to run AR1 anyways for estimating AR coeffs
        decimate = 1
    thresh = sn * sn * T

    # get initial estimate of b and lam on downsampled data using AR1 model
    if decimate > 0:
        _, s, b, aa, lam = constrained_oasisAR1(
            y[:len(y) // decimate * decimate].reshape(-1, decimate).mean(1),
            d**decimate,
            sn / np.sqrt(decimate),
            optimize_b=optimize_b,
            b_nonneg=b_nonneg,
            optimize_g=optimize_g)
        if optimize_g:
            from scipy.optimize import minimize
            d = aa**(1. / decimate)
            if decimate > 1:
                s = oasisAR1(y - b, d, lam=lam * (1 - aa) / (1 - d))[1]
            r = estimate_time_constant(s, 1, fudge_factor=.98)[0]
            g[0] = d + r
            g[1] = -d * r
            g11 = (np.exp(np.log(d) * np.arange(1, T + 1)) -
                   np.exp(np.log(r) * np.arange(1, T + 1))) / (d - r)
            g12 = np.append(0, g[1] * g11[:-1])
            g11g11 = np.cumsum(g11 * g11)
            g11g12 = np.cumsum(g11 * g12)
            Sg11 = np.cumsum(g11)
            f_lam = 1 - g[0] - g[1]
        elif decimate > 1:
            s = oasisAR1(y - b, d, lam=lam * (1 - aa) / (1 - d))[1]
        lam *= (1 - d**decimate) / f_lam

        # this window size seems necessary and sufficient
        possible_spikes = [
            x + np.arange(-2, 3) for x in np.where(s > s.max() / 10.)[0]
        ]
        ff = np.array(possible_spikes, dtype=int).ravel()
        ff = np.unique(ff[(ff >= 0) * (ff < T)])
        mask = np.zeros(T, dtype=bool)
        mask[ff] = True
    else:
        b = np.percentile(y, 15) if optimize_b else 0
        lam = 2 * sn * np.linalg.norm(g11)
        mask = None
    if b_nonneg:
        b = max(b, 0)

    # run ONNLS
    c, s = onnls(y - b,
                 g,
                 lam=lam,
                 mask=mask,
                 shift=shift,
                 window=window,
                 tol=tol)

    if not optimize_b:  # don't optimize b, just the dual variable lambda
        for _ in range(max_iter - 1):
            res = y - c
            RSS = res.dot(res)
            if np.abs(RSS - thresh) < 1e-4 * thresh:
                break

            # calc shift dlam, here attributed to sparsity penalty
            tmp = np.empty(T)
            ls = np.append(np.where(s > 1e-6)[0], T)
            l = ls[0]
            tmp[:l] = (1 + d) / (1 + d**l) * \
                np.exp(np.log(d) * np.arange(l))  # first pool
            for i, f in enumerate(ls[:-1]):  # all other pools
                l = ls[i + 1] - f - 1

                # if and elif correct last 2 time points for |s|_1 instead |c|_1
                if i == len(ls) - 2:  # last pool
                    tmp[f] = (1. / f_lam if l == 0 else
                              (Sg11[l] + g[1] / f_lam * g11[l - 1] +
                               (g[0] + g[1]) / f_lam * g11[l] -
                               g11g12[l] * tmp[f - 1]) / g11g11[l])
                # secondlast pool if last one has length 1
                elif i == len(ls) - 3 and ls[-2] == T - 1:
                    tmp[f] = (Sg11[l] + g[1] / f_lam * g11[l] -
                              g11g12[l] * tmp[f - 1]) / g11g11[l]
                else:  # all other pools
                    tmp[f] = (Sg11[l] - g11g12[l] * tmp[f - 1]) / g11g11[l]
                l += 1
                tmp[f + 1:f + l] = g11[1:l] * tmp[f] + g12[1:l] * tmp[f - 1]

            aa = tmp.dot(tmp)
            bb = res.dot(tmp)
            cc = RSS - thresh
            try:
                db = (-bb + np.sqrt(bb * bb - aa * cc)) / aa
            except:
                db = -bb / aa

            # perform shift
            b += db
            c, s = onnls(y - b,
                         g,
                         lam=lam,
                         mask=mask,
                         shift=shift,
                         window=window,
                         tol=tol)
            db = np.mean(y - c) - b
            b += db
            lam -= db / f_lam

    else:  # optimize b
        db = max(np.mean(y - c), 0 if b_nonneg else -np.inf) - b
        b += db
        lam -= db / (1 - g[0] - g[1])
        g_converged = False
        for _ in range(max_iter - 1):
            res = y - c - b
            RSS = res.dot(res)
            if np.abs(RSS - thresh) < 1e-4 * thresh:
                break
            # calc shift db, here attributed to baseline
            tmp = np.empty(T)
            ls = np.append(np.where(s > 1e-6)[0], T)
            l = ls[0]
            tmp[:l] = (1 + d) / (1 + d**l) * \
                np.exp(np.log(d) * np.arange(l))  # first pool
            for i, f in enumerate(ls[:-1]):  # all other pools
                l = ls[i + 1] - f
                tmp[f] = (Sg11[l - 1] -
                          g11g12[l - 1] * tmp[f - 1]) / g11g11[l - 1]
                tmp[f + 1:f + l] = g11[1:l] * tmp[f] + g12[1:l] * tmp[f - 1]
            tmp -= tmp.mean()
            aa = tmp.dot(tmp)
            bb = res.dot(tmp)
            cc = RSS - thresh
            try:
                db = (-bb + np.sqrt(bb * bb - aa * cc)) / aa
            except:
                db = -bb / aa

            # perform shift
            if b_nonneg:
                db = max(db, -b)
            b += db
            c, s = onnls(y - b,
                         g,
                         lam=lam,
                         mask=mask,
                         shift=shift,
                         window=window,
                         tol=tol)

            # update b and lam
            db = max(np.mean(y - c), 0 if b_nonneg else -np.inf) - b
            b += db
            lam -= db / f_lam

            # update g and b
            if optimize_g and (not g_converged):

                def getRSS(y, opt):
                    b, ld, lr = opt
                    if ld < lr:
                        return 1e3 * thresh
                    d, r = np.exp(ld), np.exp(lr)
                    g1, g2 = d + r, -d * r
                    tmp = b + onnls(
                        y - b, [g1, g2], lam, mask=(s > 1e-2 * s.max()))[0] - y
                    return tmp.dot(tmp)

                result = minimize(lambda x: getRSS(y, x),
                                  (b, np.log(d), np.log(r)),
                                  bounds=((0 if b_nonneg else None, None),
                                          (None, -1e-4), (None, -1e-3)),
                                  method='L-BFGS-B',
                                  options={
                                      'gtol': 1e-04,
                                      'maxiter': 10,
                                      'ftol': 1e-05
                                  })
                if abs(result['x'][1] - np.log(d)) < 1e-3:
                    g_converged = True
                b, ld, lr = result['x']
                d, r = np.exp(ld), np.exp(lr)
                g = (d + r, -d * r)
                c, s = onnls(y - b,
                             g,
                             lam=lam,
                             mask=mask,
                             shift=shift,
                             window=window,
                             tol=tol)

                # update b and lam
                db = max(np.mean(y - c), 0 if b_nonneg else -np.inf) - b
                b += db
                lam -= db

    if penalty == 0:  # get (locally optimal) L0 solution

        def c4smin(y, s, s_min):
            ls = np.append(np.where(s > s_min)[0], T)
            tmp = np.zeros_like(s)
            l = ls[0]  # first pool
            tmp[:l] = max(
                0,
                np.exp(np.log(d) * np.arange(l)).dot(y[:l]) * (1 - d * d) /
                (1 - d**(2 * l))) * np.exp(np.log(d) * np.arange(l))
            for i, f in enumerate(ls[:-1]):  # all other pools
                l = ls[i + 1] - f
                tmp[f] = (g11[:l].dot(y[f:f + l]) -
                          g11g12[l - 1] * tmp[f - 1]) / g11g11[l - 1]
                tmp[f + 1:f + l] = g11[1:l] * tmp[f] + g12[1:l] * tmp[f - 1]
            return tmp

        spikesizes = np.sort(s[s > 1e-6])
        i = len(spikesizes) // 2
        l = 0
        u = len(spikesizes) - 1
        while u - l > 1:
            s_min = spikesizes[i]
            tmp = c4smin(y - b, s, s_min)
            res = y - b - tmp
            RSS = res.dot(res)
            if RSS < thresh or i == 0:
                l = i
                i = (l + u) // 2
                res0 = tmp
            else:
                u = i
                i = (l + u) // 2
        if i > 0:
            c = res0
            s = np.append([0, 0], c[2:] - g[0] * c[1:-1] - g[1] * c[:-2])

    return c, s, b, g, lam
Example no. 34
def onnls(y,
          g,
          lam=0,
          shift=100,
          window=None,
          mask=None,
          tol=1e-9,
          max_iter=None):
    """ Infer the most likely discretized spike train underlying an AR(2)
    fluorescence trace. Solves the sparse non-negative deconvolution problem
    ``argmin_s 1/2|Ks-y|^2 + lam |s|_1`` for ``s>=0``
    Args:
        y : array of float, shape (T,)
            One dimensional array containing the fluorescence intensities with
            one entry per time-bin.
        g : array, shape (p,)
            if p in (1,2):
                Parameter(s) of the AR(p) process that models the fluorescence
                impulse response.
            else:
                Kernel that models the fluorescence impulse response.
        lam : float, optional, default 0
            Sparsity penalty parameter lambda.

        shift : int, optional, default 100
            Number of frames by which to shift window from one run of NNLS
            to the next.

        window : int, optional, default None (200 or larger dependent on g)
            Window size.

        mask : array of bool, shape (n,), optional, default (True,)*n
            Mask to restrict potential spike times considered.

        tol : float, optional, default 1e-9
            Tolerance parameter.

        max_iter : None or int, optional, default None
            Maximum number of iterations before termination.
            If None (default), it is set to window size.
    Returns:
        c : array of float, shape (T,)
            The inferred denoised fluorescence signal at each time-bin.

        s : array of float, shape (T,)
            Discretized deconvolved neural activity (spikes).
    References:
        Friedrich J and Paninski L, NIPS 2016
        Bro R and DeJong S, J Chemometrics 1997
    """

    T = len(y)
    if mask is None:
        mask = np.ones(T, dtype=bool)
    if window is None:
        w = max(
            200,
            len(g) if len(g) > 2 else int(
                -5 / np.log(g[0] if len(g) == 1 else
                            (g[0] + np.sqrt(g[0] * g[0] + 4 * g[1])) / 2)))
    else:
        w = window
    w = min(T, w)
    K = np.zeros((w, w))

    if len(g) == 1:  # kernel for AR(1)
        _y = y - lam * (1 - g[0])
        _y[-1] = y[-1] - lam
        h = np.exp(np.log(g[0]) * np.arange(w))
        for i in range(w):
            K[i:, i] = h[:w - i]

    elif len(g) == 2:  # kernel for AR(2)
        _y = y - lam * (1 - g[0] - g[1])
        _y[-2] = y[-2] - lam * (1 - g[0])
        _y[-1] = y[-1] - lam
        d = (g[0] + np.sqrt(g[0] * g[0] + 4 * g[1])) / 2
        r = (g[0] - np.sqrt(g[0] * g[0] + 4 * g[1])) / 2
        if d == r:
            h = np.exp(np.log(d) * np.arange(1, w + 1)) * np.arange(1, w + 1)
        else:
            h = (np.exp(np.log(d) * np.arange(1, w + 1)) -
                 np.exp(np.log(r) * np.arange(1, w + 1))) / (d - r)
        for i in range(w):
            K[i:, i] = h[:w - i]

    else:  # arbitrary kernel
        h = g
        for i in range(w):
            K[i:, i] = h[:w - i]
        a = np.linalg.inv(K).sum(0)
        _y = y - lam * a[0]
        _y[-w:] = y[-w:] - lam * a

    s = np.zeros(T)
    KK = K.T.dot(K)
    for i in range(0, max(1, T - w), shift):
        s[i:i + w] = _nnls(KK,
                           K.T.dot(_y[i:i + w]),
                           s[i:i + w],
                           mask=mask[i:i + w],
                           tol=tol,
                           max_iter=max_iter)[:w]

        # subtract contribution of spikes already committed to
        _y[i:i + w] -= K[:, :shift].dot(s[i:i + shift])
    s[i + shift:] = _nnls(KK[-(T - i - shift):, -(T - i - shift):],
                          K[:T - i - shift, :T - i - shift].T.dot(_y[i +
                                                                     shift:]),
                          s[i + shift:],
                          mask=mask[i + shift:])
    c = np.zeros_like(s)
    for t in np.where(s > tol)[0]:
        c[t:t + w] += s[t] * h[:min(w, T - t)]
    return c, s
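The AR(2) kernel used in both onnls and constrained_oasisAR2 above is built from the roots d, r of x^2 - g1*x - g2, with h_t = (d^t - r^t)/(d - r) (and t*d^t in the degenerate d = r case). A standalone sketch with made-up AR coefficients:

import numpy as np

g = (1.45, -0.475)                          # made-up AR(2) coefficients
d = (g[0] + np.sqrt(g[0] ** 2 + 4 * g[1])) / 2
r = (g[0] - np.sqrt(g[0] ** 2 + 4 * g[1])) / 2
t = np.arange(1, 11)
h = (d ** t - r ** t) / (d - r)             # first 10 samples of the impulse response
print(np.round(h, 3))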
Example no. 35
def constrained_foopsi(fluor,
                       bl=None,
                       c1=None,
                       g=None,
                       sn=None,
                       p=1,
                       method_deconvolution='oasis',
                       bas_nonneg=True,
                       noise_range=[.25, .5],
                       noise_method='logmexp',
                       lags=5,
                       fudge_factor=1.,
                       verbosity=False,
                       solvers=None,
                       optimize_g=0,
                       s_min=None,
                       **kwargs):
    """
    Infer the most likely discretized spike train underlying a fluorescence
    trace. It relies on a noise constrained deconvolution approach.
    Args:
        fluor: np.ndarray
            One dimensional array containing the fluorescence intensities with
            one entry per time-bin.
        bl: [optional] float
            Fluorescence baseline value. If no value is given, then bl is
            estimated from the data.
        c1: [optional] float
            value of calcium at time 0
        g: [optional] list,float
            Parameters of the AR process that models the fluorescence impulse
            response. Estimated from the data if no value is given
        sn: float, optional
            Standard deviation of the noise distribution.  If no value is
            given, then sn is estimated from the data.
        p: int
            order of the autoregression model
        method_deconvolution: [optional] string
            solution method for basis projection pursuit 'cvx' or 'cvxpy' or
            'oasis'
        bas_nonneg: bool
            baseline strictly non-negative
        noise_range:  list of two elements
            frequency range for averaging noise PSD
        noise_method: string
            method of averaging noise PSD
        lags: int
            number of lags for estimating time constants
        fudge_factor: float
            fudge factor for reducing time constant bias
        verbosity: bool
             display optimization details
        solvers: list string
            primary and secondary (if problem unfeasible for approx solution)
            solvers to be used with cvxpy, default is ['ECOS','SCS']
        optimize_g : [optional] int, only applies to method 'oasis'
            Number of large, isolated events to consider for optimizing g.
            If optimize_g=0 (default) the provided or estimated g is not
            further optimized.
        s_min : float, optional, only applies to method 'oasis'
            Minimal non-zero activity within each bin (minimal 'spike size').
            For negative values the threshold is abs(s_min) * sn * sqrt(1-g)
            If None (default) the standard L1 penalty is used
            If 0 the threshold is determined automatically such that
            RSS <= sn^2 T
    Returns:
        c: np.ndarray float
            The inferred denoised fluorescence signal at each time-bin.
        bl, c1, g, sn : As explained above
        sp: ndarray of float
            Discretized deconvolved neural activity (spikes)
        lam: float
            Regularization parameter
    Raises:
        Exception("You must specify the value of p")
        Exception('OASIS is currently only implemented for p=1 and p=2')
        Exception('Undefined Deconvolution Method')
    References:
        * Pnevmatikakis et al. 2016. Neuron, in press,
        http://dx.doi.org/10.1016/j.neuron.2015.11.037
        * Machado et al. 2015. Cell 162(2):338-350
    \image: docs/img/deconvolution.png
    \image: docs/img/evaluationcomponent.png
    """

    if g is None or sn is None:
        # Estimate noise standard deviation and AR coefficients,
        # if they are not present
        g, sn = estimate_parameters(fluor,
                                    p=p,
                                    sn=sn,
                                    g=g,
                                    range_ff=noise_range,
                                    method=noise_method,
                                    lags=lags,
                                    fudge_factor=fudge_factor)
    lam = None

    if method_deconvolution == 'cvx':
        c, bl, c1, g, sn, sp = cvxopt_foopsi(fluor,
                                             b=bl,
                                             c1=c1,
                                             g=g,
                                             sn=sn,
                                             p=p,
                                             bas_nonneg=bas_nonneg,
                                             verbosity=verbosity)

    elif method_deconvolution == 'cvxpy':
        c, bl, c1, g, sn, sp = cvxpy_foopsi(fluor,
                                            g,
                                            sn,
                                            b=bl,
                                            c1=c1,
                                            bas_nonneg=bas_nonneg,
                                            solvers=solvers)

    elif method_deconvolution == 'oasis':
        from cnmf_oasis import constrained_oasisAR1
        penalty = 1 if s_min is None else 0
        if p == 1:
            if bl is None:
                # Infer the most likely discretized spike train underlying
                # an AR(1) fluorescence trace.
                # Solves the noise constrained sparse
                # non-negative deconvolution problem. min |s|_1 subject to:
                # |c-y|^2 = sn^2 T and s_t = c_t-g c_{t-1} >= 0
                c, sp, bl, g, lam = constrained_oasisAR1(
                    fluor.astype(np.float32),
                    g[0],
                    sn,
                    optimize_b=True,
                    b_nonneg=bas_nonneg,
                    optimize_g=optimize_g,
                    penalty=penalty,
                    s_min=0 if s_min is None else s_min)
            else:
                c, sp, _, g, lam = constrained_oasisAR1(
                    (fluor - bl).astype(np.float32),
                    g[0],
                    sn,
                    optimize_b=False,
                    penalty=penalty)

            c1 = c[0]

            # remove initial calcium to align with the other foopsi methods
            # it is added back in function constrained_foopsi_parallel of
            # temporal.py
            c -= c1 * g**np.arange(len(fluor))
        elif p == 2:
            from cnmf_oasis import constrained_oasisAR1, constrained_oasisAR2
            if bl is None:
                c, sp, bl, g, lam = constrained_oasisAR2(fluor.astype(
                    np.float32),
                                                         g,
                                                         sn,
                                                         optimize_b=True,
                                                         b_nonneg=bas_nonneg,
                                                         optimize_g=optimize_g,
                                                         penalty=penalty)
            else:
                c, sp, _, g, lam = constrained_oasisAR2(
                    (fluor - bl).astype(np.float32),
                    g,
                    sn,
                    optimize_b=False,
                    penalty=penalty)
            c1 = c[0]
            d = (g[0] + np.sqrt(g[0] * g[0] + 4 * g[1])) / 2
            c -= c1 * d**np.arange(len(fluor))
        else:
            raise Exception(
                'OASIS is currently only implemented for p=1 and p=2')
        g = np.ravel(g)

    return c, bl, c1, g, sn, sp, lam
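
The comments in the 'oasis' branch above spell out the generative model being inverted: an AR(1) calcium trace c_t = g*c_{t-1} + s_t observed as y_t = c_t + b + noise, with the sparse spikes s recovered under a noise constraint. A minimal NumPy sketch of that forward model, with made-up g, baseline and noise values; the call in the last comment assumes the enclosing function is CaImAn's constrained_foopsi.

import numpy as np

# forward model used by the deconvolution above:
#   c_t = g * c_{t-1} + s_t   (AR(1) calcium dynamics)
#   y_t = c_t + b + noise     (observed fluorescence)
rng = np.random.default_rng(0)
T, g, b, sn = 500, 0.95, 0.2, 0.1           # illustrative values, not estimates
s = (rng.random(T) < 0.02).astype(float)    # sparse spike train
c = np.zeros(T)
for t in range(1, T):
    c[t] = g * c[t - 1] + s[t]
y = c + b + sn * rng.standard_normal(T)

# hypothetical call to the function documented above (assumed to be CaImAn's constrained_foopsi):
# c_hat, bl, c1, g_hat, sn_hat, sp, lam = constrained_foopsi(y, p=1, method_deconvolution='oasis')
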
Esempio n. 36
0
def cvxpy_foopsi(fluor, g, sn, b=None, c1=None, bas_nonneg=True, solvers=None):
    """
    Solves the deconvolution problem using the cvxpy package and the
    ECOS/SCS library.
    Args:
        fluor: ndarray
            fluorescence trace
        g: list of doubles
            parameters of the autoregressive model, cardinality equivalent to p
        sn: double
            estimated noise level
        b: double
            baseline level. If None it is estimated.
        c1: double
            initial value of calcium. If None it is estimated.
        bas_nonneg: boolean
            should the baseline be constrained to be non-negative
        solvers: tuple of two strings
            primary and secondary solvers to be used. Can be chosen from
            ECOS, SCS, CVXOPT
    Returns:
        c: estimated calcium trace
        b: estimated baseline
        c1: estimated initial calcium value
        g: estimated parameters of the autoregressive model
        sn: estimated noise level
        sp: estimated spikes
    Raises:
        ImportError 'cvxpy solver requires installation of cvxpy. Not working
        in windows at the moment.'
        ValueError 'Problem solved suboptimally or unfeasible'
    """
    # todo: check the result and gen_vector vars
    try:
        import cvxpy as cvx

    except ImportError:  # XXX Is the below still true?
        raise ImportError('''cvxpy solver requires installation of cvxpy.
            Not working in windows at the moment.''')

    if solvers is None:
        solvers = ['ECOS', 'SCS']

    T = fluor.size

    # construct deconvolution matrix  (sp = G*c)
    G = scipy.sparse.dia_matrix((np.ones((1, T)), [0]), (T, T))

    for i, gi in enumerate(g):
        G = G + \
            scipy.sparse.dia_matrix((-gi * np.ones((1, T)), [-1 - i]), (T, T))

    gr = np.roots(np.concatenate([np.array([1]), -g.flatten()]))
    gd_vec = np.max(gr)**np.arange(T)  # decay vector for initial fluorescence
    gen_vec = G.dot(scipy.sparse.coo_matrix(np.ones((T, 1))))

    c = cvx.Variable(T)  # calcium at each time step
    constraints = []
    cnt = 0
    if b is None:
        flag_b = True
        cnt += 1
        b = cvx.Variable(1)  # baseline value
        if bas_nonneg:
            b_lb = 0
        else:
            b_lb = np.min(fluor)
        constraints.append(b >= b_lb)
    else:
        flag_b = False

    if c1 is None:
        flag_c1 = True
        cnt += 1
        c1 = cvx.Variable(1)  # initial calcium value
        constraints.append(c1 >= 0)
    else:
        flag_c1 = False

    thrNoise = sn * np.sqrt(fluor.size)

    try:
        # minimize number of spikes
        objective = cvx.Minimize(cvx.norm(G * c, 1))
        constraints.append(G * c >= 0)
        constraints.append(
            cvx.norm(-c + fluor - b - gd_vec * c1, 2) <= thrNoise)
        prob = cvx.Problem(objective, constraints)
        result = prob.solve(solver=solvers[0])

        if not (prob.status == 'optimal'
                or prob.status == 'optimal_inaccurate'):
            raise ValueError('Problem solved suboptimally or unfeasible')

        print(('PROBLEM STATUS:' + prob.status))
        sys.stdout.flush()
    except (ValueError, cvx.SolverError):
        # if solvers fail to solve the problem
        lam = old_div(sn, 500)
        constraints = constraints[:-1]
        objective = cvx.Minimize(
            cvx.norm(-c + fluor - b - gd_vec * c1, 2) +
            lam * cvx.norm(G * c, 1))
        prob = cvx.Problem(objective, constraints)

        try:  # in case scs was not installed properly
            try:
                print('TRYING AGAIN ECOS')
                sys.stdout.flush()
                result = prob.solve(solver=solvers[0])
            except:
                print((solvers[0] + ' DID NOT WORK TRYING ' + solvers[1]))
                result = prob.solve(solver=solvers[1])
        except:
            sys.stderr.write(
                '''***** SCS solver failed, try installing and compiling SCS
                for much faster performance. Otherwise set the solvers in
                temporal_params to ["ECOS","CVXOPT"]''')
            sys.stderr.flush()
            raise

        if not (prob.status == 'optimal'
                or prob.status == 'optimal_inaccurate'):
            print(('PROBLEM STATUS:' + prob.status))
            sp = fluor
            c = fluor
            b = 0
            c1 = 0
            return c, b, c1, g, sn, sp

    sp = np.squeeze(np.asarray(G * c.value))
    c = np.squeeze(np.asarray(c.value))
    if flag_b:
        b = np.squeeze(b.value)
    if flag_c1:
        c1 = np.squeeze(c1.value)

    return c, b, c1, g, sn, sp
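
For reference, the deconvolution matrix G assembled above from dia_matrix blocks is just a banded difference operator: for an AR(1) trace, G @ c returns the spike train s_t = c_t - g*c_{t-1}. A small standalone check with toy values, independent of the function above.

import numpy as np
import scipy.sparse

T, g = 6, 0.9
G = scipy.sparse.dia_matrix((np.ones((1, T)), [0]), (T, T)) + \
    scipy.sparse.dia_matrix((-g * np.ones((1, T)), [-1]), (T, T))

c = g ** np.arange(T)           # one calcium transient starting at t = 0
print(np.round(G.dot(c), 6))    # [1. 0. 0. 0. 0. 0.] -> a single spike at t = 0
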
 # Model 2 (a_0 + a_1 K + a_2 K^2 + a_3 T + a_5 K*T)
 X = np.column_stack((data['StrikePrice'], data['StrikePrice']**2, data['t_delta'], data['StrikePrice']*data['t_delta']))
 X = sm.add_constant(X)
 y = data['ImpliedVola']
 
 model = sm.OLS(y, X)
 results = model.fit()
     
 DVF_Model2_result['a_0'][i] = results.params[0]
 DVF_Model2_result['a_1'][i] = results.params[1]
 DVF_Model2_result['a_2'][i] = results.params[2]
 DVF_Model2_result['a_3'][i] = results.params[3]
 DVF_Model2_result['a_5'][i] = results.params[4]
     
 # Create RMSE
 DVF_Model2_result['RMSE_S'][i] = np.sqrt(np.mean(
     (data['TaeglicherAbrechnungspreis'] - vBSMOption(
         data['OptionType'], data['SchlusspreisBasiswert'], data['t_delta'],
         data['StrikePrice'], data['EONIA'],
         vDVF_Model2_sigma(data['t_delta'], data['StrikePrice'],
                           DVF_Model2_result['a_0'][i],
                           DVF_Model2_result['a_1'][i],
                           DVF_Model2_result['a_2'][i],
                           DVF_Model2_result['a_3'][i],
                           DVF_Model2_result['a_5'][i]),
         Greek='Price'))**2)) / data['SchlusspreisBasiswert'][0]
 
 if i > 0:
     DVF_Model2_result['RMSE_S_Previous_Vs_Current'][i] = np.sqrt(np.mean(
         (data['TaeglicherAbrechnungspreis'] - vBSMOption(
             data['OptionType'], data['SchlusspreisBasiswert'], data['t_delta'],
             data['StrikePrice'], data['EONIA'],
             vDVF_Model2_sigma(data['t_delta'], data['StrikePrice'],
                               DVF_Model2_result['a_0'][i-1],
                               DVF_Model2_result['a_1'][i-1],
                               DVF_Model2_result['a_2'][i-1],
                               DVF_Model2_result['a_3'][i-1],
                               DVF_Model2_result['a_5'][i-1]),
             Greek='Price'))**2)) / data['SchlusspreisBasiswert'][0]
 
 # Model 3 (a_0 + a_1 K + a_2 K^2 + a_3 T + a_4 T^2 + a_5 K*T)
 X = np.column_stack((data['StrikePrice'], data['StrikePrice']**2, data['t_delta'], data['t_delta']**2, data['StrikePrice']*data['t_delta']))
 X = sm.add_constant(X)
 y = data['ImpliedVola']
 
 model = sm.OLS(y, X)
 results = model.fit()
     
 DVF_Model3_result['a_0'][i] = results.params[0]
 DVF_Model3_result['a_1'][i] = results.params[1]
 DVF_Model3_result['a_2'][i] = results.params[2]
Esempio n. 38
0
File: plot2.py Progetto: shreq/sis
from common import load, load_args
import numpy
import os
import matplotlib.pyplot as plt

input_list = []
target_list = []
for i in range(101):
    input_list.append(i)
    target_list.append(numpy.sqrt(i))

directory = os.fsencode('./output/saves/')
if not os.path.exists('./output/charts'):
    os.makedirs('./output/charts')

output = []
temp = []
for file in os.listdir(directory):
    filename = os.fsdecode(file)
    filename_split = filename.split('_')
    if filename_split[0] == '0.01' and filename_split[5] == '50.ser':
        network = load('./output/saves/' + filename)
        output.clear()
        for number in input_list:
            temp.clear()
            temp.append(number)
            output.append(network.query(temp)[0][0])
        plt.figure(0)
        plt.suptitle('Aproksymacja funkcji')
        plt.title('lr=' + filename_split[1] + ', momentum=' +
                  filename_split[2] + ', neurony ukryte=' + filename_split[3] +
Esempio n. 39
0
            plt.xlabel(r'$\mu$' "(" r'$\phi$' ")")

        plt.ylabel('Height (km)')

    if n_part == 1:

        plt.subplot(1, 4, 2)

        sigma = np.zeros((results.shape[0], 1))

        M0 = np.asarray(moments[:, 0, 0], dtype=float).reshape((-1, 1))
        M1 = np.asarray(moments[:, 1, 0], dtype=float).reshape((-1, 1))
        M2 = np.asarray(moments[:, 2, 0], dtype=float).reshape((-1, 1))
        M3 = np.asarray(moments[:, 3, 0], dtype=float).reshape((-1, 1))

        sigma[:, 0] = np.sqrt(M2[:, 0] / M0[:, 0] - (M1[:, 0] / M0[:, 0])**2)

        plt.plot(sigma, z)

        plt.xlabel(r'$\sigma$' "(" r'$\phi$' ")")
        plt.ylabel('Height (km)')

        plt.subplot(1, 4, 3)

        skew = np.zeros((results.shape[0], 1))

        skew[:, 0] = M3[:, 0] - 3 * M1[:, 0] * M2[:, 0] + 2 * M1[:, 0]**3

        plt.plot(skew, z)

        plt.xlabel('Skew' "(" r'$\phi$' ")")
Esempio n. 40
0
def GP_forecast(
    df: pd.DataFrame,
    days_in_past: int = 2,
    days_in_future: int = 1,
    detectors: list = None,
    kern=None,
) -> pd.DataFrame:

    """Forecast using Gaussian Processes 
    Args: 
        df: Dataframe of JamCam data
        days_in_past: Integer number of previous days to use for forecast
        days_in_future: Days in future produce a for forecast for
        detectors: List of detectors to look at


    Returns:
        Dataframe forecast in same format as JamCam input dataframe

        """

    # extract numpy array of detector ID's
    if detectors is None:
        detectors = df["detector_id"].drop_duplicates().to_numpy()
    framelist = []

    i = 0
    for detector in detectors:
        i += 1

        dataset = df[df["detector_id"] == detector].tail(n=26 * days_in_past)

        Y = dataset["n_vehicles_in_interval"].to_numpy().reshape(-1, 1)
        Y = Y.astype(float)
        X = (
            (dataset["measurement_end_utc"] - dataset["measurement_end_utc"].min())
            .astype("timedelta64[h]")
            .to_numpy()
            .reshape(-1, 1)
        )

        scaler = MinMaxScaler(feature_range=(-1, 1))
        y = scaler.fit_transform(Y)

        if kern is None:

            kern_pD = gpflow.kernels.Periodic(gpflow.kernels.SquaredExponential())
            kern_pW = gpflow.kernels.Periodic(gpflow.kernels.SquaredExponential())
            kern_SE = gpflow.kernels.SquaredExponential()
            kern_W = gpflow.kernels.White()
            kern_M = gpflow.kernels.Matern32()

            kern_pD.period.assign(24.0)
            # kern_pD.base_kernel.variance.assign(10)
            kern_pW.period.assign(168.0)
            # kern_pW.base_kernel.variance.assign(10)

            k = kern_pD + kern_pW + kern_M
        else:
            k = kern

        m = gpflow.models.GPR(data=(X, y), kernel=k, mean_function=None)
        opt = gpflow.optimizers.Scipy()

        try:
            opt.minimize(
                m.training_loss,
                m.trainable_variables,
                options=dict(maxiter=500),
            )
        except:
            print(detector, " Covariance matrix not invertible, skipping to next detector")
            del m
            continue

        print("please wait: ", i, "/", len(detectors), end="\r")

        time_shift=24 - dataset["measurement_end_utc"].max().hour

        ## generate test points for prediction
        xx = np.linspace(
            X.max() + time_shift, X.max() + time_shift + (days_in_future * 24) + 1, (days_in_future * 24)
        ).reshape(
            (days_in_future * 24), 1
        )  # test points must be of shape (N, D)

        ## predict mean and variance of latent GP at test points
        mean, var = m.predict_f(xx)

        # reverse min_max scaler
        testPredict = scaler.inverse_transform(mean)
        testVar = scaler.inverse_transform(var)

        # find the time period for our testPredictions
        start_date = dataset["measurement_end_utc"].max() + np.timedelta64(time_shift, "h")
        end_date = start_date + np.timedelta64(24 * (days_in_future) -1, "h")

        T = pd.date_range(start_date, end_date, freq="H")

        # organise data into dataframe similar to the SCOOT outputs
        df2 = pd.DataFrame(
            {
                "detector_id": detector,
                "lon": df[df["detector_id"] == detector]["lon"].iloc[0],
                "lat": df[df["detector_id"] == detector]["lat"].iloc[0],
                "measurement_start_utc": T,
                "measurement_end_utc": T + np.timedelta64(1, "h"),
                "n_vehicles_in_interval": testPredict.flatten(),
                "prediction_variance": testVar.flatten(),
                "baseline_upper": testPredict.flatten() + 3 * np.sqrt(testVar.flatten()),
                "baseline_lower": testPredict.flatten() - 3 * np.sqrt(testVar.flatten()),
            }
        )

        del m

        framelist.append(df2)

    return pd.concat(framelist)
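
A minimal usage sketch for GP_forecast with a made-up single-detector frame; the column names are the ones read inside the function body, and gpflow, pandas and scikit-learn are assumed to be available exactly as the function expects, so whether it runs end-to-end depends on those library versions.

import numpy as np
import pandas as pd

times = pd.date_range("2020-01-01", periods=48, freq="H")
toy = pd.DataFrame({
    "detector_id": "cam_001",                                   # hypothetical detector
    "lon": -0.12,
    "lat": 51.5,
    "measurement_end_utc": times,
    "n_vehicles_in_interval": 20 + 10 * np.sin(2 * np.pi * np.arange(48) / 24),
})

forecast = GP_forecast(toy, days_in_past=2, days_in_future=1)
print(forecast[["measurement_end_utc", "n_vehicles_in_interval"]].head())
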
Esempio n. 41
0
def G_synthesis(
    dlatents_in,  # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
    dlatent_size=512,  # Disentangled latent (W) dimensionality.
    num_channels=3,  # Number of output color channels.
    resolution=1024,  # Output resolution.
    fmap_base=8192,  # Overall multiplier for the number of feature maps.
    fmap_decay=1.0,  # log2 feature map reduction when doubling the resolution.
    fmap_max=512,  # Maximum number of feature maps in any layer.
    use_styles=True,  # Enable style inputs?
    const_input_layer=True,  # First layer is a learned constant?
    use_noise=True,  # Enable noise inputs?
    randomize_noise=True,  # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
    nonlinearity='lrelu',  # Activation function: 'relu', 'lrelu'
    use_wscale=True,  # Enable equalized learning rate?
    use_pixel_norm=False,  # Enable pixelwise feature vector normalization?
    use_instance_norm=True,  # Enable instance normalization?
    dtype='float32',  # Data type to use for activations and outputs.
    fused_scale='auto',  # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically.
    blur_filter=[
        1, 2, 1
    ],  # Low-pass filter to apply when resampling activations. None = no filtering.
    # structure           = 'auto',       # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
    structure='fixed',  # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
    is_template_graph=False,  # True = template graph constructed by the Network class, False = actual evaluation.
    force_clean_graph=False,  # True = construct a clean graph that looks nice in TensorBoard, False = default behavior.
    **_kwargs):  # Ignore unrecognized keyword args.

    resolution_log2 = int(np.log2(resolution))
    assert resolution == 2**resolution_log2 and resolution >= 4

    def nf(stage):
        return min(int(fmap_base / (2.0**(stage * fmap_decay))), fmap_max)

    def blur(x):
        return blur2d(x, blur_filter) if blur_filter else x

    if is_template_graph: force_clean_graph = True
    if force_clean_graph: randomize_noise = False
    if structure == 'auto':
        structure = 'linear' if force_clean_graph else 'recursive'
    act, gain = {
        'relu': (tf.nn.relu, np.sqrt(2)),
        'lrelu': (leaky_relu, np.sqrt(2))
    }[nonlinearity]
    num_layers = resolution_log2 * 2 - 2
    num_styles = num_layers if use_styles else 1
    images_out = None

    # Primary inputs.
    dlatents_in.set_shape([None, num_styles, dlatent_size])
    dlatents_in = tf.cast(dlatents_in, dtype)
    lod_in = tf.cast(
        tf.get_variable('lod', initializer=np.float32(0), trainable=False),
        dtype)

    # Noise inputs.
    noise_inputs = []
    if use_noise:
        for layer_idx in range(num_layers):
            res = layer_idx // 2 + 2
            shape = [1, use_noise, 2**res, 2**res]
            noise_inputs.append(
                tf.get_variable('noise%d' % layer_idx,
                                shape=shape,
                                initializer=tf.initializers.random_normal(),
                                trainable=False))

    # Things to do at the end of each layer.
    def layer_epilogue(x, layer_idx):
        if use_noise:
            x = apply_noise(x,
                            noise_inputs[layer_idx],
                            randomize_noise=randomize_noise)
        x = apply_bias(x)
        x = act(x)
        if use_pixel_norm:
            x = pixel_norm(x)
        if use_instance_norm:
            x = instance_norm(x)
        if use_styles:
            x = style_mod(x, dlatents_in[:, layer_idx], use_wscale=use_wscale)
        return x

    # Early layers.
    with tf.variable_scope('4x4'):
        if const_input_layer:
            with tf.variable_scope('Const'):
                x = tf.get_variable('const',
                                    shape=[1, nf(1), 4, 4],
                                    initializer=tf.initializers.ones())
                x = layer_epilogue(
                    tf.tile(tf.cast(x, dtype),
                            [tf.shape(dlatents_in)[0], 1, 1, 1]), 0)
        else:
            with tf.variable_scope('Dense'):
                x = dense(
                    dlatents_in[:, 0],
                    fmaps=nf(1) * 16,
                    gain=gain / 4,
                    use_wscale=use_wscale
                )  # tweak gain to match the official implementation of Progressive GAN
                x = layer_epilogue(tf.reshape(x, [-1, nf(1), 4, 4]), 0)
        with tf.variable_scope('Conv'):
            x = layer_epilogue(
                conv2d(x,
                       fmaps=nf(1),
                       kernel=3,
                       gain=gain,
                       use_wscale=use_wscale), 1)

    # Building blocks for remaining layers.
    def block(res, x):  # res = 3..resolution_log2
        with tf.variable_scope('%dx%d' % (2**res, 2**res)):
            with tf.variable_scope('Conv0_up'):
                x = layer_epilogue(
                    blur(
                        upscale2d_conv2d(x,
                                         fmaps=nf(res - 1),
                                         kernel=3,
                                         gain=gain,
                                         use_wscale=use_wscale,
                                         fused_scale=fused_scale)),
                    res * 2 - 4)
            with tf.variable_scope('Conv1'):
                x = layer_epilogue(
                    conv2d(x,
                           fmaps=nf(res - 1),
                           kernel=3,
                           gain=gain,
                           use_wscale=use_wscale), res * 2 - 3)
            return x

    def torgb(res, x):  # res = 2..resolution_log2
        lod = resolution_log2 - res
        with tf.variable_scope('ToRGB_lod%d' % lod):
            return apply_bias(
                conv2d(x,
                       fmaps=num_channels,
                       kernel=1,
                       gain=1,
                       use_wscale=use_wscale))

    # Fixed structure: simple and efficient, but does not support progressive growing.
    if structure == 'fixed':
        for res in range(3, resolution_log2 + 1):
            x = block(res, x)
        images_out = torgb(resolution_log2, x)

    # Linear structure: simple but inefficient.
    if structure == 'linear':
        images_out = torgb(2, x)
        for res in range(3, resolution_log2 + 1):
            lod = resolution_log2 - res
            x = block(res, x)
            img = torgb(res, x)
            images_out = upscale2d(images_out)
            with tf.variable_scope('Grow_lod%d' % lod):
                images_out = tflib.lerp_clip(img, images_out, lod_in - lod)

    # Recursive structure: complex but efficient.
    if structure == 'recursive':

        def cset(cur_lambda, new_cond, new_lambda):
            return lambda: tf.cond(new_cond, new_lambda, cur_lambda)

        def grow(x, res, lod):
            y = block(res, x)
            img = lambda: upscale2d(torgb(res, y), 2**lod)
            img = cset(
                img, (lod_in > lod), lambda: upscale2d(
                    tflib.lerp(torgb(res, y), upscale2d(torgb(res - 1, x)),
                               lod_in - lod), 2**lod))
            if lod > 0:
                img = cset(img, (lod_in < lod),
                           lambda: grow(y, res + 1, lod - 1))
            return img()

        images_out = grow(x, 3, resolution_log2 - 3)

    assert images_out.dtype == tf.as_dtype(dtype)
    return tf.identity(images_out, name='images_out')
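
The nf helper and the num_layers arithmetic above fully determine the per-resolution layer widths; a quick standalone computation with the default hyper-parameters of this function, for reference.

import numpy as np

fmap_base, fmap_decay, fmap_max, resolution = 8192, 1.0, 512, 1024

def nf(stage):
    return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)

resolution_log2 = int(np.log2(resolution))   # 10
num_layers = resolution_log2 * 2 - 2         # 18 synthesis layers at 1024x1024
for res in range(2, resolution_log2 + 1):
    print(f"{2**res:4d}x{2**res:<4d} fmaps={nf(res - 1)}")
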
Esempio n. 42
0
    def generate_cone(direction_vec, lim_angle, xxx_todo_changeme2, num_pts_dir):
        """
        Generates cone for direction vector on real S_6
        (x^2 + y^2 + z^2 + u^2 + v^2 + w^2 = 1)
        and generates cartesian raster for x and y
        """

        (dx, dy) = xxx_todo_changeme2
        num_pts_lspace = num_pts_dir
        if num_pts_dir % 2 == 1:
            num_pts_lspace -= 1

        lspace = np.hstack(
            (np.linspace(-1, 0, num_pts_lspace//2, endpoint=False),
             np.linspace(1, 0, num_pts_lspace//2, endpoint=False)
             )
             )

        lspace = np.hstack((0, lspace))

        # generate vectors in z direction

        x = dx*lspace
        y = dy*lspace

        kxr = lim_angle*lspace
        kxi = lim_angle*lspace

        kyr = lim_angle*lspace
        kyi = lim_angle*lspace

        kzi = lim_angle*lspace

        (X, Y, KXR, KXI, KYR, KYI, KZI) = np.meshgrid(x, y, kxr, kxi, kyr, kyi, kzi)

        KZR = np.sqrt(1. - KXR**2 - KXI**2 - KYR**2 - KYI**2 - KZI**2)


        complex_ek = np.vstack((KXR.flatten() + 1j*KXI.flatten(),
                               KYR.flatten() + 1j*KYI.flatten(),
                                KZR.flatten() + 1j*KZI.flatten()))

        start_pts = np.vstack((X.flatten(), Y.flatten(), np.zeros_like(X.flatten())))


        # TODO: complex rotate complex_ek into right direction
        # this means: generalize rodrigues to unitary matrices
        # and get 5 angles from dir_vector

        #print(np.linalg.norm(complex_ek, axis=0))
        #print(complex_ek)

        #kz = np.cos(lim_angle)
        #kinpl = np.sin(lim_angle)

        # rotate back into direction_vec direction

        #phi = np.arctan2(direction_vec[1], direction_vec[0])
        #theta = np.arcsin(np.sqrt(direction_vec[1]**2 + direction_vec[0]**2))

        return (start_pts, complex_ek)
Esempio n. 43
0
def normfun(x,mu, sigma):
    pdf = np.exp(-((x - mu)**2) / (2* sigma**2)) / (sigma * np.sqrt(2*np.pi))
    return pdf
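
A quick sanity check of the density above against scipy.stats.norm, with arbitrary sample points.

import numpy as np
from scipy import stats

x = np.linspace(-3, 3, 7)
print(np.allclose(normfun(x, 0.0, 1.0), stats.norm.pdf(x, loc=0.0, scale=1.0)))   # True
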
Esempio n. 44
0
def polynomial_regression():
    # Check if user is loggedin
    if ('loggedin' in session):

        # Init variables
        (data_to_html, feature, graph_title, 
         msg_suc, msg_err, msg_warn) = (None,) * 6

        # Init lists (separate objects, so appending to one does not affect the others)
        columns, res_list, score_list = [], [], []

        # Get session details
        username = session['username']
        lang     = session['lang']

        # Define tag category + model
        cat_tag = 'REG'
        mod_tag = 'PR'

        # Connect to database
        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)

        # Get categories of navbar
        navbar_cat      = datas_cat_nav(cursor, lang)
        navbar_cat_name = navbar_cat[0]
        navbar_cat_tag  = navbar_cat[1]
        navbar_cat_icon = navbar_cat[2]
        navbar_cat_link = navbar_cat[3]

        # Get models of navbar
        navbar_models = datas_mod_nav(cursor, lang, navbar_cat_tag)

        # Get settings of navbar
        navbar_settings = datas_set_nav(cursor, lang)
        navbar_set_name = navbar_settings[0]
        navbar_set_icon = navbar_settings[1]
        navbar_set_link = navbar_settings[2]

        # Get category details for breadcrumb
        cat_details   = cards_categories(cursor, lang, cat_tag)
        cat_name      = cat_details[0]
        cat_link      = cat_details[3]

        # Get model details for breadcrumb
        model_details = datas_model(cursor, lang, mod_tag)
        model_name    = model_details[0]
        model_link    = model_details[1]

        # Break connection
        cursor.close()

        if (request.method == 'POST'):

            # Upload file
            if (request.form['submit_btn'] == 'Upload Now'
                or request.form['submit_btn'] == 'Envoyer maintenant'):

                # All fields were completed
                if (bool(request.files['file']) == 1 
                    and bool(request.form['sep_select']) == 1
                ):
                    get_upload_datas = upload_file(lang, False)
                    msg_err          = get_upload_datas[0]
                    msg_suc          = get_upload_datas[1]
                    msg_warn         = get_upload_datas[2]

                    global new_tmp_path
                    new_tmp_path = get_upload_datas[3]

                    global colname_list
                    colname_list = get_upload_datas[4]
                    columns      = colname_list

                    data_to_html = get_upload_datas[5]

                    global df
                    df = get_upload_datas[6]

                else:
                    if (lang == 'en'):
                        # Submit without upload file
                        msg_err = (
                            'Please upload your data and select a separator.'
                        )
                    
                    else:
                        msg_err = (
                            'Veuillez télécharger vos données et '
                            'choisir un séparateur.'
                        )

            # Model compute
            if (request.form['submit_btn'] == 'Launch the model'
                or request.form['submit_btn'] == 'Lancer le modèle'):
                feature = request.form['feature']

                # Get colname list
                columns = colname_list

                # Show uploading files
                data_to_html = df_html_show(df)

                # Delete feature from columns
                columns.remove(feature)

                for i in columns:
                    x_feat = df[feature].values.reshape(-1, 1)
                    y_targ = df[i].values.reshape(-1, 1)

                    # Train Test
                    X_train, X_test, y_train, y_test = train_test_split(
                        x_feat,
                        y_targ,
                        test_size=0.33,
                        random_state=42
                    )

                    score_rmse = list()
                    min_rmse, min_deg = (math.inf,) * 2

                    for deg in range(1, 11):

                        # Train features
                        poly_features = PolynomialFeatures(degree=deg, include_bias=False)
                        x_poly_train  = poly_features.fit_transform(X_train)

                        # Linear regression
                        poly_reg = LinearRegression().fit(x_poly_train, y_train)

                        # Compare with test data
                        x_poly_test  = poly_features.fit_transform(X_test)
                        poly_predict = poly_reg.predict(x_poly_test)

                        poly_rmse = np.sqrt(mean_squared_error(y_test, poly_predict))

                        score_rmse.append(poly_rmse)

                        # Cross-validation of degree
                        if (min_rmse > poly_rmse):
                            min_rmse = poly_rmse
                            min_deg  = deg

                    # Create Polynomial model
                    polynomial = PolynomialFeatures(degree=min_deg)

                    # Fit polynomial model
                    X_train = polynomial.fit_transform(X_train)
                    X_test  = polynomial.fit_transform(X_test)

                    # Create linear model and fit
                    regressor = linear_model.LinearRegression().fit(X_train, y_train)

                    # Predicting test set results
                    y_test_pred = regressor.predict(X_test)

                    # Prediction
                    y_pred = regressor.predict(X_train)
                    y_pred = y_pred.tolist()

                    # Accuracy
                    r2_test  = r2_score(y_test , y_test_pred) * 100
                    r2_train = r2_score(y_train, y_pred) * 100

                    res = [i, round(statistics.mean([r2_test, r2_train]), 2)]
                    res_list.append(res)

                # Save scoring
                score_list = [score[1] for score in res_list]

                if (lang == 'en'):
                    # Add graph title
                    graph_title = (
                        'Comparison of the correlation between ' + feature + 
                        ' and the columns :'
                    )

                    # Success
                    msg_suc = (
                        'The model was successfully calculated. '
                        'Your data was automatically deleted.'
                    )

                else:
                    graph_title = (
                        'Comparaison de la corrélation entre ' + feature + 
                        ' et les colonnes :'
                    )

                    msg_suc = (
                        'Le modèle a été calculé avec succès.  '
                        'Vos données ont été automatiquement supprimées.'
                    )

                # Delete file
                file_remove(new_tmp_path)

        return render_template(
            'regression/pol_reg.html',
            title        = model_name,
            username     = username,
            lang         = lang,
            nav_cat_name = navbar_cat_name,
            nav_cat_tag  = navbar_cat_tag,
            nav_cat_icon = navbar_cat_icon,
            nav_cat_lnk  = navbar_cat_link,
            nav_models   = navbar_models,
            nav_set_name = navbar_set_name,
            nav_set_icon = navbar_set_icon,
            nav_set_lnk  = navbar_set_link,
            cat_name     = cat_name,
            cat_tag      = cat_tag,
            cat_link     = cat_link,
            model_name   = model_name,
            model_link   = model_link,
            msg_err      = msg_err,
            msg_suc      = msg_suc,
            msg_warn     = msg_warn,
            data_show    = data_to_html,
            df_columns   = columns,
            feature      = feature,
            score_list   = score_list,
            graph_title  = graph_title
        )

    else:
        return redirect('404')
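
The inner loop above picks the polynomial degree with the lowest test-set RMSE before refitting; a condensed standalone sketch of that selection step on synthetic data (same scikit-learn pieces, toy cubic ground truth).

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures

rng = np.random.default_rng(42)
x = rng.uniform(-3, 3, size=(200, 1))
y = 0.5 * x**3 - x + rng.normal(scale=1.0, size=(200, 1))   # cubic ground truth + noise

X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)

best_deg, best_rmse = None, np.inf
for deg in range(1, 11):
    poly = PolynomialFeatures(degree=deg, include_bias=False)
    reg = LinearRegression().fit(poly.fit_transform(X_train), y_train)
    rmse = np.sqrt(mean_squared_error(y_test, reg.predict(poly.transform(X_test))))
    if rmse < best_rmse:
        best_rmse, best_deg = rmse, deg

print(best_deg, round(best_rmse, 3))   # typically selects degree 3 here
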
Esempio n. 45
0
def sgd_recur(X,
              y,
              grad,
              batch_size,
              n_epoch,
              L,
              init_step=2.0,
              R=1.0,
              reg_coeff=0.001,
              verbose=False,
              loss_func=None,
              test_func=None):
    N, dim = X.shape

    batch_idx = get_batch_index(N, batch_size)
    m = len(batch_idx) - 1
    mu = reg_coeff

    niter = n_epoch * m + 1
    it = 0

    # initialization
    w = np.zeros((niter, dim))
    sens = np.zeros((niter, m))
    step_size = init_step

    for t in range(n_epoch):
        if m > 1:
            step_size = init_step / np.sqrt(t + 1)

        # recurrence coefficient
        contr_coeff = max(np.abs(1. - step_size * mu),
                          np.abs(1. - step_size * L))
        b = (2.0 * R * step_size) / batch_size

        for j in range(m):
            mini_X = X[batch_idx[j]:batch_idx[j + 1], :]
            mini_y = y[batch_idx[j]:batch_idx[j + 1]]

            # gradient descent update
            gt = grad(w[it], mini_X, mini_y) / batch_size
            gt += reg_coeff * w[it]

            w[it + 1, :] = w[it] - step_size * gt

            for k in range(m):
                sens[it + 1, k] = contr_coeff * sens[it, k]

                if k == j:
                    sens[it + 1, k] += b

            it += 1

        if verbose:
            objval = loss_func(w[it], X, y)
            acc = test_func(w[it], X, y) * 100
            print("[{0}] loss={1:.5f} acc={2:7.3f}".format(t + 1, objval, acc))

    # avg_sens = sens[-1, :]
    # last_it = w[-1, :]

    # return last_it, avg_sens
    return w[1:, ], sens[1:, :]
Esempio n. 46
0
def acf_to_acorr(acf):
    diag = np.diag(acf[0])
    # numpy broadcasting sufficient
    return acf / np.sqrt(np.outer(diag, diag))
    #get VCSS data
    #freq_arr, flux_arr, eflux_arr, labels = get_VCSS_data(sname, wise_row,
    #                         sp_row, labels, freq_arr, flux_arr, eflux_arr)
    freq_arr, flux_arr, eflux_arr, labels = sedutils.VCSS_data_cleanup(
        sname, wise_row, sp_row, labels, freq_arr, flux_arr, eflux_arr)

    flux_arr, eflux_arr, labels = sedutils.mod_data_flux_density_scale(
        flux_arr, eflux_arr, labels)

    if 'VLASS' in labels and sp_row['VLASS_Limit'] != 'U':
        ind = labels.index('VLASS')
        vflux = flux_arr[ind]
        eflux = eflux_arr[ind]
        if eflux < 0.2 * vflux:
            eflux_arr[ind] = np.sqrt(eflux**2 + (vflux / 5)**2)

    if useInBand:
        if 'BX' in labels and np.any(jvla_BX['snr'] > 50):
            freq_arr, flux_arr, eflux_arr = rmfit.prep_fit_arr(
                freq_arr, flux_arr, eflux_arr, alpha_BX)
        if 'AX' in labels and np.any(jvla_AX['snr'] > 50):
            freq_arr, flux_arr, eflux_arr = rmfit.prep_fit_arr(
                freq_arr, flux_arr, eflux_arr, alpha_AX)

    #create dict for online table:
    mydict = {}
    for snu, esnu, cat in zip(flux_arr, eflux_arr, labels):
        mydict['F' + cat] = np.round(snu, 2)
        mydict['E' + cat] = np.round(esnu, 2)
Esempio n. 48
0
def momentum(X,
             y,
             grad,
             batch_size,
             beta,
             L,
             sigma,
             alpha,
             n_epoch,
             reg_coeff=0.001,
             verbose=False):
    N, dim = X.shape

    n_batch = int(N / batch_size)
    rem = N % batch_size
    extra = rem // n_batch  # integer division so batch_size stays an int
    batch_size += extra
    rem = N % batch_size

    m = n_batch
    mu = reg_coeff
    n_alpha = len(alpha)
    sig_sq = 2.0 * (sigma**2)

    batches = np.arange(N)
    np.random.shuffle(batches)

    # initialization
    w = np.zeros(dim)
    v = np.zeros_like(w)
    sens = np.zeros((m, n_alpha))
    sens_p = np.zeros_like(sens)

    for t in range(n_epoch):
        step_size = 2. / (t + 1)

        for j in range(m):
            batch_start = batch_start_idx(j, batch_size, rem)
            batch_finish = batch_start_idx(j + 1, batch_size, rem)
            rand_idx = batches[batch_start:batch_finish]
            mini_X = X[rand_idx, :]
            mini_y = y[rand_idx]

            # gradient descent update
            gt = grad(w, mini_X, mini_y)
            gt /= batch_size
            gt += mu * w

            v[:] = beta * v + step_size * gt
            w -= v

            if verbose:
                loss = logistic_loss(w, X, y) / N + 0.5 * reg_coeff * np.dot(
                    w, w)
                print("[{0}] loss={1}".format(t + 1, loss))

        # recurrence coefficient
        contr_coeff = max(np.absolute(1. - np.sqrt(step_size * mu)),
                          np.absolute(1. - np.sqrt(step_size * L)))
        expan = 2.0 * step_size / batch_size

        if t == 0:
            for j in range(1, m):
                sens[0, :] = (expan**2) * (contr_coeff**(2 * (m - j - 1)))
        else:
            for j in range(m):
                sens[j, :] = (contr_coeff**(2 * m) *
                              (sens[j, :] + sens_p[j, :]) + (2.0 * expan) *
                              (contr_coeff**(2 * (m - 1) - j)) *
                              np.sqrt(sens[j, :] + sens_p[j, :]) + (expan**2) *
                              (contr_coeff**(2 * (m - j - 1))))

    expo = alpha * (alpha - 1) * sens / sig_sq
    log_eta = logsumexp(expo, axis=0) - np.log(m)

    return w, log_eta
Esempio n. 49
0
 def vega(self):
     self.BSM()
     self.vega = np.exp(-self.r*self.T)*self.S0*np.sqrt(self.T)*norm.pdf(self.d1)
     return self.vega[0]
Esempio n. 50
0
def QuatSqrt(Q) :
    return (one+Q)/np.sqrt(2+2*Q[0])
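
QuatSqrt implements the half-angle square root of a unit quaternion, (1 + Q)/|1 + Q| with |1 + Q| = sqrt(2 + 2*w) when |Q| = 1; the global `one` is assumed to be the identity quaternion np.array([1, 0, 0, 0]). A small check that the Hamilton square of the result recovers Q, reusing QuatSqrt from above.

import numpy as np

one = np.array([1.0, 0.0, 0.0, 0.0])   # assumed identity quaternion, (w, x, y, z) order

def QuatMul(a, b):
    # Hamilton product of two quaternions in (w, x, y, z) order
    w1, x1, y1, z1 = a
    w2, x2, y2, z2 = b
    return np.array([w1*w2 - x1*x2 - y1*y2 - z1*z2,
                     w1*x2 + x1*w2 + y1*z2 - z1*y2,
                     w1*y2 - x1*z2 + y1*w2 + z1*x2,
                     w1*z2 + x1*y2 - y1*x2 + z1*w2])

theta = 1.2                              # rotation of 1.2 rad about the z axis
Q = np.array([np.cos(theta / 2), 0.0, 0.0, np.sin(theta / 2)])
print(np.allclose(QuatMul(QuatSqrt(Q), QuatSqrt(Q)), Q))   # True
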
Esempio n. 51
0
def D_basic(
    images_in,  # First input: Images [minibatch, channel, height, width].
    labels_in,  # Second input: Labels [minibatch, label_size].
    num_channels=1,  # Number of input color channels. Overridden based on dataset.
    resolution=32,  # Input resolution. Overridden based on dataset.
    label_size=0,  # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
    fmap_base=8192,  # Overall multiplier for the number of feature maps.
    fmap_decay=1.0,  # log2 feature map reduction when doubling the resolution.
    fmap_max=512,  # Maximum number of feature maps in any layer.
    nonlinearity='lrelu',  # Activation function: 'relu', 'lrelu',
    use_wscale=True,  # Enable equalized learning rate?
    mbstd_group_size=4,  # Group size for the minibatch standard deviation layer, 0 = disable.
    mbstd_num_features=1,  # Number of features for the minibatch standard deviation layer.
    dtype='float32',  # Data type to use for activations and outputs.
    fused_scale='auto',  # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically.
    blur_filter=[
        1, 2, 1
    ],  # Low-pass filter to apply when resampling activations. None = no filtering.
    structure='auto',  # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
    is_template_graph=False,  # True = template graph constructed by the Network class, False = actual evaluation.
    **_kwargs):  # Ignore unrecognized keyword args.

    resolution_log2 = int(np.log2(resolution))
    assert resolution == 2**resolution_log2 and resolution >= 4

    def nf(stage):
        return min(int(fmap_base / (2.0**(stage * fmap_decay))), fmap_max)

    def blur(x):
        return blur2d(x, blur_filter) if blur_filter else x

    if structure == 'auto':
        structure = 'linear' if is_template_graph else 'recursive'
    act, gain = {
        'relu': (tf.nn.relu, np.sqrt(2)),
        'lrelu': (leaky_relu, np.sqrt(2))
    }[nonlinearity]

    images_in.set_shape([None, num_channels, resolution, resolution])
    labels_in.set_shape([None, label_size])
    images_in = tf.cast(images_in, dtype)
    labels_in = tf.cast(labels_in, dtype)
    lod_in = tf.cast(
        tf.get_variable('lod', initializer=np.float32(0.0), trainable=False),
        dtype)
    scores_out = None

    # Building blocks.
    def fromrgb(x, res):  # res = 2..resolution_log2
        with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)):
            return act(
                apply_bias(
                    conv2d(x,
                           fmaps=nf(res - 1),
                           kernel=1,
                           gain=gain,
                           use_wscale=use_wscale)))

    def block(x, res):  # res = 2..resolution_log2
        with tf.variable_scope('%dx%d' % (2**res, 2**res)):
            if res >= 3:  # 8x8 and up
                with tf.variable_scope('Conv0'):
                    x = act(
                        apply_bias(
                            conv2d(x,
                                   fmaps=nf(res - 1),
                                   kernel=3,
                                   gain=gain,
                                   use_wscale=use_wscale)))
                with tf.variable_scope('Conv1_down'):
                    x = act(
                        apply_bias(
                            conv2d_downscale2d(blur(x),
                                               fmaps=nf(res - 2),
                                               kernel=3,
                                               gain=gain,
                                               use_wscale=use_wscale,
                                               fused_scale=fused_scale)))
            else:  # 4x4
                if mbstd_group_size > 1:
                    x = minibatch_stddev_layer(x, mbstd_group_size,
                                               mbstd_num_features)
                with tf.variable_scope('Conv'):
                    x = act(
                        apply_bias(
                            conv2d(x,
                                   fmaps=nf(res - 1),
                                   kernel=3,
                                   gain=gain,
                                   use_wscale=use_wscale)))
                with tf.variable_scope('Dense0'):
                    x = act(
                        apply_bias(
                            dense(x,
                                  fmaps=nf(res - 2),
                                  gain=gain,
                                  use_wscale=use_wscale)))
                with tf.variable_scope('Dense1'):
                    x = apply_bias(
                        dense(x,
                              fmaps=max(label_size, 1),
                              gain=1,
                              use_wscale=use_wscale))
            return x

    # Fixed structure: simple and efficient, but does not support progressive growing.
    if structure == 'fixed':
        x = fromrgb(images_in, resolution_log2)
        for res in range(resolution_log2, 2, -1):
            x = block(x, res)
        scores_out = block(x, 2)

    # Linear structure: simple but inefficient.
    if structure == 'linear':
        img = images_in
        x = fromrgb(img, resolution_log2)
        for res in range(resolution_log2, 2, -1):
            lod = resolution_log2 - res
            x = block(x, res)
            img = downscale2d(img)
            y = fromrgb(img, res - 1)
            with tf.variable_scope('Grow_lod%d' % lod):
                x = tflib.lerp_clip(x, y, lod_in - lod)
        scores_out = block(x, 2)

    # Recursive structure: complex but efficient.
    if structure == 'recursive':

        def cset(cur_lambda, new_cond, new_lambda):
            return lambda: tf.cond(new_cond, new_lambda, cur_lambda)

        def grow(res, lod):
            x = lambda: fromrgb(downscale2d(images_in, 2**lod), res)
            if lod > 0:
                x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1))
            x = block(x(), res)
            y = lambda: x
            if res > 2:
                y = cset(
                    y, (lod_in > lod), lambda: tflib.lerp(
                        x,
                        fromrgb(downscale2d(images_in, 2**(lod + 1)), res - 1),
                        lod_in - lod))
            return y()

        scores_out = grow(2, resolution_log2 - 2)

    # Label conditioning from "Which Training Methods for GANs do actually Converge?"
    if label_size:
        with tf.variable_scope('LabelSwitch'):
            scores_out = tf.reduce_sum(scores_out * labels_in,
                                       axis=1,
                                       keepdims=True)

    assert scores_out.dtype == tf.as_dtype(dtype)
    scores_out = tf.identity(scores_out, name='scores_out')
    return scores_out
Esempio n. 52
0
def parallel_modelbuilding(sess,
                           tf_cluster,
                           masks,
                           datasets,
                           gtabs,
                           n_parts=16):

    ten_model = dti.TensorModel(gtabs[0])
    design_matrix = ten_model.design_matrix

    nonzero_indices_list = [np.nonzero(masks[i]) for i in range(len(masks))]

    max_length = np.max([
        len(nonzero_indices_list[i][0])
        for i in range(len(nonzero_indices_list))
    ])

    stride = int(np.ceil(max_length / float(n_parts)))

    dim_sh = [stride, datasets[0].shape[-1]]

    waves = tf_cluster.partition_work_waves(int(np.ceil(max_length / stride)),
                                            use_host=False)

    dim_inputs = []
    cnt_inputs = []
    work = []
    dm_input = tf.placeholder(tf.float64, shape=design_matrix.shape, name="dm")
    for i_worker in range(len(waves[0])):
        with tf.device(waves[0][i_worker]):
            dim_inputs.append(
                tf.placeholder(tf.float64,
                               shape=dim_sh,
                               name="dim_%d" % i_worker))

            cnt_inputs.append(
                tf.placeholder(tf.int32, shape=1,
                               name="counter_%d" % i_worker))

            work.append(
                mb.model_building(dim_inputs[-1], dm_input, cnt_inputs[-1]))

    fas = []
    for i_data in range(len(datasets)):
        ten_model = dti.TensorModel(gtabs[i_data])
        design_matrix = ten_model.design_matrix
        nonzero_indices = nonzero_indices_list[i_data]

        dti_params = np.zeros(datasets[i_data].shape[:-1] + (12, ))
        cnt = 1
        thread_mask = np.zeros(masks[i_data].shape, dtype=int)
        data = datasets[i_data]
        waves = tf_cluster.partition_work_waves(int(
            np.ceil(len(nonzero_indices[0]) / stride)),
                                                use_host=False)

        for i_wave in range(len(waves)):
            counter = []
            data_in_mask_list = []
            for i_worker in range(len(waves[0])):
                step = (cnt - 1) * stride
                thread_mask[nonzero_indices[0][step:step + stride],
                            nonzero_indices[1][step:step + stride],
                            nonzero_indices[2][step:step + stride]] = cnt

                data_in_mask = \
                    np.reshape(data[nonzero_indices[0][step: step + stride],
                                    nonzero_indices[1][step: step + stride],
                                    nonzero_indices[2][step: step + stride]],
                               (-1, data.shape[-1]))
                data_in_mask = np.maximum(data_in_mask, 0.0001)

                data_in_mask_list.append(data_in_mask)
                counter.append(np.array([cnt]))
                cnt += 1

            feed_dict = {
                i: d
                for i, d in zip(dim_inputs[:len(waves[i_wave])],
                                data_in_mask_list)
            }

            feed_dict.update(
                {a: d
                 for a, d in zip([dm_input], [design_matrix])})

            feed_dict.update({
                a: d
                for a, d in zip(cnt_inputs[:len(waves[i_wave])], counter)
            })

            results = []
            results += sess.run(work[:len(waves[i_wave])], feed_dict=feed_dict)

            for result in results:
                dti_params[thread_mask == result[1][0]] = \
                    result[0].reshape(result[0].shape[0], 12)

        evals = dti_params[..., :3]

        evals = mb._roll_evals(evals, -1)

        all_zero = (evals == 0).all(axis=0)
        ev1, ev2, ev3 = evals
        fa = np.sqrt(0.5 * ((ev1 - ev2)**2 + (ev2 - ev3)**2 + (ev3 - ev1)**2) /
                     ((evals * evals).sum(0) + all_zero))
        fas.append(fa)

    return fas
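
The closing lines compute fractional anisotropy directly from the eigenvalues; the same formula applied to a single made-up prolate tensor, for reference.

import numpy as np

ev1, ev2, ev3 = 1.5e-3, 0.4e-3, 0.4e-3        # toy diffusion eigenvalues (mm^2/s)
evals = np.array([ev1, ev2, ev3])
fa = np.sqrt(0.5 * ((ev1 - ev2)**2 + (ev2 - ev3)**2 + (ev3 - ev1)**2)
             / (evals * evals).sum())
print(round(fa, 3))                            # ~0.686 for this prolate tensor
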
Esempio n. 53
0
def sgd_adv(D, sideBilinear, U0, UBias0, ULatentScaler0, WBilinear0,
            eta_Latent, eta_LatentScaler, eta_Bilinear, eta_RowBias, epochs,
            plotAdv, plotAdvHR, validationSet, alpha, epsilon):

    pairs = len(D[1, :])
    testOnes = []

    for index in range(0, len(validationSet[0, :])):
        if validationSet[2, index] == 1.0:
            testOnes.append(index)

    #load model parameters, passed from the initial training SGD function
    U = np.copy(U0)
    UBias = np.copy(UBias0)
    ULatentScaler = np.copy(ULatentScaler0)
    WBilinear = np.copy(WBilinear0)

    #load starting learning rates
    etaLatent = eta_Latent  # / ((1 + etaLatent0 * lambdaLatent) * e)
    etaRowBias = eta_RowBias  # / ((1 + etaRowBias0 * lambdaRowBias) * e)
    etaLatentScaler = eta_LatentScaler  # / ((1 + etaLatentScaler0 * lambdaLatentScaler) * e)
    etaBilinear = eta_Bilinear  # / ((1 + etaBilinear0 * lambdaBilinear) * e)

    limit = int(1 * pairs)

    #initialize perturbations
    DeltaI = np.zeros(len(U[:, 0]))
    DeltaJ = np.zeros(len(U[:, 0]))
    DeltaXI = np.zeros(len(sideBilinear[:, 0]))
    DeltaXJ = np.zeros(len(sideBilinear[:, 0]))

    # np.sqrt(np.power(epsilon, 2)/len(U[:, 0]))

    # here we set the bound for the random noise (if the starting perturbation is random):
    # it can be 0 if we want Delta=0, or the commented value above if we want ||Delta|| < epsilon

    aucCounter = 0

    # Main SGD body
    for e in range(1, epochs):
        for t in range(0, limit):
            i = int(D[0, t]) - 1
            j = int(D[1, t]) - 1
            truth = int(D[2, t])

            # Procedure for building the adversarial perturbation
            predictionDelta = (U[:, i].T + DeltaI) @ ULatentScaler @ (
                U[:, j].T + DeltaJ).T + UBias[i] + UBias[j] + (
                    sideBilinear[:, i].T + DeltaXI) @ WBilinear @ (
                        sideBilinear[:, j].T +
                        DeltaXJ).T  # WPair @ sidePair[:, i, j]  # + WBias

            sigmaDelta = 1 / (1 + np.exp(-predictionDelta)
                              )  # this must be a matrix of link probabilities.

            GammaI = alpha * (sigmaDelta - truth) * (
                ULatentScaler @ (U[:, j].T + DeltaJ).T).T
            GammaJ = alpha * (sigmaDelta - truth) * (
                (U[:, i].T + DeltaI) @ ULatentScaler)
            GammaXI = alpha * (sigmaDelta - truth) * (
                WBilinear @ (sideBilinear[:, j].T + DeltaXJ).T).T
            GammaXJ = alpha * (sigmaDelta - truth) * (
                (sideBilinear[:, i].T + DeltaXI) @ WBilinear)

            DeltaAdvI = epsilon * GammaI / np.sqrt(
                max(np.sum(np.power(GammaI, 2)), 0.000001))
            DeltaAdvJ = epsilon * GammaJ / np.sqrt(
                max(np.sum(np.power(GammaJ, 2)), 0.000001))
            DeltaAdvXI = epsilon * GammaXI / np.sqrt(
                max(np.sum(np.power(GammaXI, 2)), 0.000001))
            DeltaAdvXJ = epsilon * GammaXJ / np.sqrt(
                max(np.sum(np.power(GammaXJ, 2)), 0.000001))

            predictionAdv = (U[:, i].T + DeltaAdvI) @ ULatentScaler @ (U[:, j].T + DeltaAdvJ).T + UBias[i] + UBias[j] \
                            + (sideBilinear[:, i].T + DeltaAdvXI) @ WBilinear @ (sideBilinear[:, j].T + DeltaAdvXJ).T #+ WPair @ sidePair[:, i, j]
            prediction = (
                U[:, i]).T @ ULatentScaler @ U[:, j] + UBias[i] + UBias[
                    j] + sideBilinear[:, i].T @ WBilinear @ sideBilinear[:, j]

            sigmaAdv = 1 / (1 + np.exp(-predictionAdv))
            sigma = 1 / (1 + np.exp(-prediction))

            #cost = -(truth * np.log(sigma) + (1 - truth) * (np.log(1-sigma))) - alpha * ((truth * np.log(sigmaAdv)) + (1 - truth) * (np.log(1-sigmaAdv)))

            gradscalerAdv = float(sigmaAdv - truth)
            gradscaler = float(sigma - truth)

            #gradients computation
            gradIAdv = ULatentScaler @ (
                U[:, j].T + DeltaAdvJ
            ).T  # + np.dot(ULatentScalerAdv, np.transpose(DeltaAdvJ))
            gradI = ULatentScaler @ U[:, j]
            gradJAdv = ((U[:, i].T + DeltaAdvI) @ ULatentScaler).T
            gradJ = (U[:, i].T @ ULatentScaler).T
            gradBilinear = sideBilinear[:, i] @ sideBilinear[:, j].T
            gradBilinearAdv = (sideBilinear[:, i] @ sideBilinear[:, j].T
                               + sideBilinear[:, j] @ DeltaAdvXI
                               + sideBilinear[:, i] @ DeltaAdvXJ
                               + DeltaAdvXI @ DeltaAdvXJ.T)
            gradBias = 1
            gradLatentScaler = U[:, i] @ U[:, j].T
            gradLatentScalerAdv = (U[:, i] @ U[:, j].T
                                   + U[:, j] @ DeltaAdvI
                                   + U[:, i] @ DeltaAdvJ
                                   + DeltaAdvI @ DeltaAdvJ.T)

            #updates
            U[:, i] = U[:, i] - etaLatent * (
                gradscaler * gradI + alpha * gradscalerAdv * gradIAdv
            )  # U_i has dimension 2x1
            U[:, j] = U[:, j] - etaLatent * (gradscaler * gradJ +
                                             alpha * gradscalerAdv * gradJAdv)
            UBias[i] = UBias[i] - etaRowBias * (gradscaler * gradBias)
            UBias[j] = UBias[j] - etaRowBias * (gradscaler * gradBias)
            ULatentScaler = ULatentScaler - etaLatentScaler * (
                gradscaler * gradLatentScaler +
                alpha * gradscalerAdv * gradLatentScalerAdv)
            WBilinear = WBilinear - etaBilinear * (
                gradscaler * gradBilinear +
                alpha * gradscalerAdv * gradBilinearAdv)

        if e % aucStep == 0:
            prediction = predict(U, UBias, ULatentScaler, WBilinear,
                                 sideBilinear)
            acc, rec, prec, auc, f1 = test(prediction, validationSet)
            hr = hitratio(U, ULatentScaler, UBias, WBilinear, validationSet,
                          testOnes)
            plotAdvHR[aucCounter, 0] = hr
            plotAdv[aucCounter, 0] = auc
            aucCounter += 1

    return U, UBias, ULatentScaler, WBilinear, plotAdv, plotAdvHR
Esempio n. 54
0
 def __compute(self):
     # cumulative abnormal return (CAR) over the event window
     self.CAR = np.cumsum(self.AR)
     # variance of the CAR at horizon i, approximated as i times the per-period variance
     self.var_CAR = [(i * var) for i, var in enumerate(self.var_AR, 1)]
     # t-statistic of the CAR and its one-sided p-value under a Student-t with self.df degrees of freedom
     self.tstat = self.CAR / np.sqrt(self.var_CAR)
     self.pvalue = 1.0 - t.cdf(abs(self.tstat), self.df)
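
A small numeric sketch of the same event-study logic with made-up inputs (hypothetical abnormal returns and variances, not taken from the class above):

import numpy as np
from scipy.stats import t

AR = np.array([0.010, -0.005, 0.020])   # hypothetical daily abnormal returns
var_AR = np.array([1e-4, 1e-4, 1e-4])   # hypothetical per-day variances
df = 30                                 # degrees of freedom of the estimation window

CAR = np.cumsum(AR)
var_CAR = np.arange(1, len(var_AR) + 1) * var_AR
tstat = CAR / np.sqrt(var_CAR)
pvalue = 1.0 - t.cdf(np.abs(tstat), df)
print(CAR, tstat, pvalue)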
Esempio n. 55
0
# Calculation of the energies, on a linear scale, in MeV
E0 = 4
E1 = E0 / 796
E = E1 * Emax
print('Energy of the alpha particles =', E)

# Calculation of the effective length
p0 = 1013
x0 = 2
x = x0 * (p / p0)
print('effective length =', x)

def f(x, a, b):
    return a*x+b
params, cov = curve_fit(f, x1, N1)
errors = np.sqrt(np.diag(cov))
print('a =', params[0], '±', errors[0])
print('b =', params[1], '±', errors[1])
a=ufloat(params[0], errors[0])
b=ufloat(params[1], errors[1])


t=np.linspace(0.6, 8)
# plt.plot(x, y, 'rx', label='measured data')
plt.plot(x, N, 'rx', label='measured data')
# plt.plot(t, f(t, *params), 'b-', label='fit line')
plt.axhline(y=67691)
plt.text(-0.1, 65707,'N/2')
#plt.axvline(x=7.4)
#plt.yscale('log')
plt.xlabel(r'$ x/cm$')
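
Since a and b are already wrapped as ufloat values, the fitted line can be evaluated with the fit errors propagated automatically; a short sketch assuming the uncertainties package that provides ufloat above:

from uncertainties import unumpy as unp

y_fit = a * t + b                      # every element carries the propagated uncertainty
print(unp.nominal_values(y_fit)[:3])   # central values of the fitted line
print(unp.std_devs(y_fit)[:3])         # propagated 1-sigma errors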
Esempio n. 56
0
# Program file Pex3_20.py
import numpy as np
from scipy.integrate import tplquad
# integrand f(z, y, x); region: x in [0, 2], y in [-sqrt(2x - x^2), sqrt(2x - x^2)], z in [0, 6]
f = lambda z, y, x: z * np.sqrt(x**2 + y**2 + 1)
ybd = lambda x: np.sqrt(2 * x - x**2)
print("I=", tplquad(f, 0, 2, lambda x: -ybd(x), ybd, 0, 6))
Esempio n. 57
0
def draw(m_, V_, z):
    raise NotImplementedError  # function disabled; the ctypes-based sampler below is never reached
    ns = V_.shape[0]
    m = sp.array([[i for i in (m_)]])
    V = copy.copy(V_)
    R = sp.empty([ns, z])
    libGP.drawk(V.ctypes.data_as(ctpd), cint(ns), R.ctypes.data_as(ctpd),
                cint(z))
    R += sp.hstack([m.T] * z)
    #R=sp.random.multivariate_normal(m.flatten(),V,z)
    return copy.copy(R).T


from scipy import stats

SQRT_1_2PI = 1 / np.sqrt(2 * np.pi)


def EI(m, s, y):
    # expected improvement (minimization form) of each candidate with posterior
    # mean m[i] and standard deviation s[i], relative to the current best value y
    N = len(m)
    R = sp.empty(N)
    for i in range(N):
        S = (y - m[i]) / s[i]
        c = stats.norm.cdf(S)
        p = stats.norm.pdf(S)
        R[i] = (y - m[i]) * c + s[i] * p
    return R
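
The loop above can also be written in vectorized form; a small sketch using only NumPy/SciPy, independent of the libGP helpers in this file:

import numpy as np
from scipy import stats

def EI_vectorized(m, s, y):
    # expected improvement (minimization form) for arrays of posterior means m
    # and standard deviations s, given the current best objective value y
    m, s = np.asarray(m, dtype=float), np.asarray(s, dtype=float)
    S = (y - m) / s
    return (y - m) * stats.norm.cdf(S) + s * stats.norm.pdf(S)

# example: EI_vectorized([0.2, 0.5], [0.1, 0.3], y=0.3)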


def lEI(m, s, y):
    N = len(m)
Esempio n. 58
0
 def infer_LCB(self, X, D, p):
     # lower confidence bound acquisition: posterior mean minus p posterior standard deviations
     m, v = self.infer_diag(X, D)
     return m - p * np.sqrt(v)
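
A standalone illustration of the same lower-confidence-bound rule, using a hypothetical posterior instead of the class's infer_diag:

import numpy as np

m = np.array([1.0, 0.8, 1.2])     # hypothetical posterior means
v = np.array([0.04, 0.25, 0.01])  # hypothetical posterior variances
p = 2.0                           # exploration weight
lcb = m - p * np.sqrt(v)
best = np.argmin(lcb)             # candidate with the most optimistic lower bound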
Esempio n. 59
0
def G_mapping(
    latents_in,  # First input: Latent vectors (Z) [minibatch, latent_size].
    labels_in,  # Second input: Conditioning labels [minibatch, label_size].
    latent_size=512,  # Latent vector (Z) dimensionality.
    label_size=0,  # Label dimensionality, 0 if no labels.
    dlatent_size=512,  # Disentangled latent (W) dimensionality.
    dlatent_broadcast=None,  # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size].
    mapping_layers=8,  # Number of mapping layers.
    mapping_fmaps=512,  # Number of activations in the mapping layers.
    mapping_lrmul=0.01,  # Learning rate multiplier for the mapping layers.
    mapping_nonlinearity='lrelu',  # Activation function: 'relu', 'lrelu'.
    use_wscale=True,  # Enable equalized learning rate?
    normalize_latents=True,  # Normalize latent vectors (Z) before feeding them to the mapping layers?
    dtype='float32',  # Data type to use for activations and outputs.
    **_kwargs):  # Ignore unrecognized keyword args.

    act, gain = {
        'relu': (tf.nn.relu, np.sqrt(2)),
        'lrelu': (leaky_relu, np.sqrt(2))
    }[mapping_nonlinearity]

    # Inputs.
    latents_in.set_shape([None, latent_size])
    labels_in.set_shape([None, label_size])
    latents_in = tf.cast(latents_in, dtype)
    labels_in = tf.cast(labels_in, dtype)
    x = latents_in

    # Embed labels and concatenate them with latents.
    if label_size:
        with tf.variable_scope('LabelConcat'):
            w = tf.get_variable('weight',
                                shape=[label_size, latent_size],
                                initializer=tf.initializers.random_normal())
            y = tf.matmul(labels_in, tf.cast(w, dtype))
            x = tf.concat([x, y], axis=1)

    # Normalize latents.
    if normalize_latents:
        x = pixel_norm(x)

    # Mapping layers.
    for layer_idx in range(mapping_layers):
        with tf.variable_scope('Dense%d' % layer_idx):
            fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps
            x = dense(x,
                      fmaps=fmaps,
                      gain=gain,
                      use_wscale=use_wscale,
                      lrmul=mapping_lrmul)
            x = apply_bias(x, lrmul=mapping_lrmul)
            x = act(x)

    # Broadcast.
    if dlatent_broadcast is not None:
        with tf.variable_scope('Broadcast'):
            x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1])

    # Output.
    assert x.dtype == tf.as_dtype(dtype)
    return tf.identity(x, name='dlatents_out')
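
dense, apply_bias, leaky_relu and pixel_norm are helpers defined elsewhere in the original file. For reference, the pixel normalization applied to the latents in the public StyleGAN code rescales each vector to unit average square value; the sketch below is an assumption about that helper, not its actual body:

import tensorflow as tf

def pixel_norm_sketch(x, epsilon=1e-8):
    # rescale each row of x so that the mean of its squared entries is ~1 (assumed behavior of pixel_norm)
    return x * tf.math.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon)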
Esempio n. 60
0
    def __init__(self, fwhm, ratio=1.0, theta=0.0, sigma_radius=1.5,
                 normalize_zerosum=True):

        if fwhm <= 0:
            raise ValueError('fwhm must be positive.')

        if ratio <= 0 or ratio > 1:
            raise ValueError('ratio must be positive and less than or '
                             'equal to 1.')

        if sigma_radius <= 0:
            raise ValueError('sigma_radius must be positive.')

        self.fwhm = fwhm
        self.ratio = ratio
        self.theta = theta
        self.sigma_radius = sigma_radius
        self.xsigma = self.fwhm * gaussian_fwhm_to_sigma
        self.ysigma = self.xsigma * self.ratio

        theta_radians = np.deg2rad(self.theta)
        cost = np.cos(theta_radians)
        sint = np.sin(theta_radians)
        xsigma2 = self.xsigma**2
        ysigma2 = self.ysigma**2

        self.a = (cost**2 / (2.0 * xsigma2)) + (sint**2 / (2.0 * ysigma2))
        # CCW
        self.b = 0.5 * cost * sint * ((1.0 / xsigma2) - (1.0 / ysigma2))
        self.c = (sint**2 / (2.0 * xsigma2)) + (cost**2 / (2.0 * ysigma2))

        # find the extent of an ellipse with radius = sigma_radius*sigma;
        # solve for the horizontal and vertical tangents of an ellipse
        # defined by g(x,y) = f
        self.f = self.sigma_radius**2 / 2.0
        denom = (self.a * self.c) - self.b**2

        # nx and ny are always odd
        self.nx = 2 * int(max(2, math.sqrt(self.c * self.f / denom))) + 1
        self.ny = 2 * int(max(2, math.sqrt(self.a * self.f / denom))) + 1

        self.xc = self.xradius = self.nx // 2
        self.yc = self.yradius = self.ny // 2

        # define the kernel on a 2D grid
        yy, xx = np.mgrid[0:self.ny, 0:self.nx]
        self.circular_radius = np.sqrt((xx - self.xc)**2 + (yy - self.yc)**2)
        self.elliptical_radius = (self.a * (xx - self.xc)**2
                                  + 2.0 * self.b * (xx - self.xc)
                                  * (yy - self.yc)
                                  + self.c * (yy - self.yc)**2)

        self.mask = np.where(
            (self.elliptical_radius <= self.f)
            | (self.circular_radius <= 2.0), 1, 0).astype(int)
        self.npixels = self.mask.sum()

        # NOTE: the central (peak) pixel of gaussian_kernel has a value of 1.
        self.gaussian_kernel_unmasked = np.exp(-self.elliptical_radius)
        self.gaussian_kernel = self.gaussian_kernel_unmasked * self.mask

        # denom = variance * npixels
        denom = ((self.gaussian_kernel**2).sum()
                 - (self.gaussian_kernel.sum()**2 / self.npixels))
        self.relerr = 1.0 / np.sqrt(denom)

        # normalize the kernel to zero sum
        if normalize_zerosum:
            self.data = ((self.gaussian_kernel
                          - (self.gaussian_kernel.sum() / self.npixels))
                         / denom) * self.mask
        else:
            self.data = self.gaussian_kernel

        self.shape = self.data.shape
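
The zero-sum normalization at the end makes the masked kernel sum to (numerically) zero, so convolving with it ignores any constant background level. A quick consistency check, assuming the __init__ above belongs to a class called StarFinderKernel here (hypothetical name):

kernel = StarFinderKernel(fwhm=3.0, ratio=0.8, theta=30.0)
assert kernel.data.shape == kernel.shape
assert abs(kernel.data.sum()) < 1e-10             # zero-sum property of the normalized kernel
assert kernel.nx % 2 == 1 and kernel.ny % 2 == 1  # kernel dimensions are always odd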