Code example #1
def scirpus_regobj(preds, dtrain):
    # custom xgboost objective: per-row gradient and Hessian
    from numpy import exp as npexp
    labels = dtrain.get_label()
    x = preds - labels
    grad = 2 * x * npexp(-(x ** 2)) * (npexp(x ** 2) + x ** 2 + 1)
    hess = 2 * npexp(-(x ** 2)) * (npexp(x ** 2) - 2 * (x ** 4) + 5 * (x ** 2) - 1)
    return grad, hess
Code example #3
File: softmax_cost.py Project: hkingravi/KMVis
def softmax_grad(theta, nclasses, dim, wdecay, data, labels):
    # Assumes module-level imports: reshape, squeeze, ones, arange, dot,
    # tile, asarray, transpose, exp as npexp, sum as npsum (from numpy),
    # csr_matrix (from scipy.sparse), and numpy itself.
    # unroll parameters from theta
    theta = reshape(theta, (dim, nclasses))
    theta = theta.T
    nsamp = data.shape[1]

    # generate ground-truth (one-hot) matrix
    onevals = squeeze(ones((1, nsamp)))
    rows = squeeze(labels) - 1  # shift labels to zero-based row indices
    cols = arange(nsamp)
    ground_truth = csr_matrix((onevals, (rows, cols))).todense()

    # compute hypothesis; subtract the column max so npexp cannot overflow
    theta_dot_prod = dot(theta, data)
    theta_dot_prod = theta_dot_prod - numpy.amax(theta_dot_prod, axis=0)
    soft_theta = npexp(theta_dot_prod)
    soft_theta_sum = npsum(soft_theta, axis=0)
    soft_theta_sum = tile(soft_theta_sum, (nclasses, 1))
    hyp = soft_theta / soft_theta_sum

    # compute gradient of the cross-entropy cost plus weight decay
    thetagrad = (-1.0 / nsamp) * dot(ground_truth - hyp, transpose(data)) + wdecay * theta

    thetagrad = asarray(thetagrad)
    thetagrad = thetagrad.flatten('F')  # Fortran order matches the unroll above
    return thetagrad
Code example #4
def scirpus_error(preds, dtrain):
    # matching xgboost eval metric: mean of x**2 * (1 - exp(-x**2))
    from numpy import exp as npexp, mean
    labels = dtrain.get_label()
    x = labels - preds
    error = (x ** 2) * (1 - npexp(-(x ** 2)))
    return 'error', mean(error)
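Together, scirpus_regobj (example #1) and scirpus_error form a custom objective/metric pair for xgboost. Below is a minimal sketch of wiring them into the classic xgb.train API; the toy dataset and parameter values are illustrative, not from the original source:

import numpy as np
import xgboost as xgb

# hypothetical toy regression data, just to exercise the hooks
X = np.random.rand(200, 5)
y = X.sum(axis=1) + np.random.normal(0, 0.1, 200)
dtrain = xgb.DMatrix(X[:150], label=y[:150])
dvalid = xgb.DMatrix(X[150:], label=y[150:])

model = xgb.train({'max_depth': 4, 'eta': 0.1}, dtrain,
                  num_boost_round=50,
                  evals=[(dvalid, 'valid')],
                  obj=scirpus_regobj,    # gradient/Hessian pair from example #1
                  feval=scirpus_error)   # evaluation metric from above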
Code example #6
File: util.py Project: JessAwBryant/py-metagenomics
def asciiHistogram(histogram, log=False, width=60, label='length', maxLabelWidth=10):
    (values,edges)=histogram[:2]
    
    maxValue=max(values)
    
    centers=[int(float(sum(edges[i:i+2]))/2.) for i in range(len(values))]
    largestLabel = max(max([len(str(c)) for c in centers]),len(label))
    if largestLabel<6:
        largestLabel=6
    elif largestLabel>maxLabelWidth:
        largestLabel=maxLabelWidth
    
    plotWidth=width-largestLabel+1
    
    midPoint = npexp((nplog(maxValue)-nplog(.5))/2) if log else maxValue/2
    output="%s|count%s%s|%s%s|\n" % (rightPad(label,largestLabel),
                                     "".join([" " for i in range(plotWidth/2 - len(str(int(midPoint))) - len("count"))]),
                                     str(int(midPoint)),
                                     "".join([" " for i in range(int(ceil(plotWidth/2.)) - 1 - len(str(int(maxValue))))]),
                                     str(int(maxValue)),
                                     )
    #output+="%s|%s\n" % ("".join(["_" for i in range(largestLabel)]),
    #                     "".join(["_" for i in range(plotWidth)]),
    #                     )
    for i, v in enumerate(values):
        output+="%s|%s\n" % (rightPad(str(centers[i]),largestLabel),getBarString(v, maxValue, plotWidth, log))
    return output
Code example #7
File: softmax_cost.py Project: hkingravi/KMVis
def softmax_cost(theta, nclasses, dim, wdecay, data, labels):
    # Assumes the same imports as softmax_grad above, plus array, multiply,
    # nplog (numpy.log), and norm (e.g. from numpy.linalg).
    # unroll parameters from theta
    theta = reshape(theta, (dim, nclasses))
    theta = theta.T
    nsamp = data.shape[1]

    # generate ground-truth (one-hot) matrix
    onevals = squeeze(ones((1, nsamp)))
    rows = squeeze(labels) - 1  # shift labels to zero-based row indices
    cols = arange(nsamp)
    ground_truth = csr_matrix((onevals, (rows, cols))).todense()

    # compute hypothesis; subtract the column max so npexp cannot overflow
    theta_dot_prod = dot(theta, data)
    theta_dot_prod = theta_dot_prod - numpy.amax(theta_dot_prod, axis=0)
    soft_theta = npexp(theta_dot_prod)
    soft_theta_sum = npsum(soft_theta, axis=0)
    soft_theta_sum = tile(soft_theta_sum, (nclasses, 1))
    hyp = soft_theta / soft_theta_sum

    # compute cost: mean cross-entropy plus L2 weight decay
    log_hyp = nplog(hyp)
    temp = array(multiply(ground_truth, log_hyp))
    temp = npsum(npsum(temp, axis=1), axis=0)
    cost = (-1.0 / nsamp) * temp + 0.5 * wdecay * pow(norm(theta, 'fro'), 2)
    return cost
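Since softmax_grad (example #3) returns the analytic gradient of this cost, the pair can be sanity-checked numerically. A sketch assuming both functions (and their imports) live in the same module; sizes and values are arbitrary:

import numpy as np
from scipy.optimize import check_grad

nclasses, dim, nsamp, wdecay = 3, 4, 20, 1e-3
data = np.random.rand(dim, nsamp)
labels = np.random.randint(1, nclasses + 1, nsamp)
labels[:nclasses] = np.arange(1, nclasses + 1)  # ensure every class occurs once
theta0 = 0.005 * np.random.randn(dim * nclasses)

err = check_grad(softmax_cost, softmax_grad, theta0,
                 nclasses, dim, wdecay, data, labels)
print(err)  # should be tiny if the analytic gradient matches the cost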
Code example #8
def make_data(n=None, i=None, p=None):
    # Generate a noisy exponential-growth series: y = i * exp(p * x) plus
    # Gaussian noise whose scale grows with the clean signal.
    from random import randint
    from numpy import log, random as nprandom, arange as nparange, exp as npexp
    n = n or randint(10, 100)      # number of points
    i = i or randint(1, 100)       # initial value
    p = p or randint(1, 50) / 100  # growth rate
    xx = nparange(1, n + 1)
    yy_clean = npexp(xx * p) * i
    yy = yy_clean + nprandom.normal(0, log(yy_clean).round() + (yy_clean // 10), size=n)
    return (xx, yy, yy_clean, n, i, p)
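Because the clean series is i * exp(p * x), taking logs reduces parameter recovery to a straight-line fit. A short sketch (the mask is needed because the additive noise can push samples to zero or below):

import numpy as np

xx, yy, yy_clean, n, i, p = make_data()

mask = yy > 0  # log is undefined for non-positive noisy values
slope, intercept = np.polyfit(xx[mask], np.log(yy[mask]), 1)
print("true p=%.2f i=%d; fitted p=%.2f i=%.1f"
      % (p, i, slope, np.exp(intercept)))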
Code example #9
    def _sigmoid(self, x):
        """
        This method is separate from `forward` because it
        will be used later with `backward` as well.

        `x`: A numpy array-like object.

        Return the result of the sigmoid function.
        """
        from numpy import exp as npexp
        return 1 / (1 + npexp(-x))
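For large negative x, npexp(-x) overflows and the one-liner above emits a runtime warning. A numerically stable variant (a sketch, not part of the original exercise) splits on the sign so the exponent is never positive:

import numpy as np

def stable_sigmoid(x):
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))  # exp(-x) <= 1 here
    expx = np.exp(x[~pos])                    # exp(x) <= 1 here
    out[~pos] = expx / (1.0 + expx)
    return out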
Code example #10
def stetson_jindex(ftimes, fmags, ferrs, weightbytimediff=False):
    '''This calculates the Stetson index for the magseries, based on consecutive
    pairs of observations. Based on Nicole Loncke's work for her Planets and
    Life certificate at Princeton.

    This requires finite times, mags, and errs.

    If weightbytimediff is True, the Stetson index for any pair of mags will be
    reweighted by the difference in times between them using the scheme in
    Fruth+ 2012 and Zhang+ 2003 (as seen in Sokolovsky+ 2017).

    w_i = exp(- (t_i+1 - t_i)/ delta_t )

    '''

    # assumes numpy aliases in scope: npmedian, nproll, npdiff, npexp,
    # npsum, npsign, npsqrt, npabs, npnan
    ndet = len(fmags)

    if ndet > 9:

        # get the median and ndet
        medmag = npmedian(fmags)

        # get the stetson index elements
        delta_prefactor = (ndet / (ndet - 1))
        sigma_i = delta_prefactor * (fmags - medmag) / ferrs
        # Nicole's clever trick to advance indices by 1 and do x_i*x_(i+1)
        sigma_j = nproll(sigma_i, 1)

        if weightbytimediff:

            # weight each consecutive pair by its time difference
            # (Fruth+ 2012 scheme)
            difft = npdiff(ftimes)
            deltat = npmedian(difft)

            weights_i = npexp(-difft / deltat)
            products = (weights_i * sigma_i[1:] * sigma_j[1:])
        else:
            # ignore first elem since it's actually x_0*x_n
            products = (sigma_i * sigma_j)[1:]

        stetsonj = (npsum(npsign(products) * npsqrt(npabs(products)))) / ndet

        return stetsonj

    else:

        LOGERROR('not enough detections in this magseries '
                 'to calculate stetson J index')
        return npnan
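The function resolves npmedian, nproll, and the other np-prefixed names at module scope. A sketch of the aliases it expects plus a call on synthetic data (the magseries values are illustrative):

import numpy as np
from numpy import (median as npmedian, roll as nproll, diff as npdiff,
                   exp as npexp, sum as npsum, sign as npsign,
                   sqrt as npsqrt, abs as npabs, nan as npnan)

ftimes = np.arange(100.0)
fmags = 12.0 + np.random.normal(0, 0.01, 100)
ferrs = np.full(100, 0.01)
print(stetson_jindex(ftimes, fmags, ferrs, weightbytimediff=True))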
Code example #11
File: ml.py Project: jfgonzalez99/bigDATA
    def kRegression(self, point, k, weight):
        """ Performs a locally weighted kernel regression
        Args
        ---
        `point : np.array` the point to be classified

        `k : int` the number of nearest neighbors

        `weight : float` how strongly the closeness of the neighbor is taken into consideration

        Returns
        ---
        `classification : float` the classification label of the given point
        """
        distances = []
        weights = []
        for i in range(self.N):
            distance = lrDistance(point, self.X[:, i], self.dim)
            distances.append(distance)
            weights.append(npexp(-distance / weight))

        # Find indices of nearest neighbors
        ind_neighbors = argpartition(distances, k)[:k]

        # Sort neighbors by weight, descending
        neighbor_weights = []
        for i in ind_neighbors:
            neighbor_weights.append((i, weights[i]))
        nw = reversed(sorted(neighbor_weights, key=itemgetter(1)))

        # Matrix of nearest points
        P = zeros((self.dim, k))
        # Diagonal matrix of weights
        K = zeros((k, k))
        # Labels vector
        f = zeros(k)

        column = 0
        for i, wgt in nw:  # 'wgt' avoids shadowing the bandwidth parameter
            P[:, column] = self.X[:, i]
            K[column][column] = wgt
            f[column] = self.y[i]
            column += 1

        w = inverse(P @ K @ P.T) @ (P @ K) @ f
        classification = w.T @ point

        return classification
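The solve step, w = inverse(P @ K @ P.T) @ (P @ K) @ f, is the normal-equation form of weighted least squares: it minimizes the sum over neighbors of K[j][j] * (w . p_j - f_j)**2. A standalone sketch (toy shapes and random values) checking it against numpy's lstsq on sqrt-weight-scaled data:

import numpy as np

dim, k = 3, 5
P = np.random.rand(dim, k)   # columns are the k nearest points
kdiag = np.random.rand(k)    # neighbor weights
f = np.random.rand(k)        # neighbor labels

w_direct = np.linalg.inv(P @ np.diag(kdiag) @ P.T) @ (P @ np.diag(kdiag)) @ f
w_lstsq, *_ = np.linalg.lstsq((P * np.sqrt(kdiag)).T,
                              np.sqrt(kdiag) * f, rcond=None)
print(np.allclose(w_direct, w_lstsq))  # True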
Code example #12
File: softmax_cost.py Project: hkingravi/KMVis
def softmax_predict(theta, nclasses, dim, data):
    # unroll parameters from theta
    theta = reshape(theta, (dim, nclasses))
    theta = theta.T

    # compute hypothesis; subtract the column max so npexp cannot overflow
    theta_dot_prod = dot(theta, data)
    theta_dot_prod = theta_dot_prod - numpy.amax(theta_dot_prod, axis=0)
    soft_theta = npexp(theta_dot_prod)
    soft_theta_sum = npsum(soft_theta, axis=0)
    soft_theta_sum = tile(soft_theta_sum, (nclasses, 1))
    hyp = soft_theta / soft_theta_sum

    # predicted class = row with the highest probability
    pred = numpy.argmax(hyp, axis=0)

    return numpy.asarray(pred)
Code example #13
File: util.py Project: jmeppley/py-metagenomics
def ascii_histogram(histogram,
                    log=False,
                    width=60,
                    label='length',
                    maxLabelWidth=10):
    (values, edges) = histogram[:2]

    maxValue = max(values)

    centers = [
        int(float(sum(edges[i:i + 2])) / 2.) for i in range(len(values))
    ]
    largestLabel = max(max([len(str(c)) for c in centers]), len(label))
    if largestLabel < 6:
        largestLabel = 6
    elif largestLabel > maxLabelWidth:
        largestLabel = maxLabelWidth

    plotWidth = width - largestLabel + 1

    midPoint = npexp((nplog(maxValue) - nplog(.5)) / 2) \
        if log else maxValue / 2
    midPointStr = str(int(midPoint))
    padding_a_width = int(plotWidth / 2) - len(midPointStr) - len("count")
    padding_a = "".join([" " for i in range(padding_a_width)])
    maxValueStr = str(int(maxValue))
    padding_b_width = int(ceil(plotWidth / 2.)) - 1 - len(maxValueStr)
    padding_b = "".join([" " for i in range(padding_b_width)])
    output = "%s|count%s%s|%s%s|\n" % (
        rightPad(label, largestLabel),
        padding_a,
        midPointStr,
        padding_b,
        maxValueStr,
    )
    # output+="%s|%s\n" % ("".join(["_" for i in range(largestLabel)]),
    #                     "".join(["_" for i in range(plotWidth)]),
    #                     )
    for i, v in enumerate(values):
        output += "%s|%s\n" % (rightPad(str(centers[i]), largestLabel),
                               getBarString(v, maxValue, plotWidth, log))
    return output
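The (values, edges) input matches what numpy.histogram returns, so the output of that call can be fed straight in. A usage sketch, assuming the module's other names (rightPad, getBarString, npexp, nplog, ceil) are in scope; the lognormal data is illustrative:

import numpy as np

data = np.random.lognormal(mean=3, sigma=1, size=1000)
print(ascii_histogram(np.histogram(data, bins=20), log=True))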
Code example #14
File: SoftMax.py Project: hkingravi/KMVis
    def cost(self, theta):
        """
        This function computes the cost associated to the softmax classifier.

        :param data: data matrix :math:`D_1\\in\\mathbb{R}^{d \\times n}`, where
                     :math:`d` is the dimensionality and
                     :math:`n` is the number of training samples
        :type data: numpy array
        """
        # unroll parameters from theta
        theta = reshape(theta, (self.dim, self.nclasses))
        theta = theta.T
        nsamp = self.data.shape[1]

        # generate ground truth matrix
        onevals = squeeze(ones((1, nsamp)))
        rows = squeeze(self.labels) - 1
        cols = arange(nsamp)
        ground_truth = csr_matrix((onevals, (rows, cols))).todense()

        # compute hypothesis; use some in-place computations
        theta_dot_prod = dot(theta, self.data)
        theta_dot_prod = theta_dot_prod - numpy.amax(theta_dot_prod, axis=0)
        soft_theta = npexp(theta_dot_prod)
        soft_theta_sum = npsum(soft_theta, axis=0)
        soft_theta_sum = tile(soft_theta_sum, (self.nclasses, 1))
        hyp = soft_theta / soft_theta_sum

        # compute cost
        log_hyp = nplog(hyp)
        temp = array(multiply(ground_truth, log_hyp))
        temp = npsum(npsum(temp, axis=1), axis=0)
        cost = (-1.0 / nsamp) * temp + 0.5 * self.wdecay * pow(norm(theta, "fro"), 2)
        thetagrad = (-1.0 / nsamp) * dot(ground_truth - hyp, transpose(self.data)) + self.wdecay * theta

        thetagrad = thetagrad.flatten('F')  # Fortran order matches the unroll
        return cost, thetagrad
Code example #15
def exp(x):
    return npexp(x)
Code example #16
File: newChef.py Project: mlandolfi/ratchef
    def sigmoid(self, x):
        return 1 / (1 + npexp(-x))
Code example #17
File: munchetal.py Project: decarlof/Pore3D
def munchetal(im, args):
    """Process a sinogram image with the Munch et al. de-striping algorithm.

    Parameters
    ----------
    im : array_like
        Image data as numpy array.

    wlevel : int
        Levels of the wavelet decomposition.

    sigma : float
        Smoothing effect.

    (Parameters wlevel and sigma have to be passed as a string separated by ;)

    Example (using tifffile.py)
    ---------------------------
    >>> im = imread('sino_orig.tif')
    >>> im = munchetal(im, '4;1.0')
    >>> imsave('sino_flt.tif', im)

    References
    ----------
    B. Munch, P. Trtik, F. Marone, M. Stampanoni, Stripe and ring artifact removal with
    combined wavelet-Fourier filtering, Optics Express 17(10):8567-8591, 2009.

    """  
    # Disable a warning:
    simplefilter("ignore", ComplexWarning)
    
    # Get args:
    wlevel, sigma = args.split(";")    
    wlevel = int(wlevel)
    sigma  = float(sigma)

    # The wavelet transform to use : {'haar', 'db1'-'db20', 'sym2'-'sym20', 'coif1'-'coif5', 'dmey'}
    wname = "db2"

    # Wavelet decomposition:
    coeffs = wavedec2(im.astype(float32), wname, level=wlevel)
    coeffsFlt = [coeffs[0]] 

    # FFT transform of horizontal frequency bands:
    for i in range(1, wlevel + 1):  

        # FFT:
        fcV = fftshift(fft(coeffs[i][1], axis=0))  
        my, mx = fcV.shape
        
        # Damping of vertical stripes:
        damp = 1 - npexp(-(arange(-floor(my / 2.),-floor(my / 2.) + my) ** 2) / (2 * (sigma ** 2)))      
        dampprime = kron(ones((1,mx)), damp.reshape((damp.shape[0],1)))
        fcV = fcV * dampprime    

        # Inverse FFT:
        fcVflt = ifft(ifftshift(fcV), axis=0)
        cVHDtup = (coeffs[i][0], fcVflt, coeffs[i][2])             
        coeffsFlt.append(cVHDtup)

    # Get wavelet reconstruction:
    im_f = real(waverec2(coeffsFlt, wname))

    # Return image according to input type:
    if (im.dtype == 'uint16'):
        
        # Check extrema for uint16 images:
        im_f[im_f < iinfo(uint16).min] = iinfo(uint16).min
        im_f[im_f > iinfo(uint16).max] = iinfo(uint16).max

        # Return filtered image (an additional row and/or column might be
        # present):
        return im_f[0:im.shape[0],0:im.shape[1]].astype(uint16)
    else:
        return im_f[0:im.shape[0],0:im.shape[1]]
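The heart of the de-striping is the damp window: an inverted Gaussian notch that suppresses the frequencies around the FFT centre (where vertical stripes concentrate) and leaves the rest of the band untouched. A standalone sketch of its shape; my and sigma are arbitrary here:

import numpy as np

my, sigma = 64, 1.0
k = np.arange(-np.floor(my / 2.), -np.floor(my / 2.) + my)
damp = 1 - np.exp(-k**2 / (2 * sigma**2))
print(damp.min(), damp.max())  # ~0 at the centre (stripes), ~1 elsewhere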
Code example #18
def homomorphic(im, args):
	"""Process the input image with an homomorphic filtering.

	Parameters
	----------
	im : array_like
		Image data as numpy array.	

	d0 : float
		Cutoff in the range [0.01, 0.99] of the high-pass Gaussian filter.
		Higher values mean more high-pass effect. [Suggested default: 0.80]

	alpha : float
		Offset to preserve the zero frequency. Higher values mean less
		high-pass effect. [Suggested default: 0.2]

	(Parameters d0 and alpha have to be passed as a string separated by ;)
		   
	Example (using tifffile.py)
	---------------------------
	>>> im = imread('im_orig.tif')
	>>> im = homomorphic(im, '0.5;0.2')    
	>>> imsave('im_flt.tif', im) 
	

	"""    
	# Get args:
	param1, param2 = args.split(";")       	 
	d0 = (1.0 - float(param1))  # Simpler for user
	alpha = float(param2) 
	
	# Internal parameters for Gaussian low-pass filtering:
	d0 = d0 * (im.shape[1] / 2.0)	

	# Take the log:
	im = nplog(1 + im)

	# Compute FFT:
	n_byte_align(im, simd_alignment) 
	im = rfft2(im, threads=2)

	# Prepare the frequency coordinates:
	u = arange(0, im.shape[0], dtype=float32)
	v = arange(0, im.shape[1], dtype=float32)

	# Compute the indices for meshgrid:
	u[(u > im.shape[0] / 2.0)] = u[(u > im.shape[0] / 2.0)] - im.shape[0]    
	v[(v > im.shape[1] / 2.0)] = v[(v > im.shape[1] / 2.0)] - im.shape[1]

	# Compute the meshgrid arrays:
	V, U = meshgrid(v, u)

	# Compute the distances D(U, V):
	D = sqrt(U ** 2 + V ** 2)

	# Prepare Gaussian filter:
	H = npexp(-(D ** 2) / (2 * (d0 ** 2)))
	H = (1 - H) + alpha
	
	# Do the filtering:
	im = H * im   

	# Compute inverse FFT of the filtered data:
	n_byte_align(im, simd_alignment)
	#im = real(irfft2(im, threads=2))
	im = irfft2(im, threads=2)

	# Take the exp:
	im = npexp(im) - 1

	# Return image according to input type:
	return im.astype(float32)
Code example #20
File: utils_ctf.py Project: philliphelms/cyclomps
def exp(m):
    m = to_nparray(m)
    m = npexp(m)
    m = from_nparray(m)
    return m
Code example #21
File: ml.py Project: jfgonzalez99/bigDATA
def softmax(x):
    """ Softmax function with input vector x """
    exp_x = npexp(x)
    exp_sum = sum(exp_x)
    smax = exp_x / exp_sum
    return smax
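Unlike the softmax_cost examples above, this version does not subtract the maximum before exponentiating, so npexp overflows for large inputs. A sketch of the stabilized variant (the shift leaves the result unchanged because softmax is invariant to adding a constant):

import numpy as np
from numpy import exp as npexp

def softmax_stable(x):
    z = npexp(x - np.max(x))  # largest exponent becomes 0
    return z / z.sum()

print(softmax_stable(np.array([1000.0, 1001.0, 1002.0])))  # no overflow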
Code example #22
File: muenchetal.py Project: ElettraSciComp/STP-Core
def muenchetal(im, args):
	"""Process a sinogram image with the Munch et al. de-striping algorithm.

	Parameters
	----------
	im : array_like
		Image data as numpy array.

	wlevel : int
		Levels of the wavelet decomposition.

	sigma : float
		Smoothing effect.

	(Parameters wlevel and sigma have to be passed as a string separated by ;)
	   
	Example (using tifffile.py)
	---------------------------
	>>> im = imread('sino_orig.tif')
	>>> im = muenchetal(im, '4;1.0')
	>>> imsave('sino_flt.tif', im) 

	References
	----------
	B. Munch, P. Trtik, F. Marone, M. Stampanoni, Stripe and ring artifact removal with
	combined wavelet-Fourier filtering, Optics Express 17(10):8567-8591, 2009.

	"""  
	# Disable a warning:
	simplefilter("ignore", ComplexWarning)

	# Get args:
	wlevel, sigma = args.split(";")    
	wlevel = int(wlevel)
	sigma  = float(sigma)

	# The wavelet transform to use : {'haar', 'db1'-'db20', 'sym2'-'sym20', 'coif1'-'coif5', 'dmey'}
	wname = "db5"

	# Wavelet decomposition:
	coeffs = wavedec2(im.astype(float32), wname, level=wlevel)
	coeffsFlt = [coeffs[0]] 

	# FFT transform of horizontal frequency bands:
	for i in range(1, wlevel + 1):  

		# Padding and windowing of input signal:
		n_byte_align(coeffs[i][1], simd_alignment) 
		siz = coeffs[i][1].shape
		tmp = pad(coeffs[i][1], pad_width=((coeffs[i][1].shape[0] // 2, coeffs[i][1].shape[0] // 2), (0,0)), mode='constant') # zero padding
		tmp = pad(tmp, pad_width=((0,0) ,(coeffs[i][1].shape[1] // 2, coeffs[i][1].shape[1] // 2)), mode='constant') # zero padding
		tmp = _windowing_lr(tmp, siz[1])
		tmp = _windowing_lr(tmp.T, siz[0]).T	

		# FFT:
		fcV = fftshift(fft(tmp, axis=0, threads=2))  
		my, mx = fcV.shape
		
		# Damping of vertical stripes:
		damp = 1 - npexp(-(arange(-floor(my / 2.),-floor(my / 2.) + my) ** 2) / (2 * (sigma ** 2)))      
		dampprime = kron(ones((1,mx)), damp.reshape((damp.shape[0],1)))
		fcV = fcV * dampprime    

		# Inverse FFT:
		fcV = ifftshift(fcV)
		n_byte_align(fcV, simd_alignment)
		fcVflt = ifft(fcV, axis=0, threads=2)

		## Crop image:
		tmp = fcVflt[fcVflt.shape[0] // 4:(fcVflt.shape[0] // 4 + siz[0]), fcVflt.shape[1] // 4:(fcVflt.shape[1] // 4 + siz[1])]

		# Dump back coefficients:
		cVHDtup = (coeffs[i][0], tmp, coeffs[i][2])          
		coeffsFlt.append(cVHDtup)

	# Get wavelet reconstruction:
	im_f = real(waverec2(coeffsFlt, wname))

	# Return filtered image (an additional row and/or column might be present):
	return im_f[0:im.shape[0],0:im.shape[1]].astype(float32)
Code example #23
def invsp(x):
    # inverse softplus: recovers x from log(1 + exp(x))
    return nplog(npexp(x) - 1)
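A quick round-trip check of that inverse, assuming the nplog/npexp aliases are in scope where invsp is defined (softplus here is written out just for the test):

import numpy as np
from numpy import log as nplog, exp as npexp

def softplus(x):
    return nplog(1 + npexp(x))

x = np.linspace(0.1, 5, 5)
print(np.allclose(invsp(softplus(x)), x))  # True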
Code example #24
File: ml.py Project: jfgonzalez99/bigDATA
def tansig(x):
    """ Tansig function with input vector x """
    tsig = 2 / (1 + npexp(-2 * x)) - 1
    return tsig
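Algebraically, 2 / (1 + exp(-2x)) - 1 simplifies to tanh(x), so tansig reproduces numpy's built-in. A quick check, assuming tansig and the npexp alias share a module:

import numpy as np
from numpy import exp as npexp

x = np.linspace(-3, 3, 7)
print(np.allclose(tansig(x), np.tanh(x)))  # True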
Code example #25
File: varfeatures.py Project: JinbiaoJi/astrobase
def stetson_jindex(ftimes, fmags, ferrs, weightbytimediff=False):
    '''This calculates the Stetson index for the magseries, based on consecutive
    pairs of observations.

    Based on Nicole Loncke's work for her Planets and Life certificate at
    Princeton in 2014.

    Parameters
    ----------

    ftimes,fmags,ferrs : np.array
        The input mag/flux time-series with all non-finite elements removed.

    weightbytimediff : bool
        If this is True, the Stetson index for any pair of mags will be
        reweighted by the difference in times between them using the scheme in
        Fruth+ 2012 and Zhang+ 2003 (as seen in Sokolovsky+ 2017)::

            w_i = exp(- (t_i+1 - t_i)/ delta_t )

    Returns
    -------

    float
        The calculated Stetson J variability index.

    '''

    ndet = len(fmags)

    if ndet > 9:

        # get the median and ndet
        medmag = npmedian(fmags)

        # get the stetson index elements
        delta_prefactor = (ndet / (ndet - 1))
        sigma_i = delta_prefactor * (fmags - medmag) / ferrs

        # Nicole's clever trick to advance indices by 1 and do x_i*x_(i+1)
        sigma_j = nproll(sigma_i, 1)

        if weightbytimediff:

            difft = npdiff(ftimes)
            deltat = npmedian(difft)

            weights_i = npexp(-difft / deltat)
            products = (weights_i * sigma_i[1:] * sigma_j[1:])
        else:
            # ignore first elem since it's actually x_0*x_n
            products = (sigma_i * sigma_j)[1:]

        stetsonj = (npsum(npsign(products) * npsqrt(npabs(products)))) / ndet

        return stetsonj

    else:

        LOGERROR('not enough detections in this magseries '
                 'to calculate stetson J index')
        return npnan
Code example #26
File: _clarifier.py Project: joyxyz1994/QSDsan
def _settling_flux(X, v_max, v_max_practical, X_min, rh, rp, n0):
    # npmax/npmin behave elementwise here (numpy maximum/minimum);
    # n0 acts as a small floor to keep the flux non-negative
    X_star = npmax(X - X_min, n0)
    v = npmin(v_max_practical, v_max * (npexp(-rh * X_star) - npexp(-rp * X_star)))
    return X * npmax(v, n0)
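A sketch of evaluating the flux on a concentration grid, with npmax/npmin bound to numpy's elementwise maximum/minimum as the code expects; the parameter values are hypothetical, not taken from QSDsan:

import numpy as np
from numpy import maximum as npmax, minimum as npmin, exp as npexp

X = np.linspace(0.0, 10.0, 5)  # solids concentration grid
print(_settling_flux(X, v_max=474.0, v_max_practical=250.0,
                     X_min=0.02, rh=0.576, rp=2.86, n0=1e-8))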
Code example #28
File: mutate_helpers.py Project: OscarDeGar/py_grama
def exp(x):
    r"""Exponential (e-base)
    """
    return npexp(x)
Code example #29
File: ml.py Project: jfgonzalez99/bigDATA
def sigmoid(x):
    """ Sigmoid function with input vector x """
    sig = 1 / (1 + npexp(-x))
    return sig