import numpy as np
from scipy.signal import argrelextrema


def findBump(twoder):
	mins = argrelextrema(twoder[20:70], np.less, order=5)[0]
	maxes = argrelextrema(twoder[10:65], np.greater, order=5)[0]
	
	max2der = max(abs(twoder))
	realmins = []
	realmaxes = []
	thresh = 0.00
	for i in range(len(mins)):
		ind = mins[i] + 20
		if twoder[min(len(twoder)-1,ind+6)] - twoder[ind] > thresh*max2der and twoder[ind-6] - twoder[ind] > thresh*max2der:
			realmins.append(ind)
	if twoder[-1] < 0 and twoder[-3] < 0:
		realmins.append(len(twoder)-1)
	for i in range(len(maxes)):
		ind = maxes[i] + 10
		if twoder[ind] - twoder[min(len(twoder)-1,ind+6)] > thresh*max2der and twoder[ind] - twoder[ind-6] > thresh*max2der:
			realmaxes.append(ind)
	
	if len(realmaxes) <= 1:
		return -1
	startLoc = -1
	for i in range(len(realmins)):
		if realmins[-i-1] < realmaxes[1]:
			startLoc = (realmins[-i-1] + realmaxes[1]) // 2  # integer midpoint
			break
	midLoc = -1
	for i in range(len(realmins)):
		if realmins[i] > realmaxes[1]:
			midLoc = (realmins[i] + realmaxes[1]) // 2  # integer midpoint
			break
	return [startLoc, midLoc]
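
A minimal usage sketch (the data here are hypothetical; it assumes the imports above, and that findBump receives a 1-D second-derivative array):

x = np.linspace(0, 10, 100)
curve = np.sin(x) + 0.3 * np.sin(3 * x)
twoder = np.gradient(np.gradient(curve))  # second derivative
print(findBump(twoder))                   # [startLoc, midLoc], or -1 if fewer than 2 maxima survive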
Code example #2
File: noteClass.py  Project: MTG/smc-2016
    def localMinMax(self, y, box_pts=None):

        #  detect local minima and maxima using the smoothed curve

        self.resetLocalMinMax()

        leny = len(y)
        # smooth the signal
        # print box_pts,len(y)
        if box_pts is None:             # detail segmentation box
            n = int(leny/10)
            box_pts = 3 if n<3 else n
        if leny>52 and box_pts>=leny:   # segment > 0.3s but box is too large
            n = int(leny/5)
            box_pts = n
        if box_pts < leny:
            self.ySmooth = uf.smooth(y, box_pts)
        half_box_pts = np.ceil(box_pts/2.0)

        if len(self.ySmooth):
            # for local maxima
            self.maximaInd = argrelextrema(self.ySmooth, np.greater)
            # remove the boundary effect of convolve
            self.maximaInd = [mi for mi in self.maximaInd[0] if (mi>half_box_pts and mi<leny-half_box_pts)]

            # for local minima
            self.minimaInd = argrelextrema(self.ySmooth, np.less)
            # remove the boundary effect of convolve
            self.minimaInd = [mi for mi in self.minimaInd[0] if (mi>half_box_pts and mi<leny-half_box_pts)]

        return box_pts
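
The boundary filtering above is a general trick: after box-filter smoothing, extrema within half a box length of either end are convolution artifacts. A standalone sketch of the same idea (using a plain numpy box filter in place of the project's uf.smooth):

import numpy as np
from scipy.signal import argrelextrema

y = np.random.rand(100)
box_pts = 11
y_smooth = np.convolve(y, np.ones(box_pts) / box_pts, mode='same')
half = int(np.ceil(box_pts / 2.0))
maxima = [i for i in argrelextrema(y_smooth, np.greater)[0]
          if half < i < len(y) - half]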
Code example #3
    def extract(self, spikes, sample_rate):
        channels_data = spikes
        number_of_channels = len(channels_data)
        number_of_points_in_channel = len(channels_data[0])
        lower_boundary = 0
        upper_boundary = int(number_of_points_in_channel / 2) - 1

        lmda = int(upper_boundary * 1.5)
        dt = 1.0 / sample_rate
        step_tau = int((upper_boundary - lower_boundary) / 50)  ## was constant 1000 ## ensure that integer
        number_of_iterations = len(np.r_[lower_boundary:upper_boundary:step_tau])  ##(upper_boundary-lower_boundary)/step_tau

        # TODO: From the auditory nerve activity compute the autocorrelation of each nerve activity
        iteration = np.zeros(number_of_iterations)
        for i in range(number_of_iterations):
            for j in range(number_of_channels):
                # TODO: Sum these autocorrelation across nerves to construct the summary autocorrelation
                iteration[i] += self.integration(lower_boundary, upper_boundary, sample_rate, dt, channels_data, j, i * step_tau, lmda)
        iteration = iteration / iteration[0]  ##normalization

        # TODO: Extract the argument of the first non-zero peak in the autocorrelation
        peaks = argrelextrema(iteration, np.greater)[0]
        peak_tau = step_tau * peaks[iteration[peaks].argmax()]

        # TODO: Pitch matching maybe?

        # TODO: Return pitch estimate
        return 1.0 / (peak_tau * dt)
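
The closing step — pitch as the sampling rate over the lag of the strongest non-zero-lag autocorrelation peak — can be tried in isolation. A small self-contained sketch (function and variable names are illustrative, not from the project):

import numpy as np
from scipy.signal import argrelextrema

def pitch_from_autocorr(sig, sample_rate):
    ac = np.correlate(sig, sig, mode='full')[len(sig) - 1:]  # lags 0..N-1
    ac = ac / ac[0]                                          # normalize at lag 0
    peaks = argrelextrema(ac, np.greater)[0]                 # candidate lags
    peak_tau = peaks[ac[peaks].argmax()]                     # strongest peak
    return sample_rate / peak_tau

fs = 8000.0
t = np.arange(0, 0.05, 1.0 / fs)
print(pitch_from_autocorr(np.sin(2 * np.pi * 220 * t), fs))  # ~220 Hz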
Code example #4
File: calc_gaussian_imf.py  Project: fndjjx/emd
def most_like_imf(data, imf, imf_index1, imf_index2): 


    residual_short_sample_width, residual_short_std, short_distance = most_like_gaussian(data,imf,imf_index1)
    print "short sample width %s"%residual_short_sample_width
    print "short std %s"%residual_short_std
    residual_long_sample_width, residual_long_std, long_distance = most_like_gaussian(data,imf,imf_index2)
    i = 1
    while residual_short_sample_width == residual_long_sample_width:# and residual_short_factor == residual_long_factor:
        residual_long_sample_width, residual_long_std, _ = most_like_gaussian(data,imf,imf_index2,i)
        print "long sample width %s"%residual_long_sample_width
        print "long std %s"%residual_long_std
        i += 1
    

    print "long sample width %s"%residual_long_sample_width
    print "long std %s"%residual_long_std

    confidence = 0
    if (short_distance/np.mean(data[-20:]))<0.01 and (long_distance/np.mean(data[-20:]))<0.01:
        confidence = 1
    long_period = gaussian_smooth(data, residual_long_sample_width, residual_long_std)
    short_period = gaussian_smooth(data, residual_short_sample_width, residual_short_std)
 
    diff = [short_period[i]-long_period[i] for i in range(len(long_period))]
    #diff = gaussian_smooth(diff, 21, 4)
    
    data = np.array(diff)
    max_index = list(argrelextrema(data,np.greater)[0])
    min_index = list(argrelextrema(data,np.less)[0])

    return max_index, min_index, residual_long_sample_width, residual_short_sample_width, diff, confidence
Code example #5
File: calc_match.py  Project: fndjjx/emd
def calc_para(list1,list2):
    

    list1_max_index = list(argrelextrema(np.array(list1),np.greater)[0])
    list1_min_index = list(argrelextrema(np.array(list1),np.less)[0])

    list2_max_index = list(argrelextrema(np.array(list2),np.greater)[0])
    list2_min_index = list(argrelextrema(np.array(list2),np.less)[0])

    error=10000

#######
#    if list1_max_index!=[] and list2_max_index!=[]:
#        error = abs(abs(list1[0]-list2[0])/list2[0])+abs(abs(list1[-1]-list2[-1])/list2[-1])+abs(abs(list1[list1_max_index[0]]-list2[list2_max_index[0]])/list2[list2_max_index[0]])+abs(1-list1_max_index[0]/list2_max_index[0])+abs(1-(len(list1)-list1_max_index[0])/(len(list2)-list2_max_index[0]))
#    if list1_min_index!=[] and list2_min_index!=[]:
#        error = abs(abs(list1[0]-list2[0])/list2[0])+abs(abs(list1[-1]-list2[-1])/list2[-1])+abs(abs(list1[list1_min_index[0]]-list2[list2_min_index[0]])/list2[list2_min_index[0]])+abs(1-list1_min_index[0]/list2_min_index[0])+abs(1-(len(list1)-list1_min_index[0])/(len(list2)-list2_min_index[0]))
        #error = abs(list1[0]-list2[0])+abs(list1[-1]-list2[-1])+abs(list1[list1_min_index[0]]-list2[list2_min_index[0]])
#######
    if list1_max_index!=[] and list2_max_index!=[]:
#        list1 = [i/abs(list1[list1_max_index[0]]-list1[0]) for i in list1]
#        list2 = [i/abs(list2[list2_max_index[0]]-list2[0]) for i in list2]
    #    error = abs((list2[0]/list1[0])-1)+abs((list2_max_index[0]/list1_max_index[0])-1)+abs((len(list2)-list2_max_index[0])/(len(list1)-list1_max_index[0])-1)
        #error = abs((list2[0]-list1[0])/list2[0])+abs((list2_max_index[0]-list1_max_index[0])/list2_max_index[0])+abs(((len(list2)-list2_max_index[0])-(len(list1)-list1_max_index[0]))/(len(list2)-list2_max_index[0]))+abs((list1[list1_max_index[0]]-list2[list2_max_index[0]])/list1[list1_max_index[0]])+abs((list2[-1]-list1[-1])/list2[-1])
        error = abs((list2[0]-list1[0])/min(list2[0],list1[0]))+abs((list2_max_index[0]-list1_max_index[0])/min(list2_max_index[0],list1_max_index[0]))+abs(((len(list2)-list2_max_index[0])-(len(list1)-list1_max_index[0]))/min((len(list2)-list2_max_index[0]),(len(list1)-list1_max_index[0])))+abs((list1[list1_max_index[0]]-list2[list2_max_index[0]])/min(list1[list1_max_index[0]],list2[list2_max_index[0]]))+abs((list2[-1]-list1[-1])/min(list1[-1],list2[-1]))+abs((len(list1)-len(list2))/min(len(list1),len(list2)))
    if list1_min_index!=[] and list2_min_index!=[]:
#        list1 = [i/abs(list1[list1_min_index[0]]-list1[0]) for i in list1]
#        list2 = [i/abs(list2[list2_min_index[0]]-list2[0]) for i in list2]
    #    error = abs((list2[0]/list1[0])-1)+abs((list2_min_index[0]/list1_min_index[0])-1)+abs((len(list2)-list2_min_index[0])/(len(list1)-list1_min_index[0])-1)
        #error = abs((list2[0]-list1[0])/list2[0])+abs((list2_min_index[0]-list1_min_index[0])/list2_min_index[0])+abs(((len(list2)-list2_min_index[0])-(len(list1)-list1_min_index[0]))/(len(list2)-list2_min_index[0]))+abs((list1[list1_min_index[0]]-list2[list2_min_index[0]])/list1[list1_min_index[0]])+abs((list2[-1]-list1[-1])/list2[-1])
        error = abs((list2[0]-list1[0])/min(list2[0],list1[0]))+abs((list2_min_index[0]-list1_min_index[0])/min(list2_min_index[0],list1_min_index[0]))+abs(((len(list2)-list2_min_index[0])-(len(list1)-list1_min_index[0]))/min((len(list2)-list2_min_index[0]),(len(list1)-list1_min_index[0])))+abs((list1[list1_min_index[0]]-list2[list2_min_index[0]])/min(list1[list1_min_index[0]],list2[list2_min_index[0]]))+abs((list2[-1]-list1[-1])/min(list1[-1],list2[-1]))+abs((len(list1)-len(list2))/min(len(list1),len(list2)))
    return error
Code example #6
File: Prepare_spectrum.py  Project: madamow/pyEW
def find_strong_lines(x, xo, gggf, gggf_infm, cfile, logfile):
    # We need to find strong lines in the spectrum and ignore all small changes in flux.
    # Here - code checks the change in signal around inflection points and compares it
    # to noise multiplied by rejection parameter

    max_ind = argrelextrema(gggf, np.greater)
    min_ind = argrelextrema(gggf, np.less)

    max_tab = np.array([x[max_ind], gggf[max_ind]])
    min_tab = np.array([x[min_ind], gggf[min_ind]])

    thold = get_thold(gggf, cfile.getfloat('Lines', 'r_lvl'))
    str_lines = []

    if not (max_tab.size != 0 and min_tab.size != 0 and thold == 0.0):
        for item in gggf_infm:
            indx = np.abs(max_tab[0, :] - item).argmin()
            indm = np.abs(min_tab[0, :] - item).argmin()

            if ((np.abs(max_tab[1, indx]) > thold) and
                    (np.abs(min_tab[1, indm]) > thold)):
                str_lines.append(item)

    str_lines = evaluate_lines(xo, str_lines,
                               cfile.getfloat('Lines', 'det_level'),
                               gggf_infm, logfile)
    return str_lines
Code example #7
def peaksValleys(data, attrib):

	for i in range(0,len(attrib)-2):

		k = [[], []] # k[0] peaks and k[1] valleys

		[k[0].append(data[j]) for j in argrelextrema(data, np.greater)[0]]
		[k[1].append(data[j]) for j in argrelextrema(data, np.less)[0]]

		#print (k[0])
		#print (k[1])

		aux = max(k[0])
		aux2 = min(k[1])
		for i in range(len(k[0])):
			try:
				if aux/k[0][i] < 0.7:
					k[0].pop(i)
			except IndexError:
				break


		for i in range(len(k[1])):
			try:
				if aux2/k[1][i] < 0.1:
					k[1].pop(i)
			except IndexError:
				break
		#print (k[0])
		#print (k[1])

	return k[0], k[1]
Code example #8
File: force.py  Project: Haider-BA/snake
  def get_extrema(self, limits=(0.0, float('inf')), order=5):
    """Computes masks (i.e. arrays of indices) of the extrema of the force.

    Parameters
    ----------
    limits: tuple of two floats, optional
      Time interval (start, end) in which to keep extrema;
      default: (0.0, inf).
    order: integer, optional
      Number of neighboring points used to define an extremum;
      default: 5.

    Returns
    -------
    minima: 1D array of integers
      Index of all minima.
    maxima: 1D array of integers
      Index of all maxima.
    """
    minima = signal.argrelextrema(self.values, numpy.less_equal, 
                                  order=order)[0][:-1]
    maxima = signal.argrelextrema(self.values, numpy.greater_equal, 
                                  order=order)[0][:-1]
    mask = numpy.where(numpy.logical_and(self.times >= limits[0],
                                         self.times <= limits[1]))[0]
    minima = numpy.intersect1d(minima, mask, assume_unique=True)
    maxima = numpy.intersect1d(maxima, mask, assume_unique=True)
    # remove indices that are too close
    minima = minima[numpy.append(True, minima[1:]-minima[:-1] > order)]
    maxima = maxima[numpy.append(True, maxima[1:]-maxima[:-1] > order)]
    return minima, maxima
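
The last two lines use a compact idiom worth noting: a boolean mask built from consecutive index differences, with True prepended so the first extremum is always kept. A standalone illustration with made-up indices:

import numpy as np

idx = np.array([3, 5, 20, 23, 40])
order = 5
keep = np.append(True, np.diff(idx) > order)  # [True, False, True, False, True]
print(idx[keep])                              # [ 3 20 40]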
Code example #9
File: utilities.py  Project: mlsamsom/PyFrictionTools
def tribocycle_finder(y_disp):
    """
    finds tribo-cycles using the y displacement curve
    :param y_disp:
    :return:
    """

    y_disp = moving_average(y_disp)
    maxima = argrelextrema(y_disp, np.greater, order=1000)
    minima = argrelextrema(y_disp, np.less, order=500)

    if maxima[0].size > minima[0].size:
        cycle_ends = maxima
        cycle_mid = minima
    elif minima[0].size > maxima[0].size:
        cycle_ends = minima
        cycle_mid = maxima
    else:
        print('Error in tribocycle finder, y displacement waveform incorrect')
        plt.plot(y_disp)
        plt.show()
        cycle_ends = np.nan
        cycle_mid = np.nan

    return cycle_ends, cycle_mid
Code example #10
File: resolution_direct_line.py  Project: acjak/dfxm
def plotPlots(linearray, sampletitle, roi):
	plt.figure(figsize=(14, 14))
	gs1 = matplotlib.gridspec.GridSpec(8, 8)
	gs1.update(wspace=0.25,  hspace=0.3)
	from scipy.signal import argrelextrema

	for i, li in enumerate(linearray):  # [0, :]):

		axarr = plt.subplot(gs1[i])

		lin = linearray[:, i]

		axarr.plot(lin)

		try:
			ma = np.mean(lin[argrelextrema(lin, np.greater)[0]])
			mi = np.mean(lin[argrelextrema(lin, np.less)[0]])
			axarr.plot([0,len(lin)],[ma,ma])
			axarr.plot([0,len(lin)],[mi,mi])
			axarr.set_title('{:.2%}'.format((ma-mi)/ma))
		except IndexError:
			print "Index error."

		axarr.set_ylim(0.6,1)
		# set_title('%.4f, %.4f' % (float(a[i]), float(b[i])))
		axarr.xaxis.set_major_formatter(plt.NullFormatter())
		axarr.yaxis.set_major_formatter(plt.NullFormatter())

	print(data.directory, sampletitle, str(roi))
	# plt.savefig('{}/{}_{}_lines.pdf'.format(data.directory, sampletitle, str(roi)))
	plt.savefig(data.directory + '/%s_%s_lines.pdf' % (sampletitle, str(roi)))
Code example #11
File: test4.py  Project: leaguilar/playground
def simplify3(nk):
	result=[]
	nk=np.array(nk)
	xk = nk/float(np.sum(nk))
	#print nk
	
	#X_plot = np.linspace(0, len(nk), 1000)[:, np.newaxis]
	sdiv=1000
	X_plot = np.linspace(0, len(xk), sdiv)[:, np.newaxis]
	custm = stats.rv_discrete(name='custm',a=0,b=7, values=(range(len(xk)), xk))
	yk= custm.rvs(size=100000)
	#yk.flatten()
	#fig, ax = plt.subplots(1, 1)
	#ax.hist(yk, normed=True, histtype='stepfilled', alpha=0.2)
	# gaussian KDE
	X=yk.reshape(-1, 1)
	kde = KernelDensity(kernel='gaussian', bandwidth=0.6).fit(X)
	log_dens = kde.score_samples(X_plot)
	mi, ma = argrelextrema(log_dens, np.less)[0], argrelextrema(log_dens, np.greater)[0]
	mi=np.rint(mi*float(len(xk))/float(sdiv))
	ma=np.rint(ma*float(len(xk))/float(sdiv))
	start=0	
	#print mi
	for i in mi:
		i=int(i)
		if start!=i:
			val=np.average(nk[start:i])
			for j in range(start, i):
				result.append(val)
		start=i	
	val=np.average(nk[start:])
	for j in range(start, len(nk)):
		result.append(val)
	return np.array(result)
Code example #12
File: reduce_frame.py  Project: hdtee/nirspec_drp
def find_order_edge_peaks(reduced):
    
    from scipy.signal import argrelextrema

    # make top and bottom edge images
    rolled = np.roll(reduced.flat, 5, axis=0)
    reduced.topEdgesImg = rolled - reduced.flat
    reduced.botEdgesImg = reduced.flat - rolled
    
    
    # take a vertical cut of edges
    reduced.topEdgesProfile = np.median(reduced.topEdgesImg[:, 40:50], axis=1)
    reduced.botEdgesProfile = np.median(reduced.botEdgesImg[:, 40:50], axis=1)

    # find the highest peaks in crosscut, search +/- 15 pixels to narrow down list
    top_extrema = argrelextrema(reduced.topEdgesProfile, np.greater, order=35)[0]
    bot_extrema = argrelextrema(reduced.botEdgesProfile, np.greater, order=35)[0]

    # find crosscut values at those extrema
    top_intensities = reduced.topEdgesProfile[top_extrema]
    bot_intensities = reduced.botEdgesProfile[bot_extrema]

    reduced.topEdgePeaks = list(zip(top_extrema, top_intensities))
    reduced.botEdgePeaks = list(zip(bot_extrema, bot_intensities))
    
    return
Code example #13
File: extrema.py  Project: Yasboti/Stocker
def accumulateFrame(symbols, order, mindate, maxdate):
    connection = sqlite3.connect('../data/historical.sl')
    c = connection.cursor()
    df = pd.DataFrame(columns=['ts','min','max']).set_index('ts')
    for symbol in symbols:
        query = "SELECT ts,close,high,low FROM eod WHERE symbol = '" + symbol + "' AND ts BETWEEN " + str(mindate) + " AND " + str(maxdate) + " ORDER BY ts;"
        c.execute(query)
        data = c.fetchall()
        dates = array([datetime.datetime.fromordinal(q[0]) for q in data])
        closes = array([q[1] for q in data])
        highs = array([q[2] for q in data])
        lows = array([q[3] for q in data])
        # compute extrema
        if len(lows) > 0:
            cmin = argrelextrema(data=lows, comparator=np.less, order=order)
            # create frame for minima indexed by date
            if(len(cmin[0]) > 0):
                mins = np.vstack((dates[cmin], np.negative(np.ones(cmin[0].size)), np.zeros(cmin[0].size))).transpose()
                df1 = pd.DataFrame(mins, columns=['ts','min','max']).set_index('ts')
                df = df.add(df1, fill_value=0)
        if len(highs) > 0:
            cmax = argrelextrema(data=highs, comparator=np.greater, order=order)
            # create frame for maxima indexed by date
            if(len(cmax[0]) > 0):
                maxes = np.vstack((dates[cmax], np.zeros(cmax[0].size), np.ones(cmax[0].size))).transpose()
                df2 = pd.DataFrame(maxes, columns=['ts','min','max']).set_index('ts')
                df = df.add(df2, fill_value=0)
    return df
Code example #14
def get_local_Extrema(time,data):
    ''' # Function to get the local extrema for a response
    #
    # Inputs:
    #   time = time array corresponding to the data
    #   data = the response data array (only pass a single dimension/state at a time)
    #
    # Output:
    #   localMaxes = the amplitude of the local maxes
    #   localMax_Times = the times of the local maxes
    #   localMins = the amplitude of the local mins
    #   localMin_Times = the times of the local mins
    #
    # Created: 03/28/14
    #   - Joshua Vaughan
    #   - [email protected]
    #   - http://www.ucs.louisiana.edu/~jev9637
    ######################################################################################
    '''
    from scipy import signal
    
    # Get local maximums
    localMax_indexes = signal.argrelextrema(data, np.greater)
    localMaxes = data[localMax_indexes]
    localMax_Times = time[localMax_indexes]

    # Get local minimums
    localMin_indexes = signal.argrelextrema(data, np.less)
    localMins = data[localMin_indexes]
    localMin_Times = time[localMin_indexes]
    
    return localMaxes, localMax_Times, localMins, localMin_Times
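
A short usage sketch on a damped oscillation (made-up data; assumes numpy is imported as np at module level, as the function body itself requires):

import numpy as np

time = np.linspace(0, 10, 1000)
data = np.exp(-0.2 * time) * np.sin(2 * np.pi * time)
maxes, max_times, mins, min_times = get_local_Extrema(time, data)
print(max_times[:3])  # roughly one maximum per period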
Code example #15
    def check(self, data, date):
        dataarr = np.asarray(data)
        extmax = argrelextrema(dataarr, np.greater)
        extmin = argrelextrema(dataarr, np.less)

        newdata = [[],[]]
        retpattern = []
        for i,d in enumerate(data):
            if i in extmax[0] or i in extmin[0]:
                newdata[0].append(date[i])
                newdata[1].append(d)

        for idx, val in enumerate(newdata[0]):
            if idx + 6 > len(newdata[0]):
                break
            itemx = newdata[0][idx: idx+6]
            itemy = newdata[1][idx: idx+6]
            for p in self.pattern:
                try:
                    ret = p.pattern(itemy)
                    if ret:
                        print(p.name)
                        print(itemx, itemy)
                        #pylab.scatter(itemx, itemy)
                        retpattern.append({'name': p.name, "iyd": itemy, \
                                                                "ixd": itemx})
                except Exception:
                    print("error in %s" % p.name)

        return retpattern
Code example #16
File: corfunc.py  Project: lewisodriscoll/corfunc-py
def extract(x, y):
    """Extract the interesting measurements from a correlation function"""
    # Calculate indexes of maxima and minima
    maxs = argrelextrema(y, np.greater)[0]
    mins = argrelextrema(y, np.less)[0]

    # If there are no maxima, return NaN
    garbage = Struct(minimum=np.nan,
                     maximum=np.nan,
                     dtr=np.nan,
                     Lc=np.nan,
                     d0=np.nan,
                     A=np.nan)
    if len(maxs) == 0:
        return garbage
    GammaMin = y[mins[0]]  # The value at the first minimum

    ddy = (y[:-2]+y[2:]-2*y[1:-1])/(x[2:]-x[:-2])**2  # Second derivative of y
    dy = (y[2:]-y[:-2])/(x[2:]-x[:-2])  # First derivative of y
    # Find where the second derivative goes to zero
    zeros = argrelextrema(np.abs(ddy), np.less)[0]
    # locate the first inflection point
    linear_point = zeros[0]
    linear_point = int(mins[0]/10)

    # Try to calculate slope around linear_point using 80 data points
    lower = linear_point - 40
    upper = linear_point + 40

    # If too few data points to the left, use linear_point*2 data points
    if lower < 0:
        lower = 0
        upper = linear_point * 2
    # If too few to right, use 2*(dy.size - linear_point) data points
    elif upper > dy.size:
        upper = dy.size
        width = dy.size - linear_point
        lower = 2*linear_point - dy.size

    m = np.mean(dy[lower:upper])  # Linear slope
    b = y[1:-1][linear_point]-m*x[1:-1][linear_point]  # Linear intercept

    Lc = (GammaMin-b)/m  # Hard block thickness

    # Find the data points where the graph is linear to within 1%
    mask = np.where(np.abs((y-(m*x+b))/y) < 0.01)[0]
    if len(mask) == 0:  # Return garbage for bad fits
        return garbage
    dtr = x[mask[0]]  # Beginning of Linear Section
    d0 = x[mask[-1]]  # End of Linear Section
    GammaMax = y[mask[-1]]
    A = -GammaMin/GammaMax  # Normalized depth of minimum

    return Struct(minimum=x[mins[0]],
                  maximum=x[maxs[0]],
                  dtr=dtr,
                  Lc=Lc,
                  d0=d0,
                  A=A)
Code example #17
def find_peaks_from_fourier(coefs, freqs):
	extremal_inds = signal.argrelextrema(coefs.real, np.greater)[0]
	extremal_inds = np.r_[extremal_inds, signal.argrelextrema(coefs.real, np.less)[0]]
	extremal_inds = np.r_[extremal_inds, signal.argrelextrema(coefs.imag, np.greater)[0]]
	extremal_inds = np.r_[extremal_inds, signal.argrelextrema(coefs.imag, np.less)[0]]
	extremal_inds = np.unique(extremal_inds)
	freq_inds = sorted(extremal_inds, key=lambda ind: -np.abs(coefs[ind]))
	return freqs[freq_inds]
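
A quick demonstration on a two-tone signal (made-up data; relies only on numpy's FFT helpers):

import numpy as np
from scipy import signal

fs = 1000.0
t = np.arange(0, 1.0, 1.0 / fs)
sig = np.sin(2 * np.pi * 50 * t) + 0.5 * np.sin(2 * np.pi * 120 * t)
coefs = np.fft.rfft(sig)
freqs = np.fft.rfftfreq(len(sig), d=1.0 / fs)
print(find_peaks_from_fourier(coefs, freqs)[:2])  # should lead with ~50 and ~120 Hz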
Code example #18
def _diff_peaks_(hist):
    '''
    detect peaks in a histogram
    '''
    peaks = argrelextrema(hist, np.greater_equal, order = 2)
    valleys = argrelextrema(hist, np.less_equal, order = 2)
    print(peaks)
    print(valleys)
    return peaks[0], valleys[0]
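
Note that the *_equal comparators also fire on every sample of a flat run, so neighbouring indices can show up together. A toy run (hypothetical histogram):

import numpy as np
from scipy.signal import argrelextrema

hist = np.array([0, 2, 2, 2, 0, 1, 0])
print(argrelextrema(hist, np.greater_equal, order=2)[0])  # [1 2 3]: the whole flat top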
Code example #19
File: runs.py  Project: jpbottaro/biolearn
def breaks(x, y, order=20, smooth_sigma=0.5):
    y = gaussian_filter1d(y, smooth_sigma)
    extremas = np.concatenate((
        argrelextrema(y, np.greater_equal, order=order)[0],
        argrelextrema(y, np.less_equal, order=order)[0]))
    for i in [0, len(x) - 1]:
        if i not in extremas:
            extremas = np.append(extremas, i)
    return np.sort(extremas)
Code example #20
File: pre_condition.py  Project: fndjjx/sds
def openFile(filepath, period):
    os.system('mkdir train_data')
    fp = open(filepath,'r')
    lines = fp.readlines()
    fp.close()
    data_list = []
    print(lines)
    for eachline in lines:
        eachline = eachline.strip('\n')
        print("each line of data %s" % eachline)
        data_list.append(float(eachline))
    x = np.arange(0,len(lines))
    y = data_list
    y_array = np.array(y)
    x_ymax = argrelextrema(y_array,np.greater)
    x_ymin = argrelextrema(y_array,np.less)
#ymax = []
#ymin = []
#
#for i in range(len(y)):
#    for j in range(len(x_ymax[0])):
#        if i == x_ymax[0][j]:
#            ymax.append(y[i])
#for i in range(len(y)):
#    for j in range(len(x_ymin[0])):
#        if i == x_ymin[0][j]:
#            ymin.append(y[i])
    y_new = []
    for i in range(len(y)):
        if i in x_ymax[0]:
            y_new.append(y[i])
            y_new.append("1")
        elif i in x_ymin[0]:
            y_new.append(y[i])
            y_new.append("-1")
        else:
            y_new.append(y[i])
            y_new.append("0")
    for i in range(len(y_new)):
        y_new[i] = "%s\n"%y_new[i]

    for i in range(0, 2*len(y) - period, 2):
        count = 0
        if i + period*2 > len(y_new):
            #count = (len(y_new) - i)/2
            break
        else:
            count = period
        fp = open("train_data/data_afterpre_%s" % i, 'w')
        fp.writelines("     1 1")
        fp.write("\n")
        fp.writelines(y_new[i:i+period*2])

        fp.seek(0, 0)
        fp.write("%s " % count)
        fp.close()
Code example #21
File: utils.py  Project: KNMI/VERCE
def find_local_extrema(data):
    """
    Function finding local extrema. It can also deal with flat extrema,
    e.g. a flat top or bottom. In that case the first index of all flat
    values will be returned.

    Returns a tuple of maxima and minima indices.
    """
    diff = np.diff(data)
    flats = np.argwhere(diff == 0)

    # Discard neighbouring flat points.
    new_flats = list(flats[0:1])
    for i, j in zip(flats[:-1], flats[1:]):
        if j - i == 1:
            continue
        new_flats.append(j)
    flats = new_flats

    maxima = []
    minima = []

    # Go over each flat position and check if it's a maximum/minimum.
    for idx in flats:
        l_type = "left"
        r_type = "right"
        for i in itertools.count():
            this_idx = idx - i - 1
            if diff[this_idx] < 0:
                l_type = "minima"
                break
            elif diff[this_idx] > 0:
                l_type = "maxima"
                break
        for i in itertools.count():
            this_idx = idx + i + 1
            if this_idx >= len(diff):
                break
            if diff[this_idx] < 0:
                r_type = "maxima"
                break
            elif diff[this_idx] > 0:
                r_type = "minima"
                break
        if r_type != l_type:
            continue
        if r_type == "maxima":
            maxima.append(int(idx))
        else:
            minima.append(int(idx))

    maxs = set(list(argrelextrema(data, np.greater)[0]))
    mins = set(list(argrelextrema(data, np.less)[0]))

    return np.array(sorted(list(maxs.union(set(maxima)))), dtype="int32"), \
        np.array(sorted(list(mins.union(set(minima)))), dtype="int32")
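
A quick check of the flat-extremum behaviour the docstring describes (hypothetical data; note that plain argrelextrema with np.greater misses the flat top entirely):

import numpy as np

data = np.array([0., 1., 2., 2., 2., 1., 0., -1., 0.])
maxima, minima = find_local_extrema(data)
print(maxima, minima)  # [2] [7]: first index of the flat top, plus the sharp minimum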
Code example #22
File: tools.py  Project: nglazyrin/MixSplitter
def get_feature_vector(data, start, end):
    subset = data[start: end]
    vector1 = numpy.mean(subset, axis=0)
    normalize_to_01(vector1)

    energy = numpy.sum(subset, axis=1)
    local_max = argrelextrema(energy, numpy.greater)[0]
    local_min = argrelextrema(energy, numpy.less)[0]
    avg_max = numpy.average(energy[local_max])
    avg_min = numpy.average(energy[local_min])
    i = 0
    j = 0
    local_attacks = {'slow': [], 'fast': []}
    local_decays = {'slow': [], 'fast': []}
    while i < len(local_max) and j < len(local_min):
        value = (energy[local_max[i]] - energy[local_min[j]]) / abs(local_max[i] - local_min[j])
        peak_type = 'fast' if energy[local_max[i]] > avg_max else 'slow'
        if local_max[i] > local_min[j]:
            local_attacks[peak_type].append(value)
            j += 1
        else:
            local_decays[peak_type].append(value)
            i += 1
    vector2 = numpy.array([#numpy.mean(local_attacks['slow']), numpy.std(local_attacks['slow']),
                           numpy.mean(local_attacks['fast']), numpy.std(local_attacks['fast']),
                           #numpy.mean(local_decays['slow']), numpy.std(local_decays['slow']),
                           numpy.mean(local_decays['fast']), numpy.std(local_decays['fast']),
                           avg_max / avg_min])
    '''
    attacks = []
    decays = []
    for row in range(subset.shape[1]):
        # energy = numpy.sum(subset, axis=1)
        energy = subset[:, row]
        local_max = argrelextrema(energy, numpy.greater)[0]
        local_min = argrelextrema(energy, numpy.less)[0]
        local_attacks = 0
        local_decays = 0
        while i < len(local_max) and j < len(local_min):
            value = (energy[local_max[i]] - energy[local_min[j]]) / abs(local_max[i] - local_min[j])
            if local_max[i] > local_min[j]:
                local_attacks += value
                j += 1
            else:
                local_decays += value
                i += 1
        attacks.append(local_attacks)
        decays.append(local_decays)
    '''
    # vector2 = numpy.array(attacks)
    normalize_to_01(vector2)
    # vector3 = numpy.array(decays)
    # normalize_to_01(vector3)
    return numpy.concatenate((vector1, vector2), axis=0)
Code example #23
File: draglift.py  Project: pencil-code/pencil-code
def find_extrema(series, maxmin):
    """
    Input: Data series, extrema of interest (max or min)
    Use scipy to find this
    scipy.signal.argrelextrema(array type of np,comparison operator eg np.greater or np.less)
    """
    from scipy.signal import argrelextrema # use to find local minima or maxima in numpy array
    if (maxmin == 1):
        return argrelextrema(series,np.greater)[0]
    elif(maxmin == -1):
        return argrelextrema(series,np.less)[0] # Tuple, return only nparray
Code example #24
File: best_split.py  Project: kjohnsson/modality
def best_split(data, I=(-np.inf, np.inf)):
    '''With bimodal data, finding split at lowest density.'''
    h_crit = critical_bandwidth_m_modes(data, 2, I)
    kde = KernelDensity(kernel='gaussian', bandwidth=h_crit).fit(data.reshape(-1, 1))
    x = np.linspace(max(np.min(data), I[0]), min(np.max(data), I[1]), 200)
    y = np.exp(kde.score_samples(x.reshape(-1, 1)))
    modes = argrelextrema(np.hstack([[0], y, [0]]), np.greater)[0]
    if len(modes) != 2:
        raise ValueError("{} modes at: {}".format(len(modes), x[modes-1]))
    ind_min = modes[0]-1 + argrelextrema(y[(modes[0]-1):(modes[1]-1)], np.less)[0]
    return x[ind_min]
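
Assuming the project's helper critical_bandwidth_m_modes (and the KernelDensity and argrelextrema imports) are available, usage might look like this (made-up bimodal sample):

import numpy as np

data = np.concatenate([np.random.normal(-2, 1, 500),
                       np.random.normal(3, 1, 500)])
print(best_split(data))  # split point near the density valley between the two modes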
Code example #25
File: judge.py  Project: fndjjx/sds
 def extrem_interval(self, rawdata):
     data = np.array(rawdata)
     max_index = argrelextrema(data,np.greater)[0]
     min_index = argrelextrema(data,np.less)[0]
   
     if max_index[0]>min_index[0]:
         max_min_period_delta = [max_index[i]-min_index[i] for i in range(min(len(max_index),len(min_index)))]
         min_max_period_delta = [min_index[i+1]-max_index[i] for i in range(min(len(max_index),len(min_index))-1)]
     else:
         max_min_period_delta = [max_index[i+1]-min_index[i] for i in range(min(len(max_index),len(min_index))-1)]
         min_max_period_delta = [min_index[i]-max_index[i] for i in range(min(len(max_index),len(min_index)))]
     return (max_min_period_delta,min_max_period_delta)
Code example #26
File: judge.py  Project: fndjjx/sds
 def ma_fig(self):
     data = np.array(self.data) 
     max_index = argrelextrema(data,np.greater)[0]
     min_index = argrelextrema(data,np.less)[0]
     for i in max_index:
         self.max_ma5_list.append(self.ma(self.data,5,0,i))
         self.max_ma10_list.append(self.ma(self.data,10,0,i))
         self.max_ma30_list.append(self.ma(self.data,30,0,i))
     for i in min_index:
         self.min_ma5_list.append(self.ma(self.data,5,0,i))
         self.min_ma10_list.append(self.ma(self.data,10,0,i))
         self.min_ma30_list.append(self.ma(self.data,30,0,i))
Code example #27
    def _get_cage_boundary(self, ground_point, frame, direction='left'):
        """ determines the cage boundary starting from a ground_point
        going in the given direction """

        # check whether we have to calculate anything
        if not self.params['cage/determine_boundaries']:
            if direction == 'left':
                return (0, ground_point[1])
            elif direction == 'right':
                return (frame.shape[1] - 1, ground_point[1])
            else:
                raise ValueError('Unknown direction `%s`' % direction)
            
        # extend the ground line toward the left edge of the cage
        if direction == 'left':
            border_point = (0, ground_point[1])
        elif direction == 'right':
            image_width = frame.shape[1] - 1
            border_point = (image_width, ground_point[1])
        else:
            raise ValueError('Unknown direction `%s`' % direction)
        
        # do the line scan
        profile = image.line_scan(frame, border_point, ground_point,
                                  self.params['cage/linescan_width'])
        
        # smooth the profile slightly
        profile = ndimage.filters.gaussian_filter1d(profile,
                                                    self.params['cage/linescan_smooth'])
        
        # add extra points to make determining the extrema reliable
        profile = np.r_[0, profile, 255]

        # determine first maximum and first minimum after that
        maxima = signal.argrelextrema(profile, comparator=np.greater_equal)
        pos_max = maxima[0][0]
        minima = signal.argrelextrema(profile[pos_max:],
                                      comparator=np.less_equal)
        pos_min = minima[0][0] + pos_max
        # we have to use argrelextrema instead of argrelmax and argrelmin,
        # because the latter don't capture wide maxima like [0, 1, 1, 0]
        
        if pos_min - pos_max >= 2:
            # get steepest point in this interval
            pos_edge = np.argmin(np.diff(profile[pos_max:pos_min + 1])) + pos_max
        else:
            # get steepest point in complete profile
            pos_edge = np.argmin(np.diff(profile))

        if direction == 'right':
            pos_edge = image_width - pos_edge
        
        return (pos_edge, ground_point[1])
Code example #28
File: epr_amplitude.py  Project: manuelgodoy/EPR
def peaks(signal):
    """signal must be a np array"""

    # for local maxima
    indices_max = argrelextrema(signal, np.greater)

    # for local minima
    indices_min = argrelextrema(signal, np.less)

    maxima = signal[indices_max[0]].max()
    minima = signal[indices_min[0]].min()

    return maxima, minima
Code example #29
def find_resonances(data, fit_dips_below = None, max_width = None):
	min_inds = (signal.argrelextrema(data, np.less)[0]).astype(int)
	max_inds = (signal.argrelextrema(data, np.greater)[0]).astype(int)
	widths = max_inds[1:] - max_inds[:-1]
	dip_inds = min_inds[1:] if min_inds[0] < max_inds[0] else min_inds[:-1]
	windows = [(max_inds[i], max_inds[i+1]) for i in range(len(dip_inds)-1)] # should this be 3?
	# a dip must go below fit_dips_below (unless fit_dips_below is None), and its width should be at most max_width (unless max_width is None)
	filter_fun = lambda p: ((data[p[0]] < fit_dips_below) if (fit_dips_below is not None) else True) and (p[1] <= max_width if max_width is not None else True)
	dip_inds, widths, windows = map(np.array, zip(*filter(filter_fun, zip(dip_inds, widths, windows))))
	# sort by width
	# sort_inds = np.argsort(-1 * widths)
	# return dip_inds[sort_inds], windows[sort_inds]
	return dip_inds, windows
Code example #30
  def get_extrema(self, order=5):
    """Computes extrema indices (minima and maxima) of the force coefficient.

    Parameters
    ----------
    order: int
      Number of points on each side to use for comparison; default: 5.
    """
    minima = signal.argrelextrema(self.values, numpy.less_equal, order=order)[0][:-1]
    maxima = signal.argrelextrema(self.values, numpy.greater_equal, order=order)[0][:-1]
    # remove indices that are too close
    self.minima = minima[numpy.append(True, minima[1:]-minima[:-1] > order)]
    self.maxima = maxima[numpy.append(True, maxima[1:]-maxima[:-1] > order)]
Code example #31
def process_results(path, ec_key, dataset_name):
    """ Read and process the results from this dataset"""
    print(path, dataset_name, ec_key)

    # Results folder
    results_folder = os.path.join(path, 'data/results')
    # List all the items in the results folder
    items = sorted(os.listdir(results_folder))
    # Select the csv files
    items = [item for item in items if ".csv" in item]
    # Select the axon recording files
    items = [item for item in items if item[:4] == "Axon"]

    # Array for the axons' activity maxima
    maxima = []

    # AP peak times
    appt_ = {}
    # AP latency times
    aplt_ = {}

    # Flags indicating which axons did fire and which not
    hasAP = {}

    # Voltage data
    data = {}

    # Geometrical properties
    xx_ = []
    yy_ = []
    rr_ = []

    # Iterate over the files and read them
    for filename in items:
        # Actually, i is taken from the file name
        i = int(filename.replace('Axon', '').replace('.csv', ''))
        data[i] = {}
        with open(os.path.join(results_folder, filename), "r") as f:
            fr = csv.reader(f)
            for row in fr:
                r0 = row[0]
                if ("NODE" in r0) or ("node" in r0):
                    data[i][key].append([float(x) for x in row[1:]])
                elif len(row) == 3:
                    xx_.append(float(r0))
                    yy_.append(float(row[1]))
                    rr_.append(float(row[2]))
                elif len(row) == 1:
                    try:
                        # print(key)
                        data[i][key] = np.array(data[i][key])
                    except NameError:
                        # There's no key yet
                        pass
                    key = r0
                    data[i][key] = []

        # When the last key is read, don't forget storing it
        data[i][key] = np.array(data[i][key])
        del key

    # Check maxima and relevant stuff
    vcrit = 15.
    for i, data_ in data.items():
        axondata = data_["v"]
        # Check if the maximum is an AP
        # print(axondata)
        maximum = axondata.max()
        maxima.append(maximum)
        if maximum > 0:
            # Regions where v is greater than vcrit
            whereAPs = np.where(axondata > vcrit)
            # Time when the first AP is fired (v rises above vcrit mV)
            when1stAP = whereAPs[1].min()
            where1stAP = whereAPs[0][np.where(whereAPs[1] == when1stAP)][0]
            segment_maxima = argrelextrema(axondata[where1stAP], np.greater)[0]
            # Local maxima
            local_maxima = axondata[where1stAP][segment_maxima]
            # Local maxima greater than vcrit mV
            # IMPORTANT: I named the following variable when1stAP_ just so
            # it doesn't overwrite when1stAP, but I can make it overwrite
            # it if I want
            when1stAP_ = segment_maxima[np.where(local_maxima > vcrit)][0]
            if True:
                APpeaktime = dt * (when1stAP - 1)
                appt_[i] = APpeaktime
                aplt_[i] = APpeaktime - 0.01
            hasAP[i] = True
        else:
            hasAP[i] = False
            aplt_[i] = 'nan'
        i += 1

    # Maxima to array
    maxima = np.array(maxima)

    # AP peak times to array
    # And subtract the pulse delay from them
    appt_values = np.array(list(appt_.values())) - 0.01

    if len(appt_values) == 0:
        # print("No axon fired an AP")
        pass
        # sys.exit()

    # Geometrical properties to array
    xx_ = np.array(xx_)
    yy_ = np.array(yy_)
    rr_ = np.array(rr_)

    # Topology

    # Open and read topology file
    topo_path = os.path.join(path,
                             'data/load/created_nerve_internal_topology.json')
    with open(topo_path, 'r') as f:
        topology = json.load(f)

    # Open and read the contours file
    contours_file = os.path.join(path, 'data/load/created_nerve_contour.csv')
    contours = {}
    with open(contours_file, "r") as f:
        fr = csv.reader(f)
        for row in fr:
            try:
                # Try to get numbers
                x = float(row[0])
            except ValueError:
                # It's a string
                key = row[0]
                contours[key] = []
            else:
                y = float(row[1])
                # Append the point to the corresponding contour
                contours[key].append([x, y])

    # Delete the key for tidyness
    del key

    # Polygons for each fascicle and the nerve
    polygons = {}
    for k, c in contours.items():
        pol = geo.Polygon(c)
        polygons[k] = pol.plpol

    # Fired axons per fascicle and in total in the nerve
    fascicle_ap_counter = {}
    for k in contours:
        if 'ascicle' in k:
            fascicle_ap_counter[k] = 0

    # Find them
    for i, b in hasAP.items():
        if b:
            # Find fascicle of this axon
            for k, p in polygons.items():
                if 'ascicle' in k:
                    if p.contains_point((xx_[i], yy_[i])):
                        # print('Axon %i is in %s'%(i, k))
                        fascicle_ap_counter[k] += 1
                        break

    # Read electrode settings
    settings_path = os.path.join(path, 'settings/electrodes.json')
    with open(settings_path, 'r') as f:
        stim_settings = json.load(f)

    current = list(
        list(stim_settings.values())[0]
        ['stimulation protocol'].values())[0]['currents'][0]

    total_number_APs = sum(list(fascicle_ap_counter.values()))

    # Dictionary to gather all the important data
    data_final = OrderedDict()
    data_final['dataset_name'] = dataset_name
    data_final['current'] = current
    data_final['fascicle_ap_counter'] = fascicle_ap_counter
    data_final['total_number_APs'] = total_number_APs
    data_final['AP times'] = aplt_

    # Save the data in the 'all data' dictionary
    data_all[dataset_name] = data_final

    # Save data into the data_recruitment dictionary
    data_recruitment['currents'].append(current)
    data_recruitment['recruitment'][ec_key]['nerve'].append(total_number_APs)
    for k, n in fascicle_ap_counter.items():
        data_recruitment['recruitment'][ec_key][k].append(n)

    # Save results in a json file
    with open('stim_results_%s%s' % (dataset_name, '.json'), 'w') as f:
        json.dump(data_final, f, indent=4)
Code example #32
def signal_mean_local_max(signal):
    local_max = argrelextrema(signal, np.greater)
    return np.mean(signal[local_max])
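
For reference, on a toy array (hypothetical data):

import numpy as np

arr = np.array([0., 2., 0., 4., 0.])
print(signal_mean_local_max(arr))  # mean of the local maxima 2 and 4 -> 3.0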
Code example #33
#plt.figure(figsize=(10,2/3*10))

plt.subplot(321)
plt.imshow(img)
##
# Prepare
res = np.abs(filter.gaussian_filter(filter.hsobel(img), 5))
res = np.clip(res, 0.0, 0.2)
plt.subplot(322)
plt.imshow(res)
plt.colorbar()

# Identify the left and right sides of the circles
#vsum=medfilt(np.sum(res,0),7)
vsum = np.sum(res, 0)
idx = argrelextrema(vsum, np.greater)
peaks = vsum[idx]
p1 = np.argmax(peaks)
peaks = np.delete(peaks, p1)
p2 = np.argmax(peaks)
p2 = p2 + 1 if p1 < p2 else p2
p1 = idx[0][p1]
p2 = idx[0][p2]

if p2 < p1:
    p1, p2 = p2, p1

pd3 = int(np.floor((p2 - p1) / 3))

plt.subplot(323)
plt.plot(vsum)
Code example #34
def extract_ppg(ppg):
    # Derive heart rate (HR) from blood volume pulse (BVP), given by the PPG sensor.
    peaks_temp = signal.argrelextrema(ppg, comparator = np.greater_equal, order = 3)
    peaks_temp = peaks_temp[0]  # Find initial, unfiltered locations of the heart beat peaks.
    peaks = []

    window_start = 0
    window_end = 400
    t = 0.5 # Tolerance for locating true heart beat peaks.
    j = 0

    # Filter peaks in peaks_temp to locate the true heart beat peaks in the samples, separating
    # them from misidentified peaks.
    while window_end != 8400:
        window_mean = np.mean(ppg[window_start : window_end + 1])
        
        for i in peaks_temp:
            if (len(peaks) == 0):
                if (ppg[i] > t * np.amax(ppg[window_start : window_end + 1]) + t * window_mean and i >= window_start and i <= window_end and ppg[i] == np.amax(ppg[window_start : window_end + 1])):
                    peaks.append(i)
            else:
                if (ppg[i] > t * np.amax(ppg[window_start : window_end + 1]) + t * window_mean and i - peaks[j] > 400 and i >= window_start and i <= window_end and ppg[i] == np.amax(ppg[window_start : window_end + 1])):
                    peaks.append(i)
                    j += 1

        # Move the window along the samples to check for true peaks among 400 samples per loop (the size of our window).
        window_start += 400
        window_end += 400

    peaks = np.array(peaks)
    peaks_value = [ppg[a] for a in peaks]

    beat_count = len(peaks)  
    print("Beat count:", beat_count)

    # Calculate instantaneous HR, using 60.0 as seconds per minute.
    if beat_count >= 2:
        time_intervals = (peaks[1 : beat_count - 1] - peaks[0]) / 800.0 # Peak time interval differences from the first peak, using 800.0 as the sampling rate. 
        hr = np.zeros(beat_count - 1)

        for i in range(1, beat_count - 1):
            hr[i - 1] = 60.0 / (time_intervals[i - 1] / i)  
    else:
        hr = np.zeros(1)

    # Extract HR features.
    mean_hr = np.mean(hr)  # Calculate the mean of HR.

    # Calculate the mean of the absolute value of the first difference of the HR values.
    if len(hr) >= 2:
        mean_first_diff_hr = np.mean(np.fabs(np.diff(hr)))  
    else:
        mean_first_diff_hr = 0

    # Derive peak-to-peak interval (PPI) from BVP.
    if len(peaks) >= 2:
        ppi = (np.diff(peaks) * 1000.0) / 800.0
    else:
        ppi = np.zeros(1)

    # Extract heart rate variability (HRV) features.
    global_max_ppi = np.amax(ppi)  # Calculate the global maximum of the PPI signal.
    global_min_ppi = np.amin(ppi)  # Calculate the global minimum of the PPI signal.
    mean_ppi = np.mean(ppi)  # Calculate the mean of the PPI signal.
    std_ppi = np.std(ppi)  # Calculate the standard deviation of the PPI signal.

    # Calculate the square root of the mean of the squares of the differences between adjacent PP intervals (RMSSD).
    if len(ppi) >= 2:
        rmssd = np.sqrt(np.mean(np.power(np.diff(ppi), 2)))  
        ppi_diff = np.fabs(np.diff(ppi)) # First difference of PPI values, used for PP50 calculation.
    else:
        rmssd = 0
        ppi_diff = np.zeros(1) 

    pp50_count = 0

    # Calculate the proportion of the number of pairs of successive PPs that differ by more than 50 ms (PP50), divided by the total number of PPs (pPP50).
    if beat_count >= 2:
        for i in range(0, beat_count - 2):
            if ppi_diff[i] > 50:
                pp50_count += 1

        ppp50 = (float(pp50_count) / float(beat_count)) * 100.0  
    else:
        ppp50 = 0

    return mean_hr, mean_first_diff_hr, global_max_ppi, global_min_ppi, mean_ppi, std_ppi, rmssd, ppp50  
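
As a sanity check of the HRV statistic above: RMSSD is the root mean square of successive PPI differences (made-up intervals in ms):

import numpy as np

ppi = np.array([800.0, 810.0, 790.0, 805.0])
rmssd = np.sqrt(np.mean(np.diff(ppi) ** 2))
print(rmssd)  # sqrt((100 + 400 + 225) / 3) ~= 15.5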
Code example #35
    def test_Network_04(self):
        cellParameters = dict(
            morphology=os.path.join(LFPy.__path__[0], 'test',
                                    'ball_and_sticks_w_lists.hoc'),
            templatefile=os.path.join(LFPy.__path__[0], 'test',
                                      'ball_and_stick_template.hoc'),
            templatename='ball_and_stick_template',
            templateargs=None,
            passive=True,
            dt=2**-3,
            tstop=100,
            delete_sections=False,
        )

        synapseParameters = dict(idx=0,
                                 syntype='Exp2Syn',
                                 weight=0.002,
                                 tau1=0.1,
                                 tau2=0.1,
                                 e=0)

        populationParameters = dict(
            CWD=None,
            CELLPATH=None,
            Cell=LFPy.NetworkCell,
            cell_args=cellParameters,
            pop_args=dict(radius=100, loc=0., scale=20.),
            rotation_args=dict(x=0, y=0),
            POP_SIZE=1,
            name='test',
        )
        networkParameters = dict(dt=2**-3,
                                 tstart=0.,
                                 tstop=100.,
                                 v_init=-70.,
                                 celsius=6.3,
                                 OUTPUTPATH='tmp_testNetworkPopulation')
        # set up
        network = LFPy.Network(**networkParameters)
        network.create_population(**populationParameters)

        cell = network.populations['test'].cells[0]

        # create synapses
        synlist = []
        numsynapses = 2
        for i in range(numsynapses):
            synlist.append(LFPy.Synapse(cell=cell, **synapseParameters))
            synlist[-1].set_spike_times(np.array([10 + (i * 10)]))

        network.simulate()

        # test that the input results in the correct amount of PSPs
        np.testing.assert_equal(
            ss.argrelextrema(cell.somav, np.greater)[0].size, numsynapses)

        # clean up
        network.pc.gid_clear()
        os.system('rm -r tmp_testNetworkPopulation')
        for population in network.populations.values():
            for cell in population.cells:
                cell.strip_hoc_objects()
        neuron.h('forall delete_section()')
Code example #36
def find_maxima(x):
    return signal.argrelextrema(x, np.greater)[0]
Code example #37
def analyze_mode(mode):
    maxima = signal.argrelextrema(mode, np.greater)
    minima = signal.argrelextrema(mode, np.less)
    extrema = np.size(maxima) + np.size(minima)
    zero_crossings = find_number_of_zero_crossings(mode)
    return extrema, zero_crossings, np.mean(mode)
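
find_number_of_zero_crossings is not shown in this excerpt; a minimal sign-change counter along these lines would fit the call (a sketch, not the project's actual helper):

import numpy as np

def find_number_of_zero_crossings(mode):
    signs = np.sign(mode)
    signs = signs[signs != 0]                    # ignore exact zeros
    return int(np.sum(signs[:-1] != signs[1:]))  # count sign changes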
Code example #38
        print "   (using a kde to find the chi-quare minima...)"

for x in range(0, n_objects):
    if (args.multiple):
        idx_value = np.where(ids == ids[x])[0]
        idx = idx_value[0]

        # First, do the version where I just look at all objects within range_frac of the best chi-square
        if (kde_version == 0):
            range_frac = 0.10

            minima_indices = []
            minima = []
            close_fits = []
            # Find all of the minima
            minima_indices = argrelextrema(chi2fit[:, x], np.less)[0]
            minima = tempfilt['zgrid'][argrelextrema(chi2fit[:, x], np.less)]
            if len(chi2fit[minima_indices, x] > 0):
                best_fit_chisq = np.min(chi2fit[minima_indices, x])
                best_fit_range = best_fit_chisq + range_frac * best_fit_chisq
                close_fits = np.where(
                    chi2fit[minima_indices, x] < best_fit_range)[0]

                w.write(
                    str(int(ids[x])) + '  ' + str(z_spec[x]) + '  ' +
                    str(z_phot[x]) + '  ' + str(z_prob[x]) + '  ' +
                    str(q_z[x]) + '  ' + str(round(NIRc_SNR_raw[x], 4)) +
                    '  ' + str(z_a[x]) + '  ' + str(chi_a[x]) + '  ')
                if (len(close_fits) == 1):
                    w.write('\n')
                else:
Code example #39
def find_minima(x):
    return signal.argrelextrema(x, np.less)[0]
Code example #40
            u = np.zeros((w,dims[0], dims[1]))
            d = np.zeros((w+1,dims[0], dims[1]))
            raw = np.concatenate((u, raw, d), axis=0)
    elif raw.shape[0] > upper:
        # crop the image along the third dimension
        dh = int(raw.shape[0] - upper / 2)
        raw = raw[dh:-dh, :, :]
    return raw


for (c,(i,m)) in enumerate(list(zip(images,masks))):
    r,h=nrrd.read(i)
    r = cv2.resize(r.astype("float32"), dsize=dims, interpolation=cv2.INTER_CUBIC)
    r=np.swapaxes(r,0,-1)
    if seek_for_lungs:
        idx = argrelextrema(np.mean(r, (1, 2)), np.greater)
        lo = idx[0][0]   # first and last mean-intensity peaks
        hi = idx[0][-1]
        r = r[lo+dmin:hi-dmax]
    r=pad(r,upper)
    X.append(r)



    r, h = nrrd.read(m)
    r = cv2.resize(r.astype("float32"), dsize=dims, interpolation=cv2.INTER_CUBIC)
    r = np.swapaxes(r, 0, -1)
    if seek_for_lungs:
        # select only the correct indices
        r = r[lo+dmin:hi-dmax]
    r = pad(r, upper)
Code example #41
File: new_180_2.py  Project: rosswhitfield/corelli
    i = 0
    while i < len(x) - 1:
        if x[i] - x[i + 1] < 100 and lookup[x[i]] == lookup[x[i + 1]]:
            out.append(int((x[i] + x[i + 1]) / 2))
            i += 2  # skip the merged neighbour
        else:
            out.append(x[i])
            i += 1
    out.append(x[-1])
    return np.array(out)


d2ave = average(d2, 100)
plt.plot(d2)
plt.plot(d2ave)
plt.show()

minima = argrelextrema(d2ave, np.less_equal, order=400)[0]
maxima = argrelextrema(d2ave, np.greater_equal, order=400)[0]

minima_mask = np.ma.masked_where(d2ave[minima] > -0.45, minima)
maxima_mask = np.ma.masked_where(d2ave[maxima] < 0.45, maxima)

plt.scatter(minima_mask, d2ave[minima_mask])
plt.scatter(maxima_mask, d2ave[maxima_mask])

plt.plot(m2 / 100)
plt.plot(d2ave)
plt.show()

d3ave = average(d3, 100)
plt.plot(d3)
plt.plot(d3ave)
Code example #42
def add_spectacle_to_fits(old_fits_name, new_fits_name, **kwargs):
    threshold = kwargs.get('threshold', 0.01)
    plot = kwargs.get('plot', False)

    orig_hdu = fits.open(old_fits_name)
    new_hdu = fits.HDUList([orig_hdu[0]])
    new_hdu.append(orig_hdu[1])

    keys_to_copy = ('LINENAME',
                    'RESTWAVE',
                    'F_VALUE',
                    'GAMMA',
                    'SIM_TAU_HDENS',
                    'SIM_TAU_TEMP',
                    'SIM_TAU_METAL',
                    'TOT_COLUMN',
                    'EXTNAME',
                    'XTENSION',     ## hack-y: these must be kept, not deleted
                    'BITPIX',
                    'NAXIS',
                    'NAXIS1',
                    'NAXIS2',
                    'PCOUNT',
                    'GCOUNT',
                    'TFIELDS',
                    'TTYPE1',
                    'TFORM1',
                    'TTYPE2',
                    'TFORM2',
                    'TUNIT2',
                    'TTYPE3',
                    'TFORM3',
                    'TTYPE4',
                    'TFORM4',
                    'TTYPE5',
                    'TFORM5',
                    'TTYPE6',
                    'TFORM6',
                    'TTYPE7',
                    'TFORM7',
                    'TTYPE8',
                    'TFORM8',
                    'TTYPE9',
                    'TFORM9',
                    'TTYPE10',
                    'TFORM10')

    ## now for the individual lines
    nlines = int(orig_hdu[0].header['NLINES'])
    for line_num in np.arange(nlines):
        key = 'LINE_'+str(line_num+1)
        line_name = orig_hdu[0].header[key]
        print('~~~~> trying',line_name,'~~~~~>>>>')

        if any([x.name.upper() == line_name.upper() for x in orig_hdu]):
            new_ext = orig_hdu[line_name]
            for k in orig_hdu[line_name].header:
                if k not in keys_to_copy:
                    print("deleting ", k)
                    del new_ext.header[k]

            lambda_0 = orig_hdu[line_name].header['RESTWAVE']
            disp = orig_hdu[line_name].data['wavelength']
            flux = orig_hdu[line_name].data['flux']
            tau = orig_hdu[line_name].data['tau']
            redshift = orig_hdu[line_name].data['redshift']
            zsnap = np.median(redshift)

            ## we want Nmin: the number of minima dipping below (1 - threshold)
            Nmin = np.size(np.where(flux[argrelextrema(flux, np.less)[0]] < (1-threshold)))
            new_ext.header['Nmin'] = Nmin

            print("~~~~> now trying to run spectacle on line ",line_name, "~~~~~~>")
            lines_properties = MISTY.get_line_info(disp, flux, \
                                            tau=tau, \
                                            redshift=zsnap, \
                                            lambda_0=lambda_0, \
                                            f_value=orig_hdu[line_name].header['F_VALUE'], \
                                            gamma=orig_hdu[line_name].header['GAMMA'], \
                                            ion_name=line_name, \
                                            threshold = threshold)
            print(lines_properties)


            for line_key in lines_properties:
                new_ext.header[line_key] = lines_properties[line_key]


            new_hdu.append(new_ext)
            print('~~~~> all done with',line_name,'~~~~~<<<')
        else:
            print('<<<<<~~~~ ',line_name,' not found :-(  ~~~~~<<<<<<')

    print("writing out to .... " + new_fits_name)
    new_hdu.writeto(new_fits_name, overwrite=True, output_verify='fix')
Code example #43
def region_loc(f):
    f_4 = os.path.expanduser(f)

    im = cv2.imread(f_4)

    variant = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)

    channel_1, channel_2, channel_3 = variant[:, :, 0], variant[:, :, 1], variant[:, :, 2]

    x = range(0, 1944)
    y = channel_3[:, 1100]

    cv2.line(variant, (1000, 0), (1000, 1944), (0, 0, 255), 15)

    signal2 = y

    signal_series = pd.Series(signal2)

    smooth_data = signal_series.rolling(10).mean()

    smooth_set = pd.Series(smooth_data)

    spl = UnivariateSpline(x, y)
    tm = signal.argrelextrema(spl(x), np.less)

    sm_factor = 5000

    while len(tm[0]) > 5:
        spl.set_smoothing_factor(sm_factor)
        tm = signal.argrelextrema(spl(x), np.less)
        sm_factor = sm_factor + 200

    peakind = signal.find_peaks_cwt(spl(x), np.arange(10, 200), noise_perc=20)

    t = []

    # tm is a 1-tuple of index arrays, so iterate over the minima indices
    for i in range(0, len(tm[0])):
        t.append(spl(x)[tm[0][i]])

    z = []

    for i in range(0, len(peakind)):
        z.append(spl(x)[peakind[i]])

    #add LOOP for peaking back in so we know the inflection points

    #plt.plot(x, spl(x), 'r--')
    #plt.scatter(tm, t)
    #plt.show()
    #
    #plt.plot(x, spl(x), 'r--')
    #plt.scatter(peakind, z)
    #plt.show()

    # Here we are looking to identify all of the peaks.  We eliminate outliers
    # below index 50, because the algorithm looks 50 px on both sides
    f_sets = []

    peak_location_factor = 0

    for i in tm[0]:
        if i > 50:
            f_der = np.gradient(spl(x)[i - 50:i + 50])
            f_sets.append(f_der)

        else:
            peak_location_factor = 1
            continue

    peak_quants = []
    for i in range(0, len(f_sets)):
        peak_quants.append(max(f_sets[i]))

    ROI = peak_quants.index(max(peak_quants)) + peak_location_factor

    #Our final return is the location of the center of the "TEST LINE"
    B_C = tm[0][ROI]

    #********************************************************************
    #********************************************************************
    #**************************FX 2*********************************
    #********************************************************************
    #********************************************************************

    x = range(0, 2592)
    y = channel_3[B_C - 100, :]

    signal2 = y

    signal_series = pd.Series(signal2)

    smooth_data = signal_series.rolling(10).mean()  # 10-sample rolling mean

    smooth_set = pd.Series(smooth_data)

    spl = UnivariateSpline(x, y)
    tm = signal.argrelextrema(spl(x), np.less)

    sm_factor = 50000

    spl.set_smoothing_factor(sm_factor)
    tm = signal.argrelextrema(spl(x), np.less)

    #while len(tm[0]) > 5:
    #    spl.set_smoothing_factor(sm_factor)
    #    tm  = signal.argrelextrema(spl(x), np.less)
    #    sm_factor = sm_factor + 200

    spike = min(spl(x)) + 30

    spike_value = 0
    spike_ind = 0

    while spike_value < spike:
        spike_value = spl(x)[spike_ind]
        spike_ind = spike_ind + 1

    return (B_C, spike_ind)
Code example #44
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import argrelextrema
import sys

#read phi2 data from gkyl run
import postgkyl as pg
filename = sys.argv[1]
data = pg.data.GData(filename)
beta = float(filename.split("_")[0].split("-")[1])

expected = 1.0 / np.sqrt(beta + .01)

#put timesteps and phi2 values into lists
times = data.peakGrid()[0]
phi2s = data.peakValues()

peaktimes = times[argrelextrema(phi2s, np.greater)[0]]
peaktimes = np.insert(peaktimes, 0, 0)
diff = 0
count = 0
for i in np.arange(peaktimes.size - 1):
    diff = diff + peaktimes[i + 1] - peaktimes[i]
    count = count + 1
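# phi^2 peaks twice per oscillation period, hence the factor of 2 below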
period = 2 * diff / count
freq = 2 * np.pi / period
print("%f\t%f\t%f" % (beta, freq, expected))
Code example #45

# imports inferred from this snippet's usage
import numpy as np
import cv2 as cv
from scipy.signal import argrelextrema

list_of_means = []

#Estimate the mean brightness and only select the brightest stack and the stack before the brightest
for tiff_index in range(len(photos)):
    if photos[tiff_index] != '.DS_Store':
        tiff_photo = cv.imread(basepath + "/" + photos[tiff_index])
        #mean intensity of the pixels/mean intensity of the image
        list_of_means.append(np.mean(tiff_photo))
#if you plot list_of_means you find distinct peaks in the intensity of each image studied
dim1 = np.shape(tiff_photo)[0]
dim2 = np.shape(tiff_photo)[1]

array_of_means = np.array(list_of_means)
#find the maximum and minimum intensity from the list_of_means
local_maxima = argrelextrema(array_of_means, np.greater)[0]
local_minima = argrelextrema(array_of_means, np.less)[0]

false_maximas = []
#there are 2 maxima for each img (1 true and 1 false maximum), which can be visualised with plt.plot(list_of_means)
local_maxima_list = []
for maxima in local_maxima:
    local_maxima_list.append(maxima)

#a maximum immediately followed by a local minimum is a false maximum
for minima in local_minima:
    for maxima in local_maxima:
        if minima == maxima + 1:
            false_maximas.append(maxima)
#remove all the false maximas
for false in false_maximas:
    local_maxima_list.remove(false)  # inferred body; the original snippet breaks off here
Code example #46
File: plot.py  Project: roosamic/FYS3150
infile.close()
"""
plt.plot(system1[0,0,:], system1[0, 1,:])
plt.show()
"""

length = np.zeros((steps))
perihelion = []
theta = []
perihelion.append(system1[0, :, 0])
theta.append(np.arctan2(system1[0, 1, 0], system1[0, 0, 0]))

for i in range(steps):
    length[i] = np.linalg.norm(system1[0, :, i])

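# perihelion passages correspond to local minima of the orbital radius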
index = argrelextrema(length, np.less)

for i in index[0]:
    perihelion.append(system1[0, :, i])
    theta.append(np.arctan2(system1[0, 1, i], system1[0, 0, i]))
print(theta)

times = np.linspace(0, 1, len(theta))
plt.plot(times, theta)
plt.xlabel("Time [Yr]")
plt.ylabel("Angle $\\theta$")
plt.legend(["Theta with GR-correction"])
plt.show()

x = np.zeros(len(perihelion))
y = np.zeros(len(perihelion))
Code example #47
    return mul2


MazeSize = 300
GridNum = 500
max_lamda = 200
x = np.linspace(20, max_lamda, 300)
# for i in [40,60,80,100]:
for i in [40]:
    grids1 = CreateGrid(1, i)
    grids = CreateGrid(300, 0)
    cor = cross_correlation(grids1, grids)
    for jj in range(0, 100):
        grids = CreateGrid(300, 0)
        cor += cross_correlation(grids1, grids)
    local_max_index = argrelextrema(cor.reshape(-1), np.greater)[0]

    plt.figure()
    plt.plot(x, cor[0, :])
    plt.plot(x[local_max_index], cor[0, local_max_index], "o", label="max")
    plt.xlabel('lambda (cm)')
    plt.ylabel('Normalized Cross-Correlation')

    diff = [(x[local_max_index][i]) / x[local_max_index][i - 1]
            for i in range(1, len(local_max_index))]
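    # each ratio of successive peak locations estimates the grid scale factor;
    # ave repeats the mean ratio for plotting as a horizontal reference line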
    ave = [np.average(diff) for i in range(20, max_lamda)]
    print(np.average(diff))
    plt.figure()
    plt.scatter(x[local_max_index][1:], diff)
    plt.plot(range(20, max_lamda), ave, color='r', linewidth=1.5)
    plt.xlabel('Peak Location (cm)')
Code example #48
    def generateGraphData(self):
        if debug:
            print(self.data)
        time = np.arange(len(self.data[:, 0])) * 1.0 / self.rate
        nfft = 1024 * 6
        pxx, freq, bins, plot = plt.specgram(self.data[:, 0],
                                             NFFT=nfft,
                                             noverlap=32,
                                             Fs=2,
                                             interpolation='bilinear')

        # Another plot
        #plt.show()
        plt.savefig('specgramHeavy.png')
        a = np.mean(pxx, axis=0)
        aa = np.arange(len(a))
        a = a / np.max(a) * np.max(self.data[:, 0])
        aa = aa / np.max(aa) * time[-1]

        f = interp1d(aa, a)
        newSmooth = f(time)

        indMax = argrelextrema(newSmooth, np.greater)[0]
        indMin = argrelextrema(newSmooth, np.less)[0]

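        # append the final sample as a minimum so the last note gets a closing boundary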
        lastValue = np.where(newSmooth == newSmooth[-1])[0]
        indMin = np.hstack((indMin, lastValue))

        # A plot
        # plt.plot(time,self.data[:,0])
        # plt.plot(aa,a)
        # plt.plot(time[indMax],newSmooth[indMax])
        # plt.plot(time[indMin],newSmooth[indMin])
        #plt.show()
        #plt.savefig('specgram2.png')

        notes = np.array([indMax, indMin]).T
        letterNotes, freqs, individualNotes = self.getFreq(notes, self.data)

        if debug:
            print("Letter notes: ")
            print(letterNotes)

        result = []
        # Format the notes
        for i, v in enumerate(letterNotes):
            #print v
            letters = letterNotes[i].lower()
            try:
                fixed = fixNotes(letters)
                result.append(fixed)
            except UnboundLocalError:
                m = re.search(r'\w.\d', letters)
                shortenedNote = m.group(0)
                newNote = shortenedNote[0] + 's' + shortenedNote[-1]
                fixed = fixNotes(newNote)
                #print fixed
                result.append(fixed)
                pass

        print "Result:"
        return result
Code example #49
                                                         intervalLength=0.01*fs),
                                         minout=0, method='extremos')))

time = np.linspace(0, len(song)/fs, len(song))
umbral = 0.05
supra = np.where(envelope44k > umbral)[0]
silabas = consecutive(supra, min_length=100)

# %%
times = []
freqs = []
times_sil = []
freqs_sil = np.zeros_like(song)
freq_zc = np.zeros_like(song)
freq_max = 6000
max_sig = argrelextrema(song, np.greater, order=int(fs/freq_max))
max_pos = [x for x in max_sig[0] if song[x] > np.mean(song)]
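# local frequency estimate: the inverse spacing of successive waveform maxima,
# stored at the midpoint between each pair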
for x in range(len(max_pos)-1):
    n_time = int((max_pos[x+1]+max_pos[x])/2)
    freq_zc[n_time] = fs/(max_pos[x+1]-max_pos[x])


def on_pick(event):
    global xmouse, ymouse
    global time
    global Sxx
    global df
    global tu
    global tu_closest
    global time_closest
    global f_event
Code example #50
for line in text:
    # decimal commas -> decimal points
    final.append(line.replace(',', '.'))
    i = i + 1

c = len(text)
tab = np.loadtxt(final, skiprows=17, delimiter="\t", max_rows=c - 17 - 1)

tablambda = tab[:, 0]
spectre = tab[:, 1]

from scipy.signal import argrelextrema
index = argrelextrema(spectre, np.less)
index_interf = np.empty([0], dtype=float)
mask1 = tablambda < lambdamax
mask2 = tablambda > lambdamin
mask = mask1 & mask2
ll = tablambda[mask]
ss = spectre[mask]
tt = np.empty(len(ss), dtype=float)

plt.plot(ll, ss)

N = len(ll)

ss_min = np.min(ss)
# criterion for the limit:
inter_lim = 2 * ss_min
Code example #51
File: calc_spline_imf.py  Project: renewday/emd
            


if __name__ == "__main__":


    start = int(sys.argv[1])
    end = int(sys.argv[2])
    imf_number = int(sys.argv[3])

    data = load_data("../back_test/test_data/zgtb")
    data = [data[i]-ma_func(data,3)[i] for i in range(len(data))]
    print(data[end])
    imf_base = emd_decom(data,6)
    imf_component_base = np.array(imf_base[imf_number][:end+1])
    imf_max_index_base = list(argrelextrema(imf_component_base,np.greater)[0])
    imf_min_index_base = list(argrelextrema(imf_component_base,np.less)[0])
    print "standard"
    print imf_max_index_base
    print imf_min_index_base

    data1 = data[start:end+1]
    imf = emd_decom(data1,6)
    imf_component = np.array(imf[imf_number])
    imf_max_index = list(argrelextrema(imf_component,np.greater)[0])
    imf_min_index = list(argrelextrema(imf_component,np.less)[0])
    imf_max_index = [i+start for i in imf_max_index]
    imf_min_index = [i+start for i in imf_min_index]
    print "partial"
    print imf_max_index
Code example #52
def find_orientation(i,
                     fitsim,
                     ra,
                     dec,
                     nn_ra,
                     nn_dec,
                     nn_dist,
                     maj,
                     s=(3 / 60.),
                     plot=False,
                     head=None,
                     hdulist=None):
    '''	
	Finds the orientation of NN

	To run this function for all sources, use setup_find_orientation_NN() instead.

	A big part of this function is a re-run of the postage function,
	since we need to make a new postage basically, but this time as an array.

	Arguments: 
	i : the new_Index of the source

	fitsim: the postage created earlier 

	ra, dec: the ra and dec of the source

	Maj: the major axis of the source // TO-BE

	s: the width of the image, default 3 arcmin, because it has to be a tiny bit
	lower than the postage created earlier (width 4 arcmin) or NaNs will appear in the image

	head and hdulist, the header and hdulist if a postage hasn't been created before.
	(This is so it doesn't open every time in the loop but is opened before the loop.)

	Output: 
	i, max_angle, len(err_orientations), amount_of_maxima, classification,
	lobe_ratio, position_angle
	Which are: the new_Index of the source, the best orientation angle, the
	number of orientations whose average flux is > 80% of the peak average flux
	along the line, the number of local flux maxima along the best orientation,
	the error classification, the flux ratio of the two brightest lobes, and
	the position angle between them.
	
	If plot=True, produces the plots of the best orientation and the Flux vs Orientation as well


	'''
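    # Example call (hypothetical values; the seven return values are listed
    # in the docstring above):
    #   result = find_orientation(42, 'postage_42.fits', ra, dec, nn_ra,
    #                             nn_dec, nn_dist, maj, plot=False)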
    ################## BEGIN Postage Function #############
    if not head:
        head = pf.getheader(fitsim)
        hdulist = pf.open(fitsim)

    # for NN take the projected middle as the RA and DEC
    ra, dec = (ra + nn_ra) / 2., (dec + nn_dec) / 2.

    # Parse the WCS keywords in the primary HDU
    wcs = pw.WCS(hdulist[0].header)
    # Some pixel coordinates of interest.
    skycrd = np.array([[ra, dec, 0, 0]], np.float_)  # RA, DEC padded to the four WCS axes
    # Convert pixel coordinates to world coordinates
    # The second argument is "origin" -- in this case we're declaring we
    # have 1-based (Fortran-like) coordinates.
    pixel = wcs.wcs_sky2pix(skycrd, 1)
    # Some pixel coordinates of interest.
    x = pixel[0][0]
    y = pixel[0][1]
    pixsize = abs(wcs.wcs.cdelt[0])
    if pl.isnan(s):
        s = 25.
    N = (s / pixsize)
    # print 'x=%.5f, y=%.5f, N=%i' %(x,y,N)
    ximgsize = head.get('NAXIS1')
    yimgsize = head.get('NAXIS2')
    if x == 0:
        x = ximgsize / 2
    if y == 0:
        y = yimgsize / 2
    offcentre = False
    # subimage limits: check if runs over edges
    xlim1 = x - (N / 2)
    if (xlim1 < 1):
        xlim1 = 1
        offcentre = True
    xlim2 = x + (N / 2)
    if (xlim2 > ximgsize):
        xlim2 = ximgsize
        offcentre = True
    ylim1 = y - (N / 2)
    if (ylim1 < 1):
        ylim1 = 1
        offcentre = True
    ylim2 = y + (N / 2)
    if (ylim2 > yimgsize):
        offcentre = True
        ylim2 = yimgsize

    xl = int(xlim1)
    yl = int(ylim1)
    xu = int(xlim2)
    yu = int(ylim2)
    ################## END Postage Function #############

    # extract the data array instead of making a postage stamp
    from astropy.nddata.utils import extract_array
    data_array = extract_array(hdulist[0].data, (yu - yl, xu - xl), (y, x))

    # use a radius for the line that is the NN distance,
    # but with 4 pixels more added to the radius
    # to make sure we do capture the whole source
    radius = nn_dist / 2. * 40  #arcmin --> image units
    radius = int(radius) + 4  # is chosen arbitrarily

    # in the P173+55 1 arcmin = 40 image units, should check if this is true everywhere, for FieldNames it is.

    # check if there are no NaNs in the cutout image, if there are then make smaller cutout
    if True in (np.isnan(data_array)):
        if s < (2 / 60.):
            print "No hope left for this source "
            return i, 0.0, 100.5, 100.5, 'no_hope', 100.5, 100.5

        elif s == (2 / 60.):
            print "Nan in the cutout image AGAIN ", head['OBJECT'], ' i = ', i
            try:
                return find_orientation(i,
                                        fitsim,
                                        ra,
                                        dec,
                                        nn_ra,
                                        nn_dec,
                                        nn_dist,
                                        maj,
                                        s=(2 * radius + 2) / 40. / 60.,
                                        plot=plot,
                                        head=head,
                                        hdulist=hdulist)
            except RuntimeError:
                print "No hope left for this source, "
                return i, 0.0, 100.5, 100.5, 'no_hope', 100.5, 100.5

        else:
            print "NaN in the cutout image: ", head['OBJECT'], ' i = ', i
            try:
                return find_orientation(i,
                                        fitsim,
                                        ra,
                                        dec,
                                        nn_ra,
                                        nn_dec,
                                        nn_dist,
                                        maj,
                                        s=(2 * radius + 4) / 40. / 60.,
                                        plot=plot,
                                        head=head,
                                        hdulist=hdulist)
            except RuntimeError:
                print "No hope left for this source, "
                return i, 0.0, 100.5, 100.5, 'no_hope', 100.5, 100.5

    from scipy.ndimage import map_coordinates
    #the center of the image is at the halfway point -1 for using array-index
    xcenter = np.shape(data_array)[0] / 2 - 1  # pixel coordinates
    ycenter = np.shape(data_array)[1] / 2 - 1  # pixel coordinates

    # define the start and end points of the line with 'num' points and radius = radius
    x0, y0 = xcenter - radius, ycenter
    x1, y1 = xcenter + radius, ycenter
    num = 1000

    # the final orientation will be max_angle
    max_angle = 0
    max_value = 0
    # flux values for 0 to 179 degrees of rotation (which is also conveniently their index)
    all_values = []
    for angle in range(0, 180):
        # rotate line on the left side of the center
        x0_rot, y0_rot = rotate_point((xcenter, ycenter), (x0, y0), angle)
        # rotate line on the right side of the center
        x1_rot, y1_rot = rotate_point((xcenter, ycenter), (x1, y1), angle)
        # the rotated line
        x_rot, y_rot = np.linspace(x0_rot, x1_rot,
                                   num), np.linspace(y0_rot, y1_rot, num)
        # extract the values along the line,
        zi = map_coordinates(data_array,
                             np.vstack((y_rot, x_rot)),
                             prefilter=True)
        # total flux along the line (proportional to the mean, since num is fixed)
        meanie = np.sum(zi)
        if meanie > max_value:
            max_value = meanie
            max_angle = angle
        all_values.append(meanie)

    # calculate all orientations for which the average flux lies within
    # 80 per cent of the peak average flux
    # (cast to an array: all_values is a plain Python list)
    err_orientations = np.where(np.array(all_values) > (0.8 * max_value))[0]

    # find the cutoff value, dependent on the distance, should think about whether i want to use Maj
    # Ill defined cutoff value but is later redefined in merge_value_added_catalog.py
    # So selection should NOT be done on basis of the 'classification' column
    cutoff = 2 * np.arctan(
        (maj / 60.) / nn_dist) * 180 / np.pi  # convert rad to deg
    if len(err_orientations) > cutoff:
        classification = 'Large err'
    else:
        classification = 'Small err'

    # then find the amount of maxima and the lobe_ratio
    from scipy.signal import argrelextrema
    # rotate line on the left side of the center
    x0_rot, y0_rot = rotate_point((xcenter, ycenter), (x0, y0), max_angle)
    # rotate line on the right side of the center
    x1_rot, y1_rot = rotate_point((xcenter, ycenter), (x1, y1), max_angle)
    x_rot, y_rot = np.linspace(x0_rot, x1_rot,
                               num), np.linspace(y0_rot, y1_rot, num)

    zi = map_coordinates(data_array, np.vstack((y_rot, x_rot)), prefilter=True)
    # find the local maxima in the flux along the line
    indx_extrema = argrelextrema(zi, np.greater)

    if zi[0] > zi[1]:
        # then there is a maximum (outside of the line range) , namely the first point
        new_indx_extrema = (np.append(indx_extrema, 0), )
        del indx_extrema
        indx_extrema = new_indx_extrema
        del new_indx_extrema

    if zi[len(zi) - 1] > zi[len(zi) - 2]:
        # then there is a maximum (outside of the line range), namely the last point
        new_indx_extrema = (np.append(indx_extrema, len(zi) - 1), )
        del indx_extrema
        indx_extrema = new_indx_extrema
        del new_indx_extrema

    amount_of_maxima = len(indx_extrema[0])
    # calculate the flux ratio of the lobes
    lobe_ratio_bool = False
    lobe_ratio = 0.0  # in case there is only 1 maximum
    position_angle = 0.0  # in case there is only 1 maximum
    if amount_of_maxima > 1:
        if amount_of_maxima == 2:
            lobe_ratio = (zi[indx_extrema][0] / zi[indx_extrema][1])
            # calculate the position angle
            lobe1 = np.array(
                [[x_rot[indx_extrema][0], y_rot[indx_extrema][0], 0, 0]])
            lobe2 = np.array(
                [[x_rot[indx_extrema][1], y_rot[indx_extrema][1], 0, 0]])
            lobe1 = wcs.wcs_pix2sky(lobe1, 1)
            lobe2 = wcs.wcs_pix2sky(lobe2, 1)
            position_angle = PositionAngle(lobe1[0][0], lobe1[0][1],
                                           lobe2[0][0], lobe2[0][1])
            if position_angle < 0:
                position_angle += 180.

        else:
            # more maxima, so the lobe_ratio is defined as the ratio between the brightest lobes
            indx_maximum_extrema = np.flip(
                np.argsort(zi[indx_extrema]),
                0)[:2]  #argsort sorts ascending so flip makes it descending
            indx_maximum_extrema = indx_extrema[0][indx_maximum_extrema]
            lobe_ratio = (zi[indx_maximum_extrema[0]] /
                          zi[indx_maximum_extrema][1])

            #find the RA and DEC of the two brightest lobes
            lobe1 = np.array([[
                x_rot[indx_maximum_extrema][0], y_rot[indx_maximum_extrema][0],
                0, 0
            ]])
            lobe2 = np.array([[
                x_rot[indx_maximum_extrema][1], y_rot[indx_maximum_extrema][1],
                0, 0
            ]])
            lobe1 = wcs.wcs_pix2sky(lobe1, 1)
            lobe2 = wcs.wcs_pix2sky(lobe2, 1)
            # then calculate the position angle
            position_angle = PositionAngle(lobe1[0][0], lobe1[0][1],
                                           lobe2[0][0], lobe2[0][1])
            if position_angle < 0:
                position_angle += 180.

    if plot:
        # the plot of the rotated source and the flux along the line
        fig, axes = plt.subplots(nrows=2, figsize=(12, 12))
        axes[0].imshow(data_array, origin='lower')
        axes[0].plot([x0_rot, x1_rot], [y0_rot, y1_rot], 'r-', alpha=0.3)
        axes[1].plot(zi)
        Fratio = 10.
        if amount_of_maxima > 1:
            if ((1. / Fratio) < lobe_ratio < (Fratio)):
                lobe_ratio_bool = True

        plt.suptitle('Field: ' + head['OBJECT'] + ' | Source ' + str(i) +
                     '\n Best orientation = ' + str(max_angle) +
                     ' degrees | err_orientation: ' +
                     str(len(err_orientations)) + '\nlobe ratio ' +
                     str(lobe_ratio_bool) + ' | extrema: ' +
                     str(indx_extrema) + ' | nn_dist: ' + str(nn_dist) +
                     '\n lobe ratio: ' + str(lobe_ratio) +
                     ' | Position Angle: ' + str(position_angle))

        # saving the figures to separate directories
        if amount_of_maxima == 2:
            plt.savefig('/data1/osinga/figures/cutouts/NN/try_4/2_maxima/' +
                        head['OBJECT'] + 'src_' + str(i) + '.png')

        elif amount_of_maxima > 2:
            plt.savefig('/data1/osinga/figures/cutouts/NN/try_4/more_maxima/' +
                        head['OBJECT'] + 'src_' + str(i) + '.png')

        else:
            plt.savefig('/data1/osinga/figures/cutouts/NN/try_4/1_maximum/' +
                        head['OBJECT'] + 'src_' + str(i) + '.png')
        plt.clf()
        plt.close()

        # the plot of the flux vs orientation
        plt.plot(all_values, label='all orientations')
        plt.scatter(err_orientations,
                    np.array(all_values)[err_orientations],
                    color='y',
                    label='0.8 fraction')
        plt.axvline(x=max_angle,
                    ymin=0,
                    ymax=1,
                    color='r',
                    label='best orientation')
        plt.title('Best orientation for Source ' + str(i) +
                  '\nClassification: ' + classification + ' | Cutoff: ' +
                  str(cutoff) + ' | Error: ' + str(len(err_orientations)))
        plt.ylabel('Average flux (arbitrary units)')
        plt.xlabel('orientation (degrees)')
        plt.legend()
        plt.xlim(0, 180)
        # saving the figures to separate directories
        if amount_of_maxima == 2:
            plt.savefig('/data1/osinga/figures/cutouts/NN/try_4/2_maxima/' +
                        head['OBJECT'] + 'src_' + str(i) + '_orientation.png')
        elif amount_of_maxima > 2:
            plt.savefig('/data1/osinga/figures/cutouts/NN/try_4/more_maxima/' +
                        head['OBJECT'] + 'src_' + str(i) + '_orientation.png')
        else:
            plt.savefig('/data1/osinga/figures/cutouts/NN/try_4/1_maximum/' +
                        head['OBJECT'] + 'src_' + str(i) + '_orientation.png')

        plt.clf()
        plt.close()
    return i, max_angle, len(
        err_orientations
    ), amount_of_maxima, classification, lobe_ratio, position_angle  #, err_position_angle
Code example #53
File: utils.py  Project: MoAdel1/ForexBasicModeling
def data_construct(DataFrame, lookUp, predictionWindow, pairName):
    '''function to construct the features from the inspection window and to create the supervised x,y pairs for training.

    Parameters
    ----------
    DataFrame : dataFrame
    LookUp : int
    predictionWindow : int
    pairName : str

    Returns
    -------
    output : dict
        a dict containing inputs matrix, targets matrix, raw inputs and mapping dict for features

    '''
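    # Example call (hypothetical DataFrame; the o/c/h/l/volume columns match
    # the accesses below):
    #   out = data_construct(candles_df, lookUp=60, predictionWindow=10,
    #                        pairName="EUR_USD")
    #   X, y = out["inputFeatures"], out["targets"]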
    # fetch data for indicators calculations
    openPrice = DataFrame.o.values.astype("double")
    closePrice = DataFrame.c.values.astype("double")
    highPrice = DataFrame.h.values.astype("double")
    lowPrice = DataFrame.l.values.astype("double")
    volume = DataFrame.volume.values.astype("double")

    # calculate technical indicators values
    simple_ma_slow = ta.SMA(closePrice, 30)  # slow moving average
    simple_ma_fast = ta.SMA(closePrice, 15)  # fast moving average
    exp_ma_slow = ta.EMA(closePrice, 20)  # slow exp moving average
    exp_ma_fast = ta.EMA(closePrice, 10)  # fast exp moving average
    bbands = ta.BBANDS(closePrice, timeperiod=15)  # calculate bollinger bands
    deltaBands = (bbands[0] - bbands[2]
                  ) / bbands[2]  # deltas between bands vector (bollinger)
    macd_s1, macd_s2, macd_hist = ta.MACD(
        closePrice)  # MACD values calculation
    sar = ta.SAR(highPrice, lowPrice)  # prabolic SAR
    stochK, stochD = ta.STOCH(highPrice, lowPrice,
                              closePrice)  # stochastic calculations
    rsi = ta.RSI(closePrice, timeperiod=15)  # RSI indicator
    adx = ta.ADX(highPrice, lowPrice, closePrice,
                 timeperiod=15)  # ADX indicator
    mfi = ta.MFI(highPrice, lowPrice, closePrice, volume,
                 timeperiod=15)  # money flow index

    # calculate statistical indicators values
    beta = ta.BETA(highPrice, lowPrice, timeperiod=5)  # beta from CAPM model
    slope = ta.LINEARREG_ANGLE(
        closePrice,
        timeperiod=5)  # slope from fitting linear reg. to the last x points

    # calculate candle indicators values
    spinTop = ta.CDLSPINNINGTOP(openPrice, highPrice, lowPrice, closePrice)
    doji = ta.CDLDOJI(openPrice, highPrice, lowPrice, closePrice)
    dojiStar = ta.CDLDOJISTAR(openPrice, highPrice, lowPrice, closePrice)
    marubozu = ta.CDLMARUBOZU(openPrice, highPrice, lowPrice, closePrice)
    hammer = ta.CDLHAMMER(openPrice, highPrice, lowPrice, closePrice)
    invHammer = ta.CDLINVERTEDHAMMER(openPrice, highPrice, lowPrice,
                                     closePrice)
    hangingMan = ta.CDLHANGINGMAN(openPrice, highPrice, lowPrice, closePrice)
    shootingStar = ta.CDLSHOOTINGSTAR(openPrice, highPrice, lowPrice,
                                      closePrice)
    engulfing = ta.CDLENGULFING(openPrice, highPrice, lowPrice, closePrice)
    morningStar = ta.CDLMORNINGSTAR(openPrice, highPrice, lowPrice, closePrice)
    eveningStar = ta.CDLEVENINGSTAR(openPrice, highPrice, lowPrice, closePrice)
    whiteSoldier = ta.CDL3WHITESOLDIERS(openPrice, highPrice, lowPrice,
                                        closePrice)
    blackCrow = ta.CDL3BLACKCROWS(openPrice, highPrice, lowPrice, closePrice)
    insideThree = ta.CDL3INSIDE(openPrice, highPrice, lowPrice, closePrice)

    # prepare the final matrix
    '''
    matrix configurations ::> [o,c,h,l,ma_slow,ma_fast,exp_slow,exp_fast,
                           deltaBands,macd_s1,macd_s2,sar,stochK,
                           stochD,rsi,adx,mfi,beta,slope,spinTop,doji,dojiStar,
                           marubozu,hammer,invHammer,hangingMan,shootingStar,engulfing,
                           morningStar,eveningStar,whiteSoldier,blackCrow,insideThree]
    a 33 features matrix in total
    '''
    DataMatrix = np.column_stack(
        (openPrice, closePrice, highPrice, lowPrice, simple_ma_slow,
         simple_ma_fast, exp_ma_slow, exp_ma_fast, deltaBands, macd_s1,
         macd_s2, sar, stochK, stochD, rsi, adx, mfi, beta, slope, spinTop,
         doji, dojiStar, marubozu, hammer, invHammer, hangingMan, shootingStar,
         engulfing, morningStar, eveningStar, whiteSoldier, blackCrow,
         insideThree))

    # remove undefined values
    DataMatrix = DataMatrix[~np.isnan(DataMatrix).any(
        axis=1)]  # remove all rows containing nan values

    # define number of windows to analyze
    framesCount = DataMatrix.shape[0] - (
        lookUp +
        predictionWindow) + 1  # 1D convolution output size = floor((n-f)/s) + 1, here s = 1

    # define input/output arrays container
    rawInputs = {}
    inputsOpen = np.zeros((framesCount, lookUp))
    inputsClose = np.zeros((framesCount, lookUp))
    inputsHigh = np.zeros((framesCount, lookUp))
    inputsLow = np.zeros((framesCount, lookUp))
    inputs = np.zeros((framesCount, 62))
    outputs = np.zeros((framesCount, 1))

    # main loop and data
    for i in range(framesCount):
        mainFrame = DataMatrix[i:i + lookUp + predictionWindow, :]
        window = np.array_split(mainFrame, [lookUp])[0]
        windowForecast = np.array_split(mainFrame, [lookUp])[1]
        '''
        window configurations ::>
        [0:o,1:c,2:h,3:l,4:ma_slow,5:ma_fast,6:exp_slow,7:exp_fast,
         8:deltaBands,9:macd_slow,10:macd_fast,11:sar,12:stochK,
         13:stochD,14:rsi,15:adx,16:mfi,17:beta,18:slope,19:spinTop,20:doji,21:dojiStar,
         22:marubozu,23:hammer,24:invHammer,25:hangingMan,26:shootingStar,27:engulfing,
         28:morningStar,29:eveningStar,30:whiteSoldier,31:blackCrow,32:insideThree]
        '''

        #sma features detection
        ma_slow = window[:, 4]
        ma_fast = window[:, 5]
        uptrend_cross = ma_fast > ma_slow
        uptrend_cross = np.concatenate(
            (np.array([False]),
             (uptrend_cross[:-1] <
              uptrend_cross[1:])))  # check the false->true transition
        try:
            uptrend_cross_location = np.where(uptrend_cross == True)[0][
                -1]  # latest uptrend cross_over location
        except:
            uptrend_cross_location = -1
        downtrend_cross = ma_slow > ma_fast
        downtrend_cross = np.concatenate(
            (np.array([False]),
             (downtrend_cross[:-1] <
              downtrend_cross[1:])))  # check the false->true transition
        try:
            downtrend_cross_location = np.where(downtrend_cross == True)[0][
                -1]  # latest downtrend cross_over location
        except:
            downtrend_cross_location = -1
        if (uptrend_cross_location >
                downtrend_cross_location):  # latest cross is an uptrend
            sma_latest_crossover = 1  # uptrend sign
            sma_location_of_latest_crossover = uptrend_cross_location
            alpha_1 = (math.atan(ma_slow[uptrend_cross_location] -
                                 ma_slow[uptrend_cross_location - 1])) * (
                                     180 / math.pi)
            alpha_2 = (math.atan(ma_fast[uptrend_cross_location] -
                                 ma_fast[uptrend_cross_location - 1])) * (
                                     180 / math.pi)
            sma_latest_crossover_angle = alpha_1 + alpha_2
        elif (downtrend_cross_location >
              uptrend_cross_location):  # latest cross is a downtrend
            sma_latest_crossover = -1  # downtrend sign
            sma_location_of_latest_crossover = downtrend_cross_location
            alpha_1 = (math.atan(ma_slow[downtrend_cross_location] -
                                 ma_slow[downtrend_cross_location - 1])) * (
                                     180 / math.pi)
            alpha_2 = (math.atan(ma_fast[downtrend_cross_location] -
                                 ma_fast[downtrend_cross_location - 1])) * (
                                     180 / math.pi)
            sma_latest_crossover_angle = alpha_1 + alpha_2
        else:  # no cross in the given window
            sma_latest_crossover = 0  # no sign
            sma_location_of_latest_crossover = -1
            sma_latest_crossover_angle = 0
        up_count = np.sum(ma_fast > ma_slow)
        down_count = np.sum(ma_slow > ma_fast)
        if (up_count > down_count):
            sma_dominant_type_fast_slow = 1
        elif (down_count > up_count):
            sma_dominant_type_fast_slow = -1
        else:
            sma_dominant_type_fast_slow = 0

        #ema features detection
        exp_slow = window[:, 6]
        exp_fast = window[:, 7]
        uptrend_cross = exp_fast > exp_slow
        uptrend_cross = np.concatenate(
            (np.array([False]),
             (uptrend_cross[:-1] <
              uptrend_cross[1:])))  # check the false->true transition
        try:
            uptrend_cross_location = np.where(uptrend_cross == True)[0][
                -1]  # latest uptrend cross_over location
        except:
            uptrend_cross_location = -1
        downtrend_cross = exp_slow > exp_fast
        downtrend_cross = np.concatenate(
            (np.array([False]),
             (downtrend_cross[:-1] <
              downtrend_cross[1:])))  # check the false->true transition
        try:
            downtrend_cross_location = np.where(downtrend_cross == True)[0][
                -1]  # latest downtrend cross_over location
        except:
            downtrend_cross_location = -1
        if (uptrend_cross_location >
                downtrend_cross_location):  # latest cross is an uptrend
            ema_latest_crossover = 1  # uptrend sign
            ema_location_of_latest_crossover = uptrend_cross_location
            alpha_1 = (math.atan(exp_slow[uptrend_cross_location] -
                                 exp_slow[uptrend_cross_location - 1])) * (
                                     180 / math.pi)
            alpha_2 = (math.atan(exp_fast[uptrend_cross_location] -
                                 exp_fast[uptrend_cross_location - 1])) * (
                                     180 / math.pi)
            ema_latest_crossover_angle = alpha_1 + alpha_2
        elif (downtrend_cross_location >
              uptrend_cross_location):  # latest cross is a downtrend
            ema_latest_crossover = -1  # downtrend sign
            ema_location_of_latest_crossover = downtrend_cross_location
            alpha_1 = (math.atan(exp_slow[downtrend_cross_location] -
                                 exp_slow[downtrend_cross_location - 1])) * (
                                     180 / math.pi)
            alpha_2 = (math.atan(exp_fast[downtrend_cross_location] -
                                 exp_fast[downtrend_cross_location - 1])) * (
                                     180 / math.pi)
            ema_latest_crossover_angle = alpha_1 + alpha_2
        else:  # no cross in the given window
            ema_latest_crossover = 0  # no sign
            ema_location_of_latest_crossover = -1
            ema_latest_crossover_angle = 0
        up_count = np.sum(exp_fast > exp_slow)
        down_count = np.sum(exp_slow > exp_fast)
        if (up_count > down_count):
            ema_dominant_type_fast_slow = 1
        elif (down_count > up_count):
            ema_dominant_type_fast_slow = -1
        else:
            ema_dominant_type_fast_slow = 0

        # B.Bands features detection
        deltaBands = window[:, 8]
        deltaBands_mean = np.mean(deltaBands)
        deltaBands_std = np.std(deltaBands)
        deltaBands_maximum_mean = np.amax(deltaBands) / deltaBands_mean
        deltaBands_maximum_location = np.where(
            deltaBands == np.amax(deltaBands))[0][-1]  # location of maximum
        deltaBands_minimum_mean = np.amin(deltaBands) / deltaBands_mean
        deltaBands_minimum_location = np.where(
            deltaBands == np.amin(deltaBands))[0][-1]  # location of minimum

        # macd features detection
        macd_slow = window[:, 9]
        macd_fast = window[:, 10]
        uptrend_cross = macd_fast > macd_slow
        uptrend_cross = np.concatenate(
            (np.array([False]),
             (uptrend_cross[:-1] <
              uptrend_cross[1:])))  # check the false->true transition
        try:
            uptrend_cross_location = np.where(uptrend_cross == True)[0][
                -1]  # latest uptrend cross_over location
        except:
            uptrend_cross_location = -1
        downtrend_cross = macd_slow > macd_fast
        downtrend_cross = np.concatenate(
            (np.array([False]),
             (downtrend_cross[:-1] <
              downtrend_cross[1:])))  # check the false->true transition
        try:
            downtrend_cross_location = np.where(downtrend_cross == True)[0][
                -1]  # latest downtrend cross_over location
        except:
            downtrend_cross_location = -1
        if (uptrend_cross_location >
                downtrend_cross_location):  # latest cross is an uptrend
            macd_latest_crossover = 1  # uptrend sign
            macd_location_of_latest_crossover = uptrend_cross_location
            alpha_1 = (math.atan(macd_slow[uptrend_cross_location] -
                                 macd_slow[uptrend_cross_location - 1])) * (
                                     180 / math.pi)
            alpha_2 = (math.atan(macd_fast[uptrend_cross_location] -
                                 macd_fast[uptrend_cross_location - 1])) * (
                                     180 / math.pi)
            macd_latest_crossover_angle = alpha_1 + alpha_2
        elif (downtrend_cross_location >
              uptrend_cross_location):  # latest cross is a downtrend
            macd_latest_crossover = -1  # downtrend sign
            macd_location_of_latest_crossover = downtrend_cross_location
            alpha_1 = (math.atan(macd_slow[downtrend_cross_location] -
                                 macd_slow[downtrend_cross_location - 1])) * (
                                     180 / math.pi)
            alpha_2 = (math.atan(macd_fast[downtrend_cross_location] -
                                 macd_fast[downtrend_cross_location - 1])) * (
                                     180 / math.pi)
            macd_latest_crossover_angle = alpha_1 + alpha_2
        else:  # no cross in the given window
            macd_latest_crossover = 0  # no sign
            macd_location_of_latest_crossover = -1
            macd_latest_crossover_angle = 0
        up_count = np.sum(macd_fast > macd_slow)
        down_count = np.sum(macd_slow > macd_fast)
        if (up_count > down_count):
            macd_dominant_type_fast_slow = 1
        elif (down_count > up_count):
            macd_dominant_type_fast_slow = -1
        else:
            macd_dominant_type_fast_slow = 0

        # sar features detection
        average_price = (window[:, 0] + window[:, 1] + window[:, 2] +
                         window[:, 3]) / 4
        sar = window[:, 11]
        uptrend = sar < average_price
        uptrend = np.concatenate(
            (np.array([False]),
             (uptrend[:-1] < uptrend[1:])))  # check the false->true transition
        try:
            uptrend_location = np.where(
                uptrend == True)[0][-1]  # latest uptrend location
        except:
            uptrend_location = -1
        downtrend = sar > average_price
        downtrend = np.concatenate(
            (np.array([False]),
             (downtrend[:-1] <
              downtrend[1:])))  # check the false->true transition
        try:
            downtrend_location = np.where(
                downtrend == True)[0][-1]  # latest downtrend location
        except:
            downtrend_location = -1
        if (uptrend_location >
                downtrend_location):  # latest signal is an uptrend
            sar_latest_shiftPoint = 1
            sar_latest_shiftPoint_location = uptrend_location
        elif (downtrend_location >
              uptrend_location):  # latest signal is a downtrend
            sar_latest_shiftPoint = -1
            sar_latest_shiftPoint_location = downtrend_location
        else:  # same direction along the frame under question
            sar_latest_shiftPoint = 0  # no sign
            sar_latest_shiftPoint_location = -1
        sar_total_number_shifts = np.where(
            downtrend == True)[0].shape[0] + np.where(
                uptrend == True)[0].shape[0]

        # stochastic(K) features detection
        stochK = window[:, 12]
        stochK_mean = np.mean(stochK)
        stochK_std = np.std(stochK)
        uptrend = stochK <= 20
        uptrend = np.concatenate(
            (np.array([False]),
             (uptrend[:-1] < uptrend[1:])))  # check the false->true transition
        try:
            uptrend_location = np.where(
                uptrend == True)[0][-1]  # latest uptrend location
        except:
            uptrend_location = -1
        downtrend = stochK >= 80
        downtrend = np.concatenate(
            (np.array([False]),
             (downtrend[:-1] <
              downtrend[1:])))  # check the false->true transition
        try:
            downtrend_location = np.where(
                downtrend == True)[0][-1]  # latest downtrend location
        except:
            downtrend_location = -1
        if (uptrend_location >
                downtrend_location):  # latest signal is an uptrend
            stochK_latest_event = 1
            stochK_event_location = uptrend_location
        elif (downtrend_location >
              uptrend_location):  # latest signal is a downtrend
            stochK_latest_event = -1
            stochK_event_location = downtrend_location
        else:  # same direction along the frame under question
            stochK_latest_event = 0  # no sign
            stochK_event_location = -1

        # stochastic(D) features detection
        stochD = window[:, 13]
        stochD_mean = np.mean(stochD)
        stochD_std = np.std(stochD)
        uptrend = stochD <= 20
        uptrend = np.concatenate(
            (np.array([False]),
             (uptrend[:-1] < uptrend[1:])))  # check the false->true transition
        try:
            uptrend_location = np.where(
                uptrend == True)[0][-1]  # latest uptrend location
        except:
            uptrend_location = -1
        downtrend = stochD >= 80
        downtrend = np.concatenate(
            (np.array([False]),
             (downtrend[:-1] <
              downtrend[1:])))  # check the false->true transition
        try:
            downtrend_location = np.where(
                downtrend == True)[0][-1]  # latest downtrend location
        except:
            downtrend_location = -1
        if (uptrend_location >
                downtrend_location):  # latest signal is an uptrend
            stochD_latest_event = 1
            stochD_event_location = uptrend_location
        elif (downtrend_location >
              uptrend_location):  # latest signal is a downtrend
            stochD_latest_event = -1
            stochD_event_location = downtrend_location
        else:  # same direction along the frame under question
            stochD_latest_event = 0  # no sign
            stochD_event_location = -1

        # rsi features detection
        rsi = window[:, 14]
        rsi_mean = np.mean(rsi)
        rsi_std = np.std(rsi)
        uptrend = rsi <= 30
        uptrend = np.concatenate(
            (np.array([False]),
             (uptrend[:-1] < uptrend[1:])))  # check the false->true transition
        try:
            uptrend_location = np.where(
                uptrend == True)[0][-1]  # latest uptrend location
        except:
            uptrend_location = -1
        downtrend = rsi >= 70
        downtrend = np.concatenate(
            (np.array([False]),
             (downtrend[:-1] <
              downtrend[1:])))  # check the false->true transition
        try:
            downtrend_location = np.where(
                downtrend == True)[0][-1]  # latest downtrend location
        except:
            downtrend_location = -1
        if (uptrend_location >
                downtrend_location):  # latest signal is an uptrend
            rsi_latest_event = 1
            rsi_event_location = uptrend_location
        elif (downtrend_location >
              uptrend_location):  # latest signal is a downtrend
            rsi_latest_event = -1
            rsi_event_location = downtrend_location
        else:  # same direction along the frame under question
            rsi_latest_event = 0  # no sign
            rsi_event_location = -1

        # adx features detection
        adx = window[:, 15]
        adx_mean = np.mean(adx)
        adx_std = np.std(adx)
        splitted_array = np.array_split(adx, 2)
        m0 = np.mean(splitted_array[0])
        m1 = np.mean(splitted_array[1])
        adx_mean_delta_bet_first_second_half = (m1 - m0) / m0

        # mfi features detection
        mfi = window[:, 16]
        mfi_mean = np.mean(mfi)
        mfi_std = np.std(mfi)
        splitted_array = np.array_split(mfi, 2)
        m0 = np.mean(splitted_array[0])
        m1 = np.mean(splitted_array[1])
        mfi_mean_delta_bet_first_second_half = (m1 - m0) / m0

        # resistance levels features detection
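        # swing highs (order=4: higher than the four neighbours on each side)
        # act as resistance levels; each feature below is the relative distance
        # of the last close to one of the up-to-three most recent levels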
        closePrice = window[:, 1]
        resLevels = argrelextrema(closePrice, np.greater, order=4)[0]
        if (resLevels.shape[0] == 0):
            relation_r1_close = 0
            relation_r2_close = 0
            relation_r3_close = 0
        elif (resLevels.shape[0] == 1):
            relation_r1_close = (closePrice[-1] -
                                 closePrice[resLevels[-1]]) / closePrice[-1]
            relation_r2_close = 0
            relation_r3_close = 0
        elif (resLevels.shape[0] == 2):
            relation_r1_close = (closePrice[-1] -
                                 closePrice[resLevels[-1]]) / closePrice[-1]
            relation_r2_close = (closePrice[-1] -
                                 closePrice[resLevels[-2]]) / closePrice[-1]
            relation_r3_close = 0
        else:
            relation_r1_close = (closePrice[-1] -
                                 closePrice[resLevels[-1]]) / closePrice[-1]
            relation_r2_close = (closePrice[-1] -
                                 closePrice[resLevels[-2]]) / closePrice[-1]
            relation_r3_close = (closePrice[-1] -
                                 closePrice[resLevels[-3]]) / closePrice[-1]

        # support levels features detection
        closePrice = window[:, 1]
        supLevels = argrelextrema(closePrice, np.less, order=4)[0]
        if (supLevels.shape[0] == 0):
            relation_s1_close = 0
            relation_s2_close = 0
            relation_s3_close = 0
        elif (supLevels.shape[0] == 1):
            relation_s1_close = (closePrice[-1] -
                                 closePrice[supLevels[-1]]) / closePrice[-1]
            relation_s2_close = 0
            relation_s3_close = 0
        elif (supLevels.shape[0] == 2):
            relation_s1_close = (closePrice[-1] -
                                 closePrice[supLevels[-1]]) / closePrice[-1]
            relation_s2_close = (closePrice[-1] -
                                 closePrice[supLevels[-2]]) / closePrice[-1]
            relation_s3_close = 0
        else:
            relation_s1_close = (closePrice[-1] -
                                 closePrice[supLevels[-1]]) / closePrice[-1]
            relation_s2_close = (closePrice[-1] -
                                 closePrice[supLevels[-2]]) / closePrice[-1]
            relation_s3_close = (closePrice[-1] -
                                 closePrice[supLevels[-3]]) / closePrice[-1]

        # slope features detection
        slope = window[:, 18]
        slope_mean = np.mean(slope)

        # beta features detection
        beta = window[:, 17]
        beta_mean = np.mean(beta)
        beta_std = np.std(beta)

        # candlestick pattern features: net count of bullish (+100) minus
        # bearish (-100) signals inside the window for each pattern
        # (summing a boolean mask counts the matching candles)

        # spinTop features detection
        count100plus = np.sum(window[:, 19] == 100)
        count100minus = np.sum(window[:, 19] == -100) * -1
        spinTop_number_occurrence = count100plus + count100minus

        # doji features detection
        count100plus = np.sum(window[:, 20] == 100)
        count100minus = np.sum(window[:, 20] == -100) * -1
        doji_number_occurrence = count100plus + count100minus

        # dojiStar features detection
        count100plus = np.sum(window[:, 21] == 100)
        count100minus = np.sum(window[:, 21] == -100) * -1
        dojiStar_number_occurrence = count100plus + count100minus

        # marubozu features detection
        count100plus = np.sum(window[:, 22] == 100)
        count100minus = np.sum(window[:, 22] == -100) * -1
        marubozu_number_occurrence = count100plus + count100minus

        # hammer features detection
        count100plus = np.sum(window[:, 23] == 100)
        count100minus = np.sum(window[:, 23] == -100) * -1
        hammer_number_occurrence = count100plus + count100minus

        # invHammer features detection
        count100plus = np.sum(window[:, 24] == 100)
        count100minus = np.sum(window[:, 24] == -100) * -1
        invHammer_number_occurrence = count100plus + count100minus

        # hangingMan features detection
        count100plus = np.sum(window[:, 25] == 100)
        count100minus = np.sum(window[:, 25] == -100) * -1
        hangingMan_number_occurrence = count100plus + count100minus

        # shootingStar features detection
        count100plus = np.sum(window[:, 26] == 100)
        count100minus = np.sum(window[:, 26] == -100) * -1
        shootingStar_number_occurrence = count100plus + count100minus

        # engulfing features detection
        count100plus = np.sum(window[:, 27] == 100)
        count100minus = np.sum(window[:, 27] == -100) * -1
        engulfing_number_occurrence = count100plus + count100minus

        # morningStar features detection
        count100plus = np.sum(window[:, 28] == 100)
        count100minus = np.sum(window[:, 28] == -100) * -1
        morningStar_number_occurrence = count100plus + count100minus

        # eveningStar features detection
        count100plus = np.sum(window[:, 29] == 100)
        count100minus = np.sum(window[:, 29] == -100) * -1
        eveningStar_number_occurrence = count100plus + count100minus

        # whiteSoldier features detection
        count100plus = np.sum(window[:, 30] == 100)
        count100minus = np.sum(window[:, 30] == -100) * -1
        whiteSoldier_number_occurrence = count100plus + count100minus

        # blackCrow features detection
        count100plus = np.sum(window[:, 31] == 100)
        count100minus = np.sum(window[:, 31] == -100) * -1
        blackCrow_number_occurrence = count100plus + count100minus

        # insideThree features detection
        count100plus = np.sum(window[:, 32] == 100)
        count100minus = np.sum(window[:, 32] == -100) * -1
        insideThree_number_occurrence = count100plus + count100minus

        # fill the inputs matrix
        inputs[i, 0] = sma_latest_crossover
        inputs[i, 1] = sma_location_of_latest_crossover
        inputs[i, 2] = sma_latest_crossover_angle
        inputs[i, 3] = sma_dominant_type_fast_slow
        inputs[i, 4] = ema_latest_crossover
        inputs[i, 5] = ema_location_of_latest_crossover
        inputs[i, 6] = ema_latest_crossover_angle
        inputs[i, 7] = ema_dominant_type_fast_slow
        inputs[i, 8] = deltaBands_mean
        inputs[i, 9] = deltaBands_std
        inputs[i, 10] = deltaBands_maximum_mean
        inputs[i, 11] = deltaBands_maximum_location
        inputs[i, 12] = deltaBands_minimum_mean
        inputs[i, 13] = deltaBands_minimum_location
        inputs[i, 14] = macd_latest_crossover
        inputs[i, 15] = macd_location_of_latest_crossover
        inputs[i, 16] = macd_latest_crossover_angle
        inputs[i, 17] = macd_dominant_type_fast_slow
        inputs[i, 18] = sar_latest_shiftPoint
        inputs[i, 19] = sar_latest_shiftPoint_location
        inputs[i, 20] = sar_total_number_shifts
        inputs[i, 21] = stochK_mean
        inputs[i, 22] = stochK_std
        inputs[i, 23] = stochK_latest_event
        inputs[i, 24] = stochK_event_location
        inputs[i, 25] = stochD_mean
        inputs[i, 26] = stochD_std
        inputs[i, 27] = stochD_latest_event
        inputs[i, 28] = stochD_event_location
        inputs[i, 29] = rsi_mean
        inputs[i, 30] = rsi_std
        inputs[i, 31] = rsi_latest_event
        inputs[i, 32] = rsi_event_location
        inputs[i, 33] = adx_mean
        inputs[i, 34] = adx_std
        inputs[i, 35] = adx_mean_delta_bet_first_second_half
        inputs[i, 36] = mfi_mean
        inputs[i, 37] = mfi_std
        inputs[i, 38] = mfi_mean_delta_bet_first_second_half
        inputs[i, 39] = relation_r1_close
        inputs[i, 40] = relation_r2_close
        inputs[i, 41] = relation_r3_close
        inputs[i, 42] = relation_s1_close
        inputs[i, 43] = relation_s2_close
        inputs[i, 44] = relation_s3_close
        inputs[i, 45] = slope_mean
        inputs[i, 46] = beta_mean
        inputs[i, 47] = beta_std
        inputs[i, 48] = spinTop_number_occurrence
        inputs[i, 49] = doji_number_occurrence
        inputs[i, 50] = dojiStar_number_occurrence
        inputs[i, 51] = marubozu_number_occurrence
        inputs[i, 52] = hammer_number_occurrence
        inputs[i, 53] = invHammer_number_occurrence
        inputs[i, 54] = hangingMan_number_occurrence
        inputs[i, 55] = shootingStar_number_occurrence
        inputs[i, 56] = engulfing_number_occurrence
        inputs[i, 57] = morningStar_number_occurrence
        inputs[i, 58] = eveningStar_number_occurrence
        inputs[i, 59] = whiteSoldier_number_occurrence
        inputs[i, 60] = blackCrow_number_occurrence
        inputs[i, 61] = insideThree_number_occurrence

        # fill raw inputs matrices
        inputsOpen[i, :] = window[:, 0].reshape(1, lookUp)
        inputsClose[i, :] = window[:, 1].reshape(1, lookUp)
        inputsHigh[i, :] = window[:, 2].reshape(1, lookUp)
        inputsLow[i, :] = window[:, 3].reshape(1, lookUp)

        # fill the output matrix
        futureClose = windowForecast[:, 1]
        if (pairName == "USD_JPY"):
            outputs[
                i, 0] = (futureClose[-1] - futureClose[0]
                         ) / 0.01  # one pip = 0.01 for any pair containing JPY
        else:
            outputs[i, 0] = (futureClose[-1] - futureClose[0]
                             ) / 0.0001  # one pip = 0.0001 for the other pairs

    # create mapping dict.
    mappingDict = {
        "sma_latest_crossover": 0,
        "sma_location_of_latest_crossover": 1,
        "sma_latest_crossover_angle": 2,
        "sma_dominant_type_fast_slow": 3,
        "ema_latest_crossover": 4,
        "ema_location_of_latest_crossover": 5,
        "ema_latest_crossover_angle": 6,
        "ema_dominant_type_fast_slow": 7,
        "deltaBands_mean": 8,
        "deltaBands_std": 9,
        "deltaBands_maximum_mean": 10,
        "deltaBands_maximum_location": 11,
        "deltaBands_minimum_mean": 12,
        "deltaBands_minimum_location": 13,
        "macd_latest_crossover": 14,
        "macd_location_of_latest_crossover": 15,
        "macd_latest_crossover_angle": 16,
        "macd_dominant_type_fast_slow": 17,
        "sar_latest_shiftPoint": 18,
        "sar_latest_shiftPoint_location": 19,
        "sar_total_number_shifts": 20,
        "stochK_mean": 21,
        "stochK_std": 22,
        "stochK_latest_event": 23,
        "stochK_event_location": 24,
        "stochD_mean": 25,
        "stochD_std": 26,
        "stochD_latest_event": 27,
        "stochD_event_location": 28,
        "rsi_mean": 29,
        "rsi_std": 30,
        "rsi_latest_event": 31,
        "rsi_event_location": 32,
        "adx_mean": 33,
        "adx_std": 34,
        "adx_mean_delta_bet_first_second_half": 35,
        "mfi_mean": 36,
        "mfi_std": 37,
        "mfi_mean_delta_bet_first_second_half": 38,
        "relation_r1_close": 39,
        "relation_r2_close": 40,
        "relation_r3_close": 41,
        "relation_s1_close": 42,
        "relation_s2_close": 43,
        "relation_s3_close": 44,
        "slope_mean": 45,
        "beta_mean": 46,
        "beta_std": 47,
        "spinTop_number_occurrence": 48,
        "doji_number_occurrence": 49,
        "dojiStar_number_occurrence": 50,
        "marubozu_number_occurrence": 51,
        "hammer_number_occurrence": 52,
        "invHammer_number_occurrence": 53,
        "hangingMan_number_occurrence": 54,
        "shootingStar_number_occurrence": 55,
        "engulfing_number_occurrence": 56,
        "morningStar_number_occurrence": 57,
        "eveningStar_number_occurrence": 58,
        "whiteSoldier_number_occurrence": 59,
        "blackCrow_number_occurrence": 60,
        "insideThree_number_occurrence": 61
    }

    # remove undefined values: drop every row that contains a NaN feature
    validRows = ~np.isnan(inputs).any(axis=1)
    inputs = inputs[validRows]
    outputs = outputs[validRows]
    inputsOpen = inputsOpen[validRows]
    inputsClose = inputsClose[validRows]
    inputsHigh = inputsHigh[validRows]
    inputsLow = inputsLow[validRows]

    # create raw inputs dict.
    rawInputs["open"] = inputsOpen
    rawInputs["close"] = inputsClose
    rawInputs["high"] = inputsHigh
    rawInputs["low"] = inputsLow

    # return the function output
    output = {
        "mappingDict": mappingDict,
        "rawInputs": rawInputs,
        "inputFeatures": inputs,
        "targets": outputs
    }
    return output
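
# a minimal usage sketch (illustrative; the builder function's name sits
# outside this excerpt, so `buildFeatures` below is a hypothetical stand-in):
# mappingDict maps feature names to column indices of inputFeatures, so
# columns can be looked up by name:
#
#   result = buildFeatures(...)
#   col = result["mappingDict"]["rsi_mean"]        # -> 29
#   rsi_means = result["inputFeatures"][:, col]    # RSI mean, one row per window
#   pips = result["targets"][:, 0]                 # forecast price move in pips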
コード例 #54
def calculateWidth(filamentNo,filamentData, params, plotIndividualProfiles, printResults):
	
	# reading user defined parameters 	
	npix    	= params["npix"] 
	avg_len 	= params["avg_len"] 
	niter   	= params["niter"]
	lam 		= params["lam"]
	pix 		= params["pixel_size"]
	dist 		= params["distance"] 
	fits_file	= params["fits_file"]
	dv 		= params["dv"]
	smooth		= params["smooth"]
	noise_level 	= params["noise_level"]
	int_thresh 	= params["int_thresh"]
	intensity	= fits.getdata(fits_file)


	resultsList	= []
	range_perp	= (np.arange(-npix,npix)*pix*dist/206265)	# pixel offsets -> physical length (206265 arcsec/rad)

	n = 1
	n2 = avg_len
	
	# we divide the filament into smaller chunks so we can average
	# the intensity profiles over these smaller chunks
	# the chunks are set to be 3-beamsize pieces; the size can be set with avg_len
	# in case the data is not divisible by avg_len there will be a leftover chunk
	# these leftover profiles will be averaged together
	leftover_chunk = len(filamentData)%n2
	#avg_until = len(filamentData)//n2
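	# e.g. for len(filamentData) = 30 and avg_len = 12 (hypothetical numbers):
	# chunks end at n2 = 12 and 24, then n2 jumps to 29 (24 + leftover_chunk - 1)
	# and the loop breaks once the leftover 6 profiles have been averaged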
		

	while True: #n2 <  len(filamentData):	
		# this is only for testing 
		fig = pl.figure(figsize=(7,5))
		ax1 = aplpy.FITSFigure('/home/suri/projects/carmaorion/analysis/disperse/c18o/0.2kms_resolution/han1_mask_imfit_c18o_pix_2_Tmb_noEdges_north_v4.7-13.1.peak.fits', figure=fig)
		ax1.show_colorscale(cmap='Greys', vmin=-0.5,vmax=10.)
		ax1.ticks.set_xspacing(0.1)
		ax1.ticks.show()
		ax1.ticks.set_color('black')
	
		ax1.tick_labels.set_xformat('hh:mm:ss')
		ax1.tick_labels.set_yformat('dd:mm')

		ax1.tick_labels.set_font(size='large', weight='medium', \
				stretch='normal', family='serif', \
				style='normal', variant='normal')
				
		v = [320,680,270,500]
		pl.axis(v)
		
		ax1.add_scalebar(0.01142,'0.1 pc',color='black',linewidth=3.,corner='top left')
			
		#print 'n2 :', n2	
	  	 
		print ' '
		print '######################################################################'
		print 'Calculating filament width using FilChaP'
		print 'Filament number		= ', filamentNo 	
		print 'Processed chunk		= ', n2//avg_len, 'of', len(filamentData)//avg_len
		print '######################################################################'

		line_perpList			= []
		line_perpList2			= []	
		average_profile			= np.zeros((npix))
		average_profile2		= np.zeros((npix))	
		average_profileFull		= np.zeros((npix*2))
		stacked_profile			= np.zeros((len(filamentData),npix*2))
		stacked_profile_baseSubt 	= np.zeros((len(filamentData),npix*2))

		
		while n < n2-1:
			print n
	 		
			x = filamentData[:,0]
			y = filamentData[:,1]
			z = filamentData[:,2]
			
			#xWorld, yWorld = w1.wcs_pix2world(x,y,0)
        		#xPix, yPix = w2.wcs_world2pix(xWorld,yWorld,0)
			pl.plot(x,y,'.', markersize=8)
				
			x0 = int(x[n])
			y0 = int(y[n])
			z0 = int(z[n])
			r0 = np.array([x0,y0], dtype=float) 		# point on the filament
		
			# to save the data for the results table
			x0save = x0
			y0save = y0
			z0save = z0


			profileLSum = np.zeros(npix)      
			profileRSum = np.zeros(npix)
	
			
			###################################################################################
			# this is where we calculate the slices perpendicular to the spine of the filament
			# the loops below are implemented following Duarte-Cabral & Dobbs (2016)
			# we calculate distances in 2 separate for loops below
			# ################################################################################
			
			# find the tangent curve
			a = x[n+1] - x[n-1]
			b = y[n+1] - y[n-1]
			normal=np.array([a,b], dtype=float)
			#print a, b	
			#pl.plot(normal)
			#equation of normal plane: ax+by=const

			const = np.sum(normal*r0)
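			# the perpendicular distance from a pixel (jj,ii) to this line is
			# |a*jj + b*ii - const| / sqrt(a**2 + b**2), which is what
			# dist_normal computes below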
					
			# defining the arrays to be used below
			distance = np.zeros_like(intensity[0])
			distance2 = np.zeros_like(intensity[0])

			line_perp = np.zeros_like(intensity[0])
			line_perp2 = np.zeros_like(intensity[0])		
			
		
			# Loop 1: if the slope is negative
			if -float(b)/a > 0:
			
			  try:
				
				for ii in range(y0-npix,y0+1):
					for jj in range(x0-npix,x0+1):
			
						distance[ii,jj]=((jj-x0)**2.+(ii-y0)**2.)**0.5 #distance between point (i,j) and filament
						if (distance[ii,jj] <  npix-1):
							dist_normal=(np.fabs(a*jj+b*ii-const))/(np.sum(normal*normal))**0.5 #distance between point (i,j) and the normal 
							# take the point if it is in the vicinity of the normal (distance < 2 pix)
							if (dist_normal < 2):
								line_perp[ii,jj] = distance[ii,jj] #storing the nearby points
								line_perpList.extend((ii,jj,distance[ii,jj]))
				for ii in range(y0,y0+npix+1):
					for jj in range(x0,x0+npix+1):

						distance2[ii,jj]=((jj-x0)**2.+(ii-y0)**2.)**0.5
						if (distance2[ii,jj] <  npix-1):
							dist_normal2=(np.fabs(a*jj+b*ii-const))/(np.sum(normal*normal))**0.5

							if (dist_normal2 < 2):
								line_perp2[ii,jj] = distance2[ii,jj]
								line_perpList2.extend((ii,jj,distance2[ii,jj]))

			  except IndexError:
				print 'Index Error while creating the perpendicular array!'
				break
			
			# Loop 2: if the slope is positive
			elif -float(b)/a < 0:
			  try:
				for ii in range(y0,y0+npix+1):
					for jj in range(x0-npix,x0+1):
						distance[ii,jj]=((jj-x0)**2.+(ii-y0)**2.)**0.5
						if (distance[ii,jj] <  npix-1):
							dist_normal=(np.fabs(a*jj+b*ii-const))/(np.sum(normal*normal))**0.5
							if (dist_normal < 2):
								line_perp[ii,jj] = distance[ii,jj]
								line_perpList.extend((ii,jj,distance[ii,jj]))
				for ii in range(y0-npix,y0+1):
					for jj in range(x0, x0+npix+1):
						distance2[ii,jj]=((jj-x0)**2.+(ii-y0)**2.)**0.5
						if (distance2[ii,jj] <  npix-1):
							dist_normal2=(np.fabs(a*jj+b*ii-const))/(np.sum(normal*normal))**0.5
							if (dist_normal2 < 2):
								line_perp2[ii,jj] = distance2[ii,jj]
								line_perpList2.extend((ii,jj,distance2[ii,jj]))

			  except IndexError:
				print 'Index Error while creating the perpendicular array!'
				break
			
			####################################################
			# now that we have the perpendicular slices ########
			# we can get the intensities along these slices ####
			####################################################
			perpendicularLine = np.array(line_perpList).reshape(-1,3)
			perpendicularLine2 = np.array(line_perpList2).reshape(-1,3)

			pl.plot(perpendicularLine[:,1],perpendicularLine[:,0],'r.', markersize=0.5, alpha=0.7)
			pl.plot(perpendicularLine2[:,1],perpendicularLine2[:,0],'r.', markersize=0.5,alpha=0.7)
	
			
			
			for dd in range(0,npix):

				if (dd == 0):	
					# this is where the skeleton point is x0,y0,z0
					# sum the intensities of the velocity channel before and after
					profileLSum[dd] = np.sum([intensity[z0-1,y0-1,x0-1], intensity[z0,y0-1,x0-1], intensity[z0+1,y0-1,x0-1]]) 

				if (dd > 0):
					# this is where we get the list of the perpendicular points
					# close to the calculated perpendicular line there may be
					# several points that have the same distance to the line
					# we take their mean intensity and sum over 3 velocity channels
					index_d = np.where((line_perp>dd-1) * (line_perp<=dd))

					profileLSum[dd] = np.sum( [np.mean(intensity[int(z0)-1,index_d[0]-1,index_d[1]-1]), \
							np.mean(intensity[int(z0),index_d[0]-1,index_d[1]-1]), \
							np.mean(intensity[int(z0)+1,index_d[0]-1,index_d[1]-1])] )
 
	
				
				# the perpendicular slice may also pick up NaNs
				# in that case, ignore them
				# otherwise, accumulate them into the average profile
				# the average profile is what we will use for the fitting
				if np.isnan(profileLSum[dd]) != True:
					average_profile[dd] += profileLSum[dd]/(avg_len)
	

			for ddd in range(0,npix):
				if (ddd == 0):
					# this is where the skeleton point is x0,y0,z0
					# sum the intensities of the velocity channel before and after
					profileRSum[ddd] = np.sum([intensity[z0-1,y0-1,x0-1], intensity[z0,y0-1,x0-1], intensity[z0+1,y0-1,x0-1]])
				
		
				if (ddd > 0):
					# same as the left-hand side: points near the perpendicular
					# line may share the same distance, so take their mean
					# intensity and sum over 3 velocity channels
					index_d2 = np.where((line_perp2>ddd-1) * (line_perp2<=ddd))

					profileRSum[ddd] = np.sum( [np.mean(intensity[int(z0)-1,index_d2[0]-1,index_d2[1]-1]), \
							np.mean(intensity[int(z0),index_d2[0]-1,index_d2[1]-1]), \
							np.mean(intensity[int(z0)+1,index_d2[0]-1,index_d2[1]-1])] )

				if np.isnan(profileRSum[ddd]) != True:
					average_profile2[ddd] += profileRSum[ddd]/(avg_len)
	
			##############################################################
			# stack both sides of the intensity profiles #################
			##############################################################
		
			stacked_profile[n] = np.hstack((profileLSum[::-1],profileRSum))
			stacked_profile[n] = stacked_profile[n]*dv

			print stacked_profile[n]

			#plt.figure(1)
			#plt.plot(xrange(-npix,0), average_profile[::-1])
			#plt.plot(xrange(0,npix), average_profile2)

			#plt.plot(stacked_profile[n])
			#plt.show()
		
			# subtract a baseline from each individual profile
			base = baseline_als(stacked_profile[n], lam, 0.01, niter)
			stacked_profile_baseSubt[n] = stacked_profile[n] - base
			 
			#if plotIndividualProfiles == True:
			#	pl.step(range_perp,stacked_profile_baseSubt[n],ls='-',color='#D3D3D3', lw=3.0, alpha=1.0)		
			n += 1
		#####################################################################################
		# exiting the inner loop that allowed us to average a number of intensity profiles ##
		# this number is taken as three times the beamsize of the CARMA-NRO data ############
		# (avg_len = 12; it can be changed according to the dataset used) ###################
		# below, we fit this averaged profile to calculate the width ########################
		#####################################################################################

		pl.gcf().subplots_adjust(bottom=0.15)
		pl.gcf().subplots_adjust(left=0.15)
		pl.savefig('/home/suri/development/filchap_1.0/test_c18o_north/plots/slices_f103/widthPlotFil' + str(filamentNo)+'_slice' + str(n2) +'.png', dpi=300)

		# this is the average radial intensity profile we need for the width calculation
		# so stack together both sides of the profile (- and +)
		# and multiply by the velocity channel width because it is an integrated intensity profile
		average_profileFull = np.hstack((average_profile[::-1],average_profile2))
		average_profileFull = average_profileFull*dv
	
		# subtract baseline from the averaged profile
		# and also smooth it 
		# the smoothed profile will be used to find dips and peaks

		z2 = baseline_als(average_profileFull, lam, 0.01,niter)
		y_base_subt = average_profileFull -z2
		y_base_subt_smooth = gaussian_filter(y_base_subt, sigma=smooth) #3 beam=12
		
				
		###################################################################################
		####### calculating minima ######################################################## 
		###################################################################################	
		# we calculate minima by looking at the minus side of the peak
		# and at the plus side: minimaLeft and minimaRight
		# in order to make sure the minima are global,
		# we put an integrated intensity threshold (at the moment 5*sigma)
		# only minima with values below this threshold will be taken into account
		 	
		minimaLeft = argrelextrema(y_base_subt_smooth[0:npix], np.less, order=6)
		minimaRight = argrelextrema(y_base_subt_smooth[npix:npix*2], np.less, order=6) 
		
		
		# the following blocks decide which minima to use as fit boundaries
		# in case there are multiple minima, the one closest to the peak is selected
		# if no minima are found, the entire range is used (from 0 to 2*npix)

		if len(minimaLeft[0]) > 1:
			
			b1 = minimaLeft[0][-1]
			#pl.axvline(x=range_perp[b1], ymin=0,ls='--',color='black', alpha=0.5)

		elif len(minimaLeft[0]) == 1:
			
			#pl.axvline(x=range_perp[minimaLeft[0][0]], ymin=0,ls='--',color='black', alpha=0.5)
			b1 = minimaLeft[0][0]
		else:

			b1 = 0
			#pl.axvline(x=range_perp[b1], ymin=0,ls='--',color='black', alpha=0.5)

		if len(minimaRight[0]) > 1:

			b2 = minimaRight[0][0]+npix
			#pl.axvline(x=range_perp[b2], ymin=0,ls='--',color='black', alpha=0.5)

		elif len(minimaRight[0]) == 1:
			#pl.axvline(x=range_perp[minimaRight[0][0]+npix], ymin=0,ls='--',color='black', alpha=0.5)
			b2 = minimaRight[0][0]+npix

		else:
			b2 = 2*npix
			#pl.axvline(x=range_perp[b2-1], ymin=0,ls='--',color='black', alpha=0.5)

	
		# plot the averaged profile
		#pl.step(range_perp,y_base_subt,'k-', lw=2.0, alpha=1.0)
		
		# uncomment if you want to plot the smoothed average profile
		#pl.step(range_perp,y_base_subt_smooth,'g',lw=1.0, alpha=0.4)
		
		###################################################################################
		# here we calculate the number of peaks ###########################################
		# within our boundaries 		###########################################
		# this will help compare the number of peaks & shoulders to the width #############
		###################################################################################
		
		# Adapted from Seamus' peak-finding routine.

		print 'Finding Peaks'
		ydata_og 	= y_base_subt[b1:b2]
		ydata		= y_base_subt_smooth[b1:b2]
		r		= range_perp[b1:b2]
		ny 		= len(ydata)
		minr 		= np.min(r)
		maxr 		= np.max(r)
		dr		= (maxr-minr)/ny
	
		limit		= 5*noise_level 		# this is to check peak's significance	
		
		# derivatives (simple forward differences)
		dy		= np.zeros_like(ydata)
		for ii in range(0,ny-1):
			dy[ii] = (ydata[ii+1]-ydata[ii])/dr

		ddy 		= np.zeros_like(ydata)
		for ii in range(0,ny-2):
			ddy[ii] = (dy[ii+1]-dy[ii])/dr
		
		# work out the number of peaks and shoulders
		switch 		= np.zeros_like(ydata)
		decrease	= 0
		shoulder	= np.zeros_like(ydata)

		for ii in range(2,ny-2):
			# a shoulder: a local minimum of the second derivative with significant intensity
			if(ddy[ii+1] > ddy[ii] and ddy[ii+2] > ddy[ii] and ddy[ii-1] > ddy[ii] and ddy[ii-2] > ddy[ii] and (ydata[ii]>limit or ydata[ii-1]>limit or ydata[ii+1]>limit)):

				shoulder[ii] = 1
			# a peak: the first derivative changes sign from + to - with significant intensity
			if((dy[ii] < 0.0 and dy[ii-1]>0.0) and (ydata[ii]>limit or ydata[ii-1]>limit or ydata[ii+1]>limit)):

				switch[ii] = 1
		
		# check if there are any peaks detected
		if( np.sum(switch) < 1 ):
			print "No peak was detected in this slice"
			print "Did I go wrong? - Seamus"
			#return [[0,0,0],0]

		n_peaks = np.sum(switch)
		n_peaks = int(n_peaks)

		index = np.linspace(0,ny-1,ny)
		index = np.array(index,dtype=int)

		id_g = index[switch==1]
		cent_g = r[id_g]
		amp_g = ydata[id_g]

		is_shoulder = int(np.sum(shoulder)) - n_peaks

		if(is_shoulder > 0):

			# if there are shoulders, record their positions and amplitudes
			shoulder_pos	= r[index[shoulder==1]]
			shoulder_amp	= ydata[index[shoulder==1]]
			print "Here are the shoulder positions", shoulder_pos

			#for kk in range(len(shoulder_pos)):
			#	pl.axvline(x=shoulder_pos[kk], ymin=0, ls='--', lw=1., color='g', alpha=0.5)
		else:
			shoulder_pos    = []
			print 'I found no shoulders.'
		
		##################################################################################
		# finally calculating the width ################################################## 
		##################################################################################
		
		# initial guesses for the fits
		a		= np.amax(ydata_og)
		mu		= r[ np.argmax(ydata_og) ]
		pos_half	= np.argmin( np.abs( ydata_og-a/2 ) )
		sig 		= np.abs( mu - r[ pos_half] )
		p01 		= (a,mu,sig)
		p02 		= (a,mu,sig)


		try:
			# 1st method: calculate moments	
			tot_2, tot_3, tot_4 = 0, 0, 0	
			for ii in range(len(r)): 
				tot_2 += ydata_og[ii]*(r[ii] - np.mean(r))**2
				tot_3 += ydata_og[ii]*(r[ii] - np.mean(r))**3
				tot_4 += ydata_og[ii]*(r[ii] - np.mean(r))**4
	       
			var = math.sqrt(tot_2/np.sum(ydata_og))	# intensity-weighted standard deviation
			mom3 = tot_3/np.sum(ydata_og)
			mom4 = tot_4/np.sum(ydata_og)

			FWHM_moments = var*2.35			# 2*sqrt(2*ln2) ~ 2.35 for a Gaussian
			skewness = mom3/(var**3)
			kurtosis = mom4/(var**4) - 3		# excess kurtosis

			print 'moment:', FWHM_moments	
			# 2nd method: fit Gaussian and Plummer functions
			co_eff,var_matrix	= curve_fit(gaus,r, ydata_og,p0=p01,absolute_sigma=True)	
			#print co_eff
			co_eff3,var_matrix3	= curve_fit(plum2,r, ydata_og,p0=p02)
			#print co_eff3
			co_eff4,var_matrix4	= curve_fit(plum4,r, ydata_og,p0=p02)
			#print co_eff4
			
			#Calculate Chi-squared 
			noise			= noise_level  # 0.47/sqrt(3)/sqrt(12)
			num_freeParams		= 3  	        
			# gaussian fits
			chi_sq_gaus		= np.sum((ydata_og-gaus(r,*co_eff))**2) / noise**2
			red_chi_sq_gaus		= chi_sq_gaus / (len(ydata_og) - num_freeParams)			
			
			# plummer 2 fits
			chi_sq_plum2		= np.sum((ydata_og-plum2(r,*co_eff3))**2) / noise**2	
			red_chi_sq_plum2	= chi_sq_plum2 / (len(ydata_og) - num_freeParams)	
			
			# plummer 4 fits
			chi_sq_plum4		= np.sum((ydata_og-plum4(r,*co_eff4))**2) / noise**2
			red_chi_sq_plum4	= chi_sq_plum4 / (len(ydata_og) - num_freeParams)
			
			#fits
			fit			= gaus(range_perp,*co_eff)
			fit3			= plum2(range_perp,*co_eff3)
			fit4			= plum4(range_perp,*co_eff4)
		
			# fit standard deviation
			perr			= np.sqrt(np.diag(var_matrix))	
			perr3			= np.sqrt(np.diag(var_matrix3))
			perr4			= np.sqrt(np.diag(var_matrix4))	

			#pl.plot(range_perp,y_base_subt)
			#pl.plot(range_perp,fit,ls='-.', color='#0000CD', lw=1.)
			#pl.plot(range_perp,fit3,ls='-',color='#DAA520', lw=1.)
			#pl.plot(range_perp,fit4,ls='--',color='red', lw=1.)

			#pl.xlabel('Distance from the ridge [pc]')
			#pl.ylabel('Integrated Intensity [K.km/s]')
			#pl.grid(True, alpha=0.2)
			pl.gcf().subplots_adjust(bottom=0.15)
			pl.gcf().subplots_adjust(left=0.15)
			#pl.savefig('/home/suri/development/filchap_1.0/test_c18o_north/plots/slices_f103/widthPlotFil' + str(filamentNo)+'_slice' + str(n2) +'.png', dpi=300)
			#pl.show()
			
			rangePix = b2-b1

			# FWHM conversions: 3.464 = 2*sqrt(3) for a p=2 Plummer profile,
			# 1.533 = 2*sqrt(2**(2./3)-1) for p=4
			FWHM_plummer2 = 3.464*co_eff3[2]
			FWHM_plummer4 = 1.533*co_eff4[2]
			
			resultsList.extend((co_eff[0],perr[0],co_eff[1],perr[1],co_eff[2]*2.35,perr[2],co_eff3[0],perr3[0],co_eff3[1],perr3[1],FWHM_plummer2,perr3[2],co_eff4[0],perr4[0],co_eff4[1],perr4[1],FWHM_plummer4,perr4[2],FWHM_moments,skewness,kurtosis,chi_sq_gaus,red_chi_sq_gaus,chi_sq_plum2,red_chi_sq_plum2,chi_sq_plum4,red_chi_sq_plum4,rangePix,x0save,y0save,z0save,len(shoulder_pos)))
		
			if printResults == True:
				print '###########################################################'
				print '############## Width Results ##############################'
				print ' '
				print 'FWHM (Second Moment)		=', FWHM_moments
				print 'FWHM (Gaussian Fit)		=', co_eff[2]*2.35
				print 'FWHM (Plummer 2)			=', FWHM_plummer2
				print 'FWHM (Plummer 4)			=', FWHM_plummer4 	
				print ' '
				print 'Skewness				=', skewness
				print 'Kurtosis				=', kurtosis
				print '###########################################################'	
			
		
		except (UnboundLocalError,RuntimeError,ValueError,TypeError) as e: 
	
			print 'I did not fit this.' 
			pass
		

		#print 'n2 just before the if: ', n2
		if leftover_chunk != 0 and n2 == len(filamentData)-1:
			print 'break here'
			break
		elif leftover_chunk != 0 and n2 == len(filamentData)-leftover_chunk:
			n2 += leftover_chunk-1
			avg_len = leftover_chunk

		elif leftover_chunk == 0 and n2 == len(filamentData):
			break

		else:
			n2 += avg_len
		 
	
		pl.clf()

	# build the results array after the loop, so the chunk processed in the
	# final iteration (which exits via break) is included as well
	resultsArray = np.array(resultsList).reshape(-1,32)
	print resultsArray

	return resultsArray
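
# baseline_als, gaus, plum2 and plum4 are called above but are not defined in
# this excerpt. Minimal sketches follow, assuming the standard asymmetric
# least squares baseline (Eilers & Boelens 2005) and Plummer-like profiles
# consistent with the FWHM factors used above (3.464 = 2*sqrt(3) for p=2,
# 1.533 = 2*sqrt(2**(2./3)-1) for p=4); the original FilChaP file may differ.
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve

def baseline_als(y, lam, p, niter=10):
	# lam: smoothness weight, p: asymmetry (0 < p < 1), niter: reweighting passes
	L = len(y)
	D = sparse.diags([1, -2, 1], [0, -1, -2], shape=(L, L-2))
	w = np.ones(L)
	for _ in range(niter):
		W = sparse.spdiags(w, 0, L, L)
		Z = W + lam * D.dot(D.transpose())
		z = spsolve(Z, w*y)
		# points above the running baseline get weight p, points below 1-p
		w = p * (y > z) + (1-p) * (y < z)
	return z

def gaus(r, a, mu, sig):
	return a * np.exp(-(r-mu)**2 / (2.*sig**2))

def plum2(r, a, mu, sig):
	# Plummer-like profile with p=2; FWHM = 2*sqrt(3)*sig
	return a / (1. + ((r-mu)/sig)**2)**0.5

def plum4(r, a, mu, sig):
	# Plummer-like profile with p=4; FWHM = 2*sqrt(2**(2./3)-1)*sig
	return a / (1. + ((r-mu)/sig)**2)**1.5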
コード例 #55
import os
import numpy as np
import pandas as pd
from scipy.signal import argrelextrema, peak_prominences

# malFile_array, cleanFile_array and features come from earlier in the source
# file (outside this excerpt)
arrays = pd.concat([malFile_array, cleanFile_array], axis=0, ignore_index=True)
features.insert(4, 'max Day count', 'default value 0')
features.insert(5, 'min Day count', 'default value 0')
features.insert(31, 'Prevalence', 'default value 0')
features.insert(32, 'Peaks', 'default value ')
features.insert(33, 'Sharp peaks', 'default value ')

for i in range(len(arrays)):
    a = arrays["Day_Array"][i]
    # parse the "[1 2 3 ...]"-style string back into an integer array
    a = np.array(a[1:-1].split(), dtype=int)
    features.at[i, 'max Day count'] = float(max(a))
    features.at[i, 'min Day count'] = float(min(a))
    features.at[i, 'Prevalence'] = float(sum(a))
    #------Peaks----
    peaks = argrelextrema(a, np.greater, mode='wrap')
    peaks = peaks[0][a[peaks[0]] > 3]
    features.at[i, 'Peaks'] = len(peaks)
    #------Sharp peaks----
    prominences = peak_prominences(a, peaks)[0]
    sharp_peaks_over = peaks[prominences >= 15]  # keep only sufficiently prominent peaks
    features.at[i, 'Sharp peaks'] = len(sharp_peaks_over)

#create features.csv
parent_dir = os.getcwd()
features.to_csv(os.path.join(parent_dir, "features.csv"))
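
# note: scipy.signal.find_peaks could fold both steps above into single calls;
# a rough equivalent (modulo the mode='wrap' boundary handling, which
# find_peaks does not replicate) would be:
#   peaks, _ = find_peaks(a, height=4)                  # 'Peaks'
#   sharp, _ = find_peaks(a, height=4, prominence=15)   # 'Sharp peaks'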

#------------------------Data exploration---------------------------------
コード例 #56
import numpy as np
from scipy.signal import argrelextrema

def signal_p2pmv(signal):
    # peak-to-peak mean value: mean of the local-maximum values minus the
    # mean of the local-minimum values (argrelextrema returns indices, so
    # index into the signal before averaging)
    local_max = argrelextrema(signal, np.greater)
    local_min = argrelextrema(signal, np.less)
    return np.mean(signal[local_max]) - np.mean(signal[local_min])
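
# quick check on a toy sine wave (illustrative): maxima sit near +1 and
# minima near -1, so the peak-to-peak mean value should come out close to 2
if __name__ == "__main__":
    t = np.linspace(0, 4*np.pi, 400)
    print(signal_p2pmv(np.sin(t)))  # ~2.0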
コード例 #57
def findLocalMaxima(array):
    return argrelextrema(array, np.greater)[0]
コード例 #58
def findLocalMinima(array):
    return argrelextrema(array, np.less)[0]
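
# caveat for both helpers above: argrelextrema uses strict comparators, so a
# flat-topped extremum is skipped entirely; scipy.signal.find_peaks handles
# plateaus if that matters
import numpy as np
from scipy.signal import argrelextrema, find_peaks

plateau = np.array([0, 1, 1, 0])
print(argrelextrema(plateau, np.greater)[0])  # [] - the flat top is missed
print(find_peaks(plateau)[0])                 # the plateau is reported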
コード例 #59
def signal_mean_local_min(signal):
    local_min = argrelextrema(signal, np.less)
    return np.mean(signal[local_min])
コード例 #60
import numpy as np
from scipy import signal

def MM_RWT(rwt,par=1000):
#% MM_RWT -- Modulus Maxima of a Real Wavelet Transform
#%  Usage
#%    maxmap = MM_RWT(rwt,par)
#%  Inputs
#%    rwt    Output of RWT
#%    par    optional. If present, keep thresholds only
#%           above a certain value. default = 1000
#%  Outputs
#%    maxmap binary array indicating presence of max or not
#%
#%  Description
#%    Used to calculate fractal exponents etc.
#%

    (n, nscale) = rwt.shape
    
#    rwt[0:50,:] = 0
#    rwt[975:1024,:] = 0

#    rwtmax = np.amax(np.amax(rwt))
#    
#    print "rwtmax is "
#    
#    above_thresh = np.greater( rwt, .01*rwtmax)
#    rwt = np.multiply(above_thresh, rwt)

    maxmap = np.zeros((n, nscale))

    # optional thresholding with par could go here (currently unused)

    # mark every strict local maximum along the time axis, one pass over all scales
    localmaxes = signal.argrelextrema(rwt, np.greater, axis=0, order=1)
    maxmap[localmaxes] = 1
    
    
    
#    t      = range(n);
#    tplus  = np.roll(t, 1)
#    tminus = np.roll(t, -1)
#    
#    #only look for maxes, don't look for mins
#    #rwt    = np.abs(rwt);
#    
#    for k in xrange( int(nscale) ):

    
    
#        #are you higher than t-1 and t+1?
#        localmax =  np.logical_and( np.greater(rwt[t,k], rwt[tplus,k]), np.greater(rwt[t,k], rwt[tminus,k]) )
#        
#        #find the rwt value for the maxes (by multiplying by the boolean localmax matrix)
#        y =  np.multiply(localmax[:], rwt[:,k]);
#
#        #print "localmax size " + str(localmax.shape) + " y size + " + str(y.shape)
#        maxy = np.amax(y);
#            #print "maxy " + str(maxy)
#        maxmap[t,k] = (y >= maxy/par);

    #maxmap[0:50,:] = 0
    #maxmap[975:1024,:] = 0
    
    print "maxmap shape" + str( maxmap.shape)

    return maxmap
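
# quick smoke test (illustrative): any (n, nscale) array stands in for the
# output of a real wavelet transform here
if __name__ == "__main__":
    rwt = np.random.rand(256, 16)
    maxmap = MM_RWT(rwt)
    print("marked %d local maxima" % int(maxmap.sum()))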