Example #1
def curve(request, id):
    farmer = Location.objects.get(id=id)
    longitude = farmer.lng
    latitude = farmer.lat
    url = ("http://vedas.sac.gov.in:8080/LeanGeo/api/band_val/NDVI_PROBA?latitude=" +
           str(latitude) + "&longitude=" + str(longitude))
    r = requests.get(url)
    data = r.json()
    x = []
    y = []
    for dat in data:
        x.append(dat["time"])
        y.append(dat["value"])
    x = ar(x)
    y = ar(y)
    # smooth the series with a 3-point moving average
    box = np.ones(3) / 3
    yhat = np.convolve(y, box, mode='same')
    old_y = list(y)
    new_x = [str(nx) for nx in x]
    new_y = list(yhat)
    data = {"x": new_x, "y": new_y, "old_y": old_y}
    return JsonResponse(data)
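
A usage sketch for wiring this view into an app's URLconf; the route path and module layout here are assumptions, not part of the original example:

# urls.py -- hypothetical wiring for the curve view above
from django.urls import path

from . import views

urlpatterns = [
    path('curve/<int:id>/', views.curve, name='curve'),
]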
Example #2
def triangular_prism():
    width = random.randint(4,12)
    height = random.randint(3,6)
    depth = random.randint(2,8)

    points = [((0,0),(width,0)),
              ((width,0),(0,height)),
              ((0,height),(0,0))]

    point1 = (width+depth, 0+depth/2)

    point2 = (depth, height + depth/2)
    points = points + [((width, 0), point1), ((0, height), point2), (point1, point2)]
    points = ar(points)
    edges_for_labels = ar([points[2][0], points[2][1], points[0][1], point1])
    label_points = labels_for_shape(edges_for_labels)
    labels = [str(height), str(width), str(depth)]
    fig = plot_shape_byedge(points, label_points, labels)
    plt.text(width/3,height/3,'Volume = ?', horizontalalignment='center', verticalalignment='center', fontsize = 'large')
    r = random.randint(0, 999999999999999)
    fn = 'temp_img/temp' + str(r) + '.png'

    fig.savefig('media/' + fn, bbox_inches='tight', pad_inches=0, transparent=True)
    plt.close(fig)  # free the figure so repeated calls don't leak memory
    question = fn
    answer = str(width * height * depth)

    return question, answer
Example #3
def cuboid():
    width = random.randint(4,12)
    height = random.randint(3,6)
    depth = random.randint(2,8)
    units = random.choice(['mm','cm','m'])
    points = [((0,0),(width,0)),
              ((width,0),(width,height)),
              ((width,height),(0, height)),
              ((0,height),(0,0))]

    point1 = (width+depth, 0+depth/2)
    point2 = (width+depth, height + depth/2)
    point3 = (depth, height + depth/2)
    points = points + [((width, 0), point1), ((width, height), point2), ((0, height), point3), (point1, point2), (point2, point3)]
    points = ar(points)
    edges_for_labels = ar([points[3][0], points[0][0], points[4][0], points[4][1]])
    label_points = labels_for_shape(edges_for_labels)
    labels = [str(height)+units, str(width)+units, str(depth)+units]
    fig = plot_shape_byedge(points, label_points, labels)
    plt.text(width/2,height/2,'Volume = ?', horizontalalignment='center', verticalalignment='center', fontsize = 'large')
    r = random.randint(0, 999999999999999)
    fn = 'temp_img/temp' + str(r) + '.png'

    fig.savefig('media/' + fn, bbox_inches='tight', pad_inches=0, transparent=True)
    plt.close(fig)  # free the figure so repeated calls don't leak memory
    question = fn
    answer = '$' + str(width * height * depth) + units + '^3$'

    return question, answer
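
Both generators return the rendered image path and the answer string; a minimal driver sketch, assuming the media/temp_img directory already exists:

# Hypothetical driver for the volume-question generators above
question_img, answer = cuboid()
print('question image:', 'media/' + question_img)
print('expected answer:', answer)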
Example #4
def sinFit(xs, ys, paraW=None):
    dataX = [float(s) for s in xs]
    dataY = [float(s) for s in ys]

    def sinFunction(x, a, w, p, b):
        return a * sin(w * x + p) + b

    x = ar(dataX)
    y = ar(dataY)

    def stdev(data):
        ave = sum(data) / len(data)
        sumI = 0
        for d in data:
            sumI += (d - ave)**2
        return math.sqrt(sumI / (len(data) - 1))

    average = sum(y) / len(y)
    stv = stdev(y)
    # for a pure sine the amplitude is RMS * sqrt(2); use it as the starting guess
    w = paraW if (paraW is not None) else 2 * math.pi / (x[-1] - x[0])
    preP = [stv * math.sqrt(2), w, 0, average]
    popt, pcov = curve_fit(sinFunction, x, y, p0=preP)
    return list(popt)
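
A quick self-test sketch for sinFit on synthetic data, assuming the example's imports (a numpy-compatible sin, math, scipy's curve_fit) are in scope; the signal parameters below are arbitrary:

import numpy as np

xs = np.linspace(0, 4 * np.pi, 200)
ys = 2.5 * np.sin(1.0 * xs + 0.3) + 1.0 + np.random.normal(0, 0.1, xs.size)
# pass paraW to pin the angular-frequency guess; otherwise the code assumes
# roughly one full period across the sampled range
a, w, p, b = sinFit(xs, ys, paraW=1.0)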
Example #5
def peak_finder(array, lower, upper, count_offset):
    '''
    Peak Finder for Potassium. Needs more development
    '''
    points = ar(range(lower, upper))
    peak = list(array[lower:upper])
    counts = ar(peak)

    nentries = len(points)
    mean = lower + (upper - lower) / 2.0
    slope = 2 * (np.log(counts[-1]) - np.log(counts[0])) / (points[-1] -
                                                            points[0])
    pinit = [counts[0] / 2.0, mean, 5.0, counts[0] * count_offset, slope]

    errfunc = lambda p, x, y: gaus_plus_exp(x, p) - y
    pfit,pcov,infodict,errmsg,success = \
        optimize.leastsq(errfunc, pinit, args=(points,counts), \
            full_output=1, epsfcn=0.0001)

    if (len(counts) > len(pinit)) and pcov is not None:
        s_sq = (errfunc(pfit, points, counts)**
                2).sum() / (len(counts) - len(pinit))
        pcov = pcov * s_sq
    else:
        pcov = 0

    error = []
    for i in range(len(pfit)):
        try:
            error.append(np.absolute(pcov[i][i])**0.5)
        except (TypeError, IndexError):
            # pcov was zeroed out above; report zero uncertainty
            error.append(0.00)
    pfit_leastsq = pfit
    perr_leastsq = np.array(error)
    return pfit_leastsq, perr_leastsq
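
The helper gaus_plus_exp is not shown on this page; a minimal sketch consistent with the 5-element pinit above (gaussian amplitude, mean, sigma, exponential amplitude, slope) could be:

import numpy as np

def gaus_plus_exp(x, p):
    # hypothetical model: p = [amp, mean, sigma, expo_amp, expo_slope]
    return p[0] * np.exp(-(x - p[1])**2 / (2 * p[2]**2)) + p[3] * np.exp(p[4] * x)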
Example #6
    def Generate(self):
        # sample sin(x) on [0, 3) and add uniform noise of +/- noice_level
        for i in range(0, 90):
            self.x.append(i / 30.)
            self.y.append(math.sin(i / 30.) +
                          random.uniform(-self.noice_level, self.noice_level))
        self.x = ar(self.x)
        self.y = ar(self.y)
Example #8
def double_peak_finder(array, lower, upper):
    '''
    Fits double gaussian + exponential to data within some window
      - fit is applied only to data within the upper/lower channel
        boundaries provided as inputs
    Arguments:
      - full array of data
      - lower and upper channel values for the fit window
    Returns:
      - list of fit parameters and list of parameter errors
    '''
    points = ar(range(lower, upper))
    peak = list(array[lower:upper])
    counts = ar(peak)

    # Initialize fit parameters based on rough estimates of mean,sigma,amp,etc.
    #  - mean estimated as center of fit window - set window accordingly
    #    - double gaussian means shifted slightly in each direction
    #  - gaussian amp and expo shift estimated based on counts at left edge
    #  - expo slope determined using fit window boundaries
    nentries = len(points)
    mean = lower + (upper - lower) / 2.0
    slope = 2 * (np.log(counts[-1]) - np.log(counts[0])) / (points[-1] -
                                                            points[0])
    pinit = [
        counts[0] / 5.0, mean - 2, 5.0, counts[0] / 5.0, mean + 2, 5.0,
        counts[0], slope
    ]

    # Currently using leastsq fit from scipy
    #   - see scipy documentation for more information
    errfunc = lambda p, x, y: double_gaus_plus_exp(x, p) - y
    pfit,pcov,infodict,errmsg,success = \
        optimize.leastsq(errfunc, pinit, args=(points,counts), \
            full_output=1, epsfcn=0.0001)

    # Calculate fit parameter uncertainties using the covariance matrix
    #  and the (fit - data) variance
    if (len(counts) > len(pinit)) and pcov is not None:
        s_sq = (errfunc(pfit, points, counts)**
                2).sum() / (len(counts) - len(pinit))
        pcov = pcov * s_sq
    else:
        pcov = 0

    error = []
    for i in range(len(pfit)):
        try:
            error.append(np.absolute(pcov[i][i])**0.5)
        except (TypeError, IndexError):
            # pcov was zeroed out above; report zero uncertainty
            error.append(0.00)
    pfit_leastsq = pfit
    perr_leastsq = np.array(error)
    return pfit_leastsq, perr_leastsq
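
Likewise, double_gaus_plus_exp is defined elsewhere in the source project; a sketch consistent with the 8-element pinit above would be:

import numpy as np

def double_gaus_plus_exp(x, p):
    # hypothetical model: p = [a1, mu1, s1, a2, mu2, s2, expo_amp, expo_slope]
    g1 = p[0] * np.exp(-(x - p[1])**2 / (2 * p[2]**2))
    g2 = p[3] * np.exp(-(x - p[4])**2 / (2 * p[5]**2))
    return g1 + g2 + p[6] * np.exp(p[7] * x)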
Example #9
def get_unary_vector(line, direction=1):
    l = line
    if l[0] == 0:
        return ar([0, 1]) * direction
    if l[1] == 0:
        return ar([1, 0]) * direction
    else:
        k = l[1] / l[0]
        x = (1 / (1 + 1 / k**2))**0.5
        return ar([x, -x / k]) * sign_of(x) * direction
Example #11
def GaussFit(X, Y, d):
    # clip the data to the region of interest [d.ROI_min, d.ROI_max]
    LL = (np.abs(X - d.ROI_min)).argmin()
    UL = (np.abs(X - d.ROI_max)).argmin() + 1
    X2 = ar(X[LL:UL])
    Y2 = ar(Y[LL:UL])

    def gaus(x,a,x0,sigma):
        return a*exp(-(x-x0)**2/(2*sigma**2))

    popt,pcov = curve_fit(gaus,X2,Y2,p0=d.fit_manual)
    return LL,UL,popt
Example #12
def peak_finder(array, lower, upper, count_offset):
    '''
    Fits gaussian + exponential to data within some window
      - fit is applied only to data within the upper/lower channel
        boundaries provided as inputs
    Arguments:
      - full array of data
      - lower and upper channel values for the fit window
      - count_offset used to correct exponential fit parameter for the fact that the fit is not starting at the left edge of the spectrum
    Returns:
      - list of fit parameters and list of parameter errors
    '''
    points = ar(range(lower, upper))
    peak = list(array[lower:upper])
    counts = ar(peak)

    # Initialize fit parameters based on rough estimates of mean,sigma,amp,etc.
    #  - mean estimated as center of fit window - set window accordingly
    #  - gaussian amp and expo shift estimated based on counts at left edge
    #  - expo slope determined using fit window boundaries
    nentries = len(points)
    mean = lower + (upper - lower) / 2.0
    slope = 2 * (np.log(counts[-1]) - np.log(counts[0])) / (points[-1] -
                                                            points[0])
    pinit = [counts[0], mean, 5.0, counts[0] * count_offset, slope]
    #print('Initial parameters: amp = {0}, mean = {1}, sigma = {2}, amp2 = {3}'.format(pinit[0],pinit[1],pinit[2],pinit[3]))

    # Currently using leastsq fit from scipy
    #   - see scipy documentation for more information
    errfunc = lambda p, x, y: gaus_plus_exp(x, p) - y
    pfit,pcov,infodict,errmsg,success = \
        optimize.leastsq(errfunc, pinit, args=(points,counts), \
            full_output=1, epsfcn=0.0001)
    #print('after parameters: amp= {0}, mean ={1}, sigma = {2}, amp2  = {3}'.format(pfit[0],pfit[1],pfit[2],pfit[3]))

    # Calculate fit parameter uncertainties using the covariance matrix
    #  and the (fit - data) variance
    if (len(counts) > len(pinit)) and pcov is not None:
        s_sq = (errfunc(pfit, points, counts)**
                2).sum() / (len(counts) - len(pinit))
        pcov = pcov * s_sq
    else:
        pcov = 0

    error = []
    for i in range(len(pfit)):
        try:
            error.append(np.absolute(pcov[i][i])**0.5)
        except (TypeError, IndexError):
            # pcov was zeroed out above; report zero uncertainty
            error.append(0.00)
    pfit_leastsq = pfit
    perr_leastsq = np.array(error)
    return pfit_leastsq, perr_leastsq
Example #13
def edge_rebound_velocity(v, segment):
    if len(segment) > 2:
        return edge_rebound_velocity(v, divide_segment_into_points(segment))

    if segment[0][0] == segment[1][0]:
        return ar([-v[0], v[1]])
    if segment[0][1] == segment[1][1]:
        return ar([v[0], -v[1]])

    projv = projection_velocity_segment(v, segment)

    return 2 * projv - v
Example #15
def singlePeakGaussianFit(xs, ys):
    dataX = [float(s) for s in xs]
    dataY = [float(s) for s in ys]

    def gaus(x, a, x0, sigma):
        return a * exp(-(x - x0) ** 2 / (2 * sigma ** 2))

    x = ar(dataX)
    y = ar(dataY)
    mean = sum(x * y) / sum(y)
    sigma = np.sqrt(sum(y * (x - mean) ** 2) / sum(y))
    popt, pcov = curve_fit(gaus, x, y, p0=[1, mean, sigma])
    result = list(popt)
    return result
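
A usage sketch on synthetic data; the true parameters here are arbitrary, and note that the hard-coded amplitude guess of 1 in p0 only works well when the peak height is of order one to a few:

import numpy as np

xs = np.linspace(-5, 5, 101)
ys = 3.0 * np.exp(-(xs - 1.0)**2 / (2 * 0.8**2)) + np.random.normal(0, 0.05, xs.size)
a, x0, sigma = singlePeakGaussianFit(xs, ys)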
Example #16
    def linear_line(self, seq):
        popt = [0, 0, 0]
        pcov = [100, 100, 100]
        perr = [100, 100, 100]
        try:
            row_num = len(seq)
            X_ = ar(range(row_num), dtype='float32')
            X_ = X_.reshape((row_num, 1))
            x = maxminnorm2(X_)
            x = np.ravel(x)
            Y_ = seq.reshape((row_num, 1))
            y = maxminnorm2(Y_)
            y = np.ravel(y)
            popt, pcov = curve_fit(self.linear_line_func,
                                   x,
                                   y,
                                   p0=[3, 1],
                                   maxfev=10000)
            perr = np.sqrt(np.diag(pcov))
        except Exception:
            traceback.print_exc()
        finally:
            if math.isinf(perr[0]) or math.isnan(perr[0]):
                return popt, [100, 100, 100]
            return popt, perr
Example #17
    def quadraticCurve(self, seq):
        popt = [0, 0, 0]
        pcov = [100, 100, 100]
        perr = [100, 100, 100]
        try:
            row_num = len(seq)
            X_ = ar(range(row_num), dtype='float32')
            X_ = X_.reshape((row_num, 1))
            x = maxminnorm(X_)
            x = np.ravel(x)

            Y_ = seq.reshape((row_num, 1))
            y = maxminnorm(Y_)
            y = np.ravel(y)

            popt, pcov = curve_fit(self.quadraticCurve_func,
                                   x,
                                   y,
                                   p0=[3, 4, 3],
                                   maxfev=1000)
            perr = np.sqrt(np.diag(pcov))

        except Exception:
            traceback.print_exc()
        finally:
            if math.isinf(perr[0]) or math.isnan(perr[0]):
                return popt, [100, 100, 100]

            return popt, perr
Example #18
def labels_for_shape(points):

    lbls = []
    x_len = max(points[:, 0]) - min(points[:, 0])
    y_len = max(points[:, 1]) - min(points[:, 1])

    max_len = max(x_len, y_len)
    centroid = (np.mean(points[:, 0]), np.mean(points[:, 1]))
    delta = max_len / 5

    for i in range(len(points) - 1):

        temp_lbls = find_label_point(points[i], points[i + 1], delta)
        lbl_dists = (np.sqrt(((temp_lbls[0][0] - centroid[0])**2) +
                             ((temp_lbls[0][1] - centroid[1])**2)),
                     np.sqrt(((temp_lbls[1][0] - centroid[0])**2) +
                             ((temp_lbls[1][1] - centroid[1])**2)))
        if lbl_dists[0] > lbl_dists[1]:
            lbls.append(temp_lbls[0])
        else:
            lbls.append(temp_lbls[1])

    return ar(lbls)
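
find_label_point is not included on this page; judging from how labels_for_shape uses it (two candidate points per edge, one of which lies farther from the centroid), a plausible sketch is:

import numpy as np

def find_label_point(p1, p2, delta):
    # hypothetical helper: return the two points offset by delta from the
    # midpoint of edge p1-p2, one on each side, perpendicular to the edge
    p1, p2 = np.asarray(p1, dtype=float), np.asarray(p2, dtype=float)
    mid = (p1 + p2) / 2
    edge = p2 - p1
    normal = np.array([-edge[1], edge[0]])
    normal = normal / np.linalg.norm(normal)
    return mid + delta * normal, mid - delta * normal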
Example #19
def get_cell_stats(profiles):
    diffs = []
    mus = []
    sigmas = []
    for i in range(len(profiles)):
        diffs.append(np.nanmax(profiles[i]) - np.nanmin(profiles[i]))
        x = ar(range(len(profiles[i])))
        y = profiles[i]
        # weighted arithmetic mean (corrected - check the section below)
        mean = np.nansum(x * y) / np.nansum(y)
        sigma = np.sqrt(np.nansum(y * (x - mean)**2) / np.nansum(y))

        def Gauss(x, a, x0, sigma):
            return a * np.exp(-(x - x0)**2 / (2 * sigma**2))

        try:
            popt, pcov = curve_fit(Gauss, x, y, p0=[np.nanmax(y), mean, sigma])
            mx, mu, sigma = popt
        except Exception as e:
            print(e)
            mu, sigma = np.nan, np.nan
        mus.append(mu)
        sigmas.append(sigma)

    diffs = np.asarray(diffs)
    mus = np.asarray(mus)
    sigmas = np.asarray(sigmas)
    return diffs, mus, sigmas
Example #20
		def GetCCS(x, y):
			# fit a line to y against the reciprocals of x
			x2 = [1 / value for value in x]
			xaxis = ar(x2)
			yaxis = ar(y)
			slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(xaxis, yaxis)
			K = (25.05**2) / (slope / 1000)
			# reduced mobility, corrected to standard temperature and pressure
			K0 = K * (273.15 / T) * (P / 760)
			Mass = content * Charge
			reducedMass = (Mass * Drift_Mass) / (Mass + Drift_Mass)
			# Mason-Schamp-style collision cross section from the mobility
			CCS = (Charge / K0) * ((1 / (reducedMass * T))**0.5) * 18495.88486
			error = (std_err / slope) * CCS
			error_CCS = (error / CCS) * 100
			if math.isnan(CCS) or math.isnan(error_CCS):
				return str(False), str(False)
			else:
				return CCS, error_CCS
Example #21
def plot_shape_byedge(shape, lbl_points, lbls):
    fig = plt.figure(figsize=(4, 4))
    ax = fig.add_subplot(111)
    for edge in shape:
        plt.plot(edge[:, 0], edge[:, 1], color='#1f77b4', linewidth=4)

    for i in range(len(lbls)):
        plt.text(lbl_points[i][0],
                 lbl_points[i][1],
                 lbls[i],
                 horizontalalignment='center',
                 verticalalignment='center',
                 fontsize='xx-large')

    points = []
    for sh in shape:
        points.append(sh[0])
        points.append(sh[1])
    points = ar(points)

    x_min = min(points[:, 0]) - 2
    x_max = max(points[:, 0]) + 2
    y_min = min(points[:, 1]) - 2
    y_max = max(points[:, 1]) + 2

    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    ax.set_aspect('equal', adjustable='box')
    plt.axis('off')

    return fig
Example #22
def ball_ball_rebound_velocity(ball1, ball2):
    center_line = ar([ball1.coords, ball2.coords])
    vprojection = projection_velocity_segment(ball2.v, center_line)
    m1 = ball1.mass
    m2 = ball2.mass
    v1 = ball1.v
    v2 = vprojection
    # one-dimensional elastic collision along the line of centers
    return ((m1 - m2) * v1 + 2 * m2 * v2) / (m1 + m2)
Example #23
def get_double_peaks(rows,
                     number,
                     n=1,
                     lower_limit=240,
                     upper_limit=300,
                     make_plot=False):
    entries = 12 * n
    days = (24 / n)
    i = 0
    counter = 0
    means = []
    sigmas = []
    amps = []
    while i < number * days:
        if counter < days:
            integration = rows[(i * entries) + 1:((i + 1) * entries) + 1]
            array_lst = []
            for j in integration:
                array_lst.append(make_array(j))

            integrated = sum(array_lst)
            #print integrated
            fit_pars, fit_errs = double_peak_finder(integrated, lower_limit,
                                                    upper_limit)
            mean = [fit_pars[1], fit_errs[1]]
            sigma = [fit_pars[2], fit_errs[2]]
            amp = [fit_pars[0], fit_errs[0]]
            if fit_pars[4] > fit_pars[1]:
                mean = [fit_pars[4], fit_errs[4]]
                sigma = [fit_pars[5], fit_errs[5]]
                amp = [fit_pars[3], fit_errs[3]]
            means.append(mean)
            sigmas.append(sigma)
            amps.append(amp)

            counter += 1
            i += 1
            if make_plot:
                fig = plt.figure()
                fig.patch.set_facecolor('white')
                plt.title('Spectra integrated over a day')
                plt.xlabel('channels')
                plt.ylabel('counts')
                plt.xlim(1, 500)
                x = ar(range(0, len(integrated)))
                plt.plot(x, integrated, 'b:', label='data')
                plt.plot(x,
                         double_gaus_plus_exp(x, fit_pars),
                         'ro:',
                         label='fit')
                plt.legend()
                plt.yscale('log')
                plt.show()
        else:
            counter = 0
    counter = 0

    return means, sigmas, amps
Example #24
def circle_measure(circ_or_area, r_or_d, backwards):
    # plotting
    radius = random.randint(1, 20)
    diam = 2 * radius
    circum = diam * math.pi
    area = math.pi * (radius**2)

    fig = plt.figure(figsize =(2,1))

    ax = fig.add_subplot(111)

    ax.set_aspect('equal')
    plt.axis('off')
    plt.xlim(-2,2)
    plt.ylim(-1.2,1.2)

    circle_r = 1
    # sample the upper semicircle, then mirror it for the lower half;
    # keep |x| <= r so the square root stays real
    circle_x = np.arange(-circle_r, circle_r, .0001)
    circle_y = np.sqrt(circle_r**2 - np.square(circle_x))

    circle_x = np.append(circle_x, circle_x)
    circle_y = np.append(circle_y, -1 * circle_y)

    n = random.randint(0, len(circle_x) - 1)
    if r_or_d == 0:
        rx = [0, circle_x[n]]
        ry = [0, circle_y[n]]
    else:
        rx = [circle_x[n], -1*circle_x[n]]
        ry = [circle_y[n], -1*circle_y[n]]

    r = [[rx[0],ry[0]],[rx[1],ry[1]]]
    label_points = labels_for_shape(ar(r))

    plt.plot(circle_x, circle_y, color = '#1f77b4', linewidth = 1.5, solid_capstyle='round')
    plt.plot(rx, ry, color = '#1f77b4', linewidth = 1.5,linestyle='--', solid_capstyle='round')
    label = str(radius) if r_or_d == 0 else str(diam)
    plt.text(label_points[0][0], label_points[0][1], label, horizontalalignment='center', verticalalignment='center', fontsize='small')
    
    if circ_or_area == 0:
        plt.text(-2, 0.8, "C=?", fontsize='large')
        ans = round(circum, 2)
    else:
        plt.text(-2, 0.8, "A=?", fontsize='large')
        ans = round(area, 2)


    r = random.randint(0, 999999999999999)
    fn = 'temp_img/temp' + str(r) + '.png'

    fig.savefig('media/' + fn, pad_inches=0.1, dpi=300, transparent=True, bbox_inches='tight')
    plt.close(fig)  # free the figure so repeated calls don't leak memory

    return fn, ans
Example #25
def get_peaks2(rows, number=1, n=1, lower_limit=900, upper_limit=1020, make_plot=False, count_offset=100):
    '''
    This is for Tl-208
    Applies  gaussian + const fits to all data over some range of time
    Arguments:
      - full list of csv data input rows
      - number of days to run over
      - number of hours to integrate each calculation over
      - lower,upper limits for fit windows
      - flag to plot each fit for diagnostics
      - count offset correction to fit parameters based on peak position
          (peaks farther from the left edge of spectrum need bigger correction)
    Returns:
      - lists of means,sigmas,amps from all gaussian fits
        - each entry in list includes the value and uncertainty
    '''
    entries = 12*n
    days = (24/n)
    print('making {} plots for each day'.format(days))
    i = 0
    counter = 0
    means = []
    sigmas = []
    amps = []
    while i < number*days:
        if counter < days:
            integration = rows[(i*entries)+1:((i+1)*entries)+1]
            array_lst = [] 
            for j in integration:
                array_lst.append(make_array(j,12))

            integrated = sum(array_lst)
            #print integrated
            fit_pars,fit_errs = peak_finder(integrated,lower_limit,upper_limit,count_offset)
            means.append([fit_pars[1],fit_errs[1]])
            sigmas.append([fit_pars[2],fit_errs[2]])
            amps.append([fit_pars[0],fit_errs[0]])

            counter += 1
            i += 1
            if make_plot:
                fig = plt.figure()
                fig.patch.set_facecolor('white')
                plt.title('Spectra integrated over a day')
                plt.xlabel('channels')
                plt.ylabel('counts')
                plt.xlim(1,1000)
                #plt.ylim()
                x = ar(range(0,len(integrated)))
                plt.plot(x,integrated,'b:',label='data')
                plt.plot(x,gaus_plus_const(x,fit_pars),'ro:',label='fit')
                plt.legend()
                plt.yscale('log')
                plt.show()
        else:
            counter = 0
    counter = 0
    return means,sigmas,amps
Example #26
def compose_normal_line(coefs, optcoords=None):
    if optcoords is not None:
        ss = compose_normal_line(coefs)
        c = 0 - ss[0] * optcoords[0] - ss[1] * optcoords[1]
        return ar([ss[0], ss[1], c])

    else:
        if coefs[0] == 0:
            return ar([1, 0, 0])
        if coefs[1] == 0:
            return ar([0, 1, 0])
        else:
            if coefs[1] != 1:
                return compose_normal_line(ar(coefs) / coefs[1])

            a = -1 / coefs[0]
            b = coefs[1]
            return ar([a, b, 0])
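
Here a line is encoded as the coefficient triple [a, b, c] of ax + by + c = 0. A usage sketch, with ar = numpy.asarray as in the other examples:

import numpy as np
ar = np.asarray

line = ar([2.0, 1.0, -3.0])                             # 2x + y - 3 = 0
print(compose_normal_line(line))                        # [-0.5, 1, 0], slope 0.5
print(compose_normal_line(line, optcoords=(1.0, 1.0)))  # [-0.5, 1, -0.5], through (1, 1)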
Example #27
def get_peaks(rows,
              number=1,
              n=1,
              lower_limit=240,
              upper_limit=300,
              make_plot=False,
              count_offset=100):
    '''
    Gets single gaussian peaks in the specified window
    number is the number of days of spectras to go through
    n is the number of hours that each spectra is integrated over 
    lower_limit, upper_limit set the window to look for a peak inside
    '''
    entries = 12 * n
    days = (24 / n)
    print('making {} plots for each day'.format(days))
    i = 0
    counter = 0
    means = []
    sigmas = []
    amps = []
    while i < number * days:
        if counter < days:
            integration = rows[(i * entries) + 1:((i + 1) * entries) + 1]
            array_lst = []
            for j in integration:
                array_lst.append(make_array(j))

            integrated = sum(array_lst)
            #print integrated
            fit_pars, fit_errs = peak_finder(integrated, lower_limit,
                                             upper_limit, count_offset)
            means.append([fit_pars[1], fit_errs[1]])
            sigmas.append([fit_pars[2], fit_errs[2]])
            amps.append([fit_pars[0], fit_errs[0]])

            counter += 1
            i += 1
            if make_plot:
                fig = plt.figure()
                fig.patch.set_facecolor('white')
                plt.title('Spectra integrated over a day')
                plt.xlabel('channels')
                plt.ylabel('counts')
                plt.xlim(1, 500)
                #plt.ylim()
                x = ar(range(0, len(integrated)))
                plt.plot(x, integrated, 'b:', label='data')
                plt.plot(x, gaus_plus_exp(x, fit_pars), 'ro:', label='fit')
                plt.legend()
                plt.yscale('log')
                plt.show()
        else:
            counter = 0
    counter = 0
    return means, sigmas, amps
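
make_array is another helper from the source project; a plausible sketch, assuming each csv row is a spectrum with some leading metadata columns (the default of 12 mirrors the make_array(j, 12) calls elsewhere on this page):

import numpy as np

def make_array(row, offset=12):
    # hypothetical helper: parse a csv row into a numpy array of counts,
    # skipping `offset` leading metadata columns
    return np.array([float(v) for v in row[offset:]])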
Example #28
def getDistanceData():
    Q = []
    g = 'BoundaryDistanceArray.csv'
    with open('{}'.format(g), 'r') as mycvsfile:
        thedatareader = csv.reader(mycvsfile)
        for row in thedatareader:
            Q.append(row)
    A = ar(Q)
    A = A.astype(float)
    return A
Example #29
    def rotate(self, angle=3.14159 / 4):
        # Calculating the center
        x = 0
        y = 0
        vertex_count = 0

        for point in self.vertex_list:
            x += point[0]
            y += point[1]
            vertex_count += 1

        x = x / vertex_count
        y = y / vertex_count

        center = (x, y)

        # Rotating
        self.vertex_list = dot(
            ar(self.vertex_list) - center,
            ar([[cos(angle), sin(angle)], [-sin(angle),
                                           cos(angle)]])) + center
Example #30
def importFile(fileLoc):
    '''
    This code will open up the txt files containing the data.
    Parameters:
            fileLoc - the file location as a string
    Returns:
            two arrays of integers:
                the first array (channel) is uncalibrated energy 
                    channel
                the second array (counts) is number of counts
    '''

    with open(fileLoc, 'r') as data:
        counts = []
        channel = []
        linenum = 0
        for line in data:
            n = ""
            for char in line:
                if char != '\n' and char != ' ':
                    n += char
            counts.append(n)
            linenum += 1
        for i in range(linenum - 28):
            channel.append(i)
        counts = counts[12:2060]
        counts = [int(num) for num in counts]
        channels = ar(channel)
        counts = ar(counts)
        # zero the first 300 channels to suppress low-energy noise
        for i in range(300):
            counts[i] = 0

    return channels, counts
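
A usage sketch, assuming matplotlib.pyplot is imported as plt and the file follows the 2048-channel layout the slicing above expects:

channels, counts = importFile('spectrum.txt')
plt.plot(channels, counts)
plt.xlabel('channel')
plt.ylabel('counts')
plt.show()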
Example #31
def get_away_direction(ball1, ball2):
    central_segment = ar([ball1.coords, ball2.coords])

    if ball1.coords[0] < ball2.coords[0]:
        direction = -1
    elif ball1.coords[0] > ball2.coords[0]:
        direction = 1
    elif ball1.coords[1] < ball2.coords[1]:
        direction = -1
    else:
        direction = 1

    return get_unary_vector(compose_equation_line(central_segment), direction)
Example #32
def shortest_segment_point_segment(point, segment):
    if len(segment) > 2:
        return shortest_segment_point_segment(
            point, divide_segment_into_points(segment))
    try:
        # a 2-D first argument means point and segment were passed swapped
        if len(point[0]) > 1:
            return shortest_segment_point_segment(segment, point)
    except TypeError:
        pass
    vect1 = segment[0] - point
    vect2 = segment[1] - point

    try:
        if cos_vect(vect1, segment[0] - segment[1]) <= 0:
            return ar([point, segment[0]])
        if cos_vect(vect2, segment[1] - segment[0]) <= 0:
            return ar([point, segment[1]])
    except Exception:
        pass
    normal = compose_normal_line(compose_equation_line(segment), point)
    return ar(
        [point,
         find_intersection(normal, compose_equation_line(segment))])
Example #33
def main(in_fid):
    big_list = make_big_list(in_fid)
    
    test_list = [float(x) for x in big_list]
    print(test_list)
    m,s = norm.fit(test_list)
    n = len(test_list) 
    x = ar(range(n))
    y = ar(test_list)
    """
    n = len(test_list) 
    x = ar(range(n))
    y = ar(test_list)
    mean = sum(x * y) / n 
    sigma = np.sqrt(sum(y * (x - mean)**2) / n)       

    #popt ,pcov = curve_fit(gaus, x, y, p0=[max(y), mean, sigma]) 
    """
    popt, pcov = curve_fit(gaus, x, y, p0=[203, m, s])
    print(popt)
    #plt.plot(x, y)
    plt.plot(x,gaus(x,*popt),'ro:',label='fit')
    plt.show()
Example #34
import pylab as plb
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy import asarray as ar, exp
from math import sqrt

x = ar(range(10))  # [0,1,2,3,4,5,6,7,8,9]
y = ar([0, 1, 2, 3, 4, 5, 4, 3, 2, 1])

n = len(x)                                 # the number of data points
mean = sum(x * y) / n                      # note this correction
sigma = sqrt(sum(y * (x - mean)**2) / n)   # note this correction


def gaus(x, a, x0, sigma):
    return a * exp(-(x - x0)**2 / (2 * sigma**2))


popt, pcov = curve_fit(gaus, x, y, p0=[1, mean, sigma])

resultado = gaus(3, 1, 2, 4)

print('x:', x)

"""
plt.plot(x,y,'b+:',label='data')
plt.plot(x,gaus(x,*popt),'ro:',label='fit')
plt.legend()
plt.title('Fig. 3 - Fit for Time Constant')
plt.xlabel('Time (s)')
plt.ylabel('Voltage (V)')
"""

Example #35
def get_double_peaks(rows, number, n=1, lower_limit=480, upper_limit=600, make_plot=False):
    '''
    Applies double gaussian + expo fits to all data over some range of time
    Arguments:
      - full list of csv data input rows
      - number of days to run over
      - number of hours to integrate each calculation over
      - lower,upper limits for fit windows
      - flag to plot each fit for diagnostics
    Returns:
      - list of means,sigmas,amps for second gaussian in fit 
        - that's the Bi peak, so this is hard coded to work for a specific case
        - each entry in list includes the value and uncertainty
    '''
    entries = 12*n
    days = (24/n)
    i = 0
    counter = 0
    means = []
    sigmas = []
    amps = []
    while i < number*days:
        if counter < days:
            integration = rows[(i*entries)+1:((i+1)*entries)+1]
            array_lst = [] 
            for j in integration:
                array_lst.append(make_array(j,12))

            integrated = sum(array_lst)
            #print integrated
            fit_pars, fit_errs = double_peak_finder(integrated,lower_limit,upper_limit)
            mean = [fit_pars[1],fit_errs[1]]
            sigma = [fit_pars[2],fit_errs[2]]
            amp = [fit_pars[0],fit_errs[0]]
            if fit_pars[4] > fit_pars[1]:
                mean = [fit_pars[4],fit_errs[4]]
                sigma = [fit_pars[5],fit_errs[5]]
                amp = [fit_pars[3],fit_errs[3]]
            means.append(mean)
            sigmas.append(sigma)
            amps.append(amp)

            counter += 1
            i += 1
            if make_plot:
                fig = plt.figure()
                fig.patch.set_facecolor('white')
                plt.title('Spectra integrated over a day')
                plt.xlabel('channels')
                plt.ylabel('counts')
                plt.xlim(1,1000)
                x = ar(range(0,len(integrated)))
                plt.plot(x,integrated,'b:',label='data')
                plt.plot(x,double_gaus_plus_exp(x,fit_pars),'ro:',label='fit')
                plt.legend()
                plt.yscale('log')
                plt.show()
        else:
            counter = 0
    counter = 0

    return means, sigmas, amps
Example #36
import pylab as plb
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy import asarray as ar, exp
from math import sqrt

x = ar(range(10))
y = ar([0, 1, 2, 3, 4, 5, 4, 3, 2, 1])

n = len(x)  # the number of data
mean = sum(x * y) / n  # note this correction
sigma = sqrt(sum(y * (x - mean) ** 2) / n)  # note this correction


def gaus(x, a, x0, sigma):
    return a * exp(-(x - x0) ** 2 / (2 * sigma ** 2))


popt, pcov = curve_fit(gaus, x, y, p0=[1, mean, sigma])

plt.plot(x, y, "b+:", label="data")
plt.plot(x, gaus(x, *popt), "ro:", label="fit")
plt.legend()
plt.title("Fig. 3 - Fit for Time Constant")
plt.xlabel("Time (s)")
plt.ylabel("Voltage (V)")
plt.show()
Example #37
def main():
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s [%(levelname)s] %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    logging.info('*** Starting: Compute STR ***')

    db = connect()
    components_to_compute = get_components_to_compute(db)

    mov_stack = get_config(db, "mov_stack")
    if mov_stack.count(',') == 0:
        mov_stacks = [int(mov_stack), ]
    else:
        mov_stacks = [int(mi) for mi in mov_stack.split(',')]

    goal_sampling_rate = float(get_config(db, "cc_sampling_rate"))
    maxlag = float(get_config(db, "maxlag"))
    export_format = get_config(db, 'export_format')
    if export_format == "BOTH":
        extension = ".MSEED"
    else:
        extension = "."+export_format

    # First we reset all DTT jobs to "T"odo if the REF is new for a given pair
    # for station1, station2 in get_station_pairs(db, used=True):
    #     sta1 = "%s.%s" % (station1.net, station1.sta)
    #     sta2 = "%s.%s" % (station2.net, station2.sta)
    #     pair = "%s:%s" % (sta1, sta2)
    #     if is_dtt_next_job(db, jobtype='DTT', ref=pair):
    #         logging.info(
    #           "We will recompute all STR based on the new REF for %s" % pair)
    #         reset_dtt_jobs(db, pair)
    #         update_job(db, "REF", pair, jobtype='DTT', flag='D')

    filters = get_filters(db, all=False)
    # Then we compute the jobs
    while is_dtt_next_job(db, flag='T', jobtype='MWCS'):
        jobs = get_dtt_next_job(db, flag='T', jobtype='MWCS')

        if not len(jobs):
            # edge case, should only occur when is_next returns true, but
            # get_next receives no jobs (heavily parallelised calls).
            time.sleep(np.random.random())
            continue
        pair = jobs[0].pair
        refs, days = zip(*[[job.ref, job.day] for job in jobs])

        logging.info(
            "There are MWCS jobs for some days to recompute for %s" % pair)
        
        ref_name = pair.replace('.', '_').replace(':', '_')
        sta1, sta2 = pair.split(':')
        station1 = sta1.split(".")
        station2 = sta2.split(".")

        station1 = get_station(db, station1[0], station1[1])
        station2 = get_station(db, station2[0], station2[1])

        dtt_lag = get_config(db, "dtt_lag")
        dtt_v = float(get_config(db, "dtt_v"))
        dtt_minlag = float(get_config(db, "dtt_minlag"))
        dtt_width = float(get_config(db, "dtt_width"))
        dtt_sides = get_config(db, "dtt_sides")

        if dtt_lag == "static":
            minlag = dtt_minlag
        else:
            minlag = get_interstation_distance(station1, station2, station1.coordinates) / dtt_v

        maxlag2 = minlag + dtt_width
        print("between", minlag, "and", maxlag2)

        for f in filters:
            filterid = int(f.ref)

            for mov_stack in mov_stacks:
                for components in components_to_compute:
                        rf = os.path.join("STACKS", "%02i" %
                                          filterid, "REF", components, ref_name + extension)
                        if os.path.isfile(rf):
                            ref = read(rf)[0].data
                            mid = int(goal_sampling_rate*maxlag)
                            ref[mid-int(minlag*goal_sampling_rate):mid+int(minlag*goal_sampling_rate)] *= 0.
                            ref[:mid-int(maxlag2*goal_sampling_rate)] *= 0.
                            ref[mid+int(maxlag2*goal_sampling_rate):] *= 0.
                        else:
                            continue  # no REF stack for this filter/component pair
                        alldays = []
                        alldeltas = []
                        allcoefs = []
                        allerrs = []
                        str_range = 0.5  ### HARD CODE!!! ###
                        nstr = 1001  ### HARD CODE!!! ###
                        ref_stretched, deltas = stretch_mat_creation(ref,
                                                                     str_range=str_range,
                                                                     nstr=nstr)
                        for day in days:
                            df = os.path.join(
                                "STACKS", "%02i" % filterid, "%03i_DAYS" %
                                mov_stack, components, ref_name, str(day) + extension)

                            if os.path.isfile(df):
                                cur = read(df)[0].data   ### read the current mseed file ###
                                cur[mid-int(minlag*goal_sampling_rate):mid+int(minlag*goal_sampling_rate)] *= 0.
                                cur[:mid-int(maxlag2*goal_sampling_rate)] *= 0.
                                # zero the trace at all lags outside the [minlag, maxlag2] window
                                cur[mid+int(maxlag2*goal_sampling_rate):] *= 0.
                                logging.debug(
                                    'Processing Stretching for: %s.%s.%02i - %s - %02i days' %
                                    (ref_name, components, filterid, day, mov_stack))

                                coeffs = []
                                for i in range(ref_stretched.shape[0]):
                                    ci = np.corrcoef(cur,ref_stretched[i])[0,1]
                                    coeffs.append(ci)

                                tday = datetime.datetime.strptime(day, "%Y-%m-%d")
                                alldays.append(tday)
                                alldeltas.append(deltas[np.argmax(coeffs)])
                                allcoefs.append(np.max(coeffs))

                                ###### gaussian fit ######
                                def gauss_function(x, a, x0, sigma):
                                    return a*np.exp(-(x-x0)**2/(2*sigma**2))
                                x = ar(range(len(coeffs)))
                                ymax_index = coeffs.index(np.max(coeffs))
                                ymin = np.min(coeffs)
                                coeffs_shift = []
                                for i in coeffs:
                                    i += np.absolute(ymin) # make all points above zero
                                    coeffs_shift.append(i)
                                n = len(coeffs)
                                x0 = sum(x)/n
                                sigma = (sum((x-x0)**2)/n)**0.5
                                try:
                                    popt, pcov = curve_fit(gauss_function, x, coeffs_shift, [ymax_index, x0, sigma])
                                    FWHM = 2 * ((2*np.log(2))**0.5)*popt[2] # convert sigma (popt[2]) to FWHM
                                    error = FWHM / 2  # error taken as half the FWHM
                                except RuntimeError:
                                    error = np.nan # gaussian fit failed

                                allerrs.append(error)

                        df = pd.DataFrame(np.array([alldeltas,allcoefs,allerrs]).T, index=alldays, columns=["Delta", "Coeff", "Error"],)
                        output = os.path.join('STR', "%02i" % filterid, "%03i_DAYS" % mov_stack, components)
                        if not os.path.isdir(output):
                            os.makedirs(output)
                        df.to_csv(os.path.join(output, "%s.csv" % ref_name), index_label="Date")

        # THIS SHOULD BE IN THE API
        updated = False
        mappings = [{'ref': job.ref, 'flag': "D"} for job in jobs]
        while not updated:
            try:
                db.bulk_update_mappings(Job, mappings)
                db.commit()
                updated = True
            except Exception:
                time.sleep(np.random.random())

    logging.info('*** Finished: Compute STR ***')
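
stretch_mat_creation comes from the surrounding MSNoise-style codebase; a minimal sketch of the interface used above (nstr stretched copies of the reference trace plus the corresponding relative stretch values) might be:

import numpy as np

def stretch_mat_creation(ref, str_range=0.01, nstr=1001):
    # hypothetical sketch: resample `ref` onto time axes stretched by factors
    # spanning [-str_range, +str_range]; returns the matrix and the deltas
    deltas = np.linspace(-str_range, str_range, nstr)
    t = np.arange(len(ref))
    stretched = np.empty((nstr, len(ref)))
    for i, d in enumerate(deltas):
        stretched[i] = np.interp(t * (1 + d), t, ref)
    return stretched, deltas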