Example #1
    def get_mouse_running_peak(self):
        """ determines the time point of the main running activity of the mouse
        """
        # get the velocity of the mouse and replace nan with zeros
        velocity = self.get_mouse_track_data('velocity')
        velocity = np.nan_to_num(velocity)

        # calculate scalar speed
        speed = np.hypot(velocity[:, 0], velocity[:, 1])

        # get smoothing range
        sigma = self.params['mouse/activity_smoothing_interval']

        # compress the data by averaging over consecutive windows
        window = max(1, int(sigma/100))
        if window > 1:
            end = int(len(velocity) / window) * window
            speed = speed[:end].reshape((-1, window)).mean(axis=1)

        # do some Gaussian smoothing to get rid of fluctuations
        filters.gaussian_filter1d(speed, sigma/window, mode='constant', cval=0,
                                  output=speed)
        
        # determine the time point of the maximal rate
        return np.argmax(speed) * window * self.time_scale
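A standalone sketch of the same compress-then-smooth peak search with stand-in inputs (the array and parameter values below are assumptions, not from the original class):

    import numpy as np
    from scipy.ndimage import gaussian_filter1d

    velocity = np.nan_to_num(np.random.randn(10000, 2))  # stand-in track data
    sigma, time_scale = 500, 1.0                         # assumed parameters
    speed = np.hypot(velocity[:, 0], velocity[:, 1])
    window = max(1, int(sigma / 100))                    # coarsen first
    if window > 1:
        end = int(len(speed) / window) * window
        speed = speed[:end].reshape((-1, window)).mean(axis=1)
    speed = gaussian_filter1d(speed, sigma / window, mode='constant', cval=0)
    peak_time = np.argmax(speed) * window * time_scale   # then take the peak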
Example #2
    def get_burrow_peak_activity(self, burrow_track):
        """ determines the time point of the main burrowing activity for the
        given burrow """
        if burrow_track is None:
            return None
        times = burrow_track.times
        assert is_equidistant(times)
        time_delta = (times[-1] - times[0])/(len(times) - 1)

        # ignore the initial frames
        start = int(self.params['burrows/activity_ignore_interval']/time_delta)
        times = times[start:]
        
        if len(times) == 0:
            return None 
        
        # collect all the burrow areas
        areas = np.array([burrow.area
                          for burrow in burrow_track.burrows[start:]])
        
        # do some Gaussian smoothing to get rid of fluctuations
        sigma = self.params['burrows/activity_smoothing_interval']/time_delta
        filters.gaussian_filter1d(areas, sigma, mode='nearest', output=areas)
        
        # calculate the rate of area increase
        area_rate = np.gradient(areas)
         
        # determine the time point of the maximal rate
        return times[np.argmax(area_rate)] * self.time_scale
Example #3
def visualize_energy(y):
    """Effect that expands from the center with increasing sound energy"""
    global p
    y = np.copy(y)
    gain.update(y)
    y /= gain.value
    # Scale by the width of the LED strip
    y *= float((config.N_PIXELS // 2) - 1)
    # Map color channels according to energy in the different freq bands
    scale = 0.9
    r = int(np.mean(y[:len(y) // 3]**scale))
    g = int(np.mean(y[len(y) // 3: 2 * len(y) // 3]**scale))
    b = int(np.mean(y[2 * len(y) // 3:]**scale))
    # Assign color to different frequency regions
    p[0, :r] = 255.0
    p[0, r:] = 0.0
    p[1, :g] = 255.0
    p[1, g:] = 0.0
    p[2, :b] = 255.0
    p[2, b:] = 0.0
    p_filt.update(p)
    p = np.round(p_filt.value)
    # Apply substantial blur to smooth the edges
    p[0, :] = gaussian_filter1d(p[0, :], sigma=4.0)
    p[1, :] = gaussian_filter1d(p[1, :], sigma=4.0)
    p[2, :] = gaussian_filter1d(p[2, :], sigma=4.0)
    # Set the new pixel value
    return np.concatenate((p[:, ::-1], p), axis=1)
Example #4
 def filter(self, field3D):
     # smooth the 3-D field along each axis in turn
     result = gaussian_filter1d(field3D, self.sigma, axis=0)
     result = gaussian_filter1d(result, self.sigma, axis=1)
     result = gaussian_filter1d(result, self.sigma, axis=2)
     return result
Example #5
def gre_percentile_vs_actual():
  cc = Counter(data['gre_quant'][data['is_new_gre'] == True])
  cc1 = Counter(data['gre_verbal'][data['is_new_gre'] == True])
  cum_q = [i[1] for i in sorted([(j, cc[j]) for j in cc],key= lambda x:x[0])]
  cum_v = [i[1] for i in sorted([(j, cc1[j]) for j in cc1],key= lambda x:x[0])]
  perc_q = [100*float(sum(cum_q[:i]))/sum(cum_q) for i in range(len(cum_q))]
  perc_v = [100*float(sum(cum_v[:i]))/sum(cum_v) for i in range(len(cum_v))]
  quant_act = [98, 97, 95, 94, 93, 90, 88, 86, 83, 80, 78, 75, 71, 68, 64, 60, 56, 52, 48, 45, 40, 37, 32, 28, 25, 21, 18, 15, 12, 10, 8, 6, 4, 3, 2, 2, 1, 1, 0, 0, 0][::-1]
  verb_act = [99, 99, 98, 97, 96, 95, 94, 92, 90, 87, 85, 81, 79, 74, 71, 67, 63, 59, 54, 50, 45, 41, 37, 33, 29, 25, 22, 18, 16, 13, 10, 8, 7, 5, 3, 3, 2, 1, 1, 1, 0][::-1]
  act_q_dist = ([100 - quant_act[-1]] + [(quant_act[-i]-quant_act[-i-1]) for i in range(1,len(quant_act))])[::-1]
  act_q_dist = gaussian_filter1d(np.asarray(act_q_dist, dtype=float), 1)
  gc_q_dist = ([100 - perc_q[-1]] + [(perc_q[-i]-perc_q[-i-1]) for i in range(1,len(perc_q))])[::-1]
  act_v_dist = ([100 - verb_act[-1]] + [(verb_act[-i]-verb_act[-i-1]) for i in range(1,len(verb_act))])[::-1]
  act_v_dist = gaussian_filter1d(np.asarray(act_v_dist, dtype=float), 1)
  gc_v_dist = ([100 - perc_v[-1]] + [(perc_v[-i]-perc_v[-i-1]) for i in range(1,len(perc_v))])[::-1]
  current_palette = sns.color_palette()
  gc_v_vals = plot_from_prob(range(130,171), gc_v_dist, 'Grad School Applicants (Reported)', current_palette[0])
  act_v_vals = plot_from_prob(range(130,171), act_v_dist, 'All GRE Test Takers', current_palette[1])
  plt.xlim((130,170))
  plt.ylabel('Probability')
  plt.xlabel('GRE Verbal')
  plt.title('GRE Verbal Difference between Grad School Applicants and All Test Takers')
  plt.legend()
  plt.show()
  gc_q_vals = plot_from_prob(range(130,171), gc_q_dist, 'Grad School Applicants (Reported)', current_palette[2])
  act_q_vals = plot_from_prob(range(130,171), act_q_dist, 'All GRE Test Takers', current_palette[3])
  plt.xlim((130,170))
  plt.ylabel('Probability')
  plt.xlabel('GRE Quantitative')
  plt.title('GRE Quantitative Difference amongst Grad School Applicants and All Test Takers')
  plt.legend()
  plt.show()
Example #6
    def buildParameters(self, npts, strat_all, sealevel):

        # Calculate cross-section parameters
        shoreID = np.zeros(npts)
        accom_shore = np.zeros(npts)
        sed_shore = np.zeros(npts)
        depoend = np.zeros(npts)

        shoreID[0], accom_shore[0], sed_shore[0], depoend[0] = self._buildShoreline(cs = strat_all[0],
                                            cs_b = strat_all[0], sealevel = sealevel[0], sealevel_b = sealevel[0])

        for i in range(1, npts):
            shoreID[i], accom_shore[i], sed_shore[i], depoend[i] = self._buildShoreline(
                cs=strat_all[i], cs_b=strat_all[i-1],
                sealevel=sealevel[i], sealevel_b=sealevel[i-1])

        self.shoreID = shoreID
        self.accom_shore = accom_shore
        self.sed_shore = sed_shore
        self.depoend = depoend

        # Gaussian smooth
        self.shoreID_gs = filters.gaussian_filter1d(shoreID,sigma=1)
        self.accom_gs = filters.gaussian_filter1d(accom_shore,sigma=1)
        self.sed_gs = filters.gaussian_filter1d(sed_shore,sigma=1)
        self.depoend_gs = filters.gaussian_filter1d(depoend,sigma=1)

        return
Example #7
	def generateData(self):
		image = self.getInput(0).getData()
		
		image = image.astype(numpy.float32)
	
		# compute derivatives in x
		Ix = filters.gaussian_filter1d(image, self.sigmaD, 0, 0)
		Ix = filters.gaussian_filter1d(Ix, self.sigmaD, 1, 1)
	
		# compute derivatives in y
		Iy = filters.gaussian_filter1d(image, self.sigmaD, 1, 0)
		Iy = filters.gaussian_filter1d(Iy, self.sigmaD, 0, 1)
	
		# components of the structure tensor: products of first
		# derivatives, smoothed at the integration scale
		# Ix*Ix
		Ixx = filters.gaussian_filter(Ix**2, self.sigmaI, 0)
		# Iy*Iy
		Iyy = filters.gaussian_filter(Iy**2, self.sigmaI, 0)
		# Ix*Iy
		Ixy = filters.gaussian_filter(Ix * Iy, self.sigmaI, 0)
		
		self.getOutput(0).setData(Ix)
		self.getOutput(1).setData(Iy)
		self.getOutput(2).setData(Ixx)
		self.getOutput(3).setData(Iyy)
		self.getOutput(4).setData(Ixy)
Example #8
 def get_trajectory(self, smoothing=0):
     """ returns a numpy array of positions over time """
     trajectory = np.array([obj.pos for obj in self.objects])
     if smoothing:
         filters.gaussian_filter1d(trajectory, output=trajectory,
                                   sigma=smoothing, axis=0, mode='nearest')
     return trajectory
Example #9
def generate_mask_condition(n_x=10, n_y=10, n_z=10, sigma=1., threshold=0.5,
                            seed=None):
    """

    Parameters
    ----------
    n_x : int
    n_y : int
    n_z : int
    sigma : float
    threshold : float [0, 1]
    seed : int

    Returns
    -------
    mask_img : bool array of shape (n_x, n_y, n_z)
    """
    rng = check_random_state(seed)

    image = rng.rand(n_x, n_y, n_z)
    for k in [0, 1, 2]:
        gaussian_filter1d(image, sigma=sigma, output=image, axis=k)
    max_img, min_img = image.max(), image.min()
    image -= min_img
    image /= max_img - min_img
    mask_img = image > threshold
    return mask_img
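A usage sketch; `check_random_state` (e.g. from sklearn.utils) and `gaussian_filter1d` are assumed to be imported as the function requires:

    from scipy.ndimage import gaussian_filter1d
    from sklearn.utils import check_random_state

    mask = generate_mask_condition(n_x=20, n_y=20, n_z=20, sigma=2., seed=0)
    print(mask.shape, mask.dtype, mask.mean())  # fraction of voxels selected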
Example #10
    def buildSection(self, xo = None, yo = None, xm = None, ym = None,
                    pts = 100, gfilter = 5):
        """
        Extract a slice from the 3D data set and compute the stratigraphic layers.
        Parameters
        ----------
        variable: xo, yo
            Lower X,Y coordinates of the cross-section.
        variable: xm, ym
            Upper X,Y coordinates of the cross-section.
        variable: pts
            Number of points to discretise the cross-section.
        variable: gfilter
            Gaussian smoothing filter.
        """

        if xm > self.x.max():
            xm = self.x.max()

        if ym > self.y.max():
            ym = self.y.max()

        if xo < self.x.min():
            xo = self.x.min()

        if yo < self.y.min():
            yo = self.y.min()

        xsec, ysec = self._cross_section(xo, yo, xm, ym, pts)
        self.dist = np.sqrt(( xsec - xo )**2 + ( ysec - yo )**2)
        self.xsec = xsec
        self.ysec = ysec
        for k in range(self.nz):
            # Thick
            rect_B_spline = RectBivariateSpline(self.yi, self.xi, self.th[:,:,k])
            data = rect_B_spline.ev(ysec, xsec)
            secTh = filters.gaussian_filter1d(data,sigma=gfilter)
            secTh[secTh < 0] = 0
            self.secTh.append(secTh)

            # Elev
            rect_B_spline1 = RectBivariateSpline(self.yi, self.xi, self.elev[:,:,k])
            data1 = rect_B_spline1.ev(ysec, xsec)
            secElev = filters.gaussian_filter1d(data1,sigma=gfilter)
            self.secElev.append(secElev)

            # Depth
            rect_B_spline2 = RectBivariateSpline(self.yi, self.xi, self.dep[:,:,k])
            data2 = rect_B_spline2.ev(ysec, xsec)
            secDep = filters.gaussian_filter1d(data2,sigma=gfilter)
            self.secDep.append(secDep)

        # Ensure the spline interpolation does not create underlying layers above upper ones
        topsec = self.secDep[self.nz-1]
        for k in range(self.nz-2,-1,-1):
            secDep = self.secDep[k]
            self.secDep[k] = np.minimum(secDep, topsec)
            topsec = self.secDep[k]

        return
Example #11
def fit_cg6(infile, crop=None, remove_peaks=[], rmin=2, rmax=10):
    # a .cg6 file has 3 columns: r, N, N*G_6
    data = np.loadtxt(infile)
    # remove the end of the table if long-time correlations are wrong
    if crop is not None:
        data = data[:crop]
    # remove the beginning of the table where N = 0
    blank = np.where(data[:, 1] == 0)[0]
    if len(blank) > 0 and (blank[-1] < len(data) - 1):
        data = data[blank[-1] + 1:]
    # remove all values for r < 1
    data = data[np.searchsorted(data[:, 0], 1):]
    # smooth the data
    cg6 = gaussian_filter1d(data[:, -1] / data[:, 1], rmin)
    # high-pass filter to turn the function into an oscillatory decaying one,
    # then extract its peaks
    peaks, mins = find_peak_mins(cg6 * data[:, 0]
                                 - gaussian_filter1d(cg6 * data[:, 0], rmax))
    # remove the peaks with negative values
    peaks = [p for p in peaks if cg6[p] > 0]
    peaks = [p for i, p in enumerate(peaks) if i not in remove_peaks]
    params = leastsq(
        lambda p, x, y: np.log(p[0] / x * np.exp(-x / p[1])) - np.log(y),
        [2, 3],
        args=(data[peaks, 0], cg6[peaks])
        )[0]
    return params, np.column_stack((data[:, 0], cg6)), peaks
Example #12
 def __init__(self, gt_files, blur_images=True):
     self.gt_files = gt_files
     self.batch_size = 32
     self.gt_images, self.bits_true, self.configs_true = next(gt_grids(
         gt_files, all=True, center='zero'))
     if blur_images:
         self.gt_images = gaussian_filter1d(self.gt_images, 2/3, axis=-1)
         self.gt_images = gaussian_filter1d(self.gt_images, 2/3, axis=-2)
     self.nb_samples = len(self.gt_images)
Example #13
def generator(tag_dist, batch_size, antialiasing=1):
    s = antialiasing
    depth_scale = 1/2
    for param, mask, depth_map in generator_3d_tags_with_depth_map(
            tag_dist, batch_size, antialiasing=s, depth_scale=depth_scale):
        depth_map = gaussian_filter1d(depth_map, 2/6/depth_scale, axis=-1, mode='constant')
        depth_map = gaussian_filter1d(depth_map, 2/6/depth_scale, axis=-2, mode='constant')
        depth_map = zoom(depth_map, (1., 1., depth_scale, depth_scale))
        yield param, mask, depth_map
Example #14
def get_xy_min_points(registered_image, index):
    xstart = xstarts[index]
    xend = xends[index]
    ystart = ystarts[index]
    yend = yends[index]
    board_image = registered_image[ystart:yend, xstart:xend, 0]
    xval = board_image.sum(axis=0)
    yval = board_image.sum(axis=1)
    xval = gaussian_filter1d(xval, sigma=1)
    yval = gaussian_filter1d(yval, sigma=1)
    x_min_points = get_min_points(xval)
    y_min_points = get_min_points(yval)
    return x_min_points, y_min_points
Example #15
def harris(image, sigmaD=1.0, sigmaI=1.5, count=512):
	'''
	Finds Harris corner features for an input image.  The input image
	should be a 2D numpy array.  The sigmaD and sigmaI parameters define
	the differentiation and integration scales (respectively) of the
	Harris feature detector---the defaults are reasonable for many images.
	
	Returns:
	a count x 3 array, where each row contains the (x, y)
	position, and Harris score of a feature point.  The array is sorted
	by decreasing score.
	'''
	
	image = image.astype(numpy.float32)
	h, w = image.shape[0:2]
	
	# compute image derivatives
	Ix = filters.gaussian_filter1d(image, sigmaD, 0, 0)
	Ix = filters.gaussian_filter1d(Ix, sigmaD, 1, 1)
	Iy = filters.gaussian_filter1d(image, sigmaD, 1, 0)
	Iy = filters.gaussian_filter1d(Iy, sigmaD, 0, 1)
	
	# compute elements of the structure tensor
	Ixx = filters.gaussian_filter(Ix**2, sigmaI, 0)
	Iyy = filters.gaussian_filter(Iy**2, sigmaI, 0)
	Ixy = filters.gaussian_filter(Ix * Iy, sigmaI, 0)
	
	# compute Harris feature strength, avoiding divide by zero
	imgH = (Ixx * Iyy - Ixy**2) / (Ixx + Iyy + 1e-8)
		
	# exclude points near the image border
	imgH[:16, :] = 0
	imgH[-16:, :] = 0
	imgH[:, :16] = 0
	imgH[:, -16:] = 0
	
	# non-maximum suppression in 5x5 regions
	maxH = filters.maximum_filter(imgH, (5,5))
	imgH = imgH * (imgH == maxH)
	
	# sort points by strength and find their positions
	sortIdx = numpy.argsort(imgH.flatten())[::-1]
	sortIdx = sortIdx[:count]
	yy = sortIdx // w
	xx = sortIdx % w
		
	# concatenate positions and values
	xyv = numpy.vstack((xx, yy, imgH.flatten()[sortIdx])).transpose()
	
	return xyv
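A usage sketch on a synthetic checkerboard (hypothetical input; `filters` refers to scipy.ndimage.filters as in the snippet):

    import numpy
    board = (numpy.indices((128, 128)).sum(axis=0) // 16 % 2) * 255
    features = harris(board.astype(numpy.float32), sigmaD=1.0, sigmaI=1.5,
                      count=50)
    print(features[:5])  # strongest five corners as (x, y, score) rows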
Example #16
    def smear(self, sigma):
        """
        Apply Gaussian smearing to spectrum y value.

        Args:
            sigma: Std dev for Gaussian smear function
        """
        diff = [self.x[i + 1] - self.x[i] for i in range(len(self.x) - 1)]
        avg_x_per_step = np.sum(diff) / len(diff)
        if len(self.ydim) == 1:
            self.y = gaussian_filter1d(self.y, sigma / avg_x_per_step)
        else:
            self.y = np.array([
                gaussian_filter1d(self.y[:, k], sigma / avg_x_per_step)
                for k in range(self.ydim[1])]).T
Example #17
    def trajectory_smoothed(self, sigma):
        """ returns the mouse trajectory smoothed with a Gaussian filter of
        standard deviation `sigma` """
        trajectory = np.empty_like(self.pos)
        trajectory.fill(np.nan)
        
        # smooth position
        indices = contiguous_true_regions(np.isfinite(self.pos[:, 0]))
        for start, end in indices:
            if end - start > 1:
                filters.gaussian_filter1d(self.pos[start:end, :],
                                          sigma, axis=0, mode='nearest',
                                          output=trajectory[start:end, :])

        return trajectory
Example #18
def errSpec(spec, window=25):
    # Returns an error spectrum of optical spectrum a (1D array)
    #  Uses window size to determine smoothing kernel and rolling window size
    a = cleanMe(np.array(spec))
    noise = a - gaussian_filter1d( a, window )
    errors = rolling_std( noise, window )
    return errors
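The helpers `cleanMe` and `rolling_std` are not shown above; a minimal sketch of the same idea with stand-in implementations (assumptions, not the original helpers):

    import numpy as np
    from scipy.ndimage import gaussian_filter1d
    from numpy.lib.stride_tricks import sliding_window_view

    def rolling_std_sketch(a, window):
        # pad at the edges so the output keeps the input length
        pad = window // 2
        padded = np.pad(a, (pad, window - 1 - pad), mode='edge')
        return sliding_window_view(padded, window).std(axis=-1)

    spec = np.sin(np.linspace(0, 20, 1000)) + 0.05 * np.random.randn(1000)
    noise = spec - gaussian_filter1d(spec, 25)  # residual around the smooth fit
    errors = rolling_std_sketch(noise, 25)      # local noise estimate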
Example #19
def along_component(vel_array, lat, lon, m, is_long_section=True):
    """Convert velocities to along-transect component

    Positive velocities are toward southeast

    Currently only computed for long-sound transects
    """
    x, y = m(lon, lat)
    # x, y = [gaussian_filter1d(q, 3) for q in (x, y)]
    lon, lat = [gaussian_filter1d(q, 5) for q in (lon, lat)]
    _, ship_bearing = haversines(lon, lat)

    U, V = vel_array[:, :, :2].T
    tmp = 90 - np.rad2deg(np.arctan2(V, U))
    vel_bearing = (tmp + 360) % 360

    along_vel = np.hypot(U, V)*cosd(vel_bearing - ship_bearing)
    along_vel = along_vel.T

    if np.mean(np.diff(lat)) > 0:
        # Transect is northward
        along_vel *= -1

    if not is_long_section:
        # Along-transect velocity needs work for cross-sections
        # Fill it with NaNs to ensure it isn't used incorrectly
        along_vel = np.full(along_vel.shape, np.nan)

    return along_vel
Example #20
def skew_detection(image_gray):
    h, w = image_gray.shape[:2]
    eigen = cv2.cornerEigenValsAndVecs(image_gray,12, 5)
    angle_sur = np.zeros(180,np.uint)
    eigen = eigen.reshape(h, w, 3, 2)
    flow = eigen[:,:,2]
    vis = image_gray.copy()
    vis[:] = (192 + np.uint32(vis)) / 2
    d = 12
    points =  np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2)
    for x, y in points:
        vx, vy = np.int32(flow[int(y), int(x)]*d)
        # cv2.line(rgb, (x-vx, y-vy), (x+vx, y+vy), (0, 355, 0), 1, cv2.LINE_AA)
        ang = angle(vx,vy)
        angle_sur[(ang+180)%180] +=1

    # torr_bin = 30
    angle_sur = angle_sur.astype(float)
    angle_sur = (angle_sur-angle_sur.min())/(angle_sur.max()-angle_sur.min())
    angle_sur = filters.gaussian_filter1d(angle_sur,5)
    skew_v_val = angle_sur[20:180-20].max()
    skew_v = angle_sur[30:180-30].argmax() + 30
    skew_h_A = angle_sur[0:30].max()
    skew_h_B = angle_sur[150:180].max()
    skew_h = 0
    if (skew_h_A > skew_v_val*0.3 or skew_h_B > skew_v_val*0.3):
        if skew_h_A>=skew_h_B:
            skew_h = angle_sur[0:20].argmax()
        else:
            skew_h = - angle_sur[160:180].argmax()
    return skew_h,skew_v
Example #21
 def max_forces_derivative(self):
     d_max = -np.inf
     for a in [self.l_finger_tip, self.r_finger_tip]:
          for col in range(a.shape[1]):
             #d_max = max(d_max, np.convolve(a[:,col], [1,-1]).max())
             d_max = max(d_max, (a[:,col] - gaussian_filter1d(a[:,col],1)).max())
     return d_max
Example #22
def smooth_spk(train, width=0.1, plot=False, normalize=False):
    """
    Gaussian convolution of spike trains, averaged across trials
    :param train: spike trains (N x Length x Trials)
    :param width: width (std. dev.) of the gaussian kernel, in samples
    :param plot: if True, plot the smoothed trains stacked per cell
    :param normalize: if True, rescale each smoothed train to [0, 1]
    :return: smo: smoothed trains
    """
    import scipy.ndimage.filters as fil

    ave = np.mean(train, axis=2)
    smo = list()
    for n in range(len(train)):
        y = fil.gaussian_filter1d(ave[n, :], sigma=width)
        if normalize:
            den = np.max(y) - np.min(y)
            y = (y - np.min(y)) / den if den != 0. else y
        smo.append(y)

    if plot:
        import matplotlib.pyplot as plt
        space = 0.
        fig = plt.figure(frameon=False, figsize=(9, 7), dpi=80, facecolor='w', edgecolor='k')
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])

        for n in range(len(train)):
            plt.plot(smo[n] + space)
            space += 1.
        plt.xlabel('Samples')
        plt.ylabel('Cell Num.')

    return np.array(smo)
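A usage sketch with synthetic spike trains in the N x Length x Trials layout from the docstring; the values here are made up:

    import numpy as np
    trains = (np.random.rand(4, 500, 10) < 0.05).astype(float)  # 4 cells
    smo = smooth_spk(trains, width=5, plot=False, normalize=True)
    print(smo.shape)  # (4, 500): one smoothed rate per cell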
Example #23
def broad2hydra(wave, intens, obsres):
    """ Convolve spectra to match the Hydra-CTIO resolution.

    ================
    Input parameters
    ================
    wave: array_like
        Wavelength 1-D array in Angstroms.

    intens: array_like
        Intensity 1-D array, in arbitrary units. The length has
        to be the same as wave.

    obsres: float
        Value of the observed resolution Full Width at Half Maximum (FWHM) in
        Angstroms.

    =================
    Output parameters
    =================
    array_like
        The convolved intensity 1-D array.

    """
    coeffs = np.array([1.84892311e-16, -4.32973804e-12, 3.94864261e-08,
                     -1.73552121e-04, 3.61151772e-01, -2.70743632e+02])
    poly = np.poly1d(coeffs)
    sigmas = np.sqrt(poly(wave)**2 - obsres**2) / 2.3548 / (wave[1] - wave[0])
    intens2D = np.diag(intens)
    for i in range(len(sigmas)):
        intens2D[i] = gaussian_filter1d(intens2D[i], sigmas[i],
                      mode="constant", cval=0.0)
    return intens2D.sum(axis=0)
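A usage sketch with a synthetic line (made-up values; the function builds a len(wave) x len(wave) diagonal matrix internally, so keep the arrays modest):

    import numpy as np
    from scipy.ndimage import gaussian_filter1d

    wave = np.linspace(4000., 7000., 500)                # Angstroms
    intens = np.exp(-0.5 * ((wave - 5500.) / 30.) ** 2)  # synthetic feature
    convolved = broad2hydra(wave, intens, obsres=2.0)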
Example #24
def smooth(Y, fwhm=5.0):
	'''
	Smooth a set of 1D continua.
	This method uses **scipy.ndimage.filters.gaussian_filter1d** but uses the *fwhm*
	instead of the standard deviation.
	
	:Parameters:
	
	- *Y* --- a (J x Q) numpy array
	- *fwhm* ---  Full-width at half-maximum of a Gaussian kernel used for smoothing.
	
	:Returns:
	
	- (J x Q) numpy array
	
	:Example:
	
	>>> Y0  = np.random.rand(5, 101)
	>>> Y   = spm1d.util.smooth(Y0, fwhm=10.0)
	
	.. note:: A Gaussian kernel's *fwhm* is related to its standard deviation (*sd*) as follows:
	
	>>> fwhm = sd * sqrt(8*log(2))
	'''
	sd    = fwhm / sqrt(8*log(2))
	return gaussian_filter1d(Y, sd, mode='wrap')
Example #25
    def _deriveObservedSpectra(self):
        """
        Derives a 1D spectrum from the 2D input data.

        Sums the pixels around the centre of the continuum that match to the SDSS fiber size.
        Multiplies the flux in the pixels next to the last full ones to include with the
        fractional flux we would otherwise be "missing".
        """
        #y center and how many full pixels on either side we can include that would still
        #be within the SDSS fiber
        y = self.fitting['ycenter']
        ymod = self.fitting['slitPix2']

        #modify the lines of the fractional pixel information
        self.fitting['obsData'][y+ymod+1, :] *= (self.fitting['slitPixFractional'] / 2.)
        self.fitting['obsData'][y-ymod-1, :] *= (self.fitting['slitPixFractional'] / 2.)

        #sum the flux
        self.fitting['obsSpectrum'] = np.sum(self.fitting['obsData'][y-ymod-1:y+ymod+2, :], axis=0) / \
                                      self.fitting['boosting']

        #match the resolution, i.e. convolve the observed spectrum with a Gaussian
        self.fitting['obsSpectraConvolved'] = filt.gaussian_filter1d(self.fitting['obsSpectrum'],
                                                                     self.fitting['sigma'])

        #get a wavelength scale
        self.fitting['obsWavelengths'] = basics.getWavelengths(self.fitting['observed'],
                                                               len(self.fitting['obsSpectraConvolved']))
Example #26
def smooth(spectrum, kernel=0):
  """Smooths the input spectrum using a user-specified Gaussian kernel.
  """

  spectrum = gaussian_filter1d(spectrum, sigma=kernel)

  return spectrum
Example #27
def smooth_linestring(linestring, smooth_sigma):
    """
    Uses a gauss filter to smooth out the LineString coordinates.
    """
    smooth_x = np.array(filters.gaussian_filter1d(
        linestring.xy[0],
        smooth_sigma)
        )
    smooth_y = np.array(filters.gaussian_filter1d(
        linestring.xy[1],
        smooth_sigma)
        )
    smoothed_coords = list(zip(smooth_x, smooth_y))
    linestring_smoothed = LineString(smoothed_coords)
    return linestring_smoothed
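A usage sketch with a jagged shapely LineString (the snippet assumes `np`, `filters`, and `LineString` are already imported):

    from shapely.geometry import LineString
    zigzag = LineString([(float(i), float(i % 2)) for i in range(20)])
    smoothed = smooth_linestring(zigzag, smooth_sigma=2)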
Example #28
def place_markers(x, spl, spacing=2.):
    fineness = 10
    xFine = np.linspace(x[0], x[-1], fineness*len(x))
    yFine = spl(xFine)
    derivFine = np.empty(len(xFine), dtype='f8')
    for i,xx in enumerate(xFine):
        derivFine[i] = spl.derivatives(xx)[1]
    derivFine = filters.gaussian_filter1d(derivFine, fineness*spacing)
    dx = np.diff(xFine)
    dy = np.diff(yFine)
    dist = np.sqrt(dx*dx + dy*dy)
    dist = np.cumsum(dist)
    nMarkers = int(dist[-1] / spacing)
    if nMarkers > 1e5:
        raise ValueError('nMarkers unreasonably high. Something has likely gone wrong.')
    markerDist = np.linspace(spacing, spacing*nMarkers, nMarkers)
    markerPos = np.empty((nMarkers, 2), dtype='f8')
    markerDeriv = np.empty(nMarkers, dtype='f8')
    cellNo = 0
    for i, (xx, d) in enumerate(zip(xFine[1:], dist)):
        if d >= (cellNo+1) * spacing:
            markerPos[cellNo, 0] = xx
            markerPos[cellNo, 1] = spl(xx)
            markerDeriv[cellNo] = derivFine[i+1] #spl.derivatives(xx)[1]
            cellNo += 1
    
    return markerPos, markerDeriv
Example #29
    def gaussian_filter(self, FWHM):
        """Applies a Gaussian filter in the spectral dimension in place.

        Parameters
        ----------
        FWHM : float
            The Full Width at Half Maximum of the gaussian in the
            spectral axis units

        Raises
        ------
        ValueError if FWHM is equal or less than zero.

        SignalDimensionError if the signal dimension is not 1.

        """
        self._check_signal_dimension_equals_one()
        if FWHM <= 0:
            raise ValueError(
                "FWHM must be greater than zero")
        axis = self.axes_manager.signal_axes[0]
        FWHM *= 1 / axis.scale
        self.data = gaussian_filter1d(
            self.data,
            axis=axis.index_in_array,
            sigma=FWHM / 2.35482)
        self.events.data_changed.trigger(obj=self)
Example #30
    def smooth_spikes(self, lap, plot=True):

        if lap != 0:
            widget = {"run": pg.PlotWidget(), "whl": pg.PlotWidget()}
            space = 0
            win = self.extract_setions(lap)
            #   firing rates smoothed per panel
            fr_smo = {"run": list(), "whl": list()}
            for neuron in range(self.numN):
                if "neuron {}".format(neuron) in self.spk_per_neuron:
                    spk = self.spk_per_neuron["neuron {}".format(neuron)]
                    for panel, w in win.items():
                        idx = np.where(np.logical_and(spk > w[0], spk <= w[1]))
                        y = np.zeros(w[1] - w[0] + 1)
                        y[spk[idx] - w[0]] = 1.0
                        fr = sfil.gaussian_filter1d(y, sigma=self.fs * 0.1)
                        fr_smo[panel].append(fr)
                        if plot:
                            time = np.linspace(0, (w[1] - w[0]) / self.fs, num=len(fr))
                            if np.mean(fr) > (0.5 / self.fs):
                                widget[panel].plot(time, fr / max(fr) + space, pen=(space, self.numN))

                    space += 1

            if plot:
                for key, pw in widget.items():
                    self.layout.addWidget(pw, self.panels_count[key], self.panels[key])
                    self.panels_count[key] += 1
                    if self.rasterWidget:
                        pw.setXLink(self.rasterWidget[key])
            self.fr_smo = fr_smo
Example #31
def draw_on_img(gray, edges, vm, mag_arr, y1=470, y2=650, x1=None, x2=None, thicc=20):
    '''
    Draw edges on grayscale and get lane boundary region of interest
    
    Parameters:
        
        gray : np.array, grayscale of original image
        
        edges : list, return of get_edges_thetas()
        
        vm : list, return of get_edges_thetas()
        
        mag_arr : np.array, return of edf_comp()
        
        y1 : scalar, search region limit (lower y)
        
        y2 : scalar, search region limit (greater y)
        
        x1 : scalar, search region limit (lower x)
        
        x2 : scalar, search region limit (greater x)
        
        thicc : scalar, line thickness
        
    Return:
    
        gray_lines : np.array, line drawn grayscale image
        
        lbrois : list
        
            [lbroi1, lbroi2, ..., lbroiN]
            
            lbroiN : Nth edge area for next phase of algorithm
    
    '''
    ang_buf = [deque([]), deque([])]
    for i in range(len(vm)):
        ang_buf[i].append(vm[i])
    gray_lines = np.copy(gray)
    h, w = mag_arr.shape
    h_degs = vm_to_hough(vm)
    lbrois = []
    for i in range(len(edges)):
        lbroi = np.zeros_like(gray)
        edge_im = edges[i]
        theta = h_degs[i]
        acc, diag, theta = weighted_hough(edge_im, theta, mag_arr)
        smooth_acc = gaussian_filter1d(acc, sigma=51)
        x = np.arange(0, 2*diag+1, 1)
        maxm = argrelextrema(smooth_acc, np.greater)
        maxm = x[maxm]
        
        points = []
        for maxima in maxm:
            points.append((maxima, smooth_acc[maxima]))
        points.sort(key=itemgetter(1))
        
        rho = points[-1][0] - diag
        
        line_img = line(rho, theta, h, w, thicc=thicc)
        
        coors = np.zeros(4).astype(np.uint32)
        
        coors[0] = 0 if y1 is None else y1-EXTENSION
        coors[1] = h if y2 is None else y2
        coors[2] = 0 if x1 is None else x1
        coors[3] = w if x2 is None else x2
        
        #for g in range(4):
            #coors[g] = int(coors[g])
            #print(coors[g])
        lbroi[coors[0]:coors[1], coors[2]:coors[3]] += line_img
        lbrois.append(lbroi)
        gray_lines[coors[0]:coors[1], coors[2]:coors[3]] += line_img
    
    return gray_lines, lbrois, ang_buf
Example #33
def smooth_gaussian_processor(sample, k=2):
    data = sample
    assert len(np.shape(np.array(data))) == 1, 'Please only give smooth_gaussian_processor a one dimensional input.'
    smoothed = gaussian_filter1d(data, k)
    return smoothed
Example #34
def slidingWindowsEval(image):
    windows_size = 16
    stride = 1
    height = image.shape[0]
    data_sets = []

    for i in range(0, image.shape[1] - windows_size + 1, stride):
        data = image[0:height, i:i + windows_size]
        data = cv2.resize(data, (23, 23))
        data = cv2.equalizeHist(data)
        data = data.astype(float) / 255
        data = np.expand_dims(data, 3)
        data_sets.append(data)

    res = model2.predict(np.array(data_sets))

    pin = res
    p = 1 - (res.T)[1]
    p = f.gaussian_filter1d(np.array(p, dtype=float), 3)
    lmin = l.argrelmax(np.array(p), order=3)[0]
    interval = []
    for i in range(len(lmin) - 1):
        interval.append(lmin[i + 1] - lmin[i])

    if(len(interval) > 3):
        mid = get_median(interval)
    else:
        return []
    pin = np.array(pin)
    res = searchOptimalCuttingPoint(image, pin, 0, mid, 3)

    cutting_pts = res[1]
    last = cutting_pts[-1] + mid
    if last < image.shape[1]:
        cutting_pts.append(last)
    else:
        cutting_pts.append(image.shape[1] - 1)
    name = ""
    confidence = 0.00
    seg_block = []
    for x in range(1, len(cutting_pts)):
        if x != len(cutting_pts) - 1 and x != 1:
            section = image[0:36, cutting_pts[x - 1] - 2:cutting_pts[x] + 2]
        elif x == 1:
            c_head = cutting_pts[x - 1] - 2
            if c_head < 0:
                c_head = 0
            c_tail = cutting_pts[x] + 2
            section = image[0:36, c_head:c_tail]
        elif x == len(cutting_pts) - 1:
            end = cutting_pts[x]
            diff = image.shape[1] - end
            c_head = cutting_pts[x - 1]
            c_tail = cutting_pts[x]
            if diff < 7:
                section = image[0:36, c_head - 5:c_tail + 5]
            else:
                diff -= 1
                section = image[0:36, c_head - diff:c_tail + diff]
        elif x == 2:
            section = image[0:36, cutting_pts[x - 1] -
                            3:cutting_pts[x - 1] + mid]
        else:
            section = image[0:36, cutting_pts[x - 1]:cutting_pts[x]]
        seg_block.append(section)
    refined = refineCrop(seg_block, mid - 1)
    for i, one in enumerate(refined):
        res_pre = cRP.SimplePredict(one, i)
        confidence += res_pre[0]
        name += res_pre[1]
    return refined, name, confidence
Example #35
def blur_image(X, sigma=1):
    X = gaussian_filter1d(X, sigma, axis=1)
    X = gaussian_filter1d(X, sigma, axis=2)
    return X
Example #36
def dos(calc_dir, 
        what_to_plot={'total' : {'spins' : ['summed'],
                                 'orbitals' : ['all']}},
        colors_and_labels = {'total-summed-all' : {'color' : 'black',
                                                   'label' : 'total'}},
        xlim=(0, 0.1), ylim=(-10, 4), 
        xticks=(False, [0, 0.1]), yticks=(False, [-10, 4]), 
        xlabel=r'$DOS/e^-$', ylabel=r'$E-E_F\/(eV)$',
        legend=True,
        smearing=0.2,
        shift='Fermi', normalization='electron',
        cb_shift=False,
        vb_shift=False,
        show=False,
        doscar='DOSCAR.lobster',
        ):
    """
    Args:
        calc_dir (str) - path to calculation with DOSCAR
        what_to_plot (dict) - {element or 'total' (str) : {'spins' : list of spins to include ('summed', 'up', and or 'down'),
                                                           'orbitals' : list of orbitals to include (str)}}
        colors_and_labels (dict) - {element-spin-orbital (str) : {'color' : color (str),
                                                                  'label' : label (str)}}
        xlim (tuple) - (xmin (float), xmax (float))
        ylim (tuple) - (ymin (float), ymax (float))
        xticks (tuple) - (bool to show label or not, (xtick0, xtick1, ...))
        yticks (tuple) - (bool to show label or not, (ytick0, ytick1, ...))
        xlabel (str) - x-axis label
        ylabel (str) - y-axis label
        legend (bool) - include legend or not
        smearing (float or False) - std. dev. for Gaussian smearing of DOS or False for no smearing
        shift (float or 'Fermi') - if 'Fermi', make Fermi level 0; else shift energies by shift
        cb_shift (tuple or False) - shift all energies >= cb_shift[0] (float) by cb_shift[1] (float)
        vb_shift (tuple or False) - shift all energies <= vb_shift[0] (float) by vb_shift[1] (float)             
        normalization ('electron', 'atom', or False) - divide populations by number of electrons, number of atoms, or not at all
        show (bool) - if True, show figure; else just return ax
                   
    Returns:
        matplotlib axes object
    """
    set_rc_params()    
    if show == True:
        fig = plt.figure(figsize=(2.5,4))
        ax = plt.subplot(111)
    if 'lobster' in doscar:
        Efermi = 0.
    else:
        Efermi = VASPBasicAnalysis(calc_dir).Efermi        
    if shift == 'Fermi':
        shift = -Efermi
    if normalization == 'electron':
        normalization = VASPBasicAnalysis(calc_dir).params_from_outcar(num_params=['NELECT'], str_params=[])['NELECT']
    elif normalization == 'atom':
        normalization = VASPBasicAnalysis(calc_dir).nsites
    occupied_up_to = Efermi + shift
    print(occupied_up_to)
    dos_lw = 1    
    for element in what_to_plot:
        for spin in what_to_plot[element]['spins']:
            for orbital in what_to_plot[element]['orbitals']:
                tag = '-'.join([element, spin, orbital])
                color = colors_and_labels[tag]['color']
                label = colors_and_labels[tag]['label']
                d = VASPDOSAnalysis(calc_dir, doscar=doscar).energies_to_populations(element=element,
                                                                      orbital=orbital,
                                                                      spin=spin)
                if spin == 'down':
                    flip_sign = True
                else:
                    flip_sign = False
                d = ProcessDOS(d, shift=shift, 
                               flip_sign=flip_sign,
                               normalization=normalization,
                               cb_shift=cb_shift,
                               vb_shift=vb_shift).energies_to_populations
                               
                energies = sorted(list(d.keys()))
                populations = [d[E] for E in energies]
                occ_energies = [E for E in energies if E <= occupied_up_to]
                occ_populations = [d[E] for E in occ_energies]
                unocc_energies = [E for E in energies if E > occupied_up_to]
                unocc_populations = [d[E] for E in unocc_energies]    
                if smearing:
                    occ_populations = gaussian_filter1d(occ_populations, smearing)
                    unocc_populations = gaussian_filter1d(unocc_populations, smearing)
                plt.plot(occ_populations, occ_energies, color=color, label=label, alpha=0.9, lw=dos_lw)
                plt.plot(unocc_populations, unocc_energies, color=color, label='__nolegend__', alpha=0.9, lw=dos_lw)
                plt.fill_betweenx(occ_energies, occ_populations, color=color, alpha=0.2, lw=0)

    plt.xticks(xticks[1])
    plt.yticks(yticks[1])
    if not xticks[0]:
        plt.gca().xaxis.set_ticklabels([])
    if not yticks[0]:
        plt.gca().yaxis.set_ticklabels([])
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.xlim(xlim)
    plt.ylim(ylim)
    if legend:
        plt.legend(loc='upper right')
    if show:
        plt.show()
    return plt.gca()
Example #37
def blur_image(X, sigma=1):
    X_np = X.cpu().clone().numpy()
    X_np = gaussian_filter1d(X_np, sigma, axis=2)
    X_np = gaussian_filter1d(X_np, sigma, axis=3)
    X.copy_(torch.Tensor(X_np).type_as(X))
    return X
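A usage sketch (a 4-D NCHW tensor is assumed, matching the axis=2/axis=3 blur):

    import torch
    X = torch.rand(2, 3, 32, 32)  # batch of 3-channel 32x32 images
    X = blur_image(X, sigma=1)    # blurred in place along H and W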
Example #38
def normalize_length():
    # resample the datasets
    datasets_norm = []
    for task_idx, task_data in enumerate(datasets_raw):
        print('Resampling data of task: ' + task_name_list[task_idx])
        demo_norm_temp = []
        for demo_data in task_data:
            time_stamp = demo_data['stamp']
            grid = np.linspace(0, time_stamp[-1], len_norm)
            # filter the datasets
            left_hand_filtered = gaussian_filter1d(demo_data['left_hand'].T,
                                                   sigma=sigma).T
            left_joints_filtered = gaussian_filter1d(
                demo_data['left_joints'].T, sigma=sigma).T
            # normalize the datasets
            left_hand_norm = griddata(time_stamp,
                                      left_hand_filtered,
                                      grid,
                                      method='linear')
            left_joints_norm = griddata(time_stamp,
                                        left_joints_filtered,
                                        grid,
                                        method='linear')
            # append them to list
            demo_norm_temp.append({
                'alpha': time_stamp[-1],
                'left_hand': left_hand_norm,
                'left_joints': left_joints_norm
            })
        datasets_norm.append(demo_norm_temp)

    # preprocessing for the norm data
    datasets4train = []
    for task_idx, demo_list in enumerate(data_index):
        data = [datasets_norm[task_idx][i] for i in demo_list]
        datasets4train.append(data)
    y_full = np.array([]).reshape(0, num_joints)
    for task_idx, task_data in enumerate(datasets4train):
        print('Preprocessing data for task: ' + task_name_list[task_idx])
        for demo_data in task_data:
            h = np.hstack([demo_data['left_hand'], demo_data['left_joints']])
            y_full = np.vstack([y_full, h])
    min_max_scaler = preprocessing.MinMaxScaler()
    datasets_norm_full = min_max_scaler.fit_transform(y_full)
    # construct a data structure to train the model
    datasets_norm_preproc = []
    for task_idx in range(len(datasets4train)):
        datasets_temp = []
        for demo_idx in range(num_demo):
            temp = datasets_norm_full[
                (task_idx * num_demo + demo_idx) *
                len_norm:(task_idx * num_demo + demo_idx) * len_norm +
                len_norm, :]
            datasets_temp.append({
                'left_hand':
                temp[:, 0:6],
                'left_joints':
                temp[:, 6:9],
                'alpha':
                datasets4train[task_idx][demo_idx]['alpha']
            })
        datasets_norm_preproc.append(datasets_temp)
Example #39
def filter_frames(frames):
    return np.apply_along_axis(lambda x: gaussian_filter1d(x, 1),
                               arr=frames,
                               axis=0)
Example #40
def img_hist(image, hist_filter_sigma=2):
    hist = cv2.calcHist([image], [0], None, [256], [0, 256])
    hist = np.reshape(hist, (len(hist)))
    return gaussian_filter1d(hist, hist_filter_sigma)
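A usage sketch (requires OpenCV; a single-channel uint8 image is assumed):

    import numpy as np
    img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
    hist = img_hist(img, hist_filter_sigma=2)  # smoothed 256-bin histogram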
Example #41
# (truncated in the source: `allres`, used below, was assembled here from the
# per-layer absorption results and RAT['T'])

# calculated photogenerated current (Jsc with 100% EQE)

spectr_flux = LightSource(source_type='standard', version='AM1.5g', x=wavelengths,
                           output_units='photon_flux_per_m', concentration=1).spectrum(wavelengths)[1]

Jph_Si = q * np.trapz(RAT['A_bulk'][0] * spectr_flux, wavelengths)/10 # mA/cm2
Jph_Perovskite =  q * np.trapz(results_per_layer_front[:,3] * spectr_flux, wavelengths)/10 # mA/cm2

pal = sns.cubehelix_palette(13, start=.5, rot=-.9)
pal.reverse()

from scipy.ndimage.filters import gaussian_filter1d

ysmoothed = gaussian_filter1d(allres, sigma=2, axis=0)

bulk_A_text= ysmoothed[:,4]

# plot total R, A, T
fig = plt.figure(figsize=(5,4))
ax = plt.subplot(111)
ax.stackplot(options['wavelengths']*1e9, ysmoothed.T,
              labels=['Ag', 'ITO', 'aSi-n', 'aSi-i', 'c-Si (bulk)', 'aSi-i', 'aSi-p',
                      'Perovskite','C$_{60}$','IZO',
            'MgF$_2$', 'R$_{escape}$', 'R$_0$'], colors = pal)
lgd=ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
ax.set_xlabel('Wavelength (nm)')
ax.set_ylabel('R/A/T')
ax.set_xlim(300, 1200)
ax.set_ylim(0, 1)
Example #42
def mainProg(videoFileName='3.mp4',
             jsonFileName='sendToServer.json',
             framesToProcess=1000,
             x_1=275,
             y_1=246,
             x_2=326,
             y_2=317,
             smootheningParam=150,
             isPC=False):
    if isPC:
        import matplotlib.pyplot as plt
    else:
        import serial
    if videoFileName == '0':
        cap = cv2.VideoCapture(0)
    else:
        cap = cv2.VideoCapture(videoFileName)
    if isPC:
        cv2.namedWindow('Edges')
    arr_size = framesToProcess
    arr = np.zeros(arr_size)
    cog_x_arr = np.zeros(arr_size)
    cog_y_arr = np.zeros(arr_size)
    v_x_arr = np.zeros(arr_size)
    v_y_arr = np.zeros(arr_size)
    projected = np.zeros(arr_size)
    smoothened = np.zeros(arr_size)

    # plt.ion()

    count = 0

    # y, x = 457, 220
    # k = 5
    # x1, y1, x2, y2 = x - k, y - k, x + k, y + k
    '''Put points here'''
    y1, x1 = y_1, x_1
    y2, x2 = y_2, x_2

    while (1):
        ret, frame = cap.read()
        if ret == True:

            gray_vid = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            edged_frame = cv2.Canny(frame, 100, 200)
            if isPC:
                cv2.imshow('Original', frame)
                cv2.rectangle(edged_frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
                cv2.imshow('Edges', edged_frame)

            win = edged_frame[x1:x2, y1:y2]
            h, w = win.shape

            cog_x = np.sum(win * np.arange(w)) / np.sum(win)
            cog_y = np.sum(win * np.arange(h).reshape(h, 1)) / np.sum(win)

            cog_x_arr[1:arr_size] = cog_x_arr[:arr_size - 1]
            cog_x_arr[0] = cog_x
            cog_y_arr[1:arr_size] = cog_y_arr[:arr_size - 1]
            cog_y_arr[0] = cog_y

            v_x_arr[1:arr_size] = v_x_arr[:arr_size - 1]
            v_x_arr[0] = cog_x_arr[1] - cog_x_arr[0]
            if (np.isnan(v_x_arr[0])):
                v_x_arr[0] = v_x_arr[1]

            v_y_arr[1:arr_size] = v_y_arr[:arr_size - 1]
            v_y_arr[0] = cog_y_arr[1] - cog_y_arr[0]
            if (np.isnan(v_y_arr[0])):
                v_y_arr[0] = v_y_arr[1]

            if (count == arr_size):

                v_arr = np.concatenate((v_x_arr, v_y_arr), axis=0).reshape(
                    (arr_size, 2))
                '''print("x", v_x_arr)
                print("y", v_y_arr)
                print("arr", v_arr)'''

                for x in range(10, 100):
                    pca_eig, pca_vec = pca(v_arr[:x, :])
                    projected[
                        x] = pca_vec[0] * v_x_arr[x] + pca_vec[1] * v_y_arr[x]

                for x in range(100, arr_size):
                    pca_eig, pca_vec = pca(v_arr[x - 100:x, :])
                    projected[
                        x] = pca_vec[0] * v_x_arr[x] + pca_vec[1] * v_y_arr[x]

                for x in range(1, arr_size):
                    projected[x] = projected[x - 1] * 0.8 + projected[x] * 0.2

                if smootheningParam > 0:
                    smoothened = filt.gaussian_filter1d(
                        projected, smootheningParam * np.var(projected))
                else:
                    smoothened = projected

                peaks = findPeaks(smoothened)

                if isPC:
                    plt.subplot(4, 1, 1)
                    plt.plot(projected)

                    plt.subplot(4, 1, 2)

                    for x in range(len(peaks)):
                        if (peaks[x] == 1):
                            plt.plot(x, smoothened[x], 'r.')

                    plt.plot(smoothened)

                now = datetime.datetime.now()

                dictJSON = {}

                smoothened = (smoothened - np.mean(smoothened)) / np.sqrt(
                    np.var(smoothened))
                data = []
                for x in range(len(smoothened)):
                    data.append(smoothened[x])
                dictJSON['data'] = data
                dictJSON['TimeStamp'] = now.strftime("%Y-%m-%d %H:%M")
                #                dictJSON['FPS'] = '30'

                filePathJSON = jsonFileName
                json.dump(dictJSON,
                          codecs.open(filePathJSON, 'w', encoding='utf-8'),
                          separators=(',', ':'),
                          sort_keys=True,
                          indent=4)
                jsonString = json.dumps(dictJSON,
                                        sort_keys=True,
                                        indent=4,
                                        separators=(',', ': '))
                if isPC:
                    plt.subplot(4, 1, 3)
                fs = 0.03
                f, pow_x = signal.welch(projected, fs, nperseg=1000)
                if isPC:
                    plt.plot(f[0:100], pow_x[0:100])

                reportName = now.strftime("./report/rep%Y-%m-%d %H:%M.pdf")
                if isPC:
                    plt.subplot(4, 1, 4)
                    plt.text(
                        0, 0,
                        ("REPORT : " + reportName + " \nTime for a breath: " +
                         str(findTime(peaks, 30))))

                    plt.savefig(reportName)

                    plt.show()
                    return jsonString
                else:
                    return jsonString
            count += 1
            print('frame :' + str(count) + ' of ' + str(arr_size) + ' = ' +
                  str(100 * count / arr_size) + '% complete',
                  end='\r',
                  flush=True)
            if isPC:
                k = cv2.waitKey(30) & 0xff
                if k == 27:
                    break
        else:
            break
    cap.release()
Example #43
 def replot(self):
     self.x.append(self.tickCount)
     self.y.append(self.entropy())
     yNew = gaussian_filter1d(self.y, sigma=1.8)
     self.master.chart.plot(self.x, yNew)
Example #44
    def run(self):
        # Get the config of the current effect
        effect_config = self._device.device_config["effects"]["effect_twinkle"]
        led_count = self._device.device_config["LED_Count"]

        # Rising Star array format: [[r,g,b], [start_position, end_position], percent_brightness]

        # Reset output array
        self.output = np.zeros((3, self._device.device_config["LED_Count"]))

        # Randomly add stars, depending on the speed setting
        if random.randrange(0, 100, 1) <= effect_config["star_appears_speed"]:
            # add a star only if the list is not full
            if len(self.rising_stars) < effect_config["stars_count"]:
                gradient = self._config["gradients"][effect_config["gradient"]]
                number_of_colors = len(gradient)
                selected_color_index = random.randrange(0, number_of_colors, 1)

                star_start_position = random.randrange(0, led_count, 1)
                star_end_position = star_start_position + effect_config[
                    "stars_length"]

                # Check if end position still in array
                if star_end_position > led_count - 1:
                    star_end_position = led_count - 1

                # Add the new rising star with a random color out of the gradient selection.
                self.rising_stars.append([[
                    gradient[selected_color_index][0],
                    gradient[selected_color_index][1],
                    gradient[selected_color_index][2]
                ], [star_start_position, star_end_position], 1])

        remove_stars_rising = []

        # Set the new rising stars value
        for current_star in self.rising_stars:
            current_star[
                2] = current_star[2] + effect_config["star_rising_speed"]
            # Only allow 100 percent
            if current_star[2] > 100:
                current_star[2] = 100

            if current_star[2] == 100:
                self.descending_stars.append(current_star)
                remove_stars_rising.append(current_star)
                # the star will be handled by the descending-stars loop below
            else:
                self.output[0, current_star[1][0]:current_star[1][1]] = int(
                    current_star[0][0] * (current_star[2] / 100))
                self.output[1, current_star[1][0]:current_star[1][1]] = int(
                    current_star[0][1] * (current_star[2] / 100))
                self.output[2, current_star[1][0]:current_star[1][1]] = int(
                    current_star[0][2] * (current_star[2] / 100))

        # remove the stars from the rising array
        for current_star_to_remove in remove_stars_rising:
            self.rising_stars.remove(current_star_to_remove)

        remove_stars_descending = []

        # Set the new descending stars value
        for current_star in self.descending_stars:
            current_star[
                2] = current_star[2] - effect_config["star_descending_speed"]
            # Only allow 0 percent
            if current_star[2] < 0:
                current_star[2] = 0

            if current_star[2] == 0:
                remove_stars_descending.append(current_star)

            self.output[0, current_star[1][0]:current_star[1][1]] = int(
                current_star[0][0] * (current_star[2] / 100))
            self.output[1, current_star[1][0]:current_star[1][1]] = int(
                current_star[0][1] * (current_star[2] / 100))
            self.output[2, current_star[1][0]:current_star[1][1]] = int(
                current_star[0][2] * (current_star[2] / 100))

        # remove the stars from the descending array
        for current_star_to_remove in remove_stars_descending:
            self.descending_stars.remove(current_star_to_remove)

        self.output = gaussian_filter1d(self.output,
                                        sigma=effect_config["blur"])

        # Add the output array to the queue
        self.queue_output_array_blocking(self.output)
Example #45
def convolve_F(F, wvl_hi, FWHM):
    return gaussian_filter1d(F, FWHM / np.abs(wvl_hi[1] - wvl_hi[0]) / 2.355)
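The constant 2.355 approximates the Gaussian FWHM-to-sigma ratio 2*sqrt(2*ln 2) ≈ 2.3548, so the call converts a FWHM given in `wvl_hi` units into a sigma in grid points. A quick sketch on a made-up grid:

    import numpy as np
    wvl_hi = np.linspace(500., 600., 1001)            # uniform grid, step 0.1
    F = (np.abs(wvl_hi - 550.) < 0.05).astype(float)  # near-delta feature
    F_conv = convolve_F(F, wvl_hi, FWHM=1.0)          # ~10-point FWHM result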
Example #46
def cohp(calc_dir,
         pairs_to_plot=['total'],
         colors_and_labels = {'total' : {'color' : 'black',
                                         'label' : 'total'}},
         tdos=False,                                        
         xlim=(-0.5, 0.5), ylim=(-10, 4), 
         xticks=(False, [-0.5, 0.5]), yticks=(False, [-10, 4]),
         xlabel=r'$-COHP/e^-$', ylabel=r'$E-E_F\/(eV)$',
         legend=True,
         smearing=1,
         shift=0, normalization='electron',
         show=False,
         zero_line='horizontal'):
    """
    Args:
        calc_dir (str) - path to calculation with DOSCAR
        pairs_to_plot (list) - list of 'el1_el2' to plot and/or 'total'
        colors_and_labels (dict) - {pair (str) : {'color' : color (str),
                                                  'label' : label (str)}}
        tdos (str or bool) - if not False, 'DOSCAR' or 'DOSCAR.lobster' to retrieve tDOS from
        xlim (tuple) - (xmin (float), xmax (float))
        ylim (tuple) - (ymin (float), ymax (float))
        xticks (tuple) - (bool to show label or not, (xtick0, xtick1, ...))
        yticks (tuple) - (bool to show label or not, (ytick0, ytick1, ...))
        xlabel (str) - x-axis label
        ylabel (str) - y-axis label
        legend (bool) - include legend or not
        smearing (float or False) - std. dev. for Gaussian smearing of DOS or False for no smearing
        shift (float or 'Fermi') - if 'Fermi', make Fermi level 0; else shift energies by shift
        normalization ('electron', 'atom', or False) - divide populations by number of electrons, number of atoms, or not at all
        show (bool) - if True, show figure; else just return ax
        zero_line (str) - if 'horizontal', 'vertical', 'both', or False
                   
    Returns:
        matplotlib axes object
    """
    set_rc_params()    
    if show == True:
        fig = plt.figure(figsize=(2.5,4))
        ax = plt.subplot(111)         
    if normalization == 'electron':
        normalization = VASPBasicAnalysis(calc_dir).params_from_outcar(num_params=['NELECT'], str_params=[])['NELECT']
    elif normalization == 'atom':
        normalization = VASPBasicAnalysis(calc_dir).nsites
    occupied_up_to = shift
    dos_lw = 1
    if isinstance(tdos, str):
        d = VASPDOSAnalysis(calc_dir, doscar=tdos).energies_to_populations()
        if 'lobster' not in tdos:
            shift -= VASPBasicAnalysis(calc_dir).Efermi
        d = ProcessDOS(d, shift=shift, normalization=normalization).energies_to_populations
        energies = sorted(list(d.keys()))
        populations = [d[E] for E in energies]
        occ_energies = [E for E in energies if E <= occupied_up_to]
        occ_populations = [d[E] for E in occ_energies]
        unocc_energies = [E for E in energies if E > occupied_up_to]
        unocc_populations = [d[E] for E in unocc_energies]    
        color = 'black'
        label = 'tDOS'
        if smearing:
            occ_populations = gaussian_filter1d(occ_populations, smearing)
            unocc_populations = gaussian_filter1d(unocc_populations, smearing)
        plt.plot(occ_populations, occ_energies, color=color, label=label, alpha=0.9, lw=dos_lw)
        plt.plot(unocc_populations, unocc_energies, color=color, label='__nolegend__', alpha=0.9, lw=dos_lw)
        plt.fill_betweenx(occ_energies, occ_populations, color=color, alpha=0.2, lw=0)
    for pair in pairs_to_plot:
        color = colors_and_labels[pair]['color']
        label = colors_and_labels[pair]['label']
        d = LOBSTERAnalysis(calc_dir).energies_to_populations(element_pair=pair)
        flip_sign = True
        d = ProcessDOS(d, shift=shift, 
                       flip_sign=flip_sign,
                       normalization=normalization).energies_to_populations
        energies = sorted(list(d.keys()))
        populations = [d[E] for E in energies]
        occ_energies = [E for E in energies if E <= occupied_up_to]
        occ_populations = [d[E] for E in occ_energies]
        unocc_energies = [E for E in energies if E > occupied_up_to]
        unocc_populations = [d[E] for E in unocc_energies]
        if smearing:
            occ_populations = gaussian_filter1d(occ_populations, smearing)
            unocc_populations = gaussian_filter1d(unocc_populations, smearing)
        plt.plot(occ_populations, occ_energies, color=color, label=label, alpha=0.9, lw=dos_lw)
        plt.plot(unocc_populations, unocc_energies, color=color, label='__nolegend__', alpha=0.9, lw=dos_lw)
        plt.fill_betweenx(occ_energies, occ_populations, color=color, alpha=0.2, lw=0)
    plt.xticks(xticks[1])
    plt.yticks(yticks[1])
    if not xticks[0]:
        plt.gca().xaxis.set_ticklabels([])
    if not yticks[0]:
        plt.gca().yaxis.set_ticklabels([])
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.xlim(xlim)
    plt.ylim(ylim)
    if zero_line in ['horizontal', 'both']:
        plt.plot(xlim, [0, 0], lw=1, ls='--', color='black')
    if zero_line in ['vertical', 'both']:
        plt.plot([0, 0], ylim, lw=1, ls='--', color='black')
    if legend:
        plt.legend(loc='upper right')
    ax = plt.gca()
    if show:
        plt.show()
    return ax
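# A hypothetical invocation of cohp, for illustration only: the directory path
# and the 'Fe_O' pair name are assumptions, and the call needs a directory
# holding the DOSCAR.lobster, COHPCAR.lobster, and OUTCAR output files.
ax = cohp('path/to/lobster_calc',
          pairs_to_plot=['total', 'Fe_O'],
          colors_and_labels={'total': {'color': 'black', 'label': 'total'},
                             'Fe_O': {'color': 'firebrick', 'label': 'Fe-O'}},
          smearing=1,
          normalization='electron',
          show=True)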
Example #47
def gaussian_smooth(signal, sigma):
    return gaussian_filter1d(signal, sigma)
Example #48
def initialize_flowlines(gdir, div_id=None):
    """ Transforms the geometrical Centerlines in the more "physical"
    "Inversion Flowlines".

    This interpolates the centerlines on a regular spacing (i.e. not on the
    grid's (i, j) indices). Cuts out the tail of the tributaries to make more
    realistic junctions. Also checks for low and negative slopes and corrects
    them by interpolation.

    Parameters
    ----------
    gdir : oggm.GlacierDirectory
    """

    # variables
    if div_id == 0 and not gdir.has_file('centerlines', div_id=div_id):
        # downstream lines haven't been computed
        return

    cls = gdir.read_pickle('centerlines', div_id=div_id)

    poly = gdir.read_pickle('geometries', div_id=div_id)
    poly = poly['polygon_pix'].buffer(0.5)  # a small buffer around to be sure

    # Initialise the flowlines
    dx = cfg.PARAMS['flowline_dx']
    do_filter = cfg.PARAMS['filter_min_slope']
    lid = int(cfg.PARAMS['flowline_junction_pix'])
    fls = []

    # Topo for heights
    fpath = gdir.get_filepath('gridded_data', div_id=div_id)
    with netCDF4.Dataset(fpath) as nc:
        topo = nc.variables['topo_smoothed'][:]

    # Bilinear interpolation
    # Geometries coordinates are in "pixel centered" convention, i.e
    # (0, 0) is also located in the center of the pixel
    xy = (np.arange(0, gdir.grid.ny - 0.1, 1),
          np.arange(0, gdir.grid.nx - 0.1, 1))
    interpolator = RegularGridInterpolator(xy, topo)

    # Smooth window
    sw = cfg.PARAMS['flowline_height_smooth']

    for ic, cl in enumerate(cls):
        points = line_interpol(cl.line, dx)

        # For tributaries, remove the tail
        if ic < (len(cls) - 1):
            points = points[0:-lid]

        new_line = shpg.LineString(points)

        # Interpolate heights
        xx, yy = new_line.xy
        hgts = interpolator((yy, xx))
        assert len(hgts) >= 5

        # Check where the glacier is and where not
        isglacier = [poly.contains(shpg.Point(x, y)) for x, y in zip(xx, yy)]
        if div_id != 0:
            assert np.all(isglacier)

        # If smoothing, this is the moment
        hgts = gaussian_filter1d(hgts, sw)

        # Check for min slope issues and correct if needed
        if do_filter:
            # Correct only where glacier
            nhgts = _filter_small_slopes(hgts[isglacier], dx * gdir.grid.dx)
            isfin = np.isfinite(nhgts)
            assert np.any(isfin)
            perc_bad = np.sum(~isfin) / len(isfin)
            if perc_bad > 0.8:
                log.warning('{}: more than {:.0%} of the flowline is cropped '
                            'due to negative slopes.'.format(
                                gdir.rgi_id, perc_bad))

            hgts[isglacier] = nhgts
            sp = np.min(np.where(np.isfinite(nhgts))[0])
            while len(hgts[sp:]) < 5:
                sp -= 1
            hgts = utils.interp_nans(hgts[sp:])
            isglacier = isglacier[sp:]
            assert np.all(np.isfinite(hgts))
            assert len(hgts) >= 5
            new_line = shpg.LineString(points[sp:])

        fl = Centerline(new_line, dx=dx, surface_h=hgts, is_glacier=isglacier)
        fl.order = cl.order
        fls.append(fl)

    # All objects are initialized, now we can link them.
    for cl, fl in zip(cls, fls):
        if cl.flows_to is None:
            continue
        fl.set_flows_to(fls[cls.index(cl.flows_to)])

    # Write the data
    gdir.write_pickle(fls, 'inversion_flowlines', div_id=div_id)
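# The helper line_interpol used above is defined elsewhere in OGGM; the
# following is only a minimal sketch of such a routine (the name is suffixed to
# make the assumption explicit), placing a point every dx along a shapely line.
import numpy as np
import shapely.geometry as shpg

def line_interpol_sketch(line, dx):
    """Return shapely Points spaced every dx along a LineString."""
    distances = np.arange(0, line.length + dx / 2, dx)
    return [line.interpolate(d) for d in distances]

# toy usage: 6 points along a 10-unit line at dx = 2
pts = line_interpol_sketch(shpg.LineString([(0, 0), (10, 0)]), 2.0)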
Example #49
def extrema(input_array,
            extrema_type='min',
            output_mask=None,
            smooth_sigma=None,
            search_nt=None):
    """ Detects and returns extrema positions in genome-shaped arrays.

        Searches input_array (assumed to be genome-shaped) for local maxima or minima (see
        extrema_type) and returns an array of positions as [[strand, nt_pos], ...] as well
        as the values at those positions.
        
        Parameters:
        ----------
        input_array : numpy array of shape (2, len(genome))
            Local extrema are found and recorded from this array.

        extrema_type : 'min' (default) or 'max'
            Type of extrema to search for.
        
        output_mask : None (default) or boolean array of same shape as input_array
            If None, extrema at all positions are considered and returned. Otherwise, the
            user-defined mask is used and only positions set to True are returned.

        smooth_sigma : None (default) or float
            If None, no smoothing is conducted. If a float, this value is passed to
            scipy.ndimage.filters.gaussian_filter1d as the sigma value for smoothing.

        search_nt : None (default) or int
            Distance to search (in both directions) for true extrema. Useful if smoothing
            arguments have been passed, as the search is conducted on the original (not
            smoothed) array.

        Returns:
        ----------
        positions : positions of extrema, numpy array of shape (n extrema, 2)
            Found extrema positions are returned with first column denoting strand and second column
            denoting genomic position.
        
        values : values of extrema on input_array, numpy array of shape (n extrema,)
            Potentially useful for downstream sorting of events by value.
    """
    if smooth_sigma is None:  # if without smoothing, use input array as search array
        search_array = input_array.copy()
    else:  # otherwise pass smooth_sigma to gaussian_filter1d as the sigma
        search_array = gaussian_filter1d(input_array, smooth_sigma)
    # search for extrema
    if extrema_type == 'min':
        extrema_pos = np.asarray(argrelmin(search_array, axis=1))
    elif extrema_type == 'max':
        extrema_pos = np.asarray(argrelmax(search_array, axis=1))
    else:
        raise ValueError('Unhandled extrema type.')
    # further refine placement of extrema on original, unsmoothed array using regionfunc if search_nt is not None
    if search_nt is not None:
        if extrema_type == 'min':
            # check surrounding regions (+/- search_nt) for any lower/higher extrema positions
            relative_extrema = ga.regionfunc(np.argmin,
                                             extrema_pos.T,
                                             input_array,
                                             addl_nt=(search_nt, search_nt),
                                             wrt='genome')
        elif extrema_type == 'max':
            relative_extrema = ga.regionfunc(np.argmax,
                                             extrema_pos.T,
                                             input_array,
                                             addl_nt=(search_nt, search_nt),
                                             wrt='genome')
        else:
            raise ValueError('Unhandled extrema type.')
        extrema_pos = np.asarray([
            extrema_pos.T[:, 0],
            extrema_pos.T[:, 1] + relative_extrema - search_nt
        ])  # convert relative extrema to actual genomic position
    # get values for position on original input_array, prepare for final output
    positions = extrema_pos.T
    values = input_array[tuple(extrema_pos)]
    # remove any extrema which are in False positions on output_mask
    if output_mask is not None:
        positions = positions[output_mask[tuple(extrema_pos)]]
        values = values[output_mask[tuple(extrema_pos)]]
    return positions, values
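# Minimal, self-contained illustration of the smooth-then-search core of
# extrema() on a toy "genome-shaped" array; the output_mask and search_nt
# refinements are omitted because they rely on the external ga.regionfunc.
import numpy as np
from scipy.ndimage import gaussian_filter1d
from scipy.signal import argrelmin

toy = np.random.default_rng(0).normal(size=(2, 1000))  # two strands, 1000 nt
smoothed = gaussian_filter1d(toy, 5.0)        # suppress single-nt fluctuations
minima = np.asarray(argrelmin(smoothed, axis=1))
positions = minima.T                          # (n extrema, 2): strand, nt_pos
values = toy[tuple(minima)]                   # values on the raw input array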
Example #50
def load_mt(path):
    # load the pickled dict once and pull both arrays from it
    data = np.load(path, allow_pickle=True)[()]
    x, y = data['x'], data['y']

    y = gaussian_filter1d(y, sigma=3)
    return trim(x, y, 2000)
Example #51
plt.savefig('figs/long2_stacked_2020.png')

#%%
long2 = long.groupby(['time','data'])['points'].mean().unstack()
long2.sort_values(long2.columns.max()).plot(kind='barh',stacked=False,figsize=(15,20), colormap='autumn')
plt.savefig('figs/long2_2020.png')

#%%
long3 = long[long['data'] == max(long['data'])]
times = long3['time'].unique()
from scipy.ndimage.filters import gaussian_filter1d

fig= plt.figure(figsize=(15,10))

for t in times:
    sublong = long3[long3['time'] == t]
    ysmoothed = gaussian_filter1d(sublong.chance, sigma=3)
    plt.plot(sublong.pos,ysmoothed,label=t)
plt.legend(loc=1,ncol=5,fontsize='medium')
plt.xticks(np.arange(1, 21, 1))
plt.grid()
plt.xlim(1,20)
plt.ylim(0)
plt.ylabel('Chances (%)')
plt.xlabel('Position')
plt.title('Chance of each team finishing in the x-th position at the end of the Brasileirão')
plt.savefig('figs/long3_2020.png')


# %%
Example #52
manager = BAPHYExperiment(batch=batch, cellid=site)
rec = manager.get_recording(**{
    'rasterfs': rasterfs,
    'pupil': True,
    'resp': True
})
rec['resp'] = rec['resp'].rasterize()
# extract epochs
soundfile = '/auto/users/hellerc/code/baphy/Config/lbhb/SoundObjects/@NaturalSounds/sounds_set4/00cat668_rec7_ferret_oxford_male_chopped_excerpt1.wav'
epoch = 'STIM_00Oxford_male2b.wav'
r = rec['resp'].extract_epoch(epoch)
p = rec['pupil'].extract_epoch(epoch)

fs = rasterfs
psth = sf.gaussian_filter1d(r.mean(axis=(0, 1)), sigma) * fs
psth1 = sf.gaussian_filter1d(r[rep1, :, :].mean(axis=0), sigma) * fs
psth2 = sf.gaussian_filter1d(r[rep2, :, :].mean(axis=0), sigma) * fs
spk_times1 = np.where(r[rep1, :, :])
spk_times2 = np.where(r[rep2, :, :])
mean_pupil1 = p[rep1].mean(axis=-1).squeeze()
mean_pupil2 = p[rep2].mean(axis=-1).squeeze()

# psths
time = np.linspace(-2, (psth.shape[0] / rasterfs) - 2, psth.shape[0])
p1ax.plot(time, psth, color='grey', lw=1)
p1ax.plot(time, psth1, color='firebrick', lw=1)

p2ax.plot(time, psth, color='grey', lw=1)
p2ax.plot(time, psth2, color='navy', lw=1)
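# Note: sigma above is expressed in raster bins. A small helper (an assumed
# convention, not part of the original script) to derive it from a smoothing
# width given in milliseconds:
def sigma_in_bins(sigma_ms, rasterfs):
    # rasterfs bins per second -> rasterfs / 1000 bins per millisecond
    return sigma_ms * rasterfs / 1000.0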
Example #53
def estimate_gaussian_peak(self, x_axis, data, params):
    """ Provides a gaussian offset peak estimator.

    @param numpy.array x_axis: 1D axis values
    @param numpy.array data: 1D data, should have the same dimension as x_axis.
    @param lmfit.Parameters params: object includes parameter dictionary which
                                    can be set

    @return tuple (error, params):

        Explanation of the return parameter:
            int error: error code (0:OK, -1:error)
            Parameters object params: set parameters of initial values
    """

    error = self._check_1D_input(x_axis=x_axis, data=data, params=params)

    # If the estimator is not good enough one can start improvement with
    # a convolution

    # auxiliary variables
    stepsize = abs(x_axis[1] - x_axis[0])
    n_steps = len(x_axis)

    # Smooth the provided data, so that noise fluctuations will not disturb the
    # parameter estimation. This value performs the best in many scenarios:
    std_dev = 2
    data_smoothed = filters.gaussian_filter1d(data, std_dev)

    # Define constraints:
    # maximal and minimal the length of the given array to the right and to the
    # left:
    center_min = (x_axis[0]) - n_steps * stepsize
    center_max = (x_axis[-1]) + n_steps * stepsize
    ampl_min = 0
    sigma_min = stepsize
    sigma_max = 3 * (x_axis[-1] - x_axis[0])

    # set parameters:
    offset = data_smoothed.min()
    params['offset'].set(value=offset)

    # it is more reliable to select the maximal value rather than
    # calculating the first moment of the gaussian distribution (which is the
    # mean value), since the latter is unreliable if the distribution begins or
    # ends at the edges of the data (but it helps a lot for standard deviation):
    mean_val_calc = np.sum(x_axis * data_smoothed) / np.sum(data_smoothed)
    params['center'].set(value=x_axis[np.argmax(data_smoothed)],
                         min=center_min,
                         max=center_max)

    # calculate the second moment of the gaussian distribution:
    #   int (x^2 * f(x) dx) :
    mom2 = np.sum((x_axis)**2 * data_smoothed) / np.sum(data_smoothed)

    # and use the standard formula to obtain the standard deviation:
    #   sigma^2 = int( (x - mean)^2 f(x) dx ) = int (x^2 * f(x) dx) - mean^2

    # If the mean is situated at the edges of the distribution then this
    # procedure performs better than setting the initial value for sigma to
    # 1/3 of the length of the distribution, since the calculated value for the
    # mean is then higher, which will eventually decrease the initial value for
    # sigma. But if the peak value is within the distribution, the standard
    # deviation formula performs even better:
    params['sigma'].set(value=np.sqrt(abs(mom2 - mean_val_calc**2)),
                        min=sigma_min,
                        max=sigma_max)
    # params['sigma'].set(value=(x_axis.max() - x_axis.min()) / 3.)

    # Do not set the maximal amplitude value based on the distribution, since
    # the fit will fail if the peak is at the edges or beyond the range of the
    # x values.
    params['amplitude'].set(value=data_smoothed.max() - data_smoothed.min(),
                            min=ampl_min)

    return error, params
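# Self-contained check of the moment-based initial guesses above on synthetic
# data (values illustrative). Unlike the estimator, the offset is subtracted
# before taking moments, which makes the formulas exact for an offset peak.
import numpy as np
from scipy.ndimage import gaussian_filter1d

x = np.linspace(0, 10, 500)
data = 2.0 * np.exp(-(x - 4.0)**2 / (2 * 0.8**2)) + 0.5

smoothed = gaussian_filter1d(data, 2)            # as in the estimator above
weights = smoothed - smoothed.min()              # remove offset before moments
mean = np.sum(x * weights) / np.sum(weights)     # first moment  -> ~4.0
mom2 = np.sum(x**2 * weights) / np.sum(weights)  # second moment
sigma = np.sqrt(abs(mom2 - mean**2))             # -> ~0.8 for this peak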
Example #54
    pixels[2, 10:19] = oblue   # blue channel, pixels 10-18
    # YELLOW
    pixels[0, 20:29] = yred    # red channel, pixels 20-28
    pixels[1, 20:29] = ygreen  # green channel, pixels 20-28
    pixels[2, 20:29] = yblue   # blue channel, pixels 20-28
    # GREEN
    pixels[0, 30:39] = gred    # red channel, pixels 30-38
    pixels[1, 30:39] = ggreen  # green channel, pixels 30-38
    pixels[2, 30:39] = gblue   # blue channel, pixels 30-38
    # BLUE
    pixels[0, 40:49] = bred    # red channel, pixels 40-48
    pixels[1, 40:49] = bgreen  # green channel, pixels 40-48
    pixels[2, 40:49] = bblue   # blue channel, pixels 40-48
    # PURPLE
    pixels[0, 50:55] = pred    # red channel, pixels 50-54
    pixels[1, 50:55] = pgreen  # green channel, pixels 50-54
    pixels[2, 50:55] = pblue   # blue channel, pixels 50-54
    # BLEND RED
    pixels[0, 56:60] = rred    # red channel, pixels 56-59
    pixels[1, 56:60] = rgreen  # green channel, pixels 56-59
    pixels[2, 56:60] = rblue   # blue channel, pixels 56-59
    print('Starting Dynamic Rainbow LED pattern')
    # Apply substantial blur to smooth the edges
    pixels[0, :] = gaussian_filter1d(pixels[0, :], sigma=4.0)
    pixels[1, :] = gaussian_filter1d(pixels[1, :], sigma=4.0)
    pixels[2, :] = gaussian_filter1d(pixels[2, :], sigma=4.0)
    while True:
        pixels = np.roll(pixels, 1, axis=1)
        update()
        time.sleep(0.05)
Example #55
def CountTheOnes(inputArray):
    """Count runs of at least six consecutive 1s in an array of slope signs."""
    count_of_ones = 0
    consecutive_ones = 0

    for value in inputArray:
        if value == 1:
            consecutive_ones += 1
        else:
            # any value other than 1 terminates the current run
            if consecutive_ones >= 6:
                count_of_ones += 1
            consecutive_ones = 0
    # also count a qualifying run that reaches the end of the array
    if consecutive_ones >= 6:
        count_of_ones += 1
    return count_of_ones

arr_1 = [255,253,255,262,253,252,252,250,262,258,254,258,252,256,258,253,252,252,254,256,251,253,257,256,230,303,327,424,512,416,185,103,136,303,569,746,471,247,231,570,592,637,397,182,115,209,485,680,639,316,270,217,247,457,484,638,447,218,119,177,404,617,706,346,277,238,217,447,525,691,415,182,127,177,394,615,688,326,258,267,337,470,533,612,338,197,124,191,498,629,612,328,233,227,236,501,534,594,386,173,126,173,329,614,579,386,265,213,196,311,571,543,596,258,151,122,156,397,632,613,368,229,210,247,460,478,580,487,241,150,143,286,541,742,556,420,345,295,544,633,651,480,224,126,102,236,668,600,493,340,238,222,182,474,550,639,426,204,126,149,355,581,666,390,251,260,213,373,498,588,486,235,139,152,343,575,504,396,291,243,256,323,588,591,588,275,149,119,169,477,631,587,412,251,260,258,453,562,607,476,235,127,122,328,570,686,485,287,266,263,413,528,643,470,198,156,197,385,518,602,512,331,299,438,583,699,502,242,210,192,260,541,735,592,371,312,369,507,646,553,307,153,134,246,531,666,476,282,296,293,418,535,618,422,194,138,175,403,590,635,370,258,257,206,382,564,636,465,198,145,152,364,556,678,404,274,256,306,539,583,606,372,181,139,199,524,605,662,370,273,294,319,570,596,575,287,153,137,214,520,645,633,338,289,261,351,490,595,567,251,192,163,285,563,720,351,273,359,573,401,326,188,183,146,326,221,300,247,299,256,514,249,255,263,258,248,251,256,256,256,247,255,257,249,249,252,257,256,257,251,254,259,256,256,255,252,253,259,253,254,256,254,250,256,254,257,258,249,254,253,261,252,250,251,252,258,255,250,253,255,256,257,255,255,255,254,256,253,252,259,255,254,257,253,256,255,252,253,257,256,254,252,256,255,254,257,259,252,252,255,254,257,257,254,251,253,258,257,254,253,257,252,253,255,257,252,253,255,253,255,253,256,253,256,255,251,254,255,253,254,257,252,258,254,256,254,255,253,258,255,253,255,256,256,254,258,257,254,256,257,257,255,255,253,255,256,255,256,253,255,257,253,258,256,255,254,257,254,254,256,256,254,253,253,258,257,254,255]
#GetData = getData('48_steps.csv')
GetData = get_Data()
GetStartAndStop, standerDV, meanSD = StartingandStopingPointFinder(GetData)
GetSlop = getSlop(gaussian_filter1d(GetStartAndStop, sigma=3))
GetCount = CountTheOnes(GetSlop)
print('steps count is:', GetCount)

#plt.plot(GetData)
#plt.plot(x_1)
#plt.plot(GetSlop)
#plt.show()
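# getSlop is defined elsewhere; below is a hypothetical sketch of a slope-sign
# helper consistent with how CountTheOnes consumes its output (+1 while the
# smoothed signal rises, -1 otherwise; the name and threshold are assumptions):
import numpy as np

def get_slope_signs(signal, threshold=0.0):
    diffs = np.diff(signal)
    return np.where(diffs > threshold, 1, -1)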


Example #56
ndet, nsamples = np.shape(dd)
#### Selection of detectors for which the signal is obvious
good_dets = [37, 45, 49, 70, 77, 90, 106, 109, 110]
best_det = 37
theTES = best_det
###### TOD Example #####
TimeSigPlot(time, dd, theTES)

###### TOD Power Spectrum #####
frange = [0.3, 15]  #range of plot frequencies desired
filt = 5
spectrum, freq = mlab.psd(dd[theTES, :],
                          Fs=FREQ_SAMPLING,
                          NFFT=nsamples,
                          window=mlab.window_hanning)
filtered_spec = f.gaussian_filter1d(spectrum, filt)

FreqResp(freq, frange, filtered_spec, theTES, fff)

###NEW PLOT WITH FILTERED OVERPLOT
#### Filtering out the signal from the PT
freqs_pt = [1.72383, 3.24323, 3.44727, 5.69583, 6.7533, 9.64412, 12.9874]
bw_0 = 0.005

notch = ft.notch_array(freqs_pt, bw_0)

FiltFreqResp(theTES, frange, fff, filt, dd, notch, FREQ_SAMPLING, nsamples,
             freq, spectrum, filtered_spec)

############################################################################
### Fold the data at the modulation period of the fibers
Example #57
def plot_overlay_psth(rec_dir,
                      unit,
                      din_map,
                      plot_window=[-1500, 2500],
                      bin_size=250,
                      bin_step=25,
                      dig_ins=None,
                      smoothing_width=3,
                      save_file=None):
    '''
    Plots overlayed PSTHs for all tastants or a specified subset

    Parameters
    ----------
    rec_dir: str
    unit: int
    din_map: pandas.DataFrame
        digital input mapping with 'channel', 'name' and 'spike_array' columns
    plot_window: list of int, time window for plotting in ms
    bin_size: int, window size for binning spikes in ms
    bin_step: int, step size for binning spikes in ms
    dig_ins: list of int (optional)
        which digital inputs to plot PSTHs for, None (default) plots all
    smoothing_width: int (optional), sigma (in bins) for Gaussian smoothing of the mean firing rate
    save_file: str (optional), full path to save file, if None, saves in Overlay_PSTHs subfolder
    '''
    if isinstance(unit, str):
        unit = dio.h5io.parse_unit_number(unit)

    if dig_ins is None:
        dig_ins = din_map.query('spike_array==True').channel.values

    if save_file is None:
        save_dir = os.path.join(rec_dir, 'Overlay_PSTHs')
        save_file = os.path.join(save_dir, 'Overlay_PSTH_unit%03d' % unit)
        if not os.path.isdir(save_dir):
            os.mkdir(save_dir)

    fig, ax = plt.subplots(figsize=(20, 15))
    for din in dig_ins:
        name = din_map.query('channel==@din').name.values[0]
        time, spike_train = dio.h5io.get_spike_data(rec_dir, unit, din)
        psth_time, fr = sas.get_binned_firing_rate(time, spike_train, bin_size,
                                                   bin_step)

        mean_fr = np.mean(fr, axis=0)
        sem_fr = sem(fr, axis=0)

        t_idx = np.where((psth_time >= plot_window[0])
                         & (psth_time <= plot_window[1]))[0]
        psth_time = psth_time[t_idx]
        mean_fr = mean_fr[t_idx]
        sem_fr = sem_fr[t_idx]
        mean_fr = gaussian_filter1d(mean_fr, smoothing_width)

        ax.fill_between(psth_time,
                        mean_fr - sem_fr,
                        mean_fr + sem_fr,
                        alpha=0.3)
        ax.plot(psth_time, mean_fr, linewidth=3, label=name)

    ax.set_title('Peri-stimulus Firing Rate Plot\nUnit %i' % unit, fontsize=34)
    ax.set_xlabel('Time (ms)', fontsize=28)
    ax.set_ylabel('Firing Rate (Hz)', fontsize=28)
    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)
    ax.autoscale(enable=True, axis='x', tight=True)
    ax.legend(loc='best')
    ax.axvline(0, color='red', linestyle='--')
    fig.savefig(save_file)
    plt.close('all')
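# A hypothetical invocation, assuming din_map is a pandas DataFrame exposing
# 'channel', 'name' and 'spike_array' columns (as the .query() calls above
# imply); the path, unit number and channel values are illustrative only.
import pandas as pd

din_map = pd.DataFrame({'channel': [0, 1],
                        'name': ['NaCl', 'Sucrose'],
                        'spike_array': [True, True]})
plot_overlay_psth('/path/to/rec_dir', unit=3, din_map=din_map)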
Example #58
def feat_cutoff(feat, spks_per_bin=20, sigma=5, min_num_bins=50):
    '''
    Computes the approximate fraction of spikes missing from a spike feature distribution for a
    given unit, assuming the distribution is symmetric.

    Inspired by metric described in Hill et al. (2011) J Neurosci 31: 8699-8705.

    Parameters
    ----------
    feat : ndarray
        The spikes' feature values.
    spks_per_bin : int (optional)
        The number of spikes per bin from which to compute the spike feature histogram.
    sigma : int (optional)
        The standard deviation for the gaussian kernel used to compute the pdf from the spike
        feature histogram.
    min_num_bins : int (optional)
        The minimum number of bins used to compute the spike feature histogram.

    Returns
    -------
    fraction_missing : float
        The fraction of missing spikes (0-0.5). *Note: If more than 50% of spikes are missing, an
        accurate estimate isn't possible.
    pdf : ndarray
        The computed pdf of the spike feature histogram.
    cutoff_idx : int
        The index for `pdf` at which point `pdf` is no longer symmetrical around the peak. (This
        is returned for plotting purposes).

    See Also
    --------
    plot.feat_cutoff

    Examples
    --------
    1) Determine the fraction of spikes missing from unit 1 based on the recorded unit's spike
    amplitudes, assuming the distribution of the unit's spike amplitudes is symmetric.
        # Get unit 1 amplitudes from a unit bunch, and compute fraction spikes missing.
        >>> feat = units_b['amps']['1']
        >>> fraction_missing, pdf, cutoff_idx = feat_cutoff(feat)
    '''

    # Ensure minimum number of spikes requirement is met.
    error_str = 'The number of spikes in this unit is {0}, ' \
                'but it must be at least {1}'.format(len(feat), spks_per_bin * min_num_bins)
    assert (len(feat) > (spks_per_bin * min_num_bins)), error_str

    # compute the spike feature histogram and pdf:
    num_bins = int(len(feat) / spks_per_bin)
    hist, bins = np.histogram(feat, num_bins, density=True)
    pdf = filters.gaussian_filter1d(hist, sigma)

    # Find where the distribution stops being symmetric around the peak:
    peak_idx = np.argmax(pdf)
    max_idx_sym_around_peak = np.argmin(np.abs(pdf[peak_idx:] - pdf[0]))
    cutoff_idx = peak_idx + max_idx_sym_around_peak

    # compute fraction missing from the tail of the pdf (the area where pdf stops being
    # symmetric around peak).
    fraction_missing = np.sum(pdf[cutoff_idx:]) / np.sum(pdf)
    fraction_missing = 0.5 if (fraction_missing > 0.5) else fraction_missing

    return fraction_missing, pdf, cutoff_idx
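# Self-contained sanity check on synthetic amplitudes: clipping the lower tail
# of a Gaussian should produce a clearly nonzero estimate (the threshold value
# is illustrative only).
import numpy as np

rng = np.random.default_rng(0)
amps = rng.normal(50, 10, 5000)
amps = amps[amps > 45]   # simulate a detection threshold removing small spikes
frac, pdf, cutoff_idx = feat_cutoff(amps, spks_per_bin=20, sigma=5,
                                    min_num_bins=50)
# frac grows toward the 0.5 ceiling as the threshold cuts deeper into the peak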
Example #59
                    for sn in range(len(o)):
                        nP -= putDown(sn, nP)
                        on_ = pickUp(lastPickUp[sn], int(t / 60), sn)

                        if (nP + on_) < vehicleCapacity:
                            nP += on_
                            totalPassengers += on_
                        else:
                            totalPassengers += (vehicleCapacity - nP)
                            nP = vehicleCapacity

                        lastPickUp[sn] = int(t / 60)
                        t += time2Next[sn]
                    t0 += frequency * 60
                y.append(totalPassengers)
            y = fil.gaussian_filter1d(y, 10)
            if tn == 0:
                plt.plot(range(1, 30),
                         y,
                         c=clrs[vehicleCapacity],
                         label=str(vehicleCapacity) +
                         ' people capacity\n100% take up')
            elif tn == 1:
                plt.plot(range(1, 30),
                         y,
                         ls='--',
                         c=clrs[vehicleCapacity],
                         label=str(vehicleCapacity) +
                         ' people capacity\n50% take up')
            else:
                plt.plot(range(1, 30),
Example #60
                               index_col=0)
data_to_plot_la5 = pd.read_csv('resultfile_lookahead_vijf',
                               sep=',',
                               index_col=0)
data_to_plot_la6 = pd.read_csv('resultfile_lookahead_zes',
                               sep=',',
                               index_col=0)

averages = [
    data_to_plot_la2.Stability.mean(),
    data_to_plot_la3.Stability.mean(),
    data_to_plot_la4.Stability.mean(),
    data_to_plot_la5.Stability.mean(),
    data_to_plot_la6.Stability.mean()
]
ysmoothed = gaussian_filter1d(averages, sigma=1)

lookaheads = [2, 3, 4, 5, 6]

# Set interval on x-axis
#plt.xticks(np.arange(min(data["Stability"]), max(data["Stability"])+1, 1.0))

# Name labels
plt.xlabel('Number of steps')
plt.ylabel('Stability')
#plt.xticks(np.arange(0,6, 1.0))
plt.xticks([2, 3, 4, 5, 6], ['2', '3', '4', '5', '6'])

# Name title
plt.title('Lookahead algorithm')
plt.plot([2, 3, 4, 5, 6], ysmoothed)