Example #1
def slice_jackknife(z, zmin=0.02, zmax=0.5, cube_cmpc_depth=150, dz=0.001):
	z1 = z[(z >= zmin) & (z <= zmax)]
	pdf = np.histogram(z1, bins=np.linspace(zmin, zmax, int((zmax - zmin)/dz)))
	fp = pdf[0] / float(pdf[0].sum())
	cdf = np.cumsum(fp)
	zm = pdf[1][:-1]

	nbin = 10
	fracs = np.linspace(0., 1., nbin + 1)
	ze = np.interp(fracs, cdf, zm)
	min_depth = np.diff(cmpc(ze) * 0.7).min()
	while np.float32(min_depth) <= cube_cmpc_depth:
		nbin -= 1
		fracs = np.linspace(0., 1., nbin + 1)
		ze1 = np.interp(fracs, cdf, zm)
		min_depth = np.diff(cmpc(ze1) * 0.7).min()
		ze = ze1
		#print('nbin = %i'%nbin)
		#print(cmpc(ze) * 0.7)
		#print(np.diff(cmpc(ze) * 0.7))
		#print('\n')
	new_z_edge = np.concatenate(([zmin], ze[1:-1], [zmax]))
	print('nbin = %i'%(len(new_z_edge) - 1))
	print('z-edges:')
	print(new_z_edge)
	print('cMpc diffs:')
	print(np.diff(cmpc(new_z_edge) * 0.7))
	return new_z_edge
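A minimal sketch of the equal-count edge construction used above, on toy data with plain numpy (the cmpc() comoving-depth check is omitted here):

import numpy as np

rng = np.random.default_rng(0)
z = rng.triangular(0.02, 0.3, 0.5, size=10000)          # toy, non-uniform redshift sample
counts, edges = np.histogram(z, bins=np.linspace(0.02, 0.5, 481))
cdf = np.cumsum(counts / counts.sum())
# Invert the CDF at equally spaced quantiles to get slice edges with equal galaxy counts
z_edges = np.interp(np.linspace(0.0, 1.0, 11), cdf, edges[:-1])
print(z_edges)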
Example #2
def log_diff_exp(x, axis=0):
    """ Calculates the logarithm of the diffs of e to the power of input 'x'. The method tries to avoid
        overflows by using the relationship: log(diff(exp(x))) = alpha + log(diff(exp(x-alpha))).
        
    :Parameter:
        x:    data.
             -type: float or numpy array 
          
        axis: Sums along the given axis.
             -type: int
        
    :Return:
        Logarithm of the sum of exp of x. 
       -type: float or numpy array.
        
    """
    alpha = x.max(axis) - numx.log(numx.finfo(numx.float64).max)/2.0
    if axis == 1:
        return numx.squeeze(alpha + numx.log(
                                             numx.diff(
                                                       numx.exp(x.T - alpha)
                                                       , n=1, axis=0)))
    else:
        return numx.squeeze(alpha + numx.log(
                                             numx.diff(
                                                       numx.exp(x - alpha)
                                                       , n=1, axis=0)))
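As a quick illustration (assuming numx is simply numpy), the shifted form agrees with the naive one on a strictly increasing input, while also surviving much larger values of x:

import numpy as np

x = np.array([0.1, 1.5, 3.0, 7.2])
alpha = x.max() - np.log(np.finfo(np.float64).max) / 2.0
stable = alpha + np.log(np.diff(np.exp(x - alpha)))
naive = np.log(np.diff(np.exp(x)))
print(np.allclose(stable, naive))   # True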
Example #3
def summarize_features_mfcc(mfccs, v=False):
    """
    Given mfcc matrix, return summary for a window
    :param mfccs: NxM matrix
        mfcc matrix
    :param i_start: int
        index for beginning of window
    :param i_end: int
        index for end of window
    :return: 1xL array
        feature vector
    """

    # Summarize features
    features = np.max(mfccs, axis=1)
    features = np.append(features, np.mean(mfccs, axis=1))
    features = np.append(features, np.std(mfccs, axis=1))
    d_mfccs = np.diff(mfccs, axis=1)
    features = np.append(features, np.mean(d_mfccs, axis=1))
    features = np.append(features, np.std(d_mfccs, axis=1))
    d_d_mfccs = np.diff(d_mfccs, axis=1)
    features = np.append(features, np.mean(d_d_mfccs, axis=1))
    features = np.append(features, np.std(d_d_mfccs, axis=1))

    # print np.shape(d_d_mfccs)
    # print np.shape(features)
    return np.reshape(features, (1, len(features)))
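A possible usage sketch (assuming numpy is imported as np): each of the seven statistics contributes N values, so a 13xM MFCC matrix yields a 1x91 vector.

import numpy as np

mfccs = np.random.randn(13, 100)          # 13 coefficients x 100 frames
vec = summarize_features_mfcc(mfccs)
print(vec.shape)                          # (1, 91) = 7 statistics x 13 coefficients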
Example #4
    def draw(self, event):
        """Draw the widget

        Parameters
        ----------
        event : instance of Event
            The draw event.
        """
        super(Console, self).draw(event)
        if event is None:
            raise RuntimeError('Event cannot be None')
        xform = event.get_full_transform()
        tr = (event.document_to_framebuffer *
              event.framebuffer_to_render)
        logical_scale = np.diff(tr.map(([0, 1], [1, 0])), axis=0)[0, :2]
        tr = event.document_to_framebuffer
        log_to_phy = np.mean(np.diff(tr.map(([0, 1], [1, 0])), axis=0)[0, :2])
        n_pix = (self.font_size / 72.) * 92.  # num of pixels tall
        # The -2 here is because the char_height has a gap built in
        font_scale = max(n_pix / float((self._char_height-2)), 1)
        self._resize_buffers(font_scale)
        self._do_pending_writes()
        self._program['u_origin'] = xform.map((0, 0, 0, 1))
        self._program.prepare()
        self._program['u_logical_scale'] = font_scale * logical_scale
        self._program['u_color'] = self.text_color.rgba
        self._program['u_physical_scale'] = font_scale * log_to_phy
        self._program['a_position'] = self._position
        self._program['a_bytes_012'] = VertexBuffer(self._bytes_012)
        self._program['a_bytes_345'] = VertexBuffer(self._bytes_345)
        set_state(depth_test=False, blend=True,
                  blend_func=('src_alpha', 'one_minus_src_alpha'))
        self._program.draw('points')
Example #5
  def compute_metric(self):

    gfloprate = 0
    if self.ts.pmc_type == 'amd64' :
      gfloprate += self.arc(self.ts.data[0])
      
    if self.ts.pmc_type == 'intel_hsw':
      # print "Haswell chips do not have FLOP counters"
      return

    if self.ts.pmc_type == 'intel_snb':
      schema = self.ts.j.get_schema('intel_snb')
      if 'ERROR' in schema: return
      data = self.ts.j.aggregate_stats('intel_snb')

      try:
        flops = numpy.diff(data[0][:,schema['SSE_DOUBLE_SCALAR'].index] + 2*data[0][:,schema['SSE_DOUBLE_PACKED'].index] + 
                           4*data[0][:,schema['SIMD_DOUBLE_256'].index])/numpy.diff(self.ts.t)
      except: 
        flops = numpy.diff(data[0][:,schema['SSE_D_ALL'].index] + 4*data[0][:,schema['SIMD_D_256'].index])/numpy.diff(self.ts.t)

      flops = flops/data[1]

      # Only the intel_snb branch defines flops; set the metric here so other
      # chip types do not hit a NameError below.
      self.metric = tmean(flops)/1.0e9

    return
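The core of the metric is turning cumulative hardware counters into a rate with numpy.diff; a standalone sketch with made-up numbers:

import numpy as np

t = np.array([0.0, 10.0, 20.0, 30.0])                  # sample times in seconds
flop_counter = np.array([0.0, 5e9, 1.1e10, 1.8e10])    # cumulative FLOP count
rate = np.diff(flop_counter) / np.diff(t)              # FLOPs per second in each interval
print(rate / 1.0e9)                                    # [0.5 0.6 0.7] GFLOP/s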
Example #6
def grad_kappa_plot(env, model, obj_index, which='x', with_contours=False, only_contours=False, clevels=30, with_colorbar=True):
    obj, data = model['obj,data'][obj_index]

    R = obj.basis.mapextent

    grid = obj.basis.kappa_grid(data)
    grid = grid.copy()

    kw = default_kw(R)
    kw['vmin'] = -1
    kw['vmax'] =  2

    if not only_contours:
        print('!!!!!!', grid.shape)
        if which == 'x': grid = np.diff(grid, axis=1)
        if which == 'y': grid = np.diff(grid, axis=0)
        print('!!!!!!', grid.shape)
        pl.matshow(grid, **kw)
        if with_colorbar: 
            glspl.colorbar()

    if with_contours:
        kw.pop('cmap')
        pl.over(contour, grid, clevels, extend='both', colors='k', alpha=0.7, **kw)

    pl.xlabel('arcsec')
    pl.ylabel('arcsec')
Example #7
def getOmega(dels):
#    for k in range(1,dels.delta_d.shape[0])
    N = dels.delta_d.shape[1]
    delta_t = dels.delta_t
    delta_d = dels.delta_d
    
    a_t = np.diff(delta_t)
    a_t = a_t[:,0:-1]
    
    a_d = np.diff(delta_t[:,::-1])
    a_d = a_d[:,::-1]
    a_d = a_d[:,1::]
    
    b_t = np.diff(delta_d)
    b_t = b_t[:,0:-1]
    
    b_d = np.diff(delta_d[:,::-1])
    b_d = b_d[:,::-1]
    b_d = b_d[:,1::]    
    
    c_t = 0.25*(np.abs(a_t)+np.abs(b_t))*np.sign(a_t)*np.sign(b_t)*(np.sign(a_t)*np.sign(b_t)-1)
    c_d = 0.25*(np.abs(a_d)+np.abs(b_d))*np.sign(a_d)*np.sign(b_d)*(np.sign(a_d)*np.sign(b_d)-1)
    Omega = 1.0/(2*N)*(c_t.mean(axis=0) + c_d.mean(axis=0))

    return Omega
Example #8
def EN_CID(y):
    """
    CID measure from Batista, G. E. A. P. A., Keogh, E. J., Tataw, O. M. & de
    Souza, V. M. A. CID: an efficient complexity-invariant distance for time
    series. Data Min Knowl. Disc. 28, 634-669 (2014).
    
    Arguments
    ---------

    y: a nitime time-series object, or numpy vector

    """

    # Make the input a row vector of numbers:
    y = makeRowVector(vectorize(y))

    # Prepare the output dictionary
    out = {}
    
    # Original definition (in Table 2 of paper cited above)
    out['CE1'] = np.sqrt(np.mean(np.power(np.diff(y),2)))  # sum -> mean to deal with non-equal time-series lengths

    # Definition corresponding to the line segment example in Fig. 9 of the paper
    # cited above (using Pythagoras's theorem):
    out['CE2'] = np.mean(np.sqrt(1 + np.power(np.diff(y),2)))

    return out
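A small illustration of the CE1 statistic (not calling EN_CID itself, since makeRowVector/vectorize are external helpers): a noisier series has larger successive differences and therefore a larger complexity estimate.

import numpy as np

t = np.linspace(0, 2 * np.pi, 200)
smooth = np.sin(t)
noisy = smooth + 0.5 * np.random.randn(200)

def ce1(y):
    return np.sqrt(np.mean(np.diff(y) ** 2))

print(ce1(smooth), ce1(noisy))            # the noisy series scores higher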
Example #9
    def test_mean_std_12bit(self):
        # Input 12-bit, with an 8-bit color target
        input_scene = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
        color_target = np.tile(np.arange(256)[:, None, None], (1, 1, 3))

        luts = hm.mean_std_luts(input_scene.astype(np.uint16),
                                color_target.astype(np.uint8))

        np.testing.assert_array_equal(luts[0], luts[1])
        np.testing.assert_array_equal(luts[1], luts[2])

        lut = luts[0]
        assert np.all(lut[:8] == 0)
        assert np.all(lut[-8:] == 4096)
        assert np.diff(lut[8:-8]).min() == 1
        assert np.diff(lut[8:-8]).max() == 2

        # Input 12-bit, with a 12-bit color target
        input_scene = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
        color_target = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))

        luts = hm.mean_std_luts(input_scene.astype(np.uint16),
                                color_target.astype(np.uint16))

        # Should be a 1 to 1 look-up-table...
        np.testing.assert_array_equal(luts[0], np.arange(4097))
Example #10
def hist_mask(data, threshold=.95, keep='lower'):
    """
    Returns boolean mask of values below a frequency percentage threshold (0-1).

    Args:
        -data (1D array)
        -threshold (float): 0-1
        -keep (str): lower, upper, middle. If middle, threshold is ignored,
                     and a single cluster is searched out.
    """

    bins = len(data) // 100 if keep.lower() == 'middle' else len(data) // 2  # integer bin count for np.histogram
    freq, val = np.histogram(data, bins=bins)
    freq = freq / np.sum(freq).astype(float)  # Normalize frequency data

    if keep.lower() in ('lower', 'upper'):
        cutoff_value = val[np.where(np.diff(np.cumsum(freq) < threshold))[0] + 1]
        cutoff_value = val[1] if len(cutoff_value)==0 else cutoff_value
        if keep.lower() == 'lower':
            return data < cutoff_value
        else:
            return data > cutoff_value
    else:
        histmask = np.ones(data.shape[0], dtype=bool)  # Initializing mask with all True values

        # Slowly increment the parameter until a strong single central cluster is found
        for param in np.arange(0.0005, .02, .0003):
            cutoff_values = val[np.where(np.diff(freq < param))[0]]
            if len(cutoff_values) == 2:
                histmask &= data > cutoff_values[0]
                histmask &= data < cutoff_values[1]
                return histmask
        else:
            print("Warning: Histogram filter not finding a good parameter to form a central cluster. Please try again.")
            return data > -100000.  # Return an all-true mask
Example #11
 def sample_line_segment_mm_s(start_xy_mm, end_xy_mm, dt_s, mW=None, max_mm=5.0):
     """ Given a line segment in mm space, map it to galvo space.
         To make the line straight in mm space, samples may be added to 
         more-closely approximate a straight line.
         Returns: An array of shape nx3 (if mW is None) or nx4 (if mW is not None) 
                     of points and time deltas in mm and seconds,
                     excluding start_xy_mm and including end_xy_mm,
                     possibly including samples along the way.
     """
     import FLP
     from numpy.linalg import norm
     dist_mm = norm(np.asarray(end_xy_mm) - start_xy_mm)
     if dist_mm <= max_mm:
         if mW is None:
             return np.array((tuple(end_xy_mm) + (dt_s,),)) # Just the end sample.
         else:
             return np.array((tuple(end_xy_mm) + (dt_s, mW),)) # Just the end sample.
     samples_s = np.linspace(0, dt_s, int(np.ceil(dist_mm / max_mm)) + 1)  # linspace needs an integer sample count
     timeRange_s = (0, dt_s)
     if mW is None:
         return np.transpose([np.interp(samples_s[1:], timeRange_s, (start_xy_mm[0], end_xy_mm[0])),
                              np.interp(samples_s[1:], timeRange_s, (start_xy_mm[1], end_xy_mm[1])),
                              np.diff(samples_s)])
     else:
         return np.transpose([np.interp(samples_s[1:], timeRange_s, (start_xy_mm[0], end_xy_mm[0])),
                              np.interp(samples_s[1:], timeRange_s, (start_xy_mm[1], end_xy_mm[1])),
                              np.diff(samples_s),
                              mW * np.ones_like(samples_s[1:])])
Example #12
def findspikes(t,x,dxdt):
    """Searching for spikes in a TODL LTC2442 data series. The algorithm
searches for the given threshold. If it is found and the subsequent
data exceeds the threshold as well with a negative sign its defined as
a spike

    Args:
        t: time 
        x: data
        dxdt: Threshold for rejection, a working rejection for FP07 is 0.1 [V/s]

    """
    #print('Despiking')
    dt = np.diff(t)
    dx = np.diff(x)
    spikes = np.zeros(np.shape(t))
    for i in range(1,len(dt)-1):
        dxdt1 = dx[i]/dt[i]
        dxdt2 = dx[i+1]/dt[i+1]
        if(abs(dxdt1) > dxdt):
            if(abs(dxdt2) > dxdt):  # the subsequent slope must exceed the threshold too
                if(np.sign(dxdt1) == -np.sign(dxdt2)):
                    spikes[i+1] = 1
                    
    #print('Done despiking')
    return spikes
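A quick check of the despiking logic on synthetic data (a single-sample spike injected into a sine wave):

import numpy as np

t = np.arange(0, 10, 0.1)
x = np.sin(t)
x[50] += 5.0                          # inject a one-sample spike
spikes = findspikes(t, x, dxdt=10.0)  # slopes of roughly +/-50 V/s around the spike exceed the threshold
print(np.nonzero(spikes)[0])          # -> [50]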
Example #13
def get_resampling_matrix(global_grid,local_grid):
    """Build the rectangular matrix that linearly resamples from the global grid to a local grid.

    The local grid range must be contained within the global grid range.

    Args:
        global_grid(numpy.ndarray): Sorted array of n global grid wavelengths.
        local_grid(numpy.ndarray): Sorted array of m local grid wavelengths.

    Returns:
        numpy.ndarray: Array of (m,n) matrix elements that perform the linear resampling.
    """
    assert np.all(np.diff(global_grid) > 0),'Global grid is not strictly increasing.'
    assert np.all(np.diff(local_grid) > 0),'Local grid is not strictly increasing.'
    # Locate each local wavelength in the global grid.
    global_index = np.searchsorted(global_grid,local_grid)
    assert local_grid[0] >= global_grid[0],'Local grid extends below global grid.'
    assert local_grid[-1] <= global_grid[-1],'Local grid extends above global grid.'
    # Lookup the global-grid bracketing interval (xlo,xhi) for each local grid point.
    # Note that this gives xlo = global_grid[-1] if local_grid[0] == global_grid[0]
    # but this is fine since the coefficient of xlo will be zero.
    global_xhi = global_grid[global_index]
    global_xlo = global_grid[global_index-1]
    # Create the rectangular interpolation matrix to return.
    alpha = (local_grid - global_xlo)/(global_xhi - global_xlo)
    local_index = np.arange(len(local_grid),dtype=int)
    matrix = np.zeros((len(local_grid),len(global_grid)))
    matrix[local_index,global_index] = alpha
    matrix[local_index,global_index-1] = 1 - alpha
    return matrix
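A usage sketch: applying the matrix to values sampled on the global grid should reproduce ordinary linear interpolation onto the local grid.

import numpy as np

global_grid = np.linspace(3500.0, 3600.0, 11)
local_grid = np.array([3512.0, 3547.5, 3581.0])
R = get_resampling_matrix(global_grid, local_grid)
flux = np.sqrt(global_grid)                      # any values defined on the global grid
print(np.allclose(R.dot(flux), np.interp(local_grid, global_grid, flux)))   # True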
Example #14
    def _get_ladder_adjustment(self, time, betas0, ratios):
        """
        Execute temperature adjustment according to dynamics outlined in
        `arXiv:1501.05823 <http://arxiv.org/abs/1501.05823>`_.

        """

        # Some sanity checks on the ladder...
        assert np.all(np.diff(betas0) < 1), \
                'Temperatures should be in ascending order.'
        assert betas0[0] == 1, \
                'Bottom temperature should be 1.'

        betas = betas0.copy()

        # Modulate temperature adjustments with a hyperbolic decay.
        decay = self.adaptation_lag / (time + self.adaptation_lag)
        kappa = decay / self.adaptation_time

        # Construct temperature adjustments.
        dSs = kappa * (ratios[:-1] - ratios[1:])

        # Compute new ladder (hottest and coldest chains don't move).
        deltaTs = np.diff(1 / betas[:-1])
        deltaTs *= np.exp(dSs)
        betas[1:-1] = 1 / (np.cumsum(deltaTs) + 1 / betas[0])

        assert np.all(np.diff(betas) < 0), \
                'Temperatures not correctly ordered following temperature dynamics: {:}'.format(betas)

        # Don't mutate the ladder here; let the client code do that.
        return betas - betas0
Example #15
 def __init__(self,turn,elem,single,name,s,x,xp,y,yp,pc,de,tau,**args):
     apc=float(pc[0])*1e9
     ade=float(de[0])
     self.m0=self.pmass
     en=np.sqrt(apc**2+self.pmass**2)
     self.e0=en-ade
     self.p0c=np.sqrt(self.e0**2-self.m0**2)
     # structure
     self.elem=np.array(elem,dtype=int)
     self.turn=np.array(turn,dtype=int)
     d0=np.where(np.diff(self.elem)!=0)[0][0]+1
     d1=(np.where(np.diff(self.turn)!=0)[0][0]+1)//d0  # integer division: these are array shape factors
     d2=len(self.turn)//d1//d0
     self.single=np.array(single,dtype=int)
     self.name=np.array(name,dtype=str)
     self.s =np.array(s ,dtype=float)
     self.x =np.array(x ,dtype=float)
     self.y =np.array(y ,dtype=float)
     self.tau=-np.array(tau,dtype=float)*self.clight
     opd=np.array(pc,dtype=float)*(1e9/self.p0c)
     self.delta=opd-1
     self.pt=np.array(de,dtype=float)/self.p0c
     self.px=np.array(xp,dtype=float)*opd
     self.py=np.array(yp,dtype=float)*opd
     for nn,vv in self.__dict__.items():
         if hasattr(vv,'__len__') and len(vv)==d0*d1*d2:
             setattr(self,nn,vv.reshape(d2,d1,d0))
Example #16
def find_zero_crossings(laplacian_of_img):
    result = np.zeros((laplacian_of_img.shape[0], laplacian_of_img.shape[1]), dtype=int)  # np.int was removed from NumPy
    # Array indicating if values are positive or negative
    image_array_signs = np.sign(laplacian_of_img)
    
    # Difference along xaxis
    xdiff = np.diff(image_array_signs, axis=1)
    xzero_crossings = np.where(xdiff)
    # Output of where gives two arrays...combine the result to obtain [x,y] coordinate pairs
    xzero_crossings = np.dstack((xzero_crossings[0], xzero_crossings[1]))[0]
        
    #difference along yaxis
    ydiff = np.diff(image_array_signs, axis=0)
    yzero_crossings = np.where(ydiff)
    # Output of where gives two arrays...combine the result to obtain [x,y] coordinate pairs
    yzero_crossings = np.dstack((yzero_crossings[0], yzero_crossings[1]))[0]

    xzero_crossings_rows = xzero_crossings.view([('', xzero_crossings.dtype)] * xzero_crossings.shape[1])
    yzero_crossings_rows = yzero_crossings.view([('', yzero_crossings.dtype)] * yzero_crossings.shape[1])
    # Obtain the tuples of xzero_crossings which are not found in yzero_crossings
    diff = np.setdiff1d(xzero_crossings_rows, yzero_crossings_rows).view(xzero_crossings_rows.dtype).reshape(-1, xzero_crossings_rows.shape[1])

    # The format of diff cannot be used in append due to different "shape" of yzero_crossings and diff.
    diff_formatted = []
    for index in range(0, len(diff)):
        diff_formatted.append(diff[index][0]) 
    diff_a, diff_b = zip(*diff_formatted)
    difference_result = np.dstack((diff_a, diff_b))[0]

    # Append the zero crossings inside yzero_crossings with the remaining x,y coordinates
    zero_crossings = np.append(yzero_crossings, difference_result, axis=0)
    for tuple in zero_crossings:
        result[tuple[0], tuple[1]] = 120
    return result
Example #17
def zero_crossings(y_axis, window = 11):
    """
    Algorithm to find zero crossings. Smoothens the curve and finds the
    zero-crossings by looking for a sign change.


    keyword arguments:
    y_axis -- A list containing the signal over which to find zero-crossings
    window -- the dimension of the smoothing window; should be an odd integer
        (default: 11)

    return -- the index for each zero-crossing
    """
    # smooth the curve
    length = len(y_axis)
    x_axis = np.asarray(range(length), int)

    # discard tail of smoothed signal
    y_axis = _smooth(y_axis, window)[:length]
    zero_crossings = np.where(np.diff(np.sign(y_axis)))[0]
    indices = [x_axis[index] for index in zero_crossings]

    # check if zero-crossings are valid
    diff = np.diff(indices)
    if diff.std() / diff.mean() > 0.2:
        print(diff.std() / diff.mean())
        print(np.diff(indices))
        raise ValueError(
            "False zero-crossings found, indicates problem {0} or {1}".format(
            "with smoothing window", "problem with offset"))
    # check if any zero crossings were found
    if len(zero_crossings) < 1:
        raise ValueError("No zero crossings found")

    return indices
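The detection step itself is just a sign change in the (smoothed) signal; stripped of the smoothing and sanity checks it reduces to:

import numpy as np

t = np.linspace(0, 4 * np.pi, 400)
y = np.sin(t)
crossings = np.where(np.diff(np.sign(y)))[0]     # index of the sample just before each crossing
print(t[crossings])                              # close to 0, pi, 2*pi, 3*pi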
Example #18
    def _process(self, matrix, key=None):

        if len(matrix.vdims) != 1:
            raise ValueError("Input matrix to gradient operation must "
                             "have single value dimension.")

        matrix_dim = matrix.vdims[0]

        data = matrix.data
        r, c = data.shape

        if  matrix_dim.cyclic and (None in matrix_dim.range):
            raise Exception("Cyclic range must be specified to compute "
                            "the gradient of cyclic quantities")
        cyclic_range = None if not matrix_dim.cyclic else np.diff(matrix_dim.range)
        if cyclic_range is not None:
            # shift values such that wrapping works ok
            data = data - matrix_dim.range[0]

        dx = np.diff(data, 1, axis=1)[0:r-1, 0:c-1]
        dy = np.diff(data, 1, axis=0)[0:r-1, 0:c-1]

        if cyclic_range is not None: # Wrap into the specified range
            # Convert negative differences to an equivalent positive value
            dx = dx % cyclic_range
            dy = dy % cyclic_range
            #
            # Prefer small jumps
            dx_negatives = dx - cyclic_range
            dy_negatives = dy - cyclic_range
            dx = np.where(np.abs(dx_negatives)<dx, dx_negatives, dx)
            dy = np.where(np.abs(dy_negatives)<dy, dy_negatives, dy)

        return Image(np.sqrt(dx * dx + dy * dy), matrix.bounds, group=self.p.group)
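Stripped of the HoloViews wrapping and the cyclic correction, the gradient-magnitude core is a pair of first differences; a tiny self-contained sketch:

import numpy as np

data = np.add.outer(np.arange(5.0), np.arange(6.0))   # plane with unit slope along both axes
r, c = data.shape
dx = np.diff(data, 1, axis=1)[0:r-1, 0:c-1]
dy = np.diff(data, 1, axis=0)[0:r-1, 0:c-1]
print(np.sqrt(dx * dx + dy * dy))                      # constant sqrt(2) everywhere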
Example #19
def _decorate_contour_segment(data, stride=1, options={}, tomax=True, labelled=False, outline=None, aspect=1):
	default_options = {'scale': 0.2,
			'scale_units': 'dots',
			'headaxislength': 2,
			'headlength': 2,
			'headwidth': 2,
			'minshaft': 1,
			'units': 'dots',
			#'angles': 'xy',
			'edgecolor': outline,
			'linewidth': 0 if outline is None else 0.2
		}
	default_options.update(options)

	x = data[::stride,0]
	y = data[::stride,1]

	sign = 1 if tomax else -1
	dx = -sign*np.diff(y)*aspect
	dy = sign*np.diff(x)
	l = np.sqrt(dx**2+dy**2)
	dx /= l
	dy /= l

	x = 0.5*(x+np.roll(x,-1))
	y = 0.5*(y+np.roll(y,-1))

	if labelled:
		x,y,dx,dy = x[1:-2], y[1:-2], dx[1:-1], dy[1:-1]
	else:
		x,y = x[:-1], y[:-1]

	plt.quiver(x, y, dx, dy, **default_options)
Example #20
  def plot_thist(self,ax,index,xscale=1.0,yscale=1.0,xlabel='',ylabel='',
                 do_rate=False):
    d=[]
    for k in self.ts.j.hosts.keys():
      v=self.ts.assemble(index,k,0)
      if do_rate:
        d.append(numpy.divide(numpy.diff(v),numpy.diff(self.ts.t)))
      else:
        d.append((v[:-1]+v[1:])/2.0)
    a=numpy.array(d)

    h=[]
    mn=numpy.min(a)
    mn=min(0.,mn)
    mx=numpy.max(a)
    n=float(len(self.ts.j.hosts.keys()))
    for i in range(len(self.ts.t)-1):
      hist=numpy.histogram(a[:,i],30,(mn,mx))
      h.append(hist[0])

    h2=numpy.transpose(numpy.array(h))

    ax.pcolor(self.ts.t/xscale,hist[1]/yscale,h2,
              edgecolors='none',rasterized=True,cmap='spectral')
    self.setlabels(ax,self.ts,index,xlabel,ylabel,yscale)
    ax.autoscale(tight=True)
Example #21
def _find_smoothest2(xk, yk):
    N = len(xk)-1
    Np1 = N+1
    # find pseudo-inverse of B directly.
    Bd = np.empty((Np1,N))
    for k in range(-N,N):
        if (k<0):
            l = np.arange(-k,Np1)
            v = (l+k+1)
            if ((k+1) % 2):
                v = -v
        else:
            l = np.arange(k,N)
            v = N-l
            if ((k % 2)):
                v = -v
        _setdiag(Bd,k,v)
    Bd /= (Np1)
    V2 = np.ones((Np1,))
    V2[1::2] = -1
    V2 /= math.sqrt(Np1)
    dk = np.diff(xk)
    b = 2*np.diff(yk)/dk
    J = np.zeros((N-1,N+1))
    idk = 1.0/dk
    _setdiag(J,0,idk[:-1])
    _setdiag(J,1,-idk[1:]-idk[:-1])
    _setdiag(J,2,idk[1:])
    A = dot(J.T,J)
    val = dot(V2,dot(A,V2))
    res1 = dot(np.outer(V2,V2)/val,A)
    mk = dot(np.eye(Np1)-res1,dot(Bd,b))
    return mk 
Example #22
def _compute_divided_differences(xvals, fvals, N=None, full=True, forward=True):
    """Return a matrix of divided differences for the xvals, fvals pairs

    DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i

    If full is False, just return the main diagonal(or last row):
      f[a], f[a, b] and f[a, b, c].
    If forward is False, return f[c], f[b, c], f[a, b, c]."""
    if full:
        if forward:
            xvals = np.asarray(xvals)
        else:
            xvals = np.array(xvals)[::-1]
        M = len(xvals)
        N = M if N is None else min(N, M)
        DD = np.zeros([M, N])
        DD[:, 0] = fvals[:]
        for i in range(1, N):
            DD[i:, i] = np.diff(DD[i - 1:, i - 1]) / (xvals[i:] - xvals[:M - i])
        return DD

    xvals = np.asarray(xvals)
    dd = np.array(fvals)
    row = np.array(fvals)
    idx2Use = (0 if forward else -1)
    dd[0] = fvals[idx2Use]
    for i in range(1, len(xvals)):
        denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1]
        row = np.diff(row)[:] / denom
        dd[i] = row[idx2Use]
    return dd
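A sketch of the full-table mode on f(x) = x^2 (the leading underscore marks this as a private helper, so treat the call as illustrative):

import numpy as np

xvals = np.array([0.0, 1.0, 2.0, 4.0])
fvals = xvals ** 2
DD = _compute_divided_differences(xvals, fvals)
# Diagonal holds f[x0], f[x0,x1], f[x0,x1,x2], f[x0,...,x3];
# second divided differences of a quadratic are 1, the third is 0.
print(np.diag(DD))                        # [0. 1. 1. 0.]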
Example #23
def create_grid_polygons(x,y):
    """
    Creates a list of grid polygons (rectangles) in well-known text (WKT) format from evenly spaced x and y vectors.

    Args:
        x (1d numpy array): vector of x-values
        y (1d numpy array): vector of y-values

    Returns:
        list: grid polygons in WKT format
    """
    import numpy as np
    import pdb

    xdiff = np.diff(x)
    if np.std(xdiff)>1e-10:
        raise ValueError('Uneven longitude spacing.')
    dx = np.mean(xdiff)

    ydiff = np.diff(y)
    if np.std(ydiff)>1e-10:
        raise ValueError('Uneven latitude spacing.')
    dy = np.mean(ydiff)

    logger.debug('Spacing is ({},{})'.format(dx,dy))
    xmatr,ymatr = np.meshgrid(x,y)

    rows = []
    for (i,j),x_ij in np.ndenumerate(xmatr):
        y_ij = ymatr[i,j]
        x1,y1 = x_ij-dx/2.,y_ij-dy/2.
        x2,y2 = x_ij+dx/2.,y_ij+dy/2.
        rows.append((i,j,x_ij,y_ij,'POLYGON(({x1} {y1},{x1} {y2},{x2} {y2},{x2} {y1},{x1} {y1}))'.format(x1=x1,y1=y1,x2=x2,y2=y2)))

    return rows
Example #24
def xCoordinates(sobel_img):
    num_rows = float(len(sobel_img)) # get number of x values

    # sum along y axis
    vert_sum = np.sum(sobel_img,axis=0)
    # make it an average value (divide by # of x values)
    vert_sum = np.divide(vert_sum,num_rows)

    x = np.arange(0,len(vert_sum)) # for graphing
    xnew = np.arange(0,len(vert_sum),50) # for smoothing
    #smooth
    y_smooth = spline(x, vert_sum, xnew)

    #make a sin curve 1/3 of the width of image
    img_width, img_height = sobel_img.shape
    z = np.arange(0,int(img_width/3),1)
    def f(x):
        return np.sin(x/90)*-15 + 25

    f = [f(i) for i in z] # make sine into an array

    # convolve sine and the vertical sum
    y_conv = np.convolve(vert_sum, f,'same')

    # detect local minima
    mins = (np.diff(np.sign(np.diff(y_conv))) > 0).nonzero()[0] + 1

    return mins
Example #25
def yCoordinates(sobel_img):
    num_col = float(len(sobel_img[0])) #number of y values
    # sum along x axis
    horiz_sum = np.sum(sobel_img, axis=1)
    #average value
    horiz_sum = np.divide(horiz_sum, num_col)

    y = np.arange(0, len(horiz_sum))
    ynew = np.arange(0, len(horiz_sum))
    x_smooth = spline(y, horiz_sum, ynew)

    #make a sin curve 1/3 of the height
    img_width, img_height = sobel_img.shape
    z = np.arange(0,int(img_height/3),1)
    def f(x):
        return np.sin(x/90)*-15 + 25

    f = [f(i) for i in z] # make sine into an array

    # convolve sine and the vertical sum
    y_conv = np.convolve(horiz_sum, f,'same')

    # detect local minima
    mins = (np.diff(np.sign(np.diff(y_conv))) > 0).nonzero()[0] + 1

    return mins
Example #26
    def _buildInterp(self, x, y, z, pot):
        """ Private function to build interpolation arrays using potential
        array `pot`. Assumes that only the positive part of z is in the array,
        so reflects the array in the (x, y) plane.
        """
        self.xmin = x[0]
        self.xmax = x[-1]
        self.ymin = y[0]
        self.ymax = y[-1]
        self.zmin = -z[-1]
        self.zmax = z[-1]

        # Field in negative z direction. Reverse the order in this axis.
        potNeg = pot[...,-1:0:-1]
        # Concatenate positive and negative z direction arrays.
        _z = np.hstack((-z[-1:0:-1], z))
        _pot = np.dstack((potNeg, pot))

        self.bInterpolator = Interpolator((x, y, _z), _pot)

        # Build difference derivative arrays
        self.dx = x[1]-x[0]
        self.dy = y[1]-y[0]
        self.dz = z[1]-z[0]
        dbdx = np.diff(_pot, axis=0)/self.dx
        dbdy = np.diff(_pot, axis=1)/self.dy
        dbdz = np.diff(_pot, axis=2)/self.dz
        x_dbdx = x[:-1]+self.dx/2
        y_dbdy = y[:-1]+self.dy/2
        z_dbdz = _z[:-1]+self.dz/2

        self.dBdxInterp = Interpolator((x_dbdx, y, _z), dbdx)
        self.dBdyInterp = Interpolator((x, y_dbdy, _z), dbdy)
        self.dBdzInterp = Interpolator((x, y, z_dbdz), dbdz)
Example #27
	def check_obs_scheme(self):
		" Checks the internal validity of provided observation schemes "

		# check sub_pops
		idx_union = np.sort(self._sub_pops[0])
		i = 1
		while idx_union.size < self._p and i < len(self._sub_pops):
			idx_union = np.union1d(idx_union, self._sub_pops[i]) 
			i += 1
		if idx_union.size != self._p or np.any(idx_union!=np.arange(self._p)):
			raise Exception(('all subpopulations together have to cover '
			'exactly all included observed variables y_i in y. '
			'This is not the case. Change the definition of '
			'subpopulations in variable sub_pops or reduce '
			'the number of observed variables p. '
			'The union of indices of all subpopulations is'),
			idx_union )

		# check obs_time
		if not self._obs_time[-1]==self._T:
			raise Exception(('Entries of obs_time give the respective ends of '
							'the periods of observation for any '
							'subpopulation. Hence the last entry of obs_time '
							'has to be the full recording length. The last '
							'entry of obs_time before is '), self._obs_time[-1])

		if np.any(np.diff(self._obs_time)<1):
			raise Exception(('lengths of observation have to be at least 1. '
							'Minimal observation time for a subpopulation: '),
							np.min(np.diff(self._obs_time)))

		# check obs_pops
		if not self._obs_time.size == self._obs_pops.size:
			raise Exception(('each entry of obs_pops gives the index of the '
							'subpopulation observed up to the respective '
							'time given in obs_time. Thus the sizes of the '
							'two arrays have to match. They do not. '
							'no. of subpop. switch points and no. of '
							'subpopulations observed up to switch points '
							'are '), (self._obs_time.size, self._obs_pops.size))

		idx_pops = np.sort(np.unique(self._obs_pops))
		if not np.min(idx_pops)==0:
			raise Exception(('first subpopulation has to have index 0, but '
							'is given the index '), np.min(idx_pops))
		elif not idx_pops.size == len(self._sub_pops):
			raise Exception(('number of specified subpopulations in variable '
							'sub_pops does not meet the number of '
							'subpopulations indexed in variable obs_pops. '
							'Delete subpopulations that are never observed, '
							'or change the observed subpopulations in '
							'variable obs_pops accordingly. The number of '
							'indexed subpopulations is '),
							len(self._sub_pops))
		elif not np.all(np.diff(idx_pops)==1):
			raise Exception(('subpopulation indices have to be consecutive '
							'integers from 0 to the total number of '
							'subpopulations. This is not the case. '
							'Given subpopulation indices are '),
							idx_pops)
Example #28
def test_power():
    a = 5.  # shape
    samples = 10000
    s1 = np.random.power(a, samples)
    s2 = common.rand_pow_array(a, samples)

    plt.figure('power test')
    count1, bins1, ignored1 = plt.hist(s1,
                                       bins=30,
                                       label='numpy',
                                       histtype='step')
    x = np.linspace(0, 1, 100)
    y = a * x**(a - 1.0)
    normed_y1 = samples * np.diff(bins1)[0] * y
    plt.plot(x, normed_y1, label='numpy.random.power fit')

    count2, bins2, ignored2 = plt.hist(s2,
                                       bins=30,
                                       label='joinmarket',
                                       histtype='step')
    normed_y2 = samples * np.diff(bins2)[0] * y
    plt.plot(x, normed_y2, label='common.rand_pow_array fit')
    plt.title('testing power distribution')
    plt.legend(loc='upper left')
    plt.show()
Example #29
def get_blotter_pnl(order_qty, filled_qty, filled_price, cum_position, data, drawdown):
    #import pdb; pdb.set_trace()
    mid = midpoint(data)
    cash = np.sum(filled_qty * filled_price) * (-1.0)
    open_cash = cum_position[-1] * mid[-1]
    pnl = cash + open_cash
    pnl_t = np.cumsum(cum_position[:-1] * np.diff(mid))
    spread = np.cumsum((mid - filled_price) * filled_qty)
    pnl_t = spread[1:] + pnl_t
    assert abs(pnl - pnl_t[-1]) < 0.01

    running_max = np.maximum.accumulate(pnl_t)
    idx = np.where(pnl_t - running_max < drawdown)[0]
    if len(idx) > 0:
        stop_idx = np.min(idx)
        cum_position[(stop_idx+1):] = 0.0
        pnl_t = np.cumsum(cum_position[:-1] * np.diff(mid))
        order_qty[(stop_idx+1):] = 0.0
        filled_qty[(stop_idx+1):] = 0.0
        spread = np.cumsum((mid - filled_price) * filled_qty)
        pnl_t = spread[1:] + pnl_t

    order_volume = np.sum(np.abs(order_qty))
    trade_volume = np.sum(np.abs(filled_qty))

    
    result = np.array([(pnl_t[-1], np.min(pnl_t), np.max(pnl_t),
                        np.min(cum_position), np.max(cum_position), trade_volume,
                        order_volume, trade_volume * 1.0 / order_volume)],
                      dtype = [('total_pnl', 'f'), ('min_pnl', 'f'),
                               ('max_pnl', 'f'), ('min_position', int),
                               ('max_position', int), ('volume', int),
                               ('order_volume', int), ('fill_ratio', float)])
    return result
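The mark-to-market term pnl_t is the cumulative product of the held position and the mid-price move; a minimal sketch of just that piece with made-up numbers:

import numpy as np

mid = np.array([100.0, 100.5, 101.0, 100.0])
cum_position = np.array([1, 2, 2, 0])                 # position held going into each move
pnl_t = np.cumsum(cum_position[:-1] * np.diff(mid))
print(pnl_t)                                          # [ 0.5  1.5 -0.5]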
Example #30
def test_volatility():
    # Test volatility
    G = np.zeros([3, 3, 3])
    G[0, 1, [0, 1, 2]] = 1
    G[0, 2, 1] = 1
    G[1, 2, 2] = 1
    G = G + G.transpose([1, 0, 2])
    # global volatility
    v_global = teneto.networkmeasures.volatility(G)
    # v volatility per time point
    v_time = teneto.networkmeasures.volatility(G, calc='time')
    v_tr = np.array([2/6, 4/6])
    if not v_global == np.mean(v_tr):
        raise AssertionError()
    if not all(v_time == v_tr):
        raise AssertionError()
    # event displacement
    v_er_tr = np.array([0, 2/6, 2/6])
    v_er = teneto.networkmeasures.volatility(
        G, calc='event_displacement', event_displacement=0)
    if not all(v_er == v_er_tr):
        raise AssertionError()
    # vol per node
    v_edge = teneto.networkmeasures.volatility(G, calc='edge')
    if not np.all(v_edge == np.mean(np.abs(np.diff(G)), axis=-1)):
        raise AssertionError()
    v_node = teneto.networkmeasures.volatility(G, calc='node')
    if not np.all(v_node == np.mean(
        np.mean(np.abs(np.diff(G)), axis=-1), axis=-1)):
        raise AssertionError()
                                      default_a=np.power(10, peak_thresh / 20))
    #print('freqs',dph)
    #print('amps',a)
    res.append((X_, ph, a, dph, da))

# synthesize results
(Xk, phk, ak, dphk, dak) = res[10]
(Xk1, phk1, ak1, dphk1, dak1) = res[11]
ph = cat_to_cols(phk, phk1)
a = cat_to_cols(ak, ak1)
dph = cat_to_cols(dphk, dphk1)
da = cat_to_cols(dak, dak1)
amp, th = pa.synth_peaks(ph, a, dph, da, return_th_a=True)
amp_trans, th_trans = pa.synth_peaks(ph,
                                     a,
                                     dph * resynth_ptrans,
                                     da,
                                     return_th_a=True)
n = np.arange(pa.H)
fig, ax = plt.subplots(3, 1)
ax[0].plot(n, th[0, :], label='orig')
ax[0].plot(n, th_trans[0, :], label='trans')
ax[0].legend()
ax[1].plot(n[:-1], np.diff(th[0, :]), label='orig')
ax[1].plot(n[:-1], np.diff(th_trans[0, :]), label='trans')
ax[1].legend()
xdata = np.arange(N_FFT / 2 + 1) * sr / N_FFT
ax[2].plot(xdata, 20 * np.log10(np.abs(Xk)))
ax[2].scatter(dph[0, 0] / (2 * np.pi) * sr, 20 * np.log10(a[0, 0]))
plt.show()
Example #32
def extract_3D_grids(fields, verbosity=False):
    '''Extract grids from ultra fields and data
    Assume fieldnames of a specific format
    Assume constant grid for data with suppressed zeros.
    Return grids'''

    # Extract first two grids from headers
    # keys of the form: "kTe1.00 hNu11.124198, sig_C(nu->nu',T_e)/sig_{Th} [1/keV] vs hNu' [keV]" with comma optional
    Tgrid = np.unique([float_comma(key.split()[0][3:]) for key in fields])
    Efromgrid = np.unique([float_comma(key.split()[1][3:]) for key in fields])

    # Extract last grid from data
    # grid stored in leftmost index of data array
    Etogrid = np.unique(np.concatenate([dat[0, :] for dat in fields.values()]))

    # Make MG boundary grid
    G = max(Efromgrid.size, Etogrid.size)
    # because they're printed differently,
    # no guarantee Efrom and Eto have the same grid
    Eavg = Efromgrid if (G == Efromgrid.size) else Etogrid
    #
    # having no information about group bounds, try even spacing
    reldE = (Eavg[1:] - Eavg[:-1]) / np.maximum(Eavg[1:], Eavg[:-1])
    absdE = np.diff(Eavg)
    relvar = np.sqrt(np.var(reldE) / (G - 1))
    absvar = np.sqrt(np.var(absdE) / (G - 1)) / np.mean(absdE)
    if relvar < 1e-4:
        if verbosity:
            print('Trying evenly-log-spaced grid')
        reldEavg = 1.0 - np.power(Eavg[-1] / Eavg[0], -1.0 / (G - 1))
        E0 = Eavg[0] * np.sqrt(1 - reldEavg)
        EG = Eavg[-1] / np.sqrt(1 - reldEavg)
        E0 = float('{:.1e}'.format(E0))
        EG = float('{:.1e}'.format(EG))
        Ebdrgrid = np.geomspace(E0, EG, G + 1)
        EavgCheck = np.sqrt(Ebdrgrid[1:] * Ebdrgrid[:-1])
    elif absvar < 1e-2:
        if verbosity:
            print('Trying evenly-linearly-spaced grid')
        absdEavg = (Eavg[-1] - Eavg[0]) / (G - 1)
        E0 = max(0, Eavg[0] - absdEavg / 2)
        EG = Eavg[-1] + absdEavg / 2
        E0 = float('{:.1e}'.format(E0))
        EG = float('{:.1e}'.format(EG))
        Ebdrgrid = np.linspace(E0, EG, G + 1)
        EavgCheck = 0.5 * (Ebdrgrid[1:] + Ebdrgrid[:-1])
    else:
        EavgCheck = np.zeros(G)
        Ebdrgrid = np.zeros(G + 1)  # placeholder so the verbose print below is defined; rebuilt by the fallback
    #
    # If heuristic spacing is bad, fall back on more general spacing
    reldiff = np.max(np.abs(Eavg - EavgCheck) / np.maximum(EavgCheck, 0.1))
    absdiff = np.max(np.abs(Eavg - EavgCheck)) / np.mean(Eavg)
    if verbosity:
        print('Trying')
        print(Ebdrgrid)
        print('with errors {} and {}'.format(reldiff, absdiff))
    if reldiff > 1e-3 or absdiff > 2e-3:
        if verbosity:
            print('Falling back on self-consistent grid')
        d2 = np.diff(Eavg) / 2
        dE = np.zeros(G)
        dE[1:] += d2
        dE[:-1] += d2
        d2[0] = min(Eavg[0], d2[0])
        dE[0] += d2[0]
        dE[-1] += d2[-1]
        Ebdrgrid = np.zeros(G + 1)
        Ebdrgrid[0] = Eavg[0] - d2[0]
        Ebdrgrid[1:] = dE
        Ebdrgrid = np.cumsum(Ebdrgrid)

    # Check self-consistency of grids
    assert np.all(np.logical_and(Eavg < Ebdrgrid[1:], Eavg > Ebdrgrid[:-1]))

    # Make dictionary and handle corner case of length-1 grids
    grids = {'T': Tgrid, 'Efrom': Efromgrid, 'Eto': Etogrid, 'Ebdr': Ebdrgrid}
    for key in grids:
        # Corner case
        try:
            len(grids[key])
        except TypeError:
            grids[key] = grids[key] * np.ones(1)
        # Print extracted grids
        if verbosity:
            print(key)
            print(grids[key])

    # Return dictionary of grids
    return grids
Example #33
def analyze_dts(ts_dump, errs):
    
    dts_old = np.zeros([0])
    a = 0
    b = 0
    jump_size = list()
    exc_steps = list()

    print "\nAnalyzing dt..."
    
    for k in range(len(ts_dump)):

        dts = errors.analyze_timestamps(ts_dump[str(k)].astype(np.uintc), np.asarray(errs[k]).astype(np.uintc))

        dts = np.append(dts_old, dts)
        dts_old = dts

        a += len(np.where(np.diff(ts_dump[str(k)]) < 0)[0])
        
        all_dt = np.diff(ts_dump[str(k)])
        Exc_steps = np.where(all_dt > 60E4)[0]
        exc_steps.append(Exc_steps)

        b += len(Exc_steps)
        exc_ssize = np.mean(all_dt[Exc_steps])

        if np.isnan(exc_ssize):
            exc_ssize = 0

            if k == 0:
                fig2 = plt.figure()
                plt.hist(all_dt[all_dt < 60E4]*qutau.get_timebase()*1E9, bins = 100, color = 'orange', histtype = 'stepfilled')
                plt.title('Histogram for dt, without the excessive jumps.')
                plt.xlabel('dt (ns)')
                plt.ylabel('Clicks')
                configure_axes()
                plt.show()

        else:
            jump_size.append(exc_ssize)
            
            fig2 = plt.figure()
            plt.hist(all_dt[all_dt < 60E4]*qutau.get_timebase()*1E9, bins = 100, color = 'red')
            plt.title('Excessive jumps excluded (dump %d).'%k)
            plt.xlabel('dt (ns)')
            plt.ylabel('Clicks')
            configure_axes()
           
            fig3 = plt.figure()
            plt.hist(all_dt[all_dt > 60E4]*qutau.get_timebase()*1E6, bins = 100, color = 'red')
            plt.title('Excessive jumps only (dump %d).'%k)
            plt.xlabel('dt (ms)')
            plt.ylabel('Clicks')
            configure_axes()

            
    print "Found %d negative jumps and %d excessive possitive jumps. The mean positive jump stepsize of the excessive jumps is %.4e"\
            %(a, b, np.mean(jump_size)*qutau.get_timebase())
        
            
    return dts, exc_steps
Example #34
0
def plot_NormGFP_Horizon(lcells, num_levels, fileName,frame_signal_start,frame_signal_end,frame_experiment_start):
    
    fig, axarr = plt.subplots(len(lcells),1, sharex=True,figsize=(12,len(lcells)))
    #f.subplots_adjust(vspace=0)
    
    for iax, this_cell in enumerate(lcells):
        
        if len(lcells)>1:
            ax=axarr[iax]
        else:
            ax = axarr

       
        ax.axvline(x=frame_signal_start,color='r',linestyle='dashed',zorder=3)
        ax.axvline(x=frame_signal_end,color='b',linestyle='dashed',zorder=3)
        
        
        gfps=[x for x in this_cell['GFP']]
        time=this_cell['roiFrames']
        #print(time,len(time))
        #print(gfps,len(gfps))
        #print(frame_signal_start-frame_experiment_start)
        
        gfpsm=[]
        for i,t in enumerate(time):
            if(t<frame_signal_start-frame_experiment_start):
                gfpsm.append(gfps[i])
        #print(gfpsm)
        gfpsm=np.mean(gfpsm)
        relativeIntensity=np.add(-0.,gfps)/gfpsm-1
       # print(relativeIntensity)
       
        if len(time)>1:

            #ax.plot(time, np.add(0.5,.5*relativeIntensity), 'k-', alpha=.5, linewidth=1)
            ax.fill_between([time[0], time[-1]], 0., 1., color='y', alpha=0.2)

            zero_crossings = np.where(np.diff(np.sign(relativeIntensity)))[0]
            
            zero_crossings=np.hstack((0,zero_crossings, len(relativeIntensity)-1))
            i0=0
            
            
            for ix in zero_crossings:
                try:
                    if relativeIntensity[ix]<0:
                        if ix<len(time)-1 and ix>1:
                            tN=np.interp(0.,[relativeIntensity[ix],relativeIntensity[ix+1]],[time[ix],time[ix+1]])
                            yN=0
                        else:
                            tN=time[-1]
                            yN=relativeIntensity[-1]

                        if i0>1:
                            t0=np.interp(0.,[relativeIntensity[i0],relativeIntensity[i0-1]],[time[i0],time[i0-1]])
                            y0=0.
                        else:
                            t0=time[0]
                            y0=relativeIntensity[0]
                        if ix>0:
                            xtimes=np.hstack((t0,time[i0:ix+1],tN))
                            xrelativeIntensity=np.hstack((y0, relativeIntensity[i0:ix+1], yN))

                            for this_level in range(0,num_levels):
                                ax.fill_between(xtimes, num_levels*this_level+1, np.add(1+this_level,num_levels*xrelativeIntensity), color='r', alpha=1/num_levels)
                    else:   
                        if ix<len(time)-1 :
                            tN=np.interp(0.,[relativeIntensity[ix+1],relativeIntensity[ix]],[time[ix+1],time[ix]])
                            yN=0
                        else:
                            tN=time[-1]
                            yN=relativeIntensity[-1]

                        if i0>1:
                            t0=np.interp(0.,[relativeIntensity[i0-1],relativeIntensity[i0]],[time[i0-1],time[i0]])
                            y0=0
                        else:
                            t0=time[0]
                            y0=relativeIntensity[0]

                        if ix>0:
                            xtimes=np.hstack((t0,time[i0:ix+1],tN))

                            xrelativeIntensity=np.hstack((y0, relativeIntensity[i0:ix+1], yN))

                            for this_level in range(0,num_levels):
                                ax.fill_between(xtimes, -num_levels*this_level, np.add(-this_level,num_levels*xrelativeIntensity), color='g', alpha=1/num_levels)
                except IndexError:
                    continue
                i0=ix+1
                


        #ax.plot([0,np.max(time)],[0.5,0.5],'k:', linewidth=1, alpha=0.5)
        #ax.plot([0,np.max(time)],[1,1],'k-', linewidth=1, alpha=0.5)
        #ax.plot([0,np.max(time)],[-0,-0],'k-', linewidth=1, alpha=0.5)

        ax.text(frame_experiment_start-2,.5,'Cell %s'%this_cell['trackID'],fontsize=14, verticalalignment='center',horizontalalignment='right')
        ax.set_yticks([])

        ax.set_ylim([0,1])
        ax.set_xlim([frame_experiment_start, np.max(time)])
        
        if iax==len(lcells)-1:
            ax.set_xlabel('Time (frames)',fontsize=14)
            ax.set_xticks(np.arange(frame_experiment_start,np.max(time),10))
        else:
            ax.set_xlabel('',fontsize=16)
            ax.set_xticks([])
    plt.savefig(fileName)
Example #35
def PMTPulseFinder(ev,
                    base_samples=np.intp(80),
                    amp_gains=np.array([1, 1], dtype=np.float64),
                    threshold=np.float64(4)):

    # default output
    default_output = dict(
        ch=np.int32([-1]),
        iTrace=np.int32([-1]),
        baseline=np.float64([-1]),
        baserms=np.float64([-1]),
        pulse_area=np.float64([-1]),
        pulse_height=np.float64([-1]),
        pulse_istart=np.int32([-1]),
        pulse_iend=np.int32([-1]),
        pulse_ipeak=np.int32([-1])
    )

    if not (ev['PMTtraces']['loaded'] and
            (ev['PMTtraces']['t0_frac'].shape[0] > 0)
            ):
        return default_output

    # ADC to voltage with lost points removed
    ls = ev['PMTtraces']['lost_samples'][:, 0]  # two channels are identical
    lost_samples_min = np.min(ls[ls > 0])
    pmtV, satBool = scale(ev['PMTtraces']['v_offset'],
                          ev['PMTtraces']['v_scale'],
                          ev['PMTtraces']['traces'],
                          -127, 126, lost_samples_min, amp_gains)

    out = default_output

    if base_samples > lost_samples_min:
        base_samples = np.intp(lost_samples_min * .5)

    baseline = np.mean(pmtV[:, :base_samples], axis=1)
    baserms = np.sqrt(np.var(pmtV[:, :base_samples], axis=1))

#    out['PMT_t0_sec'] = ev['PMTtraces']['t0_sec'][:, 0]
#    out['PMT_t0_frac'] = ev['PMTtraces']['t0_frac'][:, 0]

    pmtV_bsub = pmtV - baseline[:, None]  # expand as column vectors

    # points over threshold
    overThresh = np.int32(pmtV_bsub < -threshold * baserms[:, None])

    # start and end indices of the peaks
    iPeakStart = np.nonzero(
        np.hstack(
            (overThresh[:, 0][:, None], np.diff(overThresh, n=1, axis=1))) == 1)
    iPeakEnd = np.nonzero(
        np.hstack(
            (np.diff(overThresh, n=1, axis=1), -overThresh[:, -1][:, None])) == -1)

    # Raise error if starts and ends not match
    if iPeakStart[0].shape != iPeakEnd[0].shape:
        raise ValueError("Shape mismatch!")

    # extend the peaks to the points where baseline are crossed
    # first left shift to the left boundaries
    dt = 1
    crossBase = np.zeros(iPeakStart[0].shape, dtype=bool)  # all false
    while not all(crossBase):
        # left shift the column values
        iPeakStart[1][~crossBase] -= dt
        # stop if crosses the baseline or index becomes less than 0
        iPeakStart[1][iPeakStart[1] < 0] = 0
        crossBase[np.logical_or(iPeakStart[1] == 0, pmtV_bsub[
                                iPeakStart] > 0)] = True

    # right shift
    crossBase = np.zeros(iPeakStart[0].shape, dtype=bool)  # all false
    while not all(crossBase):
        # left shift the column values
        iPeakEnd[1][~crossBase] += dt
        # stop if crosses the baseline or index becomes less than 0
        iPeakEnd[1][iPeakEnd[1] > pmtV.shape[1] - 1] = pmtV.shape[1] - 1
        crossBase[np.logical_or(iPeakEnd[1] == pmtV.shape[1] -
                                1, pmtV_bsub[iPeakEnd] > 0)] = True

    # merge overlap peaks
    nonOverlapStart = np.ones(iPeakStart[0].shape, dtype=bool)
    nonOverlapStart[1:] = ~np.logical_and(np.diff(iPeakStart[0]) == 0,
                                          np.diff(iPeakStart[1]) == 0)

    nonOverlapEnd = np.ones(iPeakEnd[0].shape, dtype=bool)
    nonOverlapEnd[1:] = ~np.logical_and(np.diff(iPeakEnd[0]) == 0,
                                        np.diff(iPeakEnd[1]) == 0)

    if any(np.logical_xor(nonOverlapStart, nonOverlapEnd)):
        raise ValueError("Numbers of peak starts and ends don't match")

    out['iTrace'] = iPeakStart[0][nonOverlapStart]
    out['baseline'] = baseline[iPeakStart[0][nonOverlapStart]]
    out['baserms'] = baserms[iPeakStart[0][nonOverlapStart]]
    out['pulse_istart'] = iPeakStart[1][nonOverlapStart]
    out['pulse_iend'] = iPeakEnd[1][nonOverlapStart]
    out['ch'] = np.zeros((np.sum(nonOverlapStart),), dtype=np.int32)
    out['ch'][satBool[iPeakStart[0][nonOverlapStart]]] = 1

    out['pulse_area'] = np.zeros((np.sum(nonOverlapStart),), dtype=np.float64)
    out['pulse_height'] = np.zeros(
        (np.sum(nonOverlapStart),), dtype=np.float64)
    out['pulse_ipeak'] = np.zeros((np.sum(nonOverlapStart),), dtype=np.int32)

    # calculate the baseline crossing points and do the integration
    # may be faster in a for loop since no need to store intermediate results
    i = 0
    for iPulse in np.nonzero(nonOverlapStart)[0]:
        # start crossing point

        x0 = np.interp(0,
                       [pmtV_bsub[(iPeakStart[0][iPulse], iPeakStart[1][iPulse])],
                        pmtV_bsub[(iPeakStart[0][iPulse], iPeakStart[1][iPulse] + 1)]],
                       [iPeakStart[1][iPulse], iPeakStart[1][iPulse] + 1])
        # end crossing point
        x1 = np.interp(0,
                       [pmtV_bsub[(iPeakEnd[0][iPulse], iPeakEnd[1][iPulse] - 1)],
                        pmtV_bsub[(iPeakEnd[0][iPulse], iPeakEnd[1][iPulse])]],
                       [iPeakEnd[1][iPulse] - 1, iPeakEnd[1][iPulse]])

        area = np.trapz(np.hstack((0,
                                   pmtV_bsub[iPeakStart[0][iPulse]][(iPeakStart[
                                       1][iPulse] + 1):iPeakEnd[1][iPulse]],
                                   0)),
                        np.hstack((x0,
                                   np.arange((iPeakStart[1][iPulse] + 1), iPeakEnd[1][iPulse]), x1)))
        iPeak = iPeakStart[1][iPulse] + np.ma.argmin(pmtV_bsub[iPeakStart[0][iPulse]][
            iPeakStart[1][iPulse]:iPeakEnd[1][iPulse]])

        height = pmtV_bsub[iPeakStart[0][iPulse]][iPeak]

        out['pulse_area'][i] = area
        out['pulse_height'][i] = height
        out['pulse_ipeak'][i] = iPeak
        i += 1

    return out
Example #36
def PMTPulseFinder2(ev,
                   base_samples=np.intp(80),
                   amp_gains=np.array([1, 1], dtype=np.float64),
                   threshold=np.float64(3.5)):

    # default output
    default_output = dict(
        ch=np.int32([-1]),
        iTrace=np.int32([-1]),
        nPulse=np.int32([-1]),
        iPulse=np.int32([-1]),
        nSatADC=np.int32([-1]),
        baseline=np.float64([-1]),
        baserms=np.float64([-1]),
        pulse_area=np.float64([-1]),
        pulse_height=np.float64([-1]),
        pulse_istart=np.int32([-1]),
        pulse_iend=np.int32([-1]),
        pulse_ipeak=np.int32([-1])
    )
    try:
        if not (ev['PMTtraces']['loaded'] and
                (ev['PMTtraces']['t0_frac'].shape[0] > 0)):
            return default_output

        out = default_output

        # ADC to voltage with lost points removed
        ls = ev['PMTtraces']['lost_samples'][:, 0]  # two channels are identical
        lost_samples_min = np.min(ls[ls > 0])

        pmtV = scale2(ev['PMTtraces']['v_offset'],
                      ev['PMTtraces']['v_scale'],
                      ev['PMTtraces']['traces'],
                      lost_samples_min, amp_gains)

        if base_samples > lost_samples_min:
            base_samples = np.intp(lost_samples_min * .5)

        baseline = np.mean(pmtV[:, :, :base_samples], axis=2)
        baserms = np.sqrt(np.var(pmtV[:, :, :base_samples], axis=2))

    #    out['PMT_t0_sec'] = ev['PMTtraces']['t0_sec'][:, 0]
    #    out['PMT_t0_frac'] = ev['PMTtraces']['t0_frac'][:, 0]

        pmtV = pmtV - baseline[:, :, None]  # expand as column vectors

        # points and traces over threshold
        overThreshV = pmtV < -threshold * baserms[:, :, None]
        overThreshTrace = np.any(overThreshV, axis=2)
        # if no samples in a trace is over threshold, set its first sample
        # over threshold for ease of processing
        firstADC = overThreshV[:, :, 0]
        firstADC[~overThreshTrace] = True
        overThreshV[:, :, 0] = firstADC
        # convert to integer for further manipulations
        overThreshV = overThreshV.astype(int)
        # pdb.set_trace()
        # start and end indices of the peaks
        iPeakStart = np.nonzero(
            np.concatenate(
                (overThreshV[:, :, 0][:, :, None], np.diff(overThreshV, n=1, axis=2)), axis=2) == 1)
        iPeakEnd = np.nonzero(
            np.concatenate(
                (np.diff(overThreshV, n=1, axis=2), -overThreshV[:, :, -1][:, :, None]), axis=2) == -1)
        # pdb.set_trace()
        # extend the peaks to the points where the baseline are crossed
        # first left shift to the left boundaries
        dt = 1
        crossBase = np.zeros(iPeakStart[0].shape, dtype=bool)  # all false
        while not all(crossBase):
            # left shift the column values
            iPeakStart[2][~crossBase] -= dt
            # stop if crosses the baseline or index becomes less than 0
            iPeakStart[2][iPeakStart[2] < 0] = 0
            crossBase[np.logical_or(iPeakStart[2] == 0, pmtV[
                                    iPeakStart] > 0)] = True

        # right shift
        crossBase = np.zeros(iPeakEnd[0].shape, dtype=bool)  # all false
        while not all(crossBase):
            # left shift the column values
            iPeakEnd[2][~crossBase] += dt
            # stop if crosses the baseline or index becomes less than 0
            iPeakEnd[2][iPeakEnd[2] > (pmtV.shape[2] - 1)] = pmtV.shape[2] - 1
            crossBase[np.logical_or(iPeakEnd[2] == pmtV.shape[2] - 1,
                                    pmtV[iPeakEnd] > 0)] = True

        # merge overlap peaks
        nonOverlapStart = np.ones(iPeakStart[0].shape, dtype=bool)
        nonOverlapStart[1:] = ~np.logical_and(
            np.logical_and(np.diff(iPeakStart[0]) == 0, np.diff(iPeakStart[1]) == 0),
            np.diff(iPeakStart[2]) == 0)

        nonOverlapEnd = np.ones(iPeakEnd[0].shape, dtype=bool)
        nonOverlapEnd[1:] = ~np.logical_and(
            np.logical_and(np.diff(iPeakEnd[0]) == 0, np.diff(iPeakEnd[1]) == 0),
            np.diff(iPeakEnd[2]) == 0)

        if any(np.logical_xor(nonOverlapStart, nonOverlapEnd)):
            raise ValueError("Numbers of peak starts and ends don't match")

        # Output will include all pulses and the traces not over the threshold
        nLines = iPeakStart[0][nonOverlapStart].size
        out['ch'] = np.zeros((nLines,), dtype=np.int32) - 1
        out['iTrace'] = np.zeros((nLines,), dtype=np.int32) - 1
        out['nPulse'] = np.zeros((nLines,), dtype=np.int32) - 1
        out['iPulse'] = np.zeros((nLines,), dtype=np.int32) - 1
        out['nSatADC'] = np.zeros((nLines,), dtype=np.int32) - 1
        out['baseline'] = np.zeros((nLines,), dtype=np.float64) - 1
        out['baserms'] = np.zeros((nLines,), dtype=np.float64) - 1
        out['pulse_area'] = np.zeros((nLines,), dtype=np.float64) - 1
        out['pulse_height'] = np.zeros((nLines,), dtype=np.float64) - 1
        out['pulse_istart'] = np.zeros((nLines,), dtype=np.int32) - 1
        out['pulse_iend'] = np.zeros((nLines,), dtype=np.int32) - 1
        out['pulse_ipeak'] = np.zeros((nLines,), dtype=np.int32) - 1
        # pdb.set_trace()
        iTrace0 = 0
        ch0 = 0
        nPulse = 0
        iPulse = 0
        for iLine in range(nLines):
            iTrace = iPeakStart[0][nonOverlapStart][iLine]
            ch = iPeakStart[1][nonOverlapStart][iLine]

            out['ch'][iLine] = ch
            out['iTrace'][iLine] = iTrace
            out['baseline'][iLine] = baseline[iTrace, ch]
            out['baserms'][iLine] = baserms[iTrace, ch]

            if overThreshTrace[iTrace, ch]:  # has pulses
                if (iTrace == iTrace0) and (ch == ch0):  # same trace
                    nPulse += 1
                    iPulse += 1
                else:
                    iTrace0 = iTrace
                    ch0 = ch
                    nPulse = 1
                    iPulse = 1

                iStart = iPeakStart[2][nonOverlapStart][iLine]
                iEnd = iPeakEnd[2][nonOverlapEnd][iLine]

                # start crossing point
                x0 = np.interp(0, pmtV[iTrace, ch, iStart:(
                    iStart + 2)], np.arange(iStart, iStart + 2))
                # end crossing point
                x1 = np.interp(0, pmtV[iTrace, ch, (iEnd - 1):(iEnd + 1)], np.arange(iEnd - 1, iEnd + 1))
                # pdb.set_trace()
                area = np.trapz(
                    np.hstack((0, pmtV[iTrace, ch, (iStart + 1):iEnd], 0)),
                    np.hstack((x0, np.arange(iStart + 1, iEnd), x1)))
                iPeak = iStart + np.ma.argmin(pmtV[iTrace, ch, iStart:iEnd])
                height = pmtV[iTrace, ch, iPeak]
                # pdb.set_trace()
                # Saturated ADC's
                nSatADC = np.sum(np.logical_or(ev['PMTtraces']['traces'][iTrace, ch, iStart:iEnd] < -127,
                                               (ev['PMTtraces']['traces'][iTrace, ch, iStart:iEnd] > 126)))

                out['nPulse'][(iLine - nPulse+1):(iLine+1)] = nPulse
                out['iPulse'][iLine] = iPulse
                out['nSatADC'][iLine] = nSatADC
                out['pulse_area'][iLine] = area
                out['pulse_height'][iLine] = height
                out['pulse_ipeak'][iLine] = iPeak
                out['pulse_istart'][iLine] = iStart
                out['pulse_iend'][iLine] = iEnd
            else:
                out['nPulse'][iLine] = 0
                out['iPulse'][iLine] = 0
                out['nSatADC'][iLine] = 0
                out['pulse_area'][iLine] = 0
                out['pulse_height'][iLine] = 0
                out['pulse_istart'][iLine] = 0
                out['pulse_iend'][iLine] = 0
                out['pulse_ipeak'][iLine] = 0

        return out
    except Exception:
        return default_output
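The peak bookkeeping above hinges on one trick: cast the over-threshold mask to int and take np.diff along the sample axis; prepending the first mask value gives a +1 at each run start, and appending the negated last value gives a -1 at each run end. A minimal 1-D sketch of that trick (the trace and threshold below are made up for illustration):

import numpy as np

trace = np.array([0.1, -0.5, -0.9, -0.4, 0.2, 0.0, -0.7, -0.6, 0.3])
mask = (trace < -0.3).astype(int)

# pad with the edge values so runs touching either end are still closed
starts = np.nonzero(np.concatenate(([mask[0]], np.diff(mask))) == 1)[0]
ends = np.nonzero(np.concatenate((np.diff(mask), [-mask[-1]])) == -1)[0]
print(starts, ends)  # [1 6] [3 7]: over-threshold runs are samples 1-3 and 6-7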
Example #37
0
def find_bpm_in_range(corrected_autocorrelation,
                      min_bpm,
                      max_bpm,
                      duration,
                      fine_adjust_recursion=3):
    length = len(corrected_autocorrelation)
    first_offset = int(60 / max_bpm / duration * length)
    first_index = length // 2 + first_offset
    last_offset = int(60 / min_bpm / duration * length)
    last_index = length // 2 + last_offset

    interesting_part = corrected_autocorrelation[first_index:last_index]
    n = 1  # odd integer; choosing a different value gives worse results
    dd_interesting_part = np.concatenate(
        [np.zeros(n),
         np.diff(np.diff(interesting_part, n), n),
         np.zeros(n)]) / (n**2)  # dd is the second derivative, zero-padded to the original length

    index_beat_length = argmax(interesting_part) + first_offset
    index_beat_length_dd = argmax(-dd_interesting_part) + first_offset
    bpm = length / index_beat_length * 60 / duration
    bpm_dd = length / index_beat_length_dd * 60 / duration

    def tempos_are_similar(a, b):
        return abs(a - b) <= 1 or abs(2 * a - b) <= 1 or abs(a - 2 * b) <= 1

    if config.DEBUG_BPM_FINDER:
        x_range = range(first_offset, last_offset)
        plt.plot(x_range, interesting_part)
        plt.plot(x_range, -dd_interesting_part)
        x_range = range(last_offset)
        plt.plot(
            x_range,
            corrected_autocorrelation[length // 2:length // 2 + last_offset])
        plt.scatter(
            index_beat_length,
            interesting_part[index_beat_length - first_offset],
            label=str(bpm) + " bpm",
        )
        plt.scatter(
            index_beat_length_dd,
            -dd_interesting_part[index_beat_length_dd - first_offset],
            label=str(bpm_dd) + " bpm",
        )
        plt.legend()
        plt.show()

    if not tempos_are_similar(index_beat_length, index_beat_length_dd):
        # Compare the quality of the findings by comparing the autocorrelation for 2, 3 and 4 beats
        beats = np.array([2, 3, 4])
        scores = np.zeros(len(beats))
        scores_dd = np.zeros(len(beats))
        for i, n in enumerate(beats):
            scores[i] = corrected_autocorrelation[length // 2 +
                                                  n * index_beat_length]
            scores_dd[i] = corrected_autocorrelation[length // 2 +
                                                     n * index_beat_length_dd]

        if sum(scores) > sum(scores_dd):
            rough_bpm = bpm
        else:
            rough_bpm = bpm_dd

        if config.DEBUG_BPM_FINDER:
            print(" Non-trivial rhythm")
            print("  index:", index_beat_length, index_beat_length_dd)
            print("  bpm:", bpm, bpm_dd)

            x_range = range(last_offset * 2)
            plt.plot(
                x_range,
                corrected_autocorrelation[length // 2:length // 2 +
                                          last_offset * 2],
            )
            plt.scatter(
                index_beat_length,
                interesting_part[index_beat_length - first_offset],
                label=str(bpm) + " bpm",
            )
            plt.scatter(
                index_beat_length_dd,
                -dd_interesting_part[index_beat_length_dd - first_offset],
                label=str(bpm_dd) + " bpm",
            )
            plt.scatter(beats * index_beat_length,
                        scores,
                        label=str(bpm) + " bpm beats")
            plt.scatter(
                beats * index_beat_length_dd,
                scores_dd,
                label=str(bpm_dd) + " bpm beats",
            )
            plt.legend()
            plt.show()
    else:
        rough_bpm = bpm
        if config.DEBUG_BPM_FINDER:
            print(" Tempos match: ", bpm, bpm_dd)

    if fine_adjust_recursion and (60 / rough_bpm < duration / 5):
        variation = 0.05  # matches the 0.95 / 1.05 bounds used in the recursive call below
        if config.DEBUG_BPM_FINDER:
            print(
                "",
                fine_adjust_recursion,
                "bpm-Range",
                rough_bpm * 0.5 * (1 - variation),
                rough_bpm * 0.5 * (1 + variation),
            )
        return 2 * find_bpm_in_range(
            corrected_autocorrelation,
            rough_bpm * 0.5 * 0.95,
            rough_bpm * 0.5 * 1.05,
            duration,
            fine_adjust_recursion - 1,
        )
    else:
        return rough_bpm
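One detail worth isolating from the function above is the zero-padded double np.diff, which yields a second derivative of the same length as the input so that argmax indices stay aligned with the autocorrelation. A minimal sketch on a made-up signal (the names here are illustrative, not from the original):

import numpy as np

signal = np.sin(np.linspace(0, 4 * np.pi, 200))
n = 1  # difference order, as in the snippet above
# second difference, padded with n zeros on each side to keep the length
second_derivative = np.concatenate(
    [np.zeros(n), np.diff(np.diff(signal, n), n), np.zeros(n)]) / (n ** 2)
assert len(second_derivative) == len(signal)
# minima of the second difference roughly line up with the sharpest peaks
print(np.argmin(second_derivative), np.argmax(signal))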
Example #38
0
def _calc_undistor_intercept_perspective(list_hor_lines, list_ver_lines,
                                         equal_dist=True, scale="mean",
                                         optimizing=True):
    """
    Calculate the intercepts of undistorted lines from perspective distortion.

    Parameters
    ----------
    list_hor_lines : list of 2D-arrays
        List of the (y,x)-coordinates of points on each horizontal line.
    list_ver_lines : list of 2D-arrays
        List of the (y,x)-coordinates of points on each vertical line.
    equal_dist : bool
        Use the condition that lines are equidistant if True.
    scale : {'mean', 'median', 'min', 'max'}
        Scale option for the undistorted grid.
    optimizing : bool
        Apply optimization for finding line-distance if True.

    Returns
    -------
    u_intercept_hor : array_like
        1D array. List of undistorted intercepts of the horizontal lines.
    u_intercept_ver : array_like
        1D array. List of undistorted intercepts of the vertical lines.
    """
    list_coef_hor, list_coef_ver = _generate_linear_coef(list_hor_lines,
                                                         list_ver_lines)
    num_hline, num_vline = len(list_hor_lines), len(list_ver_lines)
    pos_hor, pos_ver = num_hline // 2, num_vline // 2
    num_use = min(num_hline // 2 - 1, num_vline // 2 - 1)
    (posh1, posh2) = (max(0, pos_hor - num_use),
                      min(num_hline, pos_hor + num_use + 1))
    (posv1, posv2) = (max(0, pos_ver - num_use),
                      min(num_vline, pos_ver + num_use + 1))
    if scale == "max":
        dist_hor = np.max(np.abs(np.diff(list_coef_hor[posh1: posh2, 1])))
        dist_ver = np.max(np.abs(np.diff(list_coef_ver[posv1: posv2, 1])))
    elif scale == "min":
        dist_hor = np.min(np.abs(np.diff(list_coef_hor[posh1: posh2, 1])))
        dist_ver = np.min(np.abs(np.diff(list_coef_ver[posv1: posv2, 1])))
    elif scale == "median":
        dist_hor = np.median(np.abs(np.diff(list_coef_hor[posh1: posh2, 1])))
        dist_ver = np.median(np.abs(np.diff(list_coef_ver[posv1: posv2, 1])))
    else:
        dist_hor = np.mean(np.abs(np.diff(list_coef_hor[posh1: posh2, 1])))
        dist_ver = np.mean(np.abs(np.diff(list_coef_ver[posv1: posv2, 1])))
    if optimizing is True:
        dist_hor = _optimize_intercept_perspective(dist_hor, pos_hor,
                                                   list_coef_hor[:, 1])
        dist_ver = _optimize_intercept_perspective(dist_ver, pos_ver,
                                                   list_coef_ver[:, 1])
    if equal_dist is True:
        if scale == "max":
            dist = max(dist_hor, dist_ver)
        elif scale == "min":
            dist = min(dist_hor, dist_ver)
        else:
            dist = (dist_hor + dist_ver) * 0.5
        dist_hor = dist_ver = dist
    u_intercept_hor = np.zeros(num_hline, dtype=np.float32)
    u_intercept_ver = np.zeros(num_vline, dtype=np.float32)
    for i in range(num_hline):
        dist = (i - pos_hor) * dist_hor
        u_intercept_hor[i] = dist + list_coef_hor[pos_hor, 1]
    for i in range(num_vline):
        dist = (i - pos_ver) * dist_ver
        u_intercept_ver[i] = dist + list_coef_ver[pos_ver, 1]
    return u_intercept_hor, u_intercept_ver
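The scale handling above reduces to aggregating np.diff of the intercept column of the fitted line coefficients and then laying the lines out at equal spacing around the central one. A self-contained sketch of that reduction, with made-up intercepts standing in for list_coef_hor[:, 1]:

import numpy as np

intercepts = np.array([10.2, 30.1, 49.8, 70.3, 90.0])  # hypothetical fitted intercepts
spacings = np.abs(np.diff(intercepts))

scale = "mean"  # one of 'mean', 'median', 'min', 'max'
dist = {"mean": np.mean, "median": np.median,
        "min": np.min, "max": np.max}[scale](spacings)

pos = len(intercepts) // 2  # keep the central line fixed
u_intercepts = intercepts[pos] + (np.arange(len(intercepts)) - pos) * dist
print(dist, u_intercepts)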
def plot_distribution(data, fun_name, label_name, n_bins, run, 
                      discrete = False, min_bin_width = 0, 
                      fig_swept = None, run_label = 'PDF', color = u'b',
                      dataXLim = None, dataYLim = None):

    mean_data = np.mean(data)
    std_data = np.std(data)

    # Plot raw data
    fig0 = plt.figure(figsize=(6,5))

    if discrete:
        # discrete bin numbers
        d = max(min(np.diff(np.unique(np.asarray(data)))), min_bin_width)
        left_of_first_bin = min(data) - float(d)/2
        right_of_last_bin = max(data) + float(d)/2
        bins = np.arange(left_of_first_bin, right_of_last_bin + d, d)

        plt.hist(data, bins, alpha=0.5, density=True)
    else:
        plt.hist(data, bins = n_bins, alpha=0.5, density=True)

    ax = fig0.gca()

    # Update plots
    ax.set_ylim(ax.get_ylim())
    ax.set_xlabel(label_name)
    ax.set_ylabel('Frequency')

    # Plot for comparison
    fig1 = plt.figure(figsize=(6,5))

    if discrete:
        # discrete bin numbers
        plt.hist(data, bins, alpha=0.5, density=True)
    else:
        plt.hist(data, bins = n_bins, alpha=0.5, density=True)
    
    ax = fig1.gca()

    # Update plots
    ax.set_ylim(ax.get_ylim())
    ax.set_xlabel(label_name)
    ax.set_ylabel('Frequency')

    # Display
    if fig_swept is None:
        fig2 = plt.figure(figsize=(6,5))
    else:
        fig2 = fig_swept
    
    ax2 = fig2.gca()

    if discrete:
        data_bins = bins
    else:
        data_bins = n_bins

    best_fit_name, best_fit_params, best_10_fits = best_fit_distribution(data, data_bins, ax)

    best_dist = getattr(st, best_fit_name)
    print('Best fit: %s' %(best_fit_name.upper()) )
    # Make PDF with best params 
    pdf = make_pdf(best_dist, best_fit_params)
    pdf.plot(lw=2, color = color, label=run_label, legend=True, ax=ax2)

    param_names = (best_dist.shapes + ', loc, scale').split(', ') if best_dist.shapes else ['loc', 'scale']
    param_str = ', '.join(['{}={:0.2f}'.format(k,v) for k,v in zip(param_names, best_fit_params)])
    dist_str = '{}({})'.format(best_fit_name, param_str)

    if discrete:
        # discrete bin numbers
        ax2.hist(data, bins, color = color, alpha=0.5, label = 'data', density=True)
    else:
        ax2.hist(data, bins = n_bins, color = color, alpha=0.5, label = 'data', density=True)
    
    # Save plot limits
    if dataYLim is None and dataXLim is None:
        dataYLim = ax2.get_ylim()
        dataXLim = ax2.get_xlim()
    else:
        # Update plots
        ax2.set_xlim(dataXLim)
        ax2.set_ylim(dataYLim)

    ax2.tick_params(axis='both', which='major', labelsize=14) 
    ax2.set_xlabel(label_name, fontsize=14)
    ax2.set_ylabel('Probability density', fontsize=14)

    fig0.savefig('data/RAW_%s_r%i.pdf' %(fun_name,run), 
        format='pdf', dpi=100,bbox_inches='tight')
    
    if fig_swept is None:
        fig2.savefig('data/PDF_%s_r%i.pdf' %(fun_name,run), 
                format='pdf', dpi=100,bbox_inches='tight')

    if fig_swept is None:    
        plt.close('all')
    else:
        plt.close(fig0)
        plt.close(fig1)
    
    return dataXLim, dataYLim, mean_data, std_data
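For the discrete branch above, the bin edges are chosen so that every bin is centred on an observed value, with a width equal to the smallest gap between distinct values (floored at min_bin_width). A small sketch of just that edge construction on made-up data:

import numpy as np

data = np.array([1, 1, 2, 2, 2, 4, 5, 5, 7])
min_bin_width = 0

d = max(np.min(np.diff(np.unique(data))), min_bin_width)  # smallest spacing between distinct values
left_of_first_bin = data.min() - d / 2.0
right_of_last_bin = data.max() + d / 2.0
bins = np.arange(left_of_first_bin, right_of_last_bin + d, d)
print(bins)  # edges centred on the observed integers 1..7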
def set_piano_roll_to_instrument(piano_roll,
                                 instrument,
                                 velocity=100,
                                 tempo=120.0,
                                 beat_resolution=16):
    # Calculate time per pixel
    tpp = 60.0 / tempo / float(beat_resolution)
    threshold = 60.0 / tempo / 4
    phrase_end_time = 60.0 / tempo * 4 * piano_roll.shape[0]
    # Create piano_roll_search that captures note onsets and offsets
    piano_roll = piano_roll.reshape(
        (piano_roll.shape[0] * piano_roll.shape[1], piano_roll.shape[2]))
    piano_roll_diff = np.concatenate((np.zeros(
        (1, 128), dtype=int), piano_roll, np.zeros((1, 128), dtype=int)))
    piano_roll_search = np.diff(piano_roll_diff.astype(int), axis=0)
    # Iterate through all possible(128) pitches

    for note_num in range(128):
        # Search for notes
        start_idx = (piano_roll_search[:, note_num] > 0).nonzero()
        start_time = list(tpp * (start_idx[0].astype(float)))
        # print('start_time:', start_time)
        # print(len(start_time))
        end_idx = (piano_roll_search[:, note_num] < 0).nonzero()
        end_time = list(tpp * (end_idx[0].astype(float)))
        # print('end_time:', end_time)
        # print(len(end_time))
        duration = [pair[1] - pair[0] for pair in zip(start_time, end_time)]
        # print('duration each note:', duration)
        # print(len(duration))

        temp_start_time = [i for i in start_time]
        temp_end_time = [i for i in end_time]

        for i in range(len(start_time)):
            # print(start_time)
            if start_time[i] in temp_start_time and i != len(start_time) - 1:
                # print('i and start_time:', i, start_time[i])
                t = []
                current_idx = temp_start_time.index(start_time[i])
                for j in range(current_idx + 1, len(temp_start_time)):
                    # print(j, temp_start_time[j])
                    if temp_start_time[
                            j] < start_time[i] + threshold and temp_end_time[
                                j] <= start_time[i] + threshold:
                        # print('popped start time:', temp_start_time[j])
                        t.append(j)
                        # print('popped temp_start_time:', t)
                for _ in t:
                    temp_start_time.pop(t[0])
                    temp_end_time.pop(t[0])
                # print('popped temp_start_time:', temp_start_time)

        start_time = temp_start_time
        # print('After checking, start_time:', start_time)
        # print(len(start_time))
        end_time = temp_end_time
        # print('After checking, end_time:', end_time)
        # print(len(end_time))
        duration = [pair[1] - pair[0] for pair in zip(start_time, end_time)]
        # print('After checking, duration each note:', duration)
        # print(len(duration))

        if len(end_time) < len(start_time):
            d = len(start_time) - len(end_time)
            start_time = start_time[:-d]
        # Iterate through all the searched notes
        for idx in range(len(start_time)):
            if duration[idx] >= threshold:
                # Create an Note object with corresponding note number, start time and end time
                note = pretty_midi.Note(velocity=velocity,
                                        pitch=note_num,
                                        start=start_time[idx],
                                        end=end_time[idx])
                # Add the note to the Instrument object
                instrument.notes.append(note)
            else:
                if start_time[idx] + threshold <= phrase_end_time:
                    # Create an Note object with corresponding note number, start time and end time
                    note = pretty_midi.Note(velocity=velocity,
                                            pitch=note_num,
                                            start=start_time[idx],
                                            end=start_time[idx] + threshold)
                else:
                    # Create an Note object with corresponding note number, start time and end time
                    note = pretty_midi.Note(velocity=velocity,
                                            pitch=note_num,
                                            start=start_time[idx],
                                            end=phrase_end_time)
                # Add the note to the Instrument object
                instrument.notes.append(note)
    # Sort the notes by their start time
    instrument.notes.sort(key=lambda note: note.start)
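The note search above pads the binary piano roll with a row of zeros at both ends and takes np.diff along time, so a +1 marks a note-on and a -1 a note-off even for notes running into the clip boundaries. A minimal single-pitch sketch of that step (the toy roll and tempo settings are illustrative):

import numpy as np

tempo = 120.0
beat_resolution = 16
tpp = 60.0 / tempo / float(beat_resolution)  # seconds per time step

roll = np.array([0, 1, 1, 1, 0, 0, 1, 1])  # toy roll for one pitch
padded = np.concatenate(([0], roll, [0]))
search = np.diff(padded)

onset_idx = np.nonzero(search > 0)[0]   # steps where the note turns on
offset_idx = np.nonzero(search < 0)[0]  # first steps after the note ends
print(onset_idx * tpp, offset_idx * tpp)  # start/end times in seconds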
# lambdas?
print('simulated probabilities:', p_e:= np.mean(pass_on, axis=0), 'expected:',
        probabilities, '\ndiff:', p_e - probabilities, end='\n\n')
print('simulated lambdas:', l_e := np.mean(gaps, axis=0), 'expected:',
        lambdas, '\ndiff:', l_e - lambdas, end='\n\n')

# concatenate arrays and remove unwanted
supervisor = np.sort(np.concatenate(times)[np.concatenate(pass_on)])

# remove any past the last entry of the shortest simulation (to ensure all
# streams run for the same amount of time)
supervisor = supervisor[supervisor < np.min(times[-1,:])]

# print the final estimate
print('the mean time between customers for the supervisor was',
        res := np.diff(supervisor).mean(), '\nThis is accurate to'
        f' {np.abs(res - 1155/2648)/1155*2648 * 100:.2f}%.')

# Bonus: save data for visualization
import json
from pathlib import Path  # Path is used below when building save_path

data_size = 500  # number of samples to save
maxtime = supervisor[data_size]

save_path = (Path(__file__).parent / 'data' /
        'simple_probability_question_dat.json')

data_export = [
    {
        'probability': p,
        'lambda': l,
Example #42
0
    def resample(self, samplingTime=0.1, masterChannel=None):
        """ Resamples all data groups into one data group having defined
        sampling interval or sharing same master channel

        Parameters
        ----------------
        samplingTime : float
            resampling interval
        **or**
        masterChannel : str
            master channel name used for all channels

        Notes
        --------
        1. resampling is relatively safe for mdf3 as it contains only time series.
        However, mdf4 can also contain distance, angle, etc. It may not make sense
        to apply one resampling to several data groups that do not share the same kind
        of master channel (e.g. time resampling applied to distance or angle data groups).
        If several kinds of data groups are used, it is better to resample with pandas.

        2. resampling will convert all your channels, so be careful with big files
        and memory consumption
        """
        # must make sure all channels are converted
        self.convertAllChannel()
        # resample all channels to one sampling time vector
        if len(list(self.masterChannelList.keys())) > 1:  # Not yet resampled
            channelNames = list(self.keys())
            minTime, maxTime = [], []  # separate lists of group start and end times
            if masterChannel is None:  # create master channel if not proposed
                masterChannelName = 'master'
                self[masterChannelName] = {}
                unit = ''
                masterType = 1  # time by default

                for master in list(self.masterChannelList.keys()):
                    masterData = self.getChannelData(master)
                    if master in self and len(masterData) > 5:  # consider groups having minimum size
                        minTime.append(masterData[0])
                        maxTime.append(masterData[len(masterData) - 1])
                        if len(self.getChannelUnit(master)) > 1:
                            unit = self.getChannelUnit(master)
                            masterType = self[master]['masterType']
                self[masterChannelName]['data'] = arange(min(minTime), max(maxTime), samplingTime)
                self[masterChannelName]['unit'] = unit
                self[masterChannelName]['description'] = 'Unique master channel'
                self[masterChannelName]['masterType'] = masterType
            else:
                masterChannelName = masterChannel

            # Interpolate channels
            timevect = []
            for Name in channelNames:
                try:
                    if Name not in list(self.masterChannelList.keys()):  # not a master channel
                        timevect = self.getChannelData(self[Name]['master'])
                        if not self.getChannelData(Name).dtype.kind in ('S', 'U'):  # if channel not array of string
                            self[Name]['data'] = interp(self.getChannelData(masterChannelName), timevect, self.getChannelData(Name))
                            if masterChannelName in self[Name]:
                                del self[Name][masterChannelName]
                        else:  # can not interpolate strings, remove channel containing string
                            self.masterChannelList[self[Name]['master']].remove(Name)
                            self.pop(Name)
                except:
                    if len(timevect) != len(self.getChannelData(Name)):
                        print((Name + ' and master channel ' + self[Name][masterChannelName] + ' do not have same length'))
                    elif not all(diff(timevect) > 0):
                        print((Name + ' has non regularly increasing master channel ' + self[Name][masterChannelName]))
            # remove time channels in masterChannelList
            for ind in list(self.masterChannelList.keys()):
                del self[ind]
            self.masterChannelList = {}  # empty dict
            self.masterChannelList[masterChannelName] = list(self.keys())
        else:
            print('Already resampled')
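The core of the resampling above is building one common master vector with arange(min, max, samplingTime) and projecting every non-master channel onto it with interp. A stripped-down sketch of that idea with two hypothetical channels (none of these names come from the mdf class):

import numpy as np

samplingTime = 0.1
t_a = np.array([0.0, 0.13, 0.31, 0.52, 0.70])   # channel A, irregular time base
a = np.array([1.0, 1.2, 1.1, 1.5, 1.4])
t_b = np.array([0.05, 0.20, 0.45, 0.66])        # channel B, a different time base
b = np.array([10.0, 11.0, 9.5, 10.5])

master = np.arange(min(t_a[0], t_b[0]), max(t_a[-1], t_b[-1]), samplingTime)
a_resampled = np.interp(master, t_a, a)
b_resampled = np.interp(master, t_b, b)
print(master.shape, a_resampled.shape, b_resampled.shape)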
Example #43
0
    def estimate(self, ecg_sig, time_vector):
        """ Detection of R-peaks

        Args:
            time_vector (np.array): One-dimensional time vector
            ecg_sig (np.array): One-dimensional ECG signal

        Returns:
            List of detected peaks indices
        """
        assert len(ecg_sig.shape) == 1, "Signal must be a vector"

        # Preprocessing
        ecg_filtered = self.bp_filter.apply(ecg_sig).squeeze()
        ecg_sig = np.concatenate((self.prev_samples, ecg_sig))
        sig_diff = np.diff(ecg_filtered, 1)
        sig_abs_diff = np.abs(sig_diff)
        sig_smoothed = signal.convolve(np.concatenate((self.prev_diff_samples, sig_abs_diff)),
                                       self.hamming_window, mode='same', method='auto')[:len(ecg_filtered)]
        time_vector = np.concatenate((self.prev_times, time_vector))
        self.prev_samples = ecg_sig[-len(self.hamming_window):]
        self.prev_diff_samples = sig_abs_diff[-len(self.hamming_window):]
        self.prev_times = time_vector[-len(self.hamming_window):]
        peaks_idx_list, _ = signal.find_peaks(sig_smoothed)
        peaks_val_list = sig_smoothed[peaks_idx_list]
        peaks_time_list = time_vector[peaks_idx_list]
        detected_peaks_idx = []
        detected_peaks_time = []
        detected_peaks_val = []

        # Decision rules by Hamilton 2002 [1]
        for peak_idx, peak_val, peak_time in zip(peaks_idx_list, peaks_val_list, peaks_time_list):
            # 1- Ignore all peaks that precede or follow larger peaks by less than 200 ms.
            peaks_in_lim = [a and b and c for a, b, c in
                            zip(((peak_idx - self.ns200ms) < peaks_idx_list),
                                ((peak_idx + self.ns200ms) > peaks_idx_list),
                                (peak_idx != peaks_idx_list)
                                )
                            ]

            if True in (peak_val < peaks_val_list[peaks_in_lim]):
                continue

            # 2- If a peak occurs, check to see whether the ECG signal contained both positive and negative slopes.
            # TODO: Find a better way of checking this.
            # if peak_idx == 0:
            #     continue
            # elif peak_idx < 10:
            #     n_sample = peak_idx
            # else:
            #     n_sample = 10
            # The current n_sample leads to missing some R-peaks as it may have wider/thinner width.
            # slopes = np.diff(ecg_sig[peak_idx-n_sample:peak_idx])
            # if slopes[0] * slopes[-1] >= 0:
            #     continue

            # check missing peak
            self.check_missing_peak(peak_time, peak_idx, detected_peaks_idx, ecg_sig, time_vector)

            # 3- If the peak occurred within 360 ms of a previous detection and had a maximum slope less than half the
            # maximum slope of the previous detection assume it is a T-wave
            if (peak_time - self.r_peaks_buffer[-1][1]) < .36:
                if peak_idx < 15:
                    st_idx = 0
                else:
                    st_idx = peak_idx - 15
                if (peak_idx + 15) > (len(ecg_sig) - 1):
                    end_idx = len(ecg_sig) - 1
                else:
                    end_idx = peak_idx + 15

                curr_max_slope = np.abs(np.diff(ecg_sig[st_idx:end_idx])).max()
                if curr_max_slope < (.5 * self.prev_max_slope):
                    continue

            # 4- If the peak is larger than the detection threshold call it a QRS complex, otherwise call it noise.
            if peak_idx < 25:
                st_idx = 0
            else:
                st_idx = peak_idx - 25
            pval = peak_val  # ecg_sig[st_idx:peak_idx].max()

            if pval > self.decision_threshold:
                temp_idx = st_idx + np.argmax(ecg_sig[st_idx:peak_idx + 1])
                temp_time = time_vector[temp_idx]

                detected_peaks_idx.append(temp_idx)
                detected_peaks_val.append(ecg_sig[st_idx:peak_idx + 1].max())
                detected_peaks_time.append(temp_time)
                self._push_r_peak(pval, temp_time)

                if peak_idx < 25:
                    st_idx = 0
                else:
                    st_idx = peak_idx - 25
                self.prev_max_slope = np.abs(np.diff(ecg_sig[st_idx:peak_idx + 25])).max()
            else:
                self._push_noise_peak(pval, peak_idx, peak_time)

            # TODO: Check lead inversion!

        # Check for two close peaks
        occurrence_time = [item[1] for item in self.r_peaks_buffer]
        close_idx = (np.diff(np.array(occurrence_time), 1) < .05)
        if (True in close_idx) and len(detected_peaks_idx) > 0:
            del detected_peaks_time[0]
            del detected_peaks_val[0]

        return detected_peaks_time, detected_peaks_val
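Rule 1 above (drop any peak that lies within 200 ms of a larger peak) can be isolated into a few lines. A rough sketch on made-up peak indices and amplitudes, assuming a sampling rate from which ns200ms is derived:

import numpy as np

fs = 250                 # assumed sampling rate in Hz
ns200ms = int(0.2 * fs)  # number of samples in 200 ms
peaks_idx = np.array([100, 130, 400, 430, 800])
peaks_val = np.array([0.9, 0.4, 0.3, 0.8, 1.0])

kept = []
for idx, val in zip(peaks_idx, peaks_val):
    # neighbours strictly within +-200 ms, excluding the peak itself
    near = (np.abs(peaks_idx - idx) < ns200ms) & (peaks_idx != idx)
    if np.any(peaks_val[near] > val):
        continue  # a larger peak is too close; discard this one
    kept.append(idx)
print(kept)  # [100, 430, 800]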
Example #44
0
FOLDER = 'Results_PIV' + os.sep + 'Open_PIV_results_Test_1'
n_t = 10  # number of steps.

# Read file number 1 (check the string construction)
Name = FOLDER + os.sep + 'field_A%03d' % 1 + '.txt'  # Check it out: print(Name)
# Read data from a file
DATA = np.genfromtxt(Name)  # Here we have the four columns
nxny = DATA.shape[
    0]  # number of grid points; doubled at the end so that n_s = 2 * n_x * n_y
n_s = 2 * nxny
## 1. Reconstruct Mesh from file
X_S = DATA[:, 0]
Y_S = DATA[:, 1]
# Infer n_x/n_y from the forward differences of Y
GRAD_Y = np.diff(Y_S)
# Depending on how the data were written, one coordinate varies within a row
# while the other only changes at row boundaries.
IND_X = np.where(GRAD_Y != 0)
DAT = IND_X[0]
n_y = DAT[0] + 1
# Reshaping the grid from the data
n_x = (nxny // (n_y))  # Careful with integer vs. float division!
Xg = (X_S.reshape((n_x, n_y)))
Yg = (Y_S.reshape((n_x, n_y)))  # This is now the mesh! 60x114.
# Reshape also the velocity components
V_X = DATA[:, 2]  # U component
V_Y = DATA[:, 3]  # V component
# Put both components as fields in the grid
Mod = np.sqrt(V_X**2 + V_Y**2)
Vxg = (V_X.reshape((n_x, n_y)))
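The mesh reconstruction above infers the number of points per row from the first index at which the forward difference of Y_S is non-zero. A compact sketch with a small synthetic 3x4 grid written out row by row (the shapes are illustrative only):

import numpy as np

Xg_true, Yg_true = np.meshgrid(np.arange(4), np.arange(3))  # 3 rows of 4 points
X_S = Xg_true.ravel().astype(float)
Y_S = Yg_true.ravel().astype(float)

GRAD_Y = np.diff(Y_S)                   # forward differences of Y
n_y = np.where(GRAD_Y != 0)[0][0] + 1   # points per row = index of first jump + 1
n_x = X_S.size // n_y                   # number of rows
Xg = X_S.reshape((n_x, n_y))
Yg = Y_S.reshape((n_x, n_y))
print(n_x, n_y)  # 3 4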
Example #45
0
replic = 3
steps = 6
for i in range(mcount):
    locals()['Method' + str(i+1)] = 0 * np.ones([replic, steps])
    locals()['Err' + str(i+1)] = 0 * np.ones([replic, steps])
    locals()['sErr' + str(i+1)] = 0 * np.ones([replic, steps])

for g in range(replic):
    y = np.array(MM.iloc[g][:])
    y = np.array([x for x in y if not math.isnan(x)])[:-steps]
    for j in range(mcount):
        locals()['Method' + str(j+1)][g][:] = locals()['Forecast' + str(mlist[j])](y, steps)

    for i in range(mcount):
        locals()['Err'+str(i+1)][g][:] = np.array(MM.iloc[g][-steps:])-locals()['Method'+str(i+1)][g][:]
        locals()['sErr'+str(i+1)][g][:] = locals()['Err'+str(i+1)][g][:] / np.mean(np.abs(np.diff(y)))

ResultsMAPE = 0*np.ones([steps, mcount*2])
for s in range(steps):
    sMAPE = 0*np.ones([replic, mcount])
    for i in range(replic):
        temp = np.array(MM.iloc[i][-steps:])
        for j in range(mcount):
            sMAPE[i][j] = np.mean(200 * abs(locals()['Err'+str(j+1)][i][0:s + 1]) / (abs(locals()['Method'+str(j+1)][i][0:s + 1]) + abs(temp[0:s + 1])))

    for t in range(mcount):
        ResultsMAPE[s][t] = np.mean(sMAPE[:, t])
        ResultsMAPE[s][t+mcount] = np.median(sMAPE[:, t])

ResultsMASE = 0*np.ones([steps, mcount*2])
Example #46
0
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters
    ----------
    sample : array_like
        The data to be histogrammed. It must be an (N,D) array or data
        that can be converted to such. The rows of the resulting array
        are the coordinates of points in a D dimensional polytope.
    bins : sequence or int, optional
        The bin specification:

        * A sequence of arrays describing the bin edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).

    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    normed : bool, optional
        If False, returns the number of samples in each bin. If True,
        returns the bin density ``bin_count / sample_count / bin_volume``.
    weights : array_like (N,), optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if normed is True. If normed is False,
        the values of the returned histogram are equal to the sum of the
        weights belonging to the samples falling into each bin.
        Weights can also be a list of (weight arrays or None), in which case
        a list of histograms is returned as H.

    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See normed and weights
        for the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.

    See Also
    --------
    histogram: 1-D histogram
    histogram2d: 2-D histogram

    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)

    """

    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = atleast_2d(sample).T
        N, D = sample.shape
    
    if weights is None:
        W = 0  # no weights; use 0 so the integer comparisons below are valid
    else:    
        try:
            # Weights is a 1D-array
            weights.shape
            W = -1
        except (AttributeError, ValueError):
            # Weights is a list of 1D-arrays or None's
            W = len(weights)

    if W == -1 and weights.ndim != 1:
        raise AttributeError('Weights must be a 1D-array, None, or a list of both')

    nbin = empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        if W == -1:
            weights = asarray(weights)
            assert weights.shape == (N,)
        else:
            for i in arange(W):
                if weights[i] is not None:
                    weights[i] = asarray(weights[i])
                    assert weights[i].shape == (N,)

    try:
        M = len(bins)
        if M != D:
            raise AttributeError(
                'The dimension of bins must be equal to the dimension of the '
                'sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        # Handle empty input. Range can't be determined in that case, use 0-1.
        if N == 0:
            smin = zeros(D)
            smax = ones(D)
        else:
            smin = atleast_1d(array(sample.min(0), float))
            smax = atleast_1d(array(sample.max(0), float))
    else:
        smin = zeros(D)
        smax = zeros(D)
        for i in arange(D):
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in arange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # Create edge arrays
    for i in arange(D):
        if isscalar(bins[i]):
            if bins[i] < 1:
                raise ValueError(
                    "Element at index %s in `bins` should be a positive "
                    "integer." % i)
            nbin[i] = bins[i] + 2  # +2 for outlier bins
            edges[i] = linspace(smin[i], smax[i], nbin[i]-1)
        else:
            edges[i] = asarray(bins[i], float)
            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
        dedges[i] = diff(edges[i])
        if np.any(np.asarray(dedges[i]) <= 0):
            raise ValueError(
                "Found bin edge of size <= 0. Did you specify `bins` with"
                "non-monotonic sequence?")

    nbin = asarray(nbin)

    # Handle empty input.
    if N == 0:
        if W > 0:
            return [np.zeros(nbin-2) for _ in arange(W)], edges
        else:
            return np.zeros(nbin-2), edges

    # Compute the bin number each sample falls into.
    Ncount = {}
    for i in arange(D):
        # searchsorted is faster for many bins
        Ncount[i] = searchsorted(edges[i], sample[:, i], "right")
        #Ncount[i] = digitize(sample[:, i], edges[i])

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right
    # edge to be counted in the last bin, and not as an outlier.
    for i in arange(D):
        # Rounding precision
        mindiff = dedges[i].min()
        if not np.isinf(mindiff):
            decimal = int(-log10(mindiff)) + 6
            # Find which points are on the rightmost edge.
            not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
            on_edge = (around(sample[:, i], decimal) == around(edges[i][-1], decimal))
            # Shift these points one bin to the left.
            Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1

    # Compute the sample indices in the flattened histogram matrix.
    ni = nbin.argsort()
    xy = zeros(N, int)
    for i in arange(0, D-1):
        xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
    xy += Ncount[ni[-1]]

    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    if len(xy) == 0:
        if W > 0:
            return [np.zeros(nbin-2) for _ in arange(W)], edges
        else:
            return zeros(nbin-2, int), edges

    # Flattened histogram matrix (1D)
    # Reshape is used so that overlarge arrays
    # will raise an error.
    Wd = W if W > 0 else 1
    hists = [zeros(nbin, float).reshape(-1) for _ in arange(Wd)]
    for histidx, hist in enumerate(hists):
        weights_ = weights[histidx] if W > 0 else weights
        flatcount = bincount(xy, weights_)
        a = arange(len(flatcount))
        hist[a] = flatcount
    
        # Shape into a proper matrix
        hist = hist.reshape(sort(nbin))
        ni = nbin.argsort()
        for i in arange(nbin.size):
            j = ni.argsort()[i]
            hist = hist.swapaxes(i, j)
            ni[i], ni[j] = ni[j], ni[i]
    
        # Remove outliers (indices 0 and -1 for each dimension).
        core = tuple(D*[slice(1, -1)])
        hist = hist[core]
    
        # Normalize if normed is True
        if normed:
            s = hist.sum()
            for i in arange(D):
                shape = ones(D, int)
                shape[i] = nbin[i] - 2
                hist = hist / dedges[i].reshape(shape)
            hist /= s
    
        if (hist.shape != nbin - 2).any():
            raise RuntimeError(
                "Internal Shape Error: hist.shape != nbin-2 -> " + str(hist.shape) + " != " + str(nbin-2))
        
        hists[histidx] = hist
    
    if W in [0, -1]:  # no weights or a single weights array
        return hists[0], edges
    else:
        return hists, edges
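Unlike the stock NumPy histogramdd, the variant above also accepts a list of weight arrays (or None entries) and returns one histogram per entry. A hypothetical call illustrating that behaviour, assuming the function defined above is in scope:

import numpy as np

r = np.random.randn(100, 3)
w1 = np.ones(100)
w2 = np.linspace(0.0, 1.0, 100)
# a list of weights (None means plain counting) yields a list of histograms
hists, edges = histogramdd(r, bins=(5, 8, 4), weights=[w1, None, w2])
print(len(hists), hists[0].shape)  # 3 (5, 8, 4)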
Example #47
0
def assert_strictly_decreasing(array):
    np.testing.assert_array_less(np.diff(array), 0.)
Example #48
0
    def average_rr_interval(self):
        if len(self.r_peaks_buffer) < 7:
            return 1.
        return np.mean(np.diff([item[1] for item in self.r_peaks_buffer]))
Example #49
0
def normalize_time(dataseries, timeseries):

	"""
	desc: |
		*New in v0.7.0*

		Creates a new series in which a series of timestamps (`timeseries`) is
		used as the indices for a series of data point (`dataseries`). This is
		useful, for example, if you have a series of measurements and a
		separate series of timestamps, and you want to combine the two.

		The resulting series will generally contain a lot of `nan` values, which
		you can interpolate with `interpolate()`.

		__Example:__

		%--
		python: |
		 from matplotlib import pyplot as plt
		 from datamatrix import DataMatrix, SeriesColumn, series as srs, NAN

		 # Create a DataMatrix with one series column that contains samples
		 # and one series column that contains timestamps.
		 dm = DataMatrix(length=2)
		 dm.samples = SeriesColumn(depth=3)
		 dm.time = SeriesColumn(depth=3)
		 dm.samples[0] = 3, 1, 2
		 dm.time[0]    = 1, 2, 3
		 dm.samples[1] = 1, 3, 2
		 dm.time[1]    = 0, 5, 10
		 # Create a normalized column with samples spread out according to
		 # the timestamps, and also create an interpolate version of this
		 # column for smooth plotting.
		 dm.normalized = srs.normalize_time(
		 	dataseries=dm.samples,
		 	timeseries=dm.time
		 )
		 dm.interpolated = srs.interpolate(dm.normalized)
		 # And plot!
		 plt.clf()
		 plt.plot(dm.normalized.plottable, 'o')
		 plt.plot(dm.interpolated.plottable, ':')
		 plt.xlabel('Time')
		 plt.ylabel('Data')
		 plt.savefig('content/pages/img/series/normalize_time.png')
		--%

		%--
		figure:
		 source: normalize_time.png
		 id: FigNormalizeTime
		--%

	arguments:
		dataseries:
			desc:	A column with datapoints.
			type:	SeriesColumn
		timeseries:
			desc:	A column with timestamps. This should be an increasing list
					of the same depth as `dataseries`. NAN values are allowed,
					but only at the end.
			type:	SeriesColumn

	returns:
		desc:	A new series in which the data points are spread according to
				the timestamps.
		type:	SeriesColumn
	"""

	if (
		not isinstance(dataseries, _SeriesColumn)
		or not isinstance(timeseries, _SeriesColumn)
	):
		raise TypeError(
			'dataseries and timeseries should be SeriesColumn objects'
		)
	if dataseries.dm is not timeseries.dm:
		raise ValueError(
			'dataseries and timeseries should belong to the same DataMatrix'
		)
	if dataseries.depth != timeseries.depth:
		raise ValueError(
			'dataseries and timeseries should have the same depth'
		)
	if max(timeseries.max) < 0 or min(timeseries.min) < 0:
		raise ValueError('timeseries should contain only positive values')
	series = _SeriesColumn(dataseries.dm, depth=int(max(timeseries.max))+1)
	haystack = np.arange(series.depth, dtype=int)
	for row in range(series._seq.shape[0]):
		needle = timeseries._seq[row]
		values = dataseries._seq[row]
		while len(needle) and np.isnan(needle)[-1]:
			needle = needle[:-1]
			values = values[:-1]
		if np.any(np.isnan(needle)):
			raise ValueError(
				'timeseries should not contain NAN values, except at the end'
			)
		if not np.all(np.diff(needle) > 0):
			raise ValueError(
				'timeseries should contain increasing values '
				'(i.e. time should go forward)'
			)
		indices = np.searchsorted(haystack, needle)
		series._seq[row,indices] = values
	return series
Example #50
0

def mode(x, n):
    freq = np.fft.rfft(x)
    freq[:n] = 0
    freq[n+1:] = 0
    return np.fft.irfft(freq)


dns = np.linspace(0, 365, 5000, endpoint=False)
eots = [eot(dn) for dn in dns]

m1 = mode(eots, 1)
m2 = mode(eots, 2)

d1 = np.diff(np.hstack((m1, [m1[0]])))
d2 = np.diff(np.hstack((m2, [m2[0]])))

d1 *= 7.9 / max(np.abs(d1))
d2 *= 20.3 / max(np.abs(d2))

print('Periapsis:', dns[np.argmin(d1)])
print('Apoapsis:', dns[np.argmax(d1)])
print('Vernal equinox:', dns[np.argmax(d2[:2500])])
print('Autumnal equinox:', dns[2500 + np.argmax(d2[2500:])])
print('Summer solstice:', dns[np.argmin(d2[:2500])])
print('Winter solstice:', dns[2500 + np.argmin(d2[2500:])])

data = np.vstack((dns, -d1, -d2, -d1 - d2, [-e for e in eots])).T
df = pd.DataFrame(data=data, columns=['Day number', 'Length due to ellipticity',
                                      'Length due to obliquity of ecliptic',
Example #51
0
def DiffDeviation(seq, base_value):
    diffs = np.diff(seq, n=1)
    diff_deviations = base_value - diffs
    return diff_deviations
Example #52
0
def assert_strictly_increasing(array):
    np.testing.assert_array_less(0., np.diff(array))
Example #53
0
def KRateTrend(seq, base_value):
    diffs = np.diff(seq, n=1)
    k_rate_trend = diffs / base_value
    return k_rate_trend
Example #54
0
    def write(self, outfile):
        ends = {}
        mnemonics = self.mnemonics
        labels_sorted = self.labels_sorted
        label_dict = self.label_dict
        colors = self.colors

        self.write_trackline(outfile)

        for chrom, chromosome in sorted(viewitems(self)):
            for run in chromosome:
                segments = array(run)

                start = segments[0, OFFSET_START]
                end = segments[-1, OFFSET_END]
                ends[chrom] = end

                for label in labels_sorted:
                    label_key = label_dict[label]
                    color = colors.get(label, COLOR_DEFAULT)

                    # find all the rows for this label
                    segments_label_rows = segments[:, OFFSET_LABEL] == label_key

                    # extract just the starts and ends
                    segments_label = segments[segments_label_rows,
                                              OFFSET_START:OFFSET_END+1]

                    # pad end if necessary
                    segments_label_list = [segments_label]
                    if not len(segments_label) or segments_label[-1, OFFSET_END] != end:
                        # must be end-1 to end or UCSC gets cranky.
                        # unfortunately this results in all on at the
                        # right edge of each region
                        segments_label_list.append((end-1, end))

                    # pad beginning if necessary
                    if not len(segments_label) or segments_label[0, OFFSET_START] != start:
                        segments_label_list.insert(0, (start, start+1))

                    segments_label = vstack(segments_label_list)

                    # reverse offset by start
                    segments_label -= start

                    block_count = str(len(segments_label))

                    block_sizes = diff(segments_label).ravel()
                    block_sizes_str = make_csv(block_sizes)

                    block_starts = segments_label[:, 0]
                    block_starts_str = make_csv(block_starts)

                    # this just passes through the label itself if there
                    # are no mnemonics
                    mnemonic = mnemonics[str(label)]

                    row = [chrom, str(start), str(end), mnemonic, BED_SCORE,
                           BED_STRAND, str(start), str(end), color, block_count,
                           block_sizes_str, block_starts_str]

                    print(*row, sep="\t", file=outfile)

        return ends
    def _insert_many(self, i, j, x):
        """Inserts new nonzero at each (i, j) with value x

        Here (i,j) index major and minor respectively.
        i, j and x must be non-empty, 1d arrays.
        Inserts each major group (e.g. all entries per row) at a time.
        Maintains has_sorted_indices property.
        Modifies i, j, x in place.
        """
        order = np.argsort(i, kind='mergesort')  # stable for duplicates
        i = i.take(order, mode='clip')
        j = j.take(order, mode='clip')
        x = x.take(order, mode='clip')

        do_sort = self.has_sorted_indices

        # Update index data type
        idx_dtype = get_index_dtype((self.indices, self.indptr),
                                    maxval=(self.indptr[-1] + x.size))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        i = np.asarray(i, dtype=idx_dtype)
        j = np.asarray(j, dtype=idx_dtype)

        # Collate old and new in chunks by major index
        indices_parts = []
        data_parts = []
        ui, ui_indptr = np.unique(i, return_index=True)
        ui_indptr = np.append(ui_indptr, len(j))
        new_nnzs = np.diff(ui_indptr)
        prev = 0
        for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):
            # old entries
            start = self.indptr[prev]
            stop = self.indptr[ii]
            indices_parts.append(self.indices[start:stop])
            data_parts.append(self.data[start:stop])

            # handle duplicate j: keep last setting
            uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
            if len(uj) == je - js:
                indices_parts.append(j[js:je])
                data_parts.append(x[js:je])
            else:
                indices_parts.append(j[js:je][::-1][uj_indptr])
                data_parts.append(x[js:je][::-1][uj_indptr])
                new_nnzs[c] = len(uj)

            prev = ii

        # remaining old entries
        start = self.indptr[ii]
        indices_parts.append(self.indices[start:])
        data_parts.append(self.data[start:])

        # update attributes
        self.indices = np.concatenate(indices_parts)
        self.data = np.concatenate(data_parts)
        nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)
        nnzs[0] = idx_dtype(0)
        indptr_diff = np.diff(self.indptr)
        indptr_diff[ui] += new_nnzs
        nnzs[1:] = indptr_diff
        self.indptr = np.cumsum(nnzs, out=nnzs)

        if do_sort:
            # TODO: only sort where necessary
            self.has_sorted_indices = False
            self.sort_indices()

        self.check_format(full_check=False)
Example #56
0
def KKTrend(seq):
    return diff(seq, n=2)
Example #57
0
def histogram_mode(node):
    areas = np.diff(node.breaks) * node.densities
    _x = np.argmax(areas)
    mode_value = node.bin_repr_points[_x]
    return mode_value
     alt.append(APRS_data['altitude'] * 3.281)   # m to ft
     lat.append(APRS_data['lat'])
     lng.append(APRS_data['lng'])
     print('Altitude: ' + str(APRS_data['altitude'] * 3.281))
 
 except:
     CycleError = True
     ErrorCode = 1
           
 # Figure out state of balloon
 if CycleError == False:
     try:
         
         if len(alt) > 1:
             gradPrep = np.array(alt)
             grad = np.diff(gradPrep)
             
             if grad[-1] > 0:    # ascent
                 status = 1
             if (grad[-1] < -70):    # if it has fallen more than 70 ft, it is descending
                 status = -1
     except:
         CycleError = True
         ErrorCode = 2
 
 # Some Error Catching
 if CycleError == False:
     try:
         if len(alt) > 1:
             if (max(alt) - min(alt)) > 200:
                 
Example #59
0
import numpy as np

a = np.arange(2, 14).reshape((3, 4))
print(a)

a_max = np.argmax(a)  # return the index of maximum element
a_min = np.argmin(a)  # return the index of the minimum element
print(a_max, '\n', a_min)

# mean value

# sum and difference:
print(np.cumsum(a))  # return a list =[Sum1, Sum2, Sum3, Sum4 ...]
print(np.diff(a))  # first-order differences along the last axis

print(np.nonzero(a))  # indices of the non-zero elements, one array per axis

np.sort(a)  # sort each row in ascending order

print(np.transpose(a))
print(a.T.dot(a))  # a_t * a

print(np.clip(a, 5, 9))  # set a_i<5 to 5, a_i > 9 to 9

print(np.mean(a, axis=0)
      )  # calculate the average of every column, axis = 1 is for every row
plt.figure(2)
plt.plot(wavelength,((Xin).T))

"""
Determination of the number of Components via SVD - NEW
"""
Xnica = Xin
U, Sv, V = np.linalg.svd( Xnica, full_matrices=True)
S = np.zeros(Xnica.shape)
np.fill_diagonal(S,Sv, wrap=True)
N = np.linalg.norm(Xnica,ord=1)
E = np.zeros(Xnica.shape[0])
for nn in range(0,Xnica.shape[0]):
     Rec = U@S[:,0:nn+1]@V[0:nn+1,:]
     E[nn] = np.linalg.norm(Xnica-Rec,ord=1)/N
DE = np.append(-E[0],np.diff(E)) 
nica = np.max([(sum(E>=1e-2)+1),sum(DE<-1e-2)])
print(nica)
plt.figure(3)
plt.plot(Sv)
#nica = len(CIndex)+1

"""
BSS Removal
"""
tic = time.time()

Xd = auxiliary_funs.npass_SGderivative(Xin,1,7,2) #used in pls later
#Xd=Xin

# Compute ICA