Example #1
    def test_x2_surrounds_x1(self):
        """
        x2 range surrounds x1 range
        """
        # old size
        m = 2

        # new size
        n = 3

        # bin edges
        x_old = np.linspace(0., 1., m + 1)
        x_new = np.linspace(-0.1, 1.2, n + 1)

        # some arbitrary distribution
        y_old = 1. + np.sin(x_old[:-1] * np.pi) / np.ediff1d(x_old)

        # rebin
        y_new = rebin.rebin(x_old, y_old, x_new,
                            interp_kind='piecewise_constant')

        # compute answer here to check rebin
        y_old_ave  = y_old / np.ediff1d(x_old)
        y_new_here = [y_old_ave[0] * (x_new[1] - 0.),
                      y_old_ave[0] * (x_old[1]- x_new[1])
                      + y_old_ave[1] * (x_new[2] - x_old[1]),
                      y_old_ave[1] * (x_old[-1] - x_new[-2])]

        assert_allclose(y_new, y_new_here)
        assert_allclose(y_new.sum(), y_old.sum())
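For reference, piecewise-constant rebinning of per-bin totals can be sketched with NumPy alone by interpolating the cumulative bin content onto the new edges; the helper name below is illustrative and not part of the rebin package exercised above.

import numpy as np

def rebin_piecewise_constant_sketch(x_old, y_old, x_new):
    # cumulative content of the old histogram at its bin edges
    cum_old = np.concatenate(([0.0], np.cumsum(y_old)))
    # the cumulative content is piecewise linear between edges, so linear
    # interpolation onto the new edges is exact; new edges outside the old
    # range clamp to the end values and therefore add no content
    cum_new = np.interp(x_new, x_old, cum_old)
    return np.ediff1d(cum_new)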
Example #2
    def turning_angle(self,consecutive=False):
        """ Returns collection of turning angles in the trace (in radians)
            If consecutive=True, only counts angles between consecutive flights
        """
        if self.N_flights < 2:
            return [False]
        else:
            if not consecutive:
                dp = np.array([g for g in self.iter_flights('StartEnd')]) #(pairs of points)
                angs = np.ediff1d(tuple(geom.angle_vect(g[0][:-1],g[1][:-1]) for g in dp))
            else:
                angs = []
                angs_d = []
                for f in self.iter_all():
                    if not isinstance(f,Stop):
                        angs_d.append(geom.angle_vect(f.StartEnd[0][:-1],f.StartEnd[-1][:-1]))
                    else:
                        if len(angs_d)>1:
                            angs.extend(np.ediff1d(angs_d))
                        angs_d = []
                if len(angs_d)>1:
                    angs.extend(np.ediff1d(angs_d))
            for i,a in enumerate(angs):
                if a>np.pi:
                    angs[i] = - (2*np.pi-a)
                if a<-np.pi:
                    angs[i] = 2*np.pi+a
            return np.array(angs)
Example #3
def write_griddata_fits(x0, x1, y, name_or_obj):
    """Write a 2-d grid of data to a FITS file

    The grid coordinates are encoded in the FITS-WCS header keywords.

    Parameters
    ----------
    x0 : numpy.ndarray
        1-d array.
    x1 : numpy.ndarray
        1-d array.
    y : numpy.ndarray
        2-d array of shape (len(x0), len(x1)).
    name_or_obj : str or file-like object
        Filename to write to or open file.
    """

    d0, d1 = np.ediff1d(x0), np.ediff1d(x1)
    if not (np.allclose(d0, d0[0]) and np.allclose(d1, d1[0])):
        raise ValueError('grid must be regularly spaced in both x0 and x1')
    if not (len(x0), len(x1)) == y.shape:
        raise ValueError('length of x0 and x1 do not match shape of y')

    w = wcs.WCS(naxis=2)
    w.wcs.crpix = [1, 1]
    w.wcs.crval = [x1[0], x0[0]]
    w.wcs.cdelt = [d1[0], d0[0]]
    hdu = fits.PrimaryHDU(y, header=w.to_header())
    hdu.writeto(name_or_obj)
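A minimal usage sketch, assuming the enclosing module imports numpy as np and astropy's fits and wcs (as the function body implies); the file name is illustrative.

x0 = np.linspace(0., 1., 5)           # regularly spaced grid along the first axis
x1 = np.linspace(10., 20., 11)        # regularly spaced grid along the second axis
y = np.outer(np.sin(x0), np.cos(x1))  # shape (len(x0), len(x1))
write_griddata_fits(x0, x1, y, 'grid.fits')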
Example #4
def roc(x_vals, y_vals):
    speeds = []
    # x_diff = np.gradient(x_vals)
    x_diff = np.ediff1d(x_vals)
    # y_diff = np.gradient(y_vals)
    y_diff = np.ediff1d(y_vals)
    dy = y_diff / x_diff
    # dy_diff = np.gradient(dy)
    dy_diff = np.ediff1d(dy, to_begin=dy[1]-dy[0])
    d2y = dy_diff / x_diff
    R = np.power(1 + np.square(dy), 1.5) / d2y
    R[np.abs(R) > 32000] = 0
    print(R)
    for i in range(1,len(R)):
        if x_vals[i] - x_vals[i-1] < 0:
            if i == 1:
                R[i-1] = -R[i-1]
            R[i] = -R[i]
    for i in range(0, len(R)-1):
        dist = np.sqrt((x_vals[i+1] - x_vals[i])**2 + (y_vals[i+1] - y_vals[i])**2)
        theta = np.arccos(1 - dist**2 / (2*R[i]**2))
        if np.isnan(theta) or theta == 0:
            speeds.append(dist/time_interval)
        else:
            speeds.append(R[i]*theta / time_interval)
    R = R[:-1]
    return R, speeds
Example #5
    def period(self, line, edge="rising"):
        """
        Returns a dictionary with avg, min, max, and st of period for a line.
        """
        bit = self._line_to_bit(line)

        if edge.lower() == "rising":
            edges = self.get_rising_edges(bit)
        elif edge.lower() == "falling":
            edges = self.get_falling_edges(bit)
        else:
            raise ValueError("Edge must be 'rising' or 'falling'.")

        if len(edges) > 2:

            timebase_freq = self.meta_data['ni_daq']['counter_output_freq']
            periods = np.ediff1d(edges[1:]) / timebase_freq
            avg_period = np.mean(periods)
            max_period = np.max(periods)
            min_period = np.min(periods)
            period_sd = np.std(periods)

        else:
            raise IndexError("Not enough edges for period: %i" % len(edges))

        return {
            'avg': avg_period,
            'max': max_period,
            'min': min_period,
            'sd': period_sd,
        }
Example #6
def speeds_by_arc_length_to_speeds_by_time(speeds_by_arc_length,
                                    arc_lengths, time_step_size):
    times_by_arc_length = speeds_by_arc_length_to_times_by_arc_length(
                                        speeds_by_arc_length, arc_lengths)
    trip_time = times_by_arc_length[-1]
    time_differences = np.ediff1d(times_by_arc_length)
    speed_differences = np.ediff1d(speeds_by_arc_length)
    slopes = np.divide(speed_differences, time_differences)
    num_time_steps = int(trip_time / time_step_size)
    time_steps = np.empty(num_time_steps)
    time_steps.fill(time_step_size)
    cumulative_time_steps = np.cumsum(time_steps)
    cumulative_time_steps = np.append(cumulative_time_steps, trip_time)
    selected_indices = np.searchsorted(times_by_arc_length,
                                       cumulative_time_steps)
    selected_times_by_arc_length = times_by_arc_length[selected_indices]
    selected_speeds_by_arc_length = speeds_by_arc_length[
                                                    selected_indices]
    selected_slopes = slopes[selected_indices - 1]
    excess_times = np.subtract(cumulative_time_steps,
                               selected_times_by_arc_length)
    excess_speeds = np.multiply(excess_times, selected_slopes)
    speeds_by_time = np.add(excess_speeds, 
                                selected_speeds_by_arc_length)
    return [speeds_by_time, cumulative_time_steps]
Example #7
def _jackknife_2d_random(rbins, box_size, jackknife_nside):
    def corner_area(x, y):
        a = math.sqrt(1.0-x*x)-y
        b = math.sqrt(1.0-y*y)-x
        theta = math.asin(math.sqrt(a*a+b*b)*0.5)*2.0
        return (a*b + theta - math.sin(theta))*0.5

    def segment_without_corner_area(x, r):
        half_chord = math.sqrt(1.0-x*x)
        return math.acos(x) - x*half_chord \
                - quad(corner_area, 0, min(half_chord, 1.0/r), (x,))[0]*r

    def overlapping_circular_areas(r):
        if r*r >= 2: return 1.0
        return (math.pi - quad(segment_without_corner_area, 0, min(1, 1.0/r), \
                (r,))[0]*4.0*r)*r*r

    overlapping_circular_areas_vec = np.vectorize(overlapping_circular_areas, \
            [float])

    side_length = box_size/float(jackknife_nside)
    square_area = 1.0/float(jackknife_nside*jackknife_nside)
    rbins_norm = rbins/side_length
    annulus_areas = np.ediff1d(overlapping_circular_areas_vec(rbins_norm))
    annulus_areas /= np.ediff1d(rbins_norm*rbins_norm)*math.pi
    return 1.0 - square_area * (2.0 - annulus_areas)
Example #8
    def test_x2_in_x1(self):
        """
        x2 only has one bin, and it is surrounded by x1 range
        """
        # old size
        m = 4

        # new size
        n = 1

        # bin edges
        x_old = np.linspace(0., 1., m + 1)
        x_new = np.linspace(0.3, 0.65, n + 1)

        # some arbitrary distribution
        y_old = 1. + np.sin(x_old[:-1] * np.pi) / np.ediff1d(x_old)

        # rebin
        y_new = rebin.rebin(x_old, y_old, x_new,
                            interp_kind='piecewise_constant')

        # compute answer here to check rebin
        y_old_ave  = y_old / np.ediff1d(x_old)
        y_new_here = (y_old_ave[1] * (x_old[2] - x_new[0])
                      + y_old_ave[2] * (x_new[1] - x_old[2]) )

        assert_allclose(y_new, y_new_here)
Example #9
def rr_1_update(rr_1, NFound, Found):
    if np.logical_and(NFound <= 7, NFound > 0):
        rr_1[0:NFound - 1] = np.ediff1d(Found[0:NFound, 0])
    elif NFound > 7:
        rr_1 = np.ediff1d((Found[NFound - 7:NFound - 1, 0]))

    rr_average_1 = np.mean(rr_1)

    return rr_1, rr_average_1
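A small usage sketch, assuming Found[:, 0] holds detected beat times and rr_1 is pre-sized to hold the running R-R intervals (both assumptions, not given in the snippet).

rr = np.zeros(7)
found = np.array([[0.80, 0.], [1.62, 0.], [2.41, 0.], [3.23, 0.]])
rr, rr_avg = rr_1_update(rr, NFound=4, Found=found)
# rr[:3] now holds the three R-R intervals; rr_avg averages over all 7 slots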
Example #10
def findlevel(inwave, threshold, direction='both'):
    temp = inwave - threshold
    if (direction.find("up")+1):
        crossings = np.nonzero(np.ediff1d(np.sign(temp), to_begin=0)>0)
    elif (direction.find("down")+1):
        crossings = np.nonzero(np.ediff1d(np.sign(temp), to_begin=0)<0)
    else:
        crossings = np.nonzero(np.ediff1d(np.sign(temp), to_begin=0))
    return crossings[0][0]
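A quick usage sketch: finding the first sample at which a sine wave crosses a threshold from below.

wave = np.sin(np.linspace(0., 4. * np.pi, 200))
idx = findlevel(wave, 0.5, direction='up')   # index of the first upward crossing of 0.5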
Example #11
def velocity(x, y):
	"""Returns array of velocity at each time step"""
	if isinstance(x, pd.Series):
		x = x.values
		y = y.values
	vx = np.ediff1d(x)
	vy = np.ediff1d(y)
	vel = np.sqrt( vx**2 + vy **2 ) # Pythagoras
	return vel
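A short usage sketch on a unit circle sampled at a constant rate; consecutive points are equally spaced, so all step lengths come out identical.

t = np.linspace(0., 1., 9)
x = np.cos(2. * np.pi * t)
y = np.sin(2. * np.pi * t)
velocity(x, y)   # 8 identical step lengths, one per time step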
Example #12
def ediff1d(ary, to_end=None, to_begin=None):
    if not isinstance(ary, Quantity):
        return np.ediff1d(ary, to_end, to_begin)

    return Quantity(
        np.ediff1d(ary.magnitude, to_end, to_begin),
        ary.dimensionality,
        copy=False
    )
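A usage sketch, assuming Quantity comes from the python-quantities package (the magnitude/dimensionality attributes suggest so) and that numpy is imported as np in the module.

import quantities as pq
lengths = pq.Quantity([1., 4., 9.], 'm')
ediff1d(lengths)                   # differences carry the metre unit
ediff1d(np.array([1., 4., 9.]))    # plain arrays fall back to np.ediff1d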
Example #13
def check_sync(file_obj, wps, word_index, pattern_name):
    '''
    Check sync words in the array.

    Performs analysis of sync words in the file to check if there are no
    sync problems.
    
    :param file_obj: File object containing byte-aligned data.
    :param wps: Expected words per second
    :type wps: int
    :param word_index: First index of a sync word within the data.
    :type word_index: int
    :param pattern_name: Sync word pattern name, either 'Standard' or 'Reverse'.
    :type pattern_name: str
    '''
    array = np.fromfile(file_obj, dtype=np.short)

    array &= 0xFFF

    s1 = np.array(
        np.nonzero(array == SYNC_PATTERNS[pattern_name][0])).flatten()
    s2 = np.array(
        np.nonzero(array == SYNC_PATTERNS[pattern_name][1])).flatten()
    s3 = np.array(
        np.nonzero(array == SYNC_PATTERNS[pattern_name][2])).flatten()
    s4 = np.array(
        np.nonzero(array == SYNC_PATTERNS[pattern_name][3])).flatten()
    syncs = np.concatenate((s1, s2, s3, s4))
    syncs = np.sort(syncs)
    syncs_i = iter(syncs)
    # print 'Sync words\n', syncs
    prev_sync = next(syncs_i)
    ix = prev_sync
    while ix < array.size:
        next_sync = ix + wps
        found = syncs == next_sync
        if np.any(found):
            ix = next_sync
        else:
            try:
                last_ix = ix
                ix = next(syncs_i)
                while ix < next_sync:
                    ix = next(syncs_i)
                logger.warning(
                    'Sync lost at word %d, next sync not found at %d, '
                    'found at %d instead', last_ix, next_sync, ix)
            except StopIteration:
                break

    syncs = np.ediff1d(syncs, to_begin=0, to_end=0)
    syncs[0] = syncs[1]
    syncs[-1] = syncs[-2]
    # print 'Distances\n', syncs
    syncs = np.ediff1d(syncs, to_begin=0, to_end=0)
Example #14
	def check_move(self):
		'Checks to see if move available'
		if np.count_nonzero(self.data == 0):
			return True
		for row in self.data:
			if np.count_nonzero(np.ediff1d(row) == 0):
				return True
		for row in self.data.T:
			if np.count_nonzero(np.ediff1d(row) == 0):
				return True
		return False
Example #15
def distancesFromArrays(xData, yData):
    """Returns distances between each points

    :param numpy.ndarray xData: X coordinate of points
    :param numpy.ndarray yData: Y coordinate of points
    :rtype: numpy.ndarray
    """
    deltas = numpy.dstack((
        numpy.ediff1d(xData, to_begin=numpy.float32(0.)),
        numpy.ediff1d(yData, to_begin=numpy.float32(0.))))[0]
    return numpy.cumsum(numpy.sqrt(numpy.sum(deltas ** 2, axis=1)))
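A worked usage sketch on a 3-4-5 right-angle path (using the module's numpy import):

xData = numpy.array([0., 3., 3.], dtype=numpy.float32)
yData = numpy.array([0., 4., 4.], dtype=numpy.float32)
distancesFromArrays(xData, yData)   # -> array([0., 5., 5.]): cumulative path length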
Example #16
def euc_dist(x, y):
    """
    Calculate Euclidean distances travelled by the trajectory at each time step.
    :param x: (array) x-coordinates
    :param y: (array) y-coordinates
    :return: Euclidean distance
    """
    xdist = np.ediff1d(x)**2
    ydist = np.ediff1d(y)**2
    dist = np.sqrt(xdist+ydist)
    return dist
Example #17
def get_mids_and_lengths(x0, y0, x1, y1, gx, gy):
    """Return the midpoints and intersection lengths of a line and a grid.

    Parameters
    ----------
    x0,y0,x1,y1 : float
        Two points which define the line. Points must be outside the grid
    gx,gy : :py:class:`np.array`
        Defines positions for the gridlines

    Return
    ------
    xm,ym : :py:class:`np.array`
        Coordinates along the line within each intersected grid pixel.
    dist : :py:class:`np.array`
        Lengths of the line segments crossing each pixel

    """
    # avoid upper-right boundary errors
    if (x1 - x0) == 0:
        x0 += 1e-6
    if (y1 - y0) == 0:
        y0 += 1e-6

    # vector lengths (ax, ay)
    ax = (gx - x0) / (x1 - x0)
    ay = (gy - y0) / (y1 - y0)

    # edges of alpha (a0, a1)
    ax0 = min(ax[0], ax[-1])
    ax1 = max(ax[0], ax[-1])
    ay0 = min(ay[0], ay[-1])
    ay1 = max(ay[0], ay[-1])
    a0 = max(max(ax0, ay0), 0)
    a1 = min(min(ax1, ay1), 1)

    # sorted alpha vector
    cx = (ax >= a0) & (ax <= a1)
    cy = (ay >= a0) & (ay <= a1)
    alpha = np.sort(np.r_[ax[cx], ay[cy]])

    # lengths
    xv = x0 + alpha * (x1 - x0)
    yv = y0 + alpha * (y1 - y0)
    lx = np.ediff1d(xv)
    ly = np.ediff1d(yv)
    dist = np.sqrt(lx**2 + ly**2)

    # indexing
    mid = alpha[:-1] + np.ediff1d(alpha) / 2.
    xm = x0 + mid * (x1 - x0)
    ym = y0 + mid * (y1 - y0)

    return xm, ym, dist
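A usage sketch on a 4x4 grid of unit pixels with a horizontal ray entering from the left of the grid; the segment lengths should sum to the grid width.

gx = np.arange(0., 5.)   # 5 gridlines -> 4 pixels along x
gy = np.arange(0., 5.)
xm, ym, dist = get_mids_and_lengths(-1., 2., 5., 2., gx, gy)
dist.sum()   # ~4.0: the ray crosses four unit pixels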
Example #18
def jitter(vals):
    totVals = len(vals)
    B = numpy.unique(vals)
    N = WhereIs(vals, B)
    (n, B) = numpy.histogram(vals, B)  # the legacy 'new' keyword was removed from numpy.histogram
    fwdSpace = numpy.ediff1d(B, to_end=0.0)
    prevSpace = numpy.flipud(numpy.ediff1d(numpy.flipud(B), to_end=0.0))

    baseJit = ((fwdSpace[N] - prevSpace[N]) * numpy.random.rand(totVals)) - prevSpace[N]
    baseJit[n[N] <= 1] = 0.0

    return(vals + baseJit)
Example #19
def find_angles(core, var1, var2):
    """
    Calculates the "bends"/angles of variables graphed against each other
    (e.g. depth v age to look for sharp elbows)
    """
    x, y = graphlist(core, var1, var2)
    x1 = np.ediff1d(x)
    y1 = np.ediff1d(y)
    a = x1[:-1] ** 2 + y1[:-1] ** 2
    b = x1[1:] ** 2 + y1[1:] ** 2
    c = (x[2:] - x[:-2]) ** 2 + (y[2:] - y[:-2]) ** 2
    return np.degrees(np.arccos((a + b - c) / np.sqrt(4 * a * b)))
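The angle computation is the law of cosines applied to consecutive segments; a stand-alone check on three collinear points (graphlist and core are not needed for this):

x = np.array([0., 1., 2.])
y = np.array([0., 0., 0.])
x1, y1 = np.ediff1d(x), np.ediff1d(y)
a = x1[:-1] ** 2 + y1[:-1] ** 2                       # squared length of the first segment
b = x1[1:] ** 2 + y1[1:] ** 2                         # squared length of the second segment
c = (x[2:] - x[:-2]) ** 2 + (y[2:] - y[:-2]) ** 2     # squared distance between the outer points
np.degrees(np.arccos((a + b - c) / np.sqrt(4 * a * b)))   # -> array([180.]): no bend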
Example #20
    def compute_derivative(self, x_vals, y_vals):
        y_diffs = np.ediff1d(y_vals)
        x_diffs = np.ediff1d(x_vals)
        quotients = np.divide(y_diffs, x_diffs)
        quotients_a = quotients[:-1]
        quotients_b = quotients[1:]
        mean_quotients = (quotients_a + quotients_b) / 2.0
        derivative = np.empty(x_vals.shape[0])
        derivative[1:-1] = mean_quotients
        derivative[0] = quotients[0]
        derivative[-1] = quotients[-1]
        return derivative
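A quick check of the averaged-difference behaviour; the method does not touch instance state, so a dummy self is passed here purely for illustration (call it through its class in real code).

x = np.linspace(0., 1., 5)
y = x ** 2
compute_derivative(None, x, y)
# -> array([0.25, 0.5, 1.0, 1.5, 1.75]): exact 2*x at interior points, one-sided at the ends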
Example #21
def mwu_ediff(a, i):
    a1 = a[:i]
    a2 = a[i:]
    a1 = a1[np.ediff1d(a1).nonzero()[0]]
    a2 = a2[np.ediff1d(a2).nonzero()[0]]
    if len(a1) == 0 or len(a2) == 0:
        return (None, 1)
    elif (len(np.unique(a1)) == 1 and
          np.array_equal(np.unique(a1), np.unique(a2))):
        return (None, 1)
    else:
        U, p = mannwhitneyu(a1, a2)
        return (U, p)
Example #22
def plot_deriv_groessen(A,B=0,fitted=False,poly=0):
        X=A.x.magnitude
        plt.grid(True, which="both", ls="-", color='0.65')
        SX=A.Sx.magnitude
        print(A.x)
        Y=B.x.magnitude
        SY=B.Sx.magnitude
        DX=np.ediff1d(X)
        DY=np.ediff1d(Y)
        SDX=SX[1:]*np.sqrt(2) # sqrt(2) because of subtraction
        SDY=SY[1:]*np.sqrt(2) # sqrt(2) because of subtraction
        SDYY=np.sqrt(SDX**2*DY**2/DX**4+SDY**2/DX**2)
        ax=plt.errorbar(X[1:], DY/DX,xerr=SX[1:],yerr=SDYY,fmt=".")
Example #23
def circulation(u, v, w, x, y, z):

  n = len(x)

  # local variables
  uavg = np.zeros((n), np.float32)
  vavg = np.zeros((n), np.float32)
  wavg = np.zeros((n), np.float32)

  # initialize returned variables
  vdotdl1 = np.empty((n), np.float32)
  vdotdl2 = np.empty((n), np.float32)
  C = 0.0

  if (u == missingval).any() or \
     (v == missingval).any() or \
     (w == missingval).any() or \
     (x == missingval).any() or \
     (y == missingval).any() or \
     (z == missingval).any():

     del uavg, vavg, wavg, vdotdl1, vdotdl2  # dx, dy, dz are not defined yet on this path
     return (missingval, 0, 0)

  # calculate wind and dl around the circuit
  uavg[0:n-1] = 0.5 * (u[0:n-1] + u[1:n])
  vavg[0:n-1] = 0.5 * (v[0:n-1] + v[1:n])
  wavg[0:n-1] = 0.5 * (w[0:n-1] + w[1:n])

  uavg[n-1] = 0.5*(u[0] + u[n-1])
  vavg[n-1] = 0.5*(v[0] + v[n-1])
  wavg[n-1] = 0.5*(w[0] + w[n-1])

  # ediff1d returns an array of the difference between
  # each element of the passed array, which is to say,
  # it returns the delta of each array element.  perfect.
  dx = np.ediff1d(x, to_end=x[0]-x[n-1])
  dy = np.ediff1d(y, to_end=y[0]-y[n-1])
  dz = np.ediff1d(z, to_end=z[0]-z[n-1])

  # assumes clockwise parcels
  C = np.sum(-uavg*dx - vavg*dy - wavg*dz) 
  vdotdl1 = - uavg*dx - vavg*dy - wavg*dz
  vdotdl2 = vdotdl1 / (dx**2 + dy**2 + dz**2)**0.5
  vdotdl1 = vdotdl1 * km2m

        
  C = C * km2m  # C now in units m2/s
        
  del uavg, vavg, wavg, dx, dy, dz
  return (C, vdotdl1, vdotdl2)
Example #24
def detect_linear_regions(data, eps_r=0.01, run=10):
    """
    Detect linear-like regions of stress-strain curve (i.e. the regions of
    small and large deformations). The first and last regions are identified
    with small and large deformation linear regions.

    Notes
    -----
    Sets `strain_regions` and `strain_regions_iranges` attributes of `data`.
    """
    stress = data.stress
    window_size = max(int(0.001 * stress.shape[0]), 35)

    ds = savitzky_golay(stress, window_size, 3, 1)
    de = savitzky_golay(data.strain, window_size, 3, 1)
    dstress = ds / de
    ddstress = savitzky_golay(dstress, window_size, 3, 1)

    p1 = np.where(dstress >= 0)[0]
    p2 = np.ediff1d(p1, to_end=2)
    p3 = np.where(p2 > 1)[0]
    if p3[0] == 0 or p3[0] == 1:
        index_value = p1[-1]
    else:
        index_value = p1[p3][0]
    output('index_value:', index_value) # Usually equal to data.iult.

    ddstress = ddstress[:index_value]
    addstress = np.abs(ddstress)
    eps = eps_r * addstress.max()
    ii = np.where(addstress < eps)[0]
    idd = np.ediff1d(ii)
    ir = np.where(idd > 1)[0]

    run_len = int((run * index_value) / 100.)

    regions = []
    ic0 = 0
    for ic in ir:
        region = slice(ii[ic0], ii[ic] + 1)
        ic0 = ic + 1
        if (region.stop - region.start) >= run_len:
            regions.append(region)

    output('%d region(s)' % len(regions))

    data.strain_regions_iranges = regions
    data.strain_regions = [(data.strain[ii.start], data.strain[ii.stop])
                           for ii in data.strain_regions_iranges]

    return data
Example #25
def project(x0, y0, x1, y1, obj):
    """Project single x-ray beam.
    """

    x0, y0, x1, y1 = float(x0), float(y0), float(x1), float(y1)
    sx, sy = obj.shape

    # grid frame (gx, gy)
    gx = np.arange(0, sx + 1)
    gy = np.arange(0, sy + 1)

    # avoid upper-right boundary errors
    if (x1 - x0) == 0:
        x0 += 1e-6
    if (y1 - y0) == 0:
        y0 += 1e-6

    # vector lengths (ax, ay)
    ax = (gx - x0) / (x1 - x0)
    ay = (gy - y0) / (y1 - y0)

    # edges of alpha (a0, a1)
    ax0 = min(ax[0], ax[-1])
    ax1 = max(ax[0], ax[-1])
    ay0 = min(ay[0], ay[-1])
    ay1 = max(ay[0], ay[-1])
    a0 = max(max(ax0, ay0), 0)
    a1 = min(min(ax1, ay1), 1)

    # sorted alpha vector
    cx = (ax >= a0) & (ax <= a1)
    cy = (ay >= a0) & (ay <= a1)
    alpha = np.sort(np.r_[ax[cx], ay[cy]])

    # lengths
    xv = x0 + alpha * (x1 - x0)
    yv = y0 + alpha * (y1 - y0)
    lx = np.ediff1d(xv)
    ly = np.ediff1d(yv)
    dist = np.sqrt(lx**2 + ly**2)
    ind = dist != 0

    # indexing
    mid = alpha[:-1] + np.ediff1d(alpha) / 2.
    xm = x0 + mid * (x1 - x0)
    ym = y0 + mid * (y1 - y0)
    ix = np.floor(xm).astype('int')
    iy = np.floor(ym).astype('int')

    # projection
    return np.dot(dist[ind], obj[ix[ind], iy[ind]])
Example #26
    def _calculate_derivatives(self, initial_potential, current_at_start=0., current_at_end=0., extra_current=0.):
        # TODO: maybe change to one step (performance?) <-> problem: numpy.array not easy to enhance
        left_difference = numpy.ediff1d(initial_potential, to_begin = 0)
        right_difference = - numpy.ediff1d(initial_potential, to_end = 0)
         
        # the current from one segment to the next one
        # second order central difference formula
        boundary_current = self._segment_plasma_conductance * (left_difference + right_difference)
        boundary_current[0] += current_at_start
        boundary_current[-1] += current_at_end
         
        membrane_current = self._segment_membrane_conductance * (initial_potential - self._resting_potential)
 
        return (self._outside_current - boundary_current - membrane_current - extra_current) / self._segment_membrane_capacitance # new potential
Example #27
def find_sequences_indices(mask):
    """
    Return iterator with tuples of start and end indices for all sequences
    of True values in mask.

    Parameters
    ----------
    mask : np.ndarray
        The mask the function should be performed on.

    Returns
    -------
    tuples
        Iterator with a tuple of start and end indices for each sequence of
        True values in the input.

    Examples
    --------
    >>> mask = np.array([False, True, True, False])
    >>> list(find_sequences_indices(mask))
    [(1, 3)]
    >>> mask = np.array([True, False, False, True])
    >>> list(find_sequences_indices(mask))
    [(0, 1), (3, 4)]
    >>> mask = np.ones((4,), dtype=bool)
    >>> list(find_sequences_indices(mask))
    [(0, 4)]
    >>> mask = np.zeros((4,), dtype=bool)
    >>> list(find_sequences_indices(mask))
    []
    """
    if not np.any(mask):
        # There are no sequences in this mask
        return izip([], [])

    start_indices = np.flatnonzero(
        np.ediff1d(mask.astype(int), to_begin=0) == 1)
    end_indices = np.flatnonzero(
        np.ediff1d(mask.astype(int), to_begin=0) == -1)

    # If the mask starts or ends with a True value this needs to be handled
    # separately:
    if (start_indices.size == 0 or
            (end_indices.size != 0 and end_indices[0] < start_indices[0])):
        start_indices = np.insert(start_indices, 0, 0)
    if (end_indices.size == 0 or
            (start_indices.size != 0 and start_indices[-1] > end_indices[-1])):
        end_indices = np.append(end_indices, len(mask))
    return izip(start_indices, end_indices)
Example #28
def change_outlier_proportion(raw_data, predicted_data, check_fn=more_than_three):
	assert len(raw_data) == len(predicted_data)
	raw_arrays = split_time_series(np.ediff1d(raw_data))
	pred_arrays = split_time_series(np.ediff1d(predicted_data))
	count_ = 0
	len_ = 0
	for (raw_array, pred_array) in zip(raw_arrays, pred_arrays):
		sublen_ = len(raw_array)
		if sublen_ >= 12:
			mean = np.mean(raw_array)
			std = np.std(raw_array)
			normDist = lambda x: (x-mean)/std
			count_ += sum(map(check_fn, map(normDist, pred_array)))
			len_ += sublen_
	return count_*1.0/len_
Example #29
def get_smooth_speed(posdf,fs=60,th=3,cutoff=0.5,showfig=False,verbose=False):
    x = (np.array(posdf['x1']) + np.array(posdf['x2']))/2
    y = (np.array(posdf['y1']) + np.array(posdf['y2']))/2

    dx = np.ediff1d(x,to_begin=0)
    dy = np.ediff1d(y,to_begin=0)
    dvdt = np.sqrt(np.square(dx) + np.square(dy))*fs # units (cm?) per second
    t0 = 0
    tend = len(dvdt)/fs # end in seconds

    dvdtlowpass = np.fmax(0,butter_lowpass_filtfilt(dvdt, cutoff=cutoff, fs=fs, order=6))

    if verbose:
        print('The animal (gor01) ran an average of {0:2.2f} units/s'.format(dvdt.mean()))

    #th = 3 #cm/s
    
    runindex = np.where(dvdtlowpass>=th); runindex = runindex[0]
    if verbose:
        print("The animal ran faster than th = {0:2.1f} units/s for a total of {1:2.1f} seconds (out of a total of {2:2.1f} seconds).".format(th,len(runindex)/fs,len(x)/fs))
    
    if showfig:
        #sns.set(rc={'figure.figsize': (15, 4),'lines.linewidth': 3, 'font.size': 18, 'axes.labelsize': 16, 'legend.fontsize': 12, 'ytick.labelsize': 12, 'xtick.labelsize': 12 })
        #sns.set_style("white")

        f, (ax1, ax2) = plt.subplots(1,2)

        ax1.plot(np.arange(0,len(dvdt))/fs,dvdt,alpha=1,color='lightgray',linewidth=2)
        ax1.plot(np.arange(0,len(dvdt))/fs,dvdtlowpass, alpha=1,color='k',linewidth=1)
        ax1.set_xlabel('time (seconds)')
        ax1.set_ylabel('instantaneous velocity (units/s)')
        ax1.legend(['unfiltered', str(cutoff) + ' Hz lowpass filtfilt'])
        ax1.set_xlim([0,10*np.ceil(len(x)/fs/10)])

        ax2.plot(np.arange(0,len(dvdt))/fs,dvdt,alpha=1,color='lightgray',linewidth=2)
        ax2.plot(np.arange(0,len(dvdt))/fs,dvdtlowpass, alpha=1,color='k',linewidth=1)
        ax2.set_xlabel('time (seconds)')
        ax2.set_ylabel('instantaneous velocity (units/s)')
        ax2.legend(['unfiltered',  str(cutoff) + ' Hz lowpass filtfilt'])
        ax2.set_xlim([30,70])

    speed = Map()
    speed['data'] = dvdtlowpass
    speed['active_bins'] = runindex
    speed['active_thresh'] = th
    speed['samprate'] = fs
  
    return speed
Example #30
def sftfOPtuning(syncsub, celltraces, sweeplength, showflag):
    nc = len(celltraces)    

    f0mean = np.zeros((3,3,nc))
    f0sem = np.zeros((3,3,nc))
    
    '''sort sweep times by sort condition'''
    '''difference between values'''
    valuedifference = np.ediff1d(syncsub[:,6], to_end=None, to_begin = 1)
    '''indices for condition transitions'''
    transitions = argwhere(valuedifference)    
    transitions = append(transitions, len(valuedifference))
    
    f0m = np.empty(((len(transitions)-1),nc))
    f0s = np.empty(((len(transitions)-1),nc))      
    for cond in range(len(transitions)-1):
        firstpoint = transitions[cond]
        lastpoint = transitions[cond+1]
        starttimes = syncsub[firstpoint:lastpoint,0]
        starttimes = starttimes.astype(int32)
        (traceave, tracesem) = OPtraceave(celltraces, starttimes, sweeplength, showflag)     
        sp = int(floor(10*syncsub[firstpoint, 5]))
        tp = int(sqrt(syncsub[firstpoint,6])-1)        
        f0mean[sp,tp,:] = traceave.mean(0)
        f0sem[sp,tp,:] = tracesem.mean(0)
        
    return (f0mean, f0sem)
Example #31
def _VR_align_to_2P_FlashLED(vr_dframe,infofile, n_imaging_planes = 1,n_lines = 512.):
    '''align VR behavior data to 2P sample times using splines. called internally
    from behavior_dataframe if scanmat exists
    inputs:
        vr_dframe- VR pandas dataframe loaded directly from .sqlite file
        infofile- path
        n_imaging_planes- number of imaging planes (not implemented)
        n_lines - number of lines collected during each frame (default 512.)
    outputs:
        ca_df - calcium imaging aligned VR data frame (pandas dataframe)
    '''

    info = loadmat_sbx(infofile) # load .mat file with ttl times
    fr = info['fr'] # frame rate
    lr = fr*n_lines # line rate


    orig_ttl_times = info['frame']/fr + info['line']/lr # including error ttls
    dt_ttl = np.diff(np.insert(orig_ttl_times,0,0)) # insert zero at beginning and calculate delta ttl time
    tmp = np.zeros(dt_ttl.shape)
    tmp[dt_ttl<.005] = 1 # find ttls faster than 200 Hz (unrealistically fast - probably a ttl which bounced to ground)
    # ensured outside of this script that this finds the true start ttl on every scan
    mask = np.insert(np.diff(tmp),0,0) # flag the first ttl in each run of too-fast ttls
    mask[mask<0] = 0
    print('num aberrant ttls',tmp.sum())

    frames = info['frame'][mask==0] # should be the original ttls up to a 1 VR frame error
    lines = info['line'][mask==0]

    ##
    ##

    # times of each ttl (VR frame)
    ttl_times = frames/fr + lines/lr
    numVRFrames = frames.shape[0]

    # create empty pandas dataframe to store calcium aligned data
    ca_df = pd.DataFrame(columns = vr_dframe.columns, index = np.arange(info['max_idx']))
    ca_time = np.arange(0,1/fr*info['max_idx'],1/fr) # time on this even grid
    if (ca_time.shape[0]-ca_df.shape[0])==1: # occasionally a 1 frame correction due to
                                            # scan stopping mid frame
        print('one frame correction')
        ca_time = ca_time[:-1]


    ca_df.loc[:,'time'] = ca_time
    mask = ca_time>=ttl_times[0] # mask for when ttls have started on imaging clock
                                # (i.e. imaging started and stabilized, ~10s)

    # take VR frames for which there are valid TTLs
    vr_dframe = vr_dframe.iloc[-numVRFrames:]
    # print(ttl_times.shape,vr_dframe.shape)

    # linear interpolation of position
    print(ttl_times[0],ttl_times[-1])
    print(ca_time[mask][0],ca_time[mask][-1])

    near_list = ['LEDCue']
    f_nearest = sp.interpolate.interp1d(ttl_times,vr_dframe[near_list]._values,axis=0,kind='nearest')
    ca_df.loc[mask,near_list] = f_nearest(ca_time[mask])
    ca_df.fillna(method='ffill',inplace=True)
    ca_df.loc[~mask,near_list]=-1.

    # integrate, interpolate and then take difference, to make sure data is not lost
    cumsum_list = ['dz','lick','reward','gng','manrewards']
    f_cumsum = sp.interpolate.interp1d(ttl_times,np.cumsum(vr_dframe[cumsum_list]._values,axis=0),axis=0,kind='slinear')
    ca_cumsum = np.round(np.insert(f_cumsum(ca_time[mask]),0,[0, 0,0 ,0,0],axis=0))
    if ca_cumsum[-1,-2]<ca_cumsum[-1,-3]:
        ca_cumsum[-1,-2]+=1


    ca_df.loc[mask,cumsum_list] = np.diff(ca_cumsum,axis=0)
    ca_df.loc[~mask,cumsum_list] = 0.



    # smooth instantaneous speed
    k = Gaussian1DKernel(5)
    cum_dz = convolve(np.cumsum(ca_df['dz']._values),k,boundary='extend')
    ca_df['dz'] = np.ediff1d(cum_dz,to_end=0)


    ca_df['speed'].interpolate(method='linear',inplace=True)
    ca_df['speed']=np.array(np.divide(ca_df['dz'],np.ediff1d(ca_df['time'],to_begin=1./fr)))
    ca_df['speed'].iloc[0]=0

    # calculate and smooth lick rate
    ca_df['lick rate'] = np.array(np.divide(ca_df['lick'],np.ediff1d(ca_df['time'],to_begin=1./fr)))
    ca_df['lick rate'] = convolve(ca_df['lick rate']._values,k,boundary='extend')

    # replace nans with 0s
    ca_df[['reward','lick']].fillna(value=0,inplace=True)
    return ca_df
Example #32
def replace_repeats_with_zero(arr):
    """Replace repeated elements in 1d array with 0"""
    arr[np.ediff1d(arr, to_begin=1) == 0] = 0
    return arr
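A quick usage sketch (note that the input array is modified in place):

replace_repeats_with_zero(np.array([3, 3, 3, 5, 5, 7]))   # -> array([3, 0, 0, 5, 0, 7])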
Example #33
def compute_rel_diff(seq):
    xs = np.asarray(seq)
    diff = np.ediff1d(xs, np.nan)
    return diff / seq
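A small worked example; the trailing NaN comes from the to_end padding, so the last element has no defined forward difference.

compute_rel_diff([100., 110., 121.])   # -> array([0.1, 0.1, nan])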
Example #34
    def discontDetection_(self, jumps):
        """ Implementation of the detection algorithm

        Parameters
        ----------
        jumps : (list)
            Time in history or in solution where discontinuities occur.
        Return
        -------
        discont : ndarray, shape (nbr_discontinuities,)
            array with all discont within my interval of integration
        """
        if not self.delays:
            # delays=[], solver used as ivp
            if jumps:
                discont = jumps
            else:
                discont = []
            return discont
        else:
            discont = []
            # transport along time of discontinuities
            transp_discont = np.asarray(self.delays)

            to_transport = [self.t]

            # if jumps and self.h_info == 'from DdeResult':
            if jumps:
                # remove jumps outside tspan
                to_transport += jumps
            if self.h_info == 'from DdeResult' and not self.initDiscont \
                        and not self.firstInitDiscont and not jumps:
                warn(
                    "Start from previous integration without any jumps or initial discont"
                )
                # case where there is no discont, return
                return []
            tmp = [(t_ + transp_discont).tolist() for t_ in to_transport]
            tmp_fla = sorted([val for sub_d in tmp for val in sub_d])
            for i in range(1, self.tracked_stages + 1):
                discont.append(tmp_fla)
                z = 1  # number of times the delays have been applied
                for j in range(i + 1, self.tracked_stages +
                               1):  # get intermediate discont
                    for k in range(1, self.Ndelays):
                        inter_to_trans = tmp_fla[k:]
                        inter_d = [(t_ + z * transp_discont[:-k]).tolist()
                                   for t_ in inter_to_trans]
                        inter_d_fla = [
                            val for sub_d in inter_d for val in sub_d
                        ]
                        discont.append(inter_d_fla)
                        # discont.append(inter_d.tolist())
                    z += 1
                # flatten tmp to apply transp_discont once more
                tmp = [(t_ + transp_discont * (i + 1)).tolist()
                       for t_ in to_transport]
                tmp_fla = sorted([val for sub_d in tmp for val in sub_d])
            # flatten the list of lists of discont into a sorted array
            discont = np.asarray(
                sorted([val for sub_d in discont for val in sub_d]))
            # remove clustered discontinuities that are closer together than EPS
            discont = np.delete(discont,
                                np.argwhere(np.ediff1d(discont) < EPS) + 1)
            # remove initial time from discont tracking
            discont = discont[discont > self.t0].tolist()
        return discont
Example #35
def G(d):
    return -np.ediff1d(d, to_end=-d[-1])
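A tiny worked example of the boundary handling (the to_end term re-appends the last element with its sign flipped before negation):

d = np.array([1., 3., 6.])
G(d)   # -> array([-2., -3., 6.])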
Example #36
def Column_main_Extracter(img, orignal_img, scaling_factor):
    blur_cr_img = cv2.blur(img, (13, 13))
    crop_img_resize = cv2.resize(blur_cr_img,
                                 None,
                                 fx=scaling_factor / 2,
                                 fy=scaling_factor / 2,
                                 interpolation=cv2.INTER_AREA)

    crop_img_resize_n = cv2.fastNlMeansDenoisingColored(
        crop_img_resize, None, 10, 10, 7, 21)
    crop_img_resize_n_gray = cv2.cvtColor(crop_img_resize_n,
                                          cv2.COLOR_BGR2GRAY)
    th3 = cv2.adaptiveThreshold(crop_img_resize_n_gray, 255,
                                cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY_INV, 21, 7)

    kernel = np.ones((100, 9), np.uint8)
    th4 = cv2.morphologyEx(th3.copy(), cv2.MORPH_CLOSE, kernel)
    th_sum00 = np.sum(th4, axis=0)
    empty_spc_clm = np.where(th_sum00 < (np.min(th_sum00) + 100))[0]
    empty_spc_clm_dif = np.ediff1d(empty_spc_clm)
    Column_boundries = (empty_spc_clm[np.where(
        empty_spc_clm_dif > np.mean(empty_spc_clm_dif) + 5)[0] + 1] /
                        (scaling_factor / 2)).astype(int)
    Column_boundries = np.delete(
        Column_boundries,
        np.where(Column_boundries < (img.shape[1] / 5))[0])
    # print('Column_boundries:1',Column_boundries)
    if len(Column_boundries) < 3:
        Angles_Records = []
        for angle in [
                0.1, -0.1, 0.2, -0.2, 0.3, -0.3, 0.4, -0.4, 0.5, -0.5, 0.6,
                -0.6, 0.7, -0.7, 0.8, -0.8
        ]:
            Column_boundries = rotate_check_column_border(
                img, th3.copy(), angle, scaling_factor, img.shape[1])
            # print(Column_boundries)
            ############################################
            Column_boundries = np.append(Column_boundries, [0, img.shape[1]])
            Column_boundries = np.unique(Column_boundries)
            for i in range(len(Column_boundries)):
                closer = np.where(
                    np.ediff1d(Column_boundries) < (img.shape[1]) / 5)[0]
                if len(closer) > 0:
                    Column_boundries = np.delete(Column_boundries, closer[-1])
                else:
                    break
            Column_boundries = Column_boundries[1:]
            ############################################

            Angles_Records.append([angle, len(Column_boundries)])
            if len(Column_boundries) > 2:
                break
        Angles_Records = np.array(Angles_Records)
        if len(Column_boundries) > 2:
            img = rotate(img, angle, value_replace=(255, 255, 255))
            First_Column = img[:, 0:Column_boundries[0] + 10]
            Second_Column = img[:,
                                Column_boundries[0]:Column_boundries[1] + 10]
            Third_Column = img[:, Column_boundries[1]:]
        else:
            angle = np.append([0], Angles_Records)
            angle_rec = Angles_Records[np.where(
                Angles_Records[:, 1] == np.max(Angles_Records[:, 1]))[0]][:, 0]

            Column_boundries, ang = method5_column(img, th3, scaling_factor,
                                                   angle_rec)
            if len(Column_boundries) > 2:
                img = rotate(img, ang, value_replace=(255, 255, 255))
                First_Column = img[:, 0:Column_boundries[0] + 10]
                Second_Column = img[:,
                                    Column_boundries[0]:Column_boundries[1] +
                                    10]
                Third_Column = img[:, Column_boundries[1]:]
            else:
                First_Column, Second_Column, Third_Column = Column_main_Extracter_sub_second(
                    img, orignal_img, scaling_factor)

                if First_Column is None:
                    First_Column, Second_Column, Third_Column = Column_main_Extracter_sub(
                        orignal_img, scaling_factor)
                    # print([First_Column])

    else:
        First_Column = img[:, 0:Column_boundries[0] + 10]
        Second_Column = img[:, Column_boundries[0]:Column_boundries[1] + 10]
        Third_Column = img[:, Column_boundries[1]:]
    return First_Column, Second_Column, Third_Column
Example #37
    def evaluateRecommendations(self,
                                URM_test,
                                at=10,
                                minRatingsPerUser=1,
                                exclude_seen=True,
                                mode='parallel',
                                filterTopPop=False,
                                filterCustomItems=np.array([], dtype=np.int),
                                filterCustomUsers=np.array([], dtype=np.int)):
        """
        Speed info:
        - Sparse weights: batch mode is 2x faster than sequential
        - Dense weights: batch and sequential speed are equivalent
        :param URM_test:                URM to be used for testing
        :param at: 10                   Length of the recommended items
        :param minRatingsPerUser: 1     Users with less than this number of interactions will not be evaluated
        :param exclude_seen: True       Whether to remove already seen items from the recommended items
        :param mode: 'sequential', 'parallel', 'batch'
        :param filterTopPop: False or decimal number        Percentage of items to be removed from recommended list and testing interactions
        :param filterCustomItems: Array, default empty           Items ID to NOT take into account when recommending
        :param filterCustomUsers: Array, default empty           Users ID to NOT take into account when recommending
        :return:
        """

        if len(filterCustomItems) == 0:
            self.filterCustomItems = False
        else:
            self.filterCustomItems = True
            self.filterCustomItems_ItemsID = np.array(filterCustomItems)
        '''
        if filterTopPop != False:
            self.filterTopPop = True
            _,_, self.filterTopPop_ItemsID = removeTopPop(self.URM_train, URM_2 = URM_test_new, percentageToRemove=filterTopPop)
            print("Filtering {}% TopPop items, count is: {}".format(filterTopPop*100, len(self.filterTopPop_ItemsID)))
            # Zero-out the items in order to be considered irrelevant
            URM_test_new = check_matrix(URM_test_new, format='lil')
            URM_test_new[:,self.filterTopPop_ItemsID] = 0
            URM_test_new = check_matrix(URM_test_new, format='csr')
        '''

        # During testing CSR is faster
        self.URM_test = check_matrix(URM_test, format='csr')
        self.evaluator = Evaluator()
        self.URM_train = check_matrix(self.URM_train, format='csr')
        self.at = at
        self.minRatingsPerUser = minRatingsPerUser
        self.exclude_seen = exclude_seen

        nusers = self.URM_test.shape[0]

        # Prune users with an insufficient number of ratings
        rows = self.URM_test.indptr
        numRatings = np.ediff1d(rows)
        mask = numRatings >= minRatingsPerUser
        usersToEvaluate = np.arange(nusers)[mask]

        if len(filterCustomUsers) != 0:
            print("Filtering {} Users".format(len(filterCustomUsers)))
            usersToEvaluate = set(usersToEvaluate) - set(filterCustomUsers)

        usersToEvaluate = list(usersToEvaluate)

        if mode == 'sequential':
            return self.evaluateRecommendationsSequential(usersToEvaluate)
        elif mode == 'parallel':
            return self.evaluateRecommendationsParallel(usersToEvaluate)
        elif mode == 'batch':
            return self.evaluateRecommendationsBatch(usersToEvaluate)
        elif mode == 'cython':
            return self.evaluateRecommendationsCython(usersToEvaluate)
        # elif mode=='random-equivalent':
        #     return self.evaluateRecommendationsRandomEquivalent(usersToEvaluate)
        else:
            raise ValueError("Mode '{}' not available".format(mode))
Example #38
def load_sync(exptpath):

    #verify that sync file exists in exptpath
    syncMissing = True
    for f in os.listdir(exptpath):
        if f.endswith('_sync.h5'):
            syncpath = os.path.join(exptpath, f)
            syncMissing = False
            print("Sync file: " + f)
    if syncMissing:
        print("No sync file")
        sys.exit()

    #load the sync data from .h5 and .pkl files
    d = Dataset(syncpath)
    print(d.line_labels)

    #set the appropriate sample frequency
    sample_freq = d.meta_data['ni_daq']['counter_output_freq']

    #get sync timing for each channel
    twop_vsync_fall = d.get_falling_edges('vsync_2p') / sample_freq
    stim_vsync_fall = d.get_falling_edges(
        'vsync_stim')[1:] / sample_freq  #eliminating the DAQ pulse

    photodiode_rise = d.get_rising_edges('stim_photodiode') / sample_freq
    photodiode_fall = d.get_falling_edges('stim_photodiode') / sample_freq
    photodiode_transition = np.union1d(photodiode_rise, photodiode_fall)

    #make sure all of the sync data are available
    channels = {
        'twop_vsync_fall': twop_vsync_fall,
        'stim_vsync_fall': stim_vsync_fall,
        'photodiode_rise': photodiode_rise
    }
    channel_test = []
    for i in channels:
        channel_test.append(any(channels[i]))
        print(i + ' syncs : ' + str(len(channels[i])))
    if all(channel_test):
        print("All channels present.")
    else:
        print("Not all channels present. Sync test failed.")
        sys.exit()

    # find the start of the photodiode 1-second pulses:
    ptd_transition_diff = np.ediff1d(photodiode_transition)
    is_roughly_one_second = np.abs(ptd_transition_diff - 1.0) < 0.016
    first_transition_idx = np.argwhere(is_roughly_one_second)[0, 0]

    first_transition_time = photodiode_transition[first_transition_idx]
    first_stim_vsync = stim_vsync_fall[0]
    first_delay = first_transition_time - first_stim_vsync
    print('delay between first stim_vsync and photodiode: ' + str(first_delay))

    #test and correct for photodiode transition errors

    ptd_rise_diff = np.ediff1d(photodiode_rise)
    #    short = np.where(np.logical_and(ptd_rise_diff>0.1, ptd_rise_diff<0.3))[0]
    #    medium = np.where(np.logical_and(ptd_rise_diff>0.5, ptd_rise_diff<1.5))[0]
    #    ptd_start = 2
    #    for i in medium:
    #        if set(range(i-2,i)) <= set(short):
    #            ptd_start = i+1
    ptd_start = first_transition_idx
    ptd_end = np.where(photodiode_rise > stim_vsync_fall.max())[0][0] - 1

    #    if ptd_start > 3:
    #        print "Photodiode events before stimulus start.  Deleted."

    ptd_errors = []
    while any(ptd_rise_diff[ptd_start:ptd_end] < 1.8):
        error_frames = np.where(
            ptd_rise_diff[ptd_start:ptd_end] < 1.8)[0] + ptd_start
        print("Photodiode error detected. Number of frames: " +
              len(error_frames))
        photodiode_rise = np.delete(photodiode_rise, error_frames[-1])
        ptd_errors.append(photodiode_rise[error_frames[-1]])
        ptd_end -= 1
        ptd_rise_diff = np.ediff1d(photodiode_rise)

    if any(np.abs(first_transition_time -
                  photodiode_rise) < 0.02):  #first transition is rise
        first_pulse = np.argwhere(
            np.abs(first_transition_time - photodiode_rise) < 0.02)[0, 0]
    else:  #first transition is a fall
        first_pulse = np.argwhere(
            photodiode_rise + 0.03 > first_transition_time)[0, 0]

    stim_on_photodiode_idx = 60 + 120 * np.arange(
        0, ptd_end + 1 - ptd_start - 1, 1)
    stim_on_photodiode = stim_vsync_fall[stim_on_photodiode_idx]
    photodiode_on = photodiode_rise[first_pulse +
                                    np.arange(0, ptd_end - ptd_start, 1)]
    delay_rise = photodiode_on - stim_on_photodiode

    #    print 'ptd_start: ' + str(ptd_start)
    #    print str(ptd_end)

    #    plt.figure()
    #    plt.plot(stim_on_photodiode[:10],'o')
    #    plt.plot(photodiode_on[:10],'.r')
    #    plt.show()

    delay = np.mean(delay_rise[:-1])
    print("monitor delay: " + str(delay))

    #adjust stimulus time with monitor delay
    stim_time = stim_vsync_fall + delay

    #convert stimulus frames into twop frames
    twop_frames = np.empty((len(stim_time), 1))
    acquisition_ends_early = 0
    for i in range(len(stim_time)):
        # crossings = np.nonzero(np.ediff1d(np.sign(twop_vsync_fall - stim_time[i]))>0)
        crossings = np.searchsorted(twop_vsync_fall, stim_time[i],
                                    side='left') - 1
        if crossings < (len(twop_vsync_fall) - 1):
            twop_frames[i] = crossings
        else:
            twop_frames[i:len(stim_time)] = np.NaN
            acquisition_ends_early = 1
            break

    if acquisition_ends_early > 0:
        print("Acquisition ends before stimulus")

    return twop_frames, twop_vsync_fall, stim_vsync_fall, photodiode_rise
Example #39
def _select_train_warm_items(URM_all,
                             train_item_percentage,
                             train_interaction_percentage=None):
    """
    Selects a certain percentage of the URM_all WARM items and splits the URM in two
    IMPORTANT: the number of items to be sampled is not computed with respect to the shape of the URM but with respect
    to the number of WARM items it contains. Cold items don't count.
    :param URM_all:
    :param train_item_percentage:
    :param train_interaction_percentage:
    :return:
    """

    sample_successful = False
    terminate = False

    n_interactions = URM_all.nnz

    URM = sps.csc_matrix(URM_all)
    item_interactions = np.ediff1d(URM.indptr)

    n_warm_items = np.sum(item_interactions > 0)

    n_train_items = int(n_warm_items * train_item_percentage)

    indices_for_sampling = np.arange(0, URM_all.shape[1],
                                     dtype=np.int)[item_interactions > 0]
    np.random.shuffle(indices_for_sampling)

    while not terminate:

        if n_train_items == n_warm_items and n_train_items > 1:
            n_train_items -= 1

        train_items = indices_for_sampling[0:n_train_items]

        # check if enough interactions are in train
        if train_interaction_percentage is not None:

            train_interactions = np.sum(item_interactions[train_items])

            current_train_interaction_percentage = train_interactions / n_interactions

            if current_train_interaction_percentage < train_interaction_percentage * 0.9:
                # Too few interactions in train, add items
                if n_train_items == n_warm_items:
                    terminate = True
                    sample_successful = False
                else:
                    n_train_items += 1

            elif current_train_interaction_percentage > train_interaction_percentage * 1.1:
                # Too many interactions in train, remove items
                if n_train_items == 1:
                    terminate = True
                    sample_successful = False
                else:
                    n_train_items -= 1

            else:
                terminate = True
                sample_successful = True

        else:
            terminate = True
            sample_successful = True

    assert sample_successful, "Unable to select the train items with the desired specifications"

    return train_items
Example #40
    def _insert_many(self, i, j, x):
        """Inserts new nonzero at each (i, j) with value x

        Here (i,j) index major and minor respectively.
        i, j and x must be non-empty, 1d arrays.
        Inserts each major group (e.g. all entries per row) at a time.
        Maintains has_sorted_indices property.
        Modifies i, j, x in place.
        """
        order = np.argsort(i, kind='mergesort')  # stable for duplicates
        i = i.take(order, mode='clip')
        j = j.take(order, mode='clip')
        x = x.take(order, mode='clip')

        do_sort = self.has_sorted_indices

        # Update index data type
        idx_dtype = get_index_dtype((self.indices, self.indptr),
                                    maxval=(self.indptr[-1] + x.size))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        i = np.asarray(i, dtype=idx_dtype)
        j = np.asarray(j, dtype=idx_dtype)

        # Collate old and new in chunks by major index
        indices_parts = []
        data_parts = []
        ui, ui_indptr = np.unique(i, return_index=True)
        ui_indptr = np.append(ui_indptr, len(j))
        new_nnzs = np.diff(ui_indptr)
        prev = 0
        for c, (ii, js, je) in enumerate(izip(ui, ui_indptr, ui_indptr[1:])):
            # old entries
            start = self.indptr[prev]
            stop = self.indptr[ii]
            indices_parts.append(self.indices[start:stop])
            data_parts.append(self.data[start:stop])

            # handle duplicate j: keep last setting
            uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
            if len(uj) == je - js:
                indices_parts.append(j[js:je])
                data_parts.append(x[js:je])
            else:
                indices_parts.append(j[js:je][::-1][uj_indptr])
                data_parts.append(x[js:je][::-1][uj_indptr])
                new_nnzs[c] = len(uj)

            prev = ii

        # remaining old entries
        start = self.indptr[ii]
        indices_parts.append(self.indices[start:])
        data_parts.append(self.data[start:])

        # update attributes
        self.indices = np.concatenate(indices_parts)
        self.data = np.concatenate(data_parts)
        nnzs = np.asarray(np.ediff1d(self.indptr, to_begin=0), dtype=idx_dtype)
        nnzs[1:][ui] += new_nnzs
        self.indptr = np.cumsum(nnzs, out=nnzs)

        if do_sort:
            # TODO: only sort where necessary
            self.has_sorted_indices = False
            self.sort_indices()

        self.check_format(full_check=False)
Example #41
def create_partition(A, cdepth=2, epsilon=0.25, seeds=None):
    """
    Create the partition based on an input matrix using the algebraic multigrid
    method coarse/fine-splittings based on direct couplings. The standard values
    for cdepth and epsilon are taken from the following reference.

    For more information see: U. Trottenberg, C. W. Oosterlee, and A. Schuller.
    Multigrid. Academic press, 2000.

    Parameters
    ----------
    A: sparse matrix used for the agglomeration
    cdepth: the greater it is, the more intense the aggregation will be, e.g.
        fewer cells if it is used combined with generate_coarse_grid
    epsilon: weight for the off-diagonal entries to define the "strong
        negative coupling"
    seeds: (optional) to define a-priori coarse cells

    Returns
    -------
    out: agglomeration indices

    How to use
    ----------
    part = create_partition(tpfa_matrix(g))
    g = generate_coarse_grid(g, part)

    """

    if A.size == 0: return np.zeros(1)
    Nc = A.shape[0]

    # For each node, which other nodes are strongly connected to it
    ST = sps.lil_matrix((Nc, Nc), dtype=np.bool)

    # In the first instance, all cells are strongly connected to each other
    At = A.T

    for i in np.arange(Nc):
        ci, _, vals = sps.find(At[:, i])
        neg = vals < 0.
        nvals = vals[neg]
        nci = ci[neg]
        minId = np.argmin(nvals)
        ind = -nvals >= epsilon * np.abs(nvals[minId])
        ST[nci[ind], i] = True

    # Temporary field, will store connections of depth 1
    STold = ST.copy()
    for _ in np.arange(2, cdepth + 1):
        for j in np.arange(Nc):
            rowj = np.array(STold.rows[j])
            row = np.hstack([STold.rows[r] for r in rowj])
            ST[j, np.concatenate((rowj, row))] = True
        STold = ST.copy()

    del STold

    ST.setdiag(False)
    lmbda = np.array([len(s) for s in ST.rows])

    # Define coarse nodes
    candidate = np.ones(Nc, dtype=np.bool)
    is_fine = np.zeros(Nc, dtype=np.bool)
    is_coarse = np.zeros(Nc, dtype=np.bool)

    # cells that are not important for any other cells are on the fine scale.
    for row_id, row in enumerate(ST.rows):
        if not row:
            is_fine[row_id] = True
            candidate[row_id] = False

    ST = ST.tocsr()
    it = 0
    while np.any(candidate):
        i = np.argmax(lmbda)
        is_coarse[i] = True
        j = ST.indices[ST.indptr[i]:ST.indptr[i + 1]]
        jf = j[candidate[j]]
        is_fine[jf] = True
        candidate[np.r_[i, jf]] = False
        loop = ST.indices[mcolon.mcolon(ST.indptr[jf], ST.indptr[jf + 1] - 1)]
        for row in np.unique(loop):
            s = ST.indices[ST.indptr[row]:ST.indptr[row + 1]]
            lmbda[row] = s[candidate[s]].size + 2 * s[is_fine[s]].size
        lmbda[np.logical_not(candidate)] = -1
        it = it + 1

        # Something went wrong during aggregation
        assert it <= Nc

    del lmbda, ST

    if seeds is not None:
        is_coarse[seeds] = True
        is_fine[seeds] = False

    # If two neighbors are coarse, eliminate one of them
    c2c = np.abs(A) > 0
    c2c_rows, _, _ = sps.find(c2c)

    pairs = np.empty((2, 0), dtype=np.int)
    for idx, it in enumerate(np.where(is_coarse)[0]):
        loc = slice(c2c.indptr[it], c2c.indptr[it + 1])
        ind = np.setdiff1d(c2c_rows[loc], it)
        cind = ind[is_coarse[ind]]
        new_pair = np.stack((np.repeat(it, cind.size), cind))
        pairs = np.append(pairs, new_pair, axis=1)

    if pairs.size:
        pairs = unique.unique_np113(np.sort(pairs, axis=0), axis=1)
        for ij in pairs.T:
            mi = np.argmin(A[ij, ij])
            is_coarse[ij[mi]] = False
            is_fine[ij[mi]] = True

    coarse = np.where(is_coarse)[0]

    # Primal grid
    NC = coarse.size
    primal = sps.lil_matrix((NC, Nc), dtype=np.bool)
    for i in np.arange(NC):
        primal[i, coarse[i]] = True

    connection = sps.lil_matrix((Nc, Nc), dtype=np.double)
    for it in np.arange(Nc):
        n = np.setdiff1d(c2c_rows[c2c.indptr[it]:c2c.indptr[it + 1]], it)
        connection[it, n] = np.abs(A[it, n] / At[it, it])

    connection = connection.tocsr()

    candidates_rep = np.ediff1d(connection.indptr)
    candidates_idx = np.repeat(is_coarse, candidates_rep)
    candidates = np.stack(
        (connection.indices[candidates_idx],
         np.repeat(np.arange(NC), candidates_rep[is_coarse])),
        axis=-1)

    connection_idx = mcolon.mcolon(connection.indptr[coarse],
                                   connection.indptr[coarse + 1] - 1)
    vals = accumarray.accum(candidates,
                            connection.data[connection_idx],
                            size=[Nc, NC])
    del candidates_rep, candidates_idx, connection_idx

    mcind = np.argmax(vals, axis=0)
    mcval = [vals[r, c] for c, r in enumerate(mcind)]

    it = NC
    not_found = np.logical_not(is_coarse)
    # Process the strongest connection globally
    while np.any(not_found):
        mi = np.argmax(mcval)
        nadd = mcind[mi]

        primal[mi, nadd] = True
        not_found[nadd] = False
        vals[nadd, :] *= 0

        nc = connection.indices[connection.indptr[nadd]:
                                connection.indptr[nadd + 1]]
        af = not_found[nc]
        nc = nc[af]
        nv = mcval[mi] * connection[nadd, :]
        nv = nv.data[af]
        if len(nc) > 0:
            vals += sps.csr_matrix((nv, (nc, np.repeat(mi, len(nc)))),
                                   shape=(Nc, NC)).todense()
        mcind = np.argmax(vals, axis=0)
        mcval = [vals[r, c] for c, r in enumerate(mcind)]

        it = it + 1
        if it > Nc + 5: break

    coarse, fine = primal.tocsr().nonzero()
    return coarse[np.argsort(fine)]
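# A minimal standalone sketch of the `np.ediff1d(connection.indptr)` idiom used
# above: for a CSR (or CSC) matrix, the differences of consecutive indptr entries
# give the number of stored entries per row (or column). Toy matrix for illustration:
import numpy as np
import scipy.sparse as sps

M = sps.csr_matrix(np.array([[1., 0., 2.],
                             [0., 0., 0.],
                             [3., 4., 5.]]))

# Stored entries per row, without densifying the matrix.
print(np.ediff1d(M.indptr))  # [2 0 3]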
def read_data_split_and_search(dataset_name,
                               flag_baselines_tune = False,
                               flag_DL_article_default = False, flag_DL_tune = False,
                               flag_print_results = False):

    from Conferences.SIGIR.CMN_our_interface.CiteULike.CiteULikeReader import CiteULikeReader
    from Conferences.SIGIR.CMN_our_interface.Pinterest.PinterestICCVReader import PinterestICCVReader
    from Conferences.SIGIR.CMN_our_interface.Epinions.EpinionsReader import EpinionsReader


    result_folder_path = "result_experiments/{}/{}_{}/".format(CONFERENCE_NAME, ALGORITHM_NAME, dataset_name)

    if dataset_name == "citeulike":
        dataset = CiteULikeReader(result_folder_path)

    elif dataset_name == "epinions":
        dataset = EpinionsReader(result_folder_path)

    elif dataset_name == "pinterest":
        dataset = PinterestICCVReader(result_folder_path)


    URM_train = dataset.URM_DICT["URM_train"].copy()
    URM_validation = dataset.URM_DICT["URM_validation"].copy()
    URM_test = dataset.URM_DICT["URM_test"].copy()
    URM_test_negative = dataset.URM_DICT["URM_test_negative"].copy()


    # Ensure IMPLICIT data and DISJOINT sets
    assert_implicit_data([URM_train, URM_validation, URM_test, URM_test_negative])


    if dataset_name == "citeulike":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_test, URM_test_negative])

    elif dataset_name == "pinterest":
        assert_disjoint_matrices([URM_train, URM_validation, URM_test])
        assert_disjoint_matrices([URM_train, URM_validation, URM_test_negative])

    else:
        assert_disjoint_matrices([URM_train, URM_validation, URM_test, URM_test_negative])


    # If directory does not exist, create
    if not os.path.exists(result_folder_path):
        os.makedirs(result_folder_path)


    collaborative_algorithm_list = [
        Random,
        TopPop,
        UserKNNCFRecommender,
        ItemKNNCFRecommender,
        P3alphaRecommender,
        RP3betaRecommender,
        PureSVDRecommender,
        NMFRecommender,
        IALSRecommender,
        MatrixFactorization_BPR_Cython,
        MatrixFactorization_FunkSVD_Cython,
        EASE_R_Recommender,
        SLIM_BPR_Cython,
        SLIMElasticNetRecommender,
        ]

    metric_to_optimize = "HIT_RATE"
    n_cases = 50
    n_random_starts = 15





    algorithm_dataset_string = "{}_{}_".format(ALGORITHM_NAME, dataset_name)

    plot_popularity_bias([URM_train + URM_validation, URM_test],
                         ["Training data", "Test data"],
                         result_folder_path + algorithm_dataset_string + "popularity_plot")

    save_popularity_statistics([URM_train + URM_validation + URM_test, URM_train + URM_validation, URM_test],
                               ["Full data", "Training data", "Test data"],
                               result_folder_path + algorithm_dataset_string + "popularity_statistics")



    from Base.Evaluation.Evaluator import EvaluatorNegativeItemSample

    evaluator_validation = EvaluatorNegativeItemSample(URM_validation, URM_test_negative, cutoff_list=[5])
    evaluator_test = EvaluatorNegativeItemSample(URM_test, URM_test_negative, cutoff_list=[5, 10])



    runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,
                                                       URM_train = URM_train,
                                                       URM_train_last_test = URM_train + URM_validation,
                                                       metric_to_optimize = metric_to_optimize,
                                                       evaluator_validation_earlystopping = evaluator_validation,
                                                       evaluator_validation = evaluator_validation,
                                                       evaluator_test = evaluator_test,
                                                       output_folder_path = result_folder_path,
                                                       parallelizeKNN = False,
                                                       allow_weighting = True,
                                                       resume_from_saved = True,
                                                       n_cases = n_cases,
                                                       n_random_starts = n_random_starts)





    if flag_baselines_tune:

        for recommender_class in collaborative_algorithm_list:
            try:
                runParameterSearch_Collaborative_partial(recommender_class)
            except Exception as e:
                print("On recommender {} Exception {}".format(recommender_class, str(e)))
                traceback.print_exc()


    ################################################################################################
    ######
    ######      DL ALGORITHM
    ######

    if flag_DL_article_default:

        try:

            CMN_article_hyperparameters = {
                "epochs": 100,
                "epochs_gmf": 100,
                "hops": 3,
                "neg_samples": 4,
                "reg_l2_cmn": 1e-1,
                "reg_l2_gmf": 1e-4,
                "pretrain": True,
                "learning_rate": 1e-3,
                "verbose": False,
            }

            if dataset_name == "citeulike":
                CMN_article_hyperparameters["batch_size"] = 128
                CMN_article_hyperparameters["embed_size"] = 50

            elif dataset_name == "epinions":
                CMN_article_hyperparameters["batch_size"] = 128
                CMN_article_hyperparameters["embed_size"] = 40

            elif dataset_name == "pinterest":
                CMN_article_hyperparameters["batch_size"] = 256
                CMN_article_hyperparameters["embed_size"] = 50



            CMN_earlystopping_hyperparameters = {
                "validation_every_n": 5,
                "stop_on_validation": True,
                "evaluator_object": evaluator_validation,
                "lower_validations_allowed": 5,
                "validation_metric": metric_to_optimize
            }


            parameterSearch = SearchSingleCase(CMN_RecommenderWrapper,
                                               evaluator_validation=evaluator_validation,
                                               evaluator_test=evaluator_test)

            recommender_input_args = SearchInputRecommenderArgs(
                                                CONSTRUCTOR_POSITIONAL_ARGS = [URM_train],
                                                FIT_KEYWORD_ARGS = CMN_earlystopping_hyperparameters)

            recommender_input_args_last_test = recommender_input_args.copy()
            recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS[0] = URM_train + URM_validation

            parameterSearch.search(recommender_input_args,
                                   recommender_input_args_last_test = recommender_input_args_last_test,
                                   fit_hyperparameters_values=CMN_article_hyperparameters,
                                   output_folder_path = result_folder_path,
                                   resume_from_saved = True,
                                   output_file_name_root = CMN_RecommenderWrapper.RECOMMENDER_NAME)



        except Exception as e:

            print("On recommender {} Exception {}".format(CMN_RecommenderWrapper, str(e)))
            traceback.print_exc()



    ################################################################################################
    ######
    ######      PRINT RESULTS
    ######

    if flag_print_results:

        n_test_users = np.sum(np.ediff1d(URM_test.indptr)>=1)
        file_name = "{}..//{}_{}_".format(result_folder_path, ALGORITHM_NAME, dataset_name)

        result_loader = ResultFolderLoader(result_folder_path,
                                         base_algorithm_list = None,
                                         other_algorithm_list = [CMN_RecommenderWrapper],
                                         KNN_similarity_list = KNN_similarity_to_report_list,
                                         ICM_names_list = None,
                                         UCM_names_list = None)


        result_loader.generate_latex_results(file_name + "{}_latex_results.txt".format("article_metrics"),
                                           metrics_list = ["HIT_RATE", "NDCG"],
                                           cutoffs_list = [5, 10],
                                           table_title = None,
                                           highlight_best = True)

        result_loader.generate_latex_results(file_name + "{}_latex_results.txt".format("all_metrics"),
                                           metrics_list = ["PRECISION", "RECALL", "MAP_MIN_DEN", "MRR", "NDCG", "F1", "HIT_RATE", "ARHR_ALL_HITS",
                                                           "NOVELTY", "DIVERSITY_MEAN_INTER_LIST", "DIVERSITY_HERFINDAHL", "COVERAGE_ITEM", "DIVERSITY_GINI", "SHANNON_ENTROPY"],
                                           cutoffs_list = [10],
                                           table_title = None,
                                           highlight_best = True)

        result_loader.generate_latex_time_statistics(file_name + "{}_latex_results.txt".format("time"),
                                           n_evaluation_users=n_test_users,
                                           table_title = None)
Beispiel #43
0
def find_intersections(x, a, b, direction='all'):
    """Calculate the best estimate of intersection.

    Calculates the best estimates of the intersection of two y-value
    data sets that share a common x-value set.

    Parameters
    ----------
    x : array-like
        1-dimensional array of numeric x-values
    a : array-like
        1-dimensional array of y-values for line 1
    b : array-like
        1-dimensional array of y-values for line 2
    direction : string, optional
        specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
        or 'decreasing' (b becoming greater than a). Defaults to 'all'.

    Returns
    -------
        A tuple (x, y) of array-like with the x and y coordinates of the
        intersections of the lines.

    """
    # Find the index of the points just before the intersection(s)
    nearest_idx = nearest_intersection_idx(a, b)
    next_idx = nearest_idx + 1

    # Determine the sign of the change
    sign_change = np.sign(a[next_idx] - b[next_idx])

    # x-values around each intersection
    _, x0 = _next_non_masked_element(x, nearest_idx)
    _, x1 = _next_non_masked_element(x, next_idx)

    # y-values around each intersection for the first line
    _, a0 = _next_non_masked_element(a, nearest_idx)
    _, a1 = _next_non_masked_element(a, next_idx)

    # y-values around each intersection for the second line
    _, b0 = _next_non_masked_element(b, nearest_idx)
    _, b1 = _next_non_masked_element(b, next_idx)

    # Calculate the x-intersection. This comes from finding the equations of the two lines,
    # one through (x0, a0) and (x1, a1) and the other through (x0, b0) and (x1, b1),
    # finding their intersection, and reducing with a bunch of algebra.
    delta_y0 = a0 - b0
    delta_y1 = a1 - b1
    intersect_x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0)

    # Calculate the y-intersection of the lines. Just plug the x above into the equation
    # for the line through the a points. One could solve for y like x above, but this
    # causes weirder unit behavior and seems a little less good numerically.
    intersect_y = ((intersect_x - x0) / (x1 - x0)) * (a1 - a0) + a0

    # If there's no intersections, return
    if len(intersect_x) == 0:
        return intersect_x, intersect_y

    # Check for duplicates
    duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)

    # Make a mask based on the direction of sign change desired
    if direction == 'increasing':
        mask = sign_change > 0
    elif direction == 'decreasing':
        mask = sign_change < 0
    elif direction == 'all':
        return intersect_x[duplicate_mask], intersect_y[duplicate_mask]
    else:
        raise ValueError('Unknown option for direction: {0}'.format(str(direction)))

    return intersect_x[mask & duplicate_mask], intersect_y[mask & duplicate_mask]
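# A minimal, self-contained sketch of the interpolation formula used above, on toy
# data where y = x and y = 1 - x cross at x = 0.5 (the index-finding helpers from
# the original snippet are skipped here):
import numpy as np

x = np.array([0.0, 1.0])
a = np.array([0.0, 1.0])   # y = x
b = np.array([1.0, 0.0])   # y = 1 - x

x0, x1 = x[:-1], x[1:]
a0, a1 = a[:-1], a[1:]
b0, b1 = b[:-1], b[1:]

delta_y0 = a0 - b0
delta_y1 = a1 - b1
intersect_x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0)
intersect_y = ((intersect_x - x0) / (x1 - x0)) * (a1 - a0) + a0
print(intersect_x, intersect_y)  # [0.5] [0.5]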
Beispiel #44
0
    def export_graphviz(self, filename='rfstree.gv'):
        """
        Export the dependency graph built in the fit phase
        as a graphviz document. It returns an object g representing the
        graph (e.g., you can visualize it by g.view())
        Args:
            filename (str): output file

        Returns:
            g (graphviz.Digraph): an object representing the graph

        """
        def apply_styles(graph, styles):
            graph.graph_attr.update(('graph' in styles and styles['graph'])
                                    or {})
            graph.node_attr.update(('nodes' in styles and styles['nodes'])
                                   or {})
            graph.edge_attr.update(('edges' in styles and styles['edges'])
                                   or {})
            return graph

        if not hasattr(self, 'nodes'):
            raise ValueError('Model must be trained.')

        from graphviz import Digraph

        g = Digraph('G', filename=filename)
        g.body.extend(['rankdir=BT'])
        g.attr('node', shape='circle')
        # BFS
        S = set()
        Q = [0]
        g.node('0',
               label='{}\nr2={:.4f}'.format(self.nodes[0].feature_name,
                                            self.nodes[0].data['r2score'][-1]))
        while len(Q) > 0:
            current_id = Q[0]
            current = self.nodes[current_id]
            Q = [Q[i] for i in range(1, len(Q))]

            # prepare visualization data
            keys = {}
            if 'r2score' in current.data.keys():
                diff_scores = np.ediff1d(current.data['r2score'],
                                         to_begin=current.data['r2score'][0])
                for cnt, el in enumerate(current.data['ordered_features']):
                    keys[el] = cnt
            else:
                diff_scores = None

            for node_id in current.children:
                if node_id not in S:
                    lfn = self.nodes[node_id].feature_name
                    if current.feature_name == self.nodes[
                            node_id].feature_name:
                        # make self loop if parent feature is equal to the current one
                        g.edge(str(current_id),
                               str(current_id),
                               label='r2={:.4f}'.format(diff_scores[keys[lfn]])
                               if diff_scores is not None else '')
                    else:
                        if 'r2score' in self.nodes[node_id].data.keys():
                            lbl = '{}\nr2={:.4f}'.format(
                                lfn, self.nodes[node_id].data['r2score'][-1])
                        else:
                            lbl = '{}'.format(lfn)
                        g.node(str(node_id), label=lbl)
                        g.edge(str(node_id),
                               str(current.id),
                               label='r2={:.4f}'.format(diff_scores[keys[lfn]])
                               if diff_scores is not None else '')
                    S.add(node_id)
                    Q.append(node_id)

        styles = {
            # 'graph': {
            #     'label': 'A Fancy Graph',
            #     'fontsize': '16',
            #     'fontcolor': 'black',
            #     'bgcolor': 'white',
            #     'rankdir': 'BT',
            # },
            # 'nodes': {
            #     'fontname': 'Helvetica',
            #     'shape': 'hexagon',
            #     'fontcolor': 'black',
            #     'color': 'black',
            #     'style': 'filled',
            #     'fillcolor': 'white',
            # },
            'edges': {
                # 'style': 'solid',
                # 'color': 'black',
                'arrowhead': 'open',
                # 'fontname': 'Courier',
                'fontsize': '12',
                'fontcolor': 'black',
            }
        }
        g = apply_styles(g, styles)
        # g.view()
        return g
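# The edge labels above come from turning a cumulative r2 trace into per-feature
# increments. A small sketch of that step in isolation (values are made up):
import numpy as np

r2score = np.array([0.30, 0.45, 0.52, 0.55])
# to_begin keeps the first feature's full contribution as its own increment.
diff_scores = np.ediff1d(r2score, to_begin=r2score[0])
print(diff_scores)  # ~[0.30 0.15 0.07 0.03]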
Beispiel #45
0
for mac in os.scandir(r"C:\Niagara\Macstuffffffff"):
    df = pd.read_csv(mac.path)
    df = df.dropna(how='all')
    print((mac.name[:-9]))
    try:

        df["Unit_name"] = mac_dict.get(mac.name[:-9]).get_unit_name()
    except Exception:
        continue
    zero = np.array([0])
    name = 'MGASKBTU'
    if name in df.columns.values.tolist():
        gas = df[name].copy()
        gas = gas.dropna()
        n = np.ediff1d(gas)
        n = np.append(zero, n)
        gas = pd.DataFrame(data=n, index=gas.index)
        gas.columns = ['Gas Used']
        df['Gas Used'] = gas
    if 'WHTRMODE' in df.columns.values.tolist():
        whtr_ints = df['WHTRMODE'].copy()
        whtr_strs = []
        for mode in whtr_ints.values:
            #print (mode)
            if not np.isnan(mode):
                whtr_strs.append(whtr_modes[int(mode)])
            else:
                whtr_strs.append("Off")
        df['WHTRMODESTRINGS'] = whtr_strs
    df.to_csv('c:/Niagara/MACPRP/' + '/' + mac.name)
        user_profile = self.URM.indices[start_pos:end_pos]

        scores[user_profile] = -np.inf

        return scores


if __name__ == '__main__':
    extractor = Extractor
    userList = extractor.get_interaction_users(extractor, False)
    itemList = extractor.get_interaction_items(extractor, False)
    ratingList = np.ones(Extractor().get_numb_interactions())

    URM_all = extractor.get_interaction_matrix(extractor, False)
    warm_items_mask = np.ediff1d(URM_all.tocsc().indptr) > 0
    warm_items = np.arange(URM_all.shape[1])[warm_items_mask]

    URM_all = URM_all[:, warm_items]

    warm_users_mask = np.ediff1d(URM_all.tocsr().indptr) > 0
    warm_users = np.arange(URM_all.shape[0])[warm_users_mask]

    URM_all = URM_all[warm_users, :]

    URM_train, URM_test = train_test_holdout(URM_all, train_perc=0.8)

    recommender = UserCFKNNRecommender(URM_train)
    recommender.fit(shrink=0.0, topK=200)

    submissionID = 3
Beispiel #47
0
def _distancesFromArrays(xData, yData):
    deltas = np.dstack((np.ediff1d(xData, to_begin=np.float32(0.)),
                        np.ediff1d(yData, to_begin=np.float32(0.))))[0]
    return np.cumsum(np.sqrt((deltas**2).sum(axis=1)))
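# A quick check of the helper above on a toy polyline: it returns the cumulative
# arc length along the path, with the first entry equal to 0.
import numpy as np

x = np.array([0., 1., 1., 0.])
y = np.array([0., 0., 1., 1.])
deltas = np.dstack((np.ediff1d(x, to_begin=np.float32(0.)),
                    np.ediff1d(y, to_begin=np.float32(0.))))[0]
print(np.cumsum(np.sqrt((deltas ** 2).sum(axis=1))))  # [0. 1. 2. 3.]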
def _peak_detection(signal, threshold=0.0 * mV, sign='above', format=None):
    """
    Return the peak times for all events that cross threshold.
    Usually used for extracting spike times from a membrane potential.
    Similar to spike_train_generation.threshold_detection.

    Parameters
    ----------
    signal : neo AnalogSignal object
        'signal' is an analog signal.
    threshold : A quantity, e.g. in mV
        'threshold' contains a value that must be reached
        for an event to be detected.
    sign : 'above' or 'below'
        'sign' determines whether to count thresholding crossings that
        cross above or below the threshold. Default: 'above'.
    format : None or 'raw'
        Whether to return as SpikeTrain (None) or as a plain array
        of times ('raw'). Default: None.

    Returns
    -------
    result_st : neo SpikeTrain object
        'result_st' contains the spike times of each of the events
        (spikes) extracted from the signal.
    """
    assert threshold is not None, "A threshold must be provided"

    if sign == 'above':
        cutout = np.where(signal > threshold)[0]
        peak_func = np.argmax
    elif sign == 'below':
        cutout = np.where(signal < threshold)[0]
        peak_func = np.argmin
    else:
        raise ValueError("sign must be 'above' or 'below'")

    if len(cutout) <= 0:
        events_base = np.zeros(0)
    else:
        # Select threshold crossings lasting at least 2 data points (np.diff(cutout) > 1).
        # This avoids empty slices
        border_start = np.where(np.diff(cutout) > 1)[0]
        border_end = border_start + 1
        borders = np.concatenate((border_start, border_end))
        borders = np.append(0, borders)
        borders = np.append(borders, len(cutout) - 1)
        borders = np.sort(borders)
        true_borders = cutout[borders]
        right_borders = true_borders[1::2] + 1
        true_borders = np.sort(np.append(true_borders[0::2], right_borders))

        # Workaround for a bug that occurs when the signal goes below threshold
        # for a single data point: eliminate empty slices from np.split
        backward_mask = np.absolute(np.ediff1d(true_borders, to_begin=1)) > 0
        forward_mask = np.absolute(
            np.ediff1d(true_borders[::-1], to_begin=1)[::-1]) > 0
        true_borders = true_borders[backward_mask * forward_mask]
        split_signal = np.split(np.array(signal), true_borders)[1::2]

        maxima_idc_split = np.array([peak_func(x) for x in split_signal])

        max_idc = maxima_idc_split + true_borders[0::2]

        events = signal.times[max_idc]
        events_base = events.magnitude

        if events_base is None:
            # This occurs in some Python 3 builds due to some
            # bug in quantities.
            events_base = np.array([event.magnitude
                                    for event in events])  # Workaround
    if format is None:
        result_st = SpikeTrain(events_base,
                               units=signal.times.units,
                               t_start=signal.t_start,
                               t_stop=signal.t_stop)
    elif format == 'raw':
        result_st = events_base
    else:
        raise ValueError("Format argument must be None or 'raw'")

    return result_st
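# The core of the peak detection above, stripped of the neo/quantities machinery:
# find contiguous supra-threshold stretches, then take the argmax inside each.
# A minimal sketch on a plain numpy "signal" with toy values:
import numpy as np

signal = np.array([0., 1., 3., 2., 0., 0., 1., 4., 1., 0.])
above = (signal > 0.5).astype(int)
# +1 marks the first sample of each stretch, -1 the sample just after its last.
edges = np.flatnonzero(np.ediff1d(above, to_begin=above[0], to_end=-above[-1]))
starts, stops = edges[0::2], edges[1::2]
peaks = [s + np.argmax(signal[s:e]) for s, e in zip(starts, stops)]
print(peaks)  # [2, 7]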
def Column_main_Extracter_sub_second(img, orignal_img, scaling_factor):
    blur_cr_img = cv2.blur(orignal_img, (13, 13))
    crop_img_resize = cv2.resize(blur_cr_img,
                                 None,
                                 fx=scaling_factor / 2,
                                 fy=scaling_factor / 2,
                                 interpolation=cv2.INTER_AREA)

    crop_img_resize_n = cv2.fastNlMeansDenoisingColored(
        crop_img_resize, None, 10, 10, 7, 21)
    crop_img_resize_n_gray = cv2.cvtColor(crop_img_resize_n,
                                          cv2.COLOR_BGR2GRAY)
    th3 = cv2.adaptiveThreshold(crop_img_resize_n_gray, 255,
                                cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY_INV, 21, 7)

    top = int(th3.shape[1] / 40)
    bottom = int(th3.shape[1] / 40)
    left = int(th3.shape[1] / 20)
    right = int(th3.shape[1] / 20)
    th4 = cv2.copyMakeBorder(th3,
                             top=top,
                             bottom=bottom,
                             left=left,
                             right=right,
                             borderType=cv2.BORDER_CONSTANT,
                             value=0)

    for angle in [
            0.1, -0.1, 0.2, -0.2, 0.3, -0.3, 0.4, -0.4, 0.5, -0.5, 0.6, -0.6,
            0.7, -0.7, 0.8, -0.8
    ]:
        th5 = rotate(th4.copy(), angle)
        kernel = np.ones((30, 9), np.uint8)
        th5 = cv2.morphologyEx(th5.copy(), cv2.MORPH_CLOSE, kernel)

        th5 = cv2.bitwise_not(th5)
        th5[th5 < 255] = 0
        th5[th5 == 255] = 1

        split_candidates = np.where(
            np.sum(th5, axis=0) >= (np.max(np.sum(th5, axis=0)) -
                                    (np.mean(np.sum(th5, axis=0)) / 1.5)))[0]
        split_candidates = np.unique(
            np.append(split_candidates, [0, th5.shape[1]]))

        empty_spc_clm_dif = np.ediff1d(split_candidates)
        Column_boundries = (split_candidates[np.where(
            empty_spc_clm_dif > np.mean(empty_spc_clm_dif))[0] + 1] /
                            (scaling_factor / 2)).astype(int)

        Column_boundries = np.append(Column_boundries, [0, img.shape[1]])
        Column_boundries = np.unique(Column_boundries)
        for i in range(len(Column_boundries)):
            closer = np.where(
                np.ediff1d(Column_boundries) < (img.shape[1]) / 5)[0]
            if len(closer) > 0:
                Column_boundries = np.delete(Column_boundries, closer[-1])
            else:
                break
        Column_boundries = Column_boundries[1:]

        if len(Column_boundries) > 2:
            # print(Column_boundries,np.mean(np.sum(th5,axis=0)),np.max(np.sum(th5,axis=0)))
            img = rotate(img, angle, value_replace=(255, 255, 255))
            First_Column = img[:, 0:Column_boundries[0] + 10]
            Second_Column = img[:,
                                Column_boundries[0]:Column_boundries[1] + 10]
            Third_Column = img[:, Column_boundries[1]:]
        else:
            First_Column, Second_Column, Third_Column = None, None, None

        if First_Column is not None:
            break
    return First_Column, Second_Column, Third_Column
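# The column-splitting idea above reduces to: collect the x-positions of near-empty
# pixel columns, then keep only positions that follow an unusually wide gap.
# A stripped-down sketch on made-up candidate positions:
import numpy as np

split_candidates = np.array([0, 1, 2, 3, 120, 121, 122, 250, 251, 400])
gaps = np.ediff1d(split_candidates)
boundaries = split_candidates[np.where(gaps > np.mean(gaps))[0] + 1]
print(boundaries)  # [120 250 400]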
Beispiel #50
0
    def predict_bounding_boxes(self,
                               pointcloud,
                               set_next_bounding_boxes=False,
                               bounding_factor=.5,
                               eps=0.1):
        next_bounding_boxes = []
        for bounding_box in self.bounding_boxes:
            filtered_pointcloud = pointcloud.filter_points()
            filtered_pointcloud_indices = bounding_box.filter_points(
                filtered_pointcloud, bounding_factor=bounding_factor)
            filtered_points = filtered_pointcloud.points[
                filtered_pointcloud_indices, :]
            x, y = filtered_points[:, 0], filtered_points[:, 1]
            z = filtered_pointcloud.intensities[filtered_pointcloud_indices]

            # fig = plt.figure()
            # ax = fig.add_subplot(111, projection='3d')
            # ax.scatter(x, y, z)
            # plt.show()

            sorted_x, sorted_y = np.sort(x), np.sort(y)

            resolution = max(
                eps,
                min(np.min(np.ediff1d(sorted_x)),
                    np.min(np.ediff1d(sorted_y))))
            h, w = int((np.max(x) - np.min(x)) // resolution) + 1, int(
                (np.max(y) - np.min(y)) // resolution) + 1
            print(h, w, resolution)

            im = -np.ones((h, w)) * 5e-2
            quantized_x = ((filtered_points[:, 0] - np.min(x)) //
                           resolution).astype(int)
            quantized_y = ((filtered_points[:, 1] - np.min(y)) //
                           resolution).astype(int)
            im[quantized_x, quantized_y] = 1

            mask_h = int(bounding_box.width // resolution) + 1
            mask_w = int(bounding_box.length // resolution) + 1

            mask = np.ones((mask_h, mask_w))

            # plt.scatter(x, y)
            # plt.show()

            print("mask shape: ", mask.shape)
            cc = signal.correlate2d(im, mask, mode="same")
            center = (np.array([np.argmax(cc) // w,
                                np.argmax(cc) % w]) * resolution +
                      np.array([np.min(x), np.min(y)]))
            upper_right = center + np.array(
                [bounding_box.width / 2, bounding_box.length / 2])
            lower_left = center - np.array(
                [bounding_box.width / 2, bounding_box.length / 2])
            theta = bounding_box.angle
            box_pointcloud = PointCloud(np.vstack((upper_right, lower_left)))
            corners = box_pointcloud.rigid_transform(theta, center) + center
            next_bounding_boxes.append([corners.tolist(), theta])
            print(
                np.argmax(cc) // w,
                np.argmax(cc) % w, np.argmax(cc), np.max(cc),
                cc[np.argmax(cc) // w, np.argmax(cc) % w])
            # plt.subplot(1,2,1)
            # plt.imshow(im, cmap='Greys',  interpolation='nearest')
            # plt.subplot(1,2,2)
            # plt.imshow(cc, cmap='Greys',  interpolation='nearest')

            # plt.show()
        return next_bounding_boxes
    def fit(self):

        # Use np.ediff1d and NOT a sum over the matrix, as there might be values other than 0/1
        self.item_pop = np.ediff1d(self.URM_train.tocsc().indptr)
        self.n_items = self.URM_train.shape[1]
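# What the comment above is getting at, on a toy URM with explicit ratings:
# a column sum adds the rating values, while ediff1d on the CSC indptr counts
# interactions per item.
import numpy as np
import scipy.sparse as sps

URM = sps.csc_matrix(np.array([[5., 0., 1.],
                               [3., 0., 0.]]))
print(np.asarray(URM.sum(axis=0)).ravel())  # [8. 0. 1.]  sums of rating values
print(np.ediff1d(URM.indptr))               # [2 0 1]     number of interactions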
Beispiel #52
0
def edges_to_centers(x):
    """
    Convert coordinate edges to centers, and return also the widths
    """
    return 0.5 * (x[1:] + x[:-1]), np.ediff1d(x)
Beispiel #53
0
def get_signal_diff_score(signal_1, signal_2):
    diffs_1 = np.ediff1d(signal_1, to_begin=signal_1[0])
    diffs_2 = np.ediff1d(signal_2, to_begin=signal_2[0])
    subtracted = np.absolute(diffs_1 - diffs_2)
    return np.sum(subtracted) / diffs_1.shape[0]
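# A quick worked example of the score above: it is the mean absolute difference
# between the two signals' step-to-step increments (with the first sample counted
# as an increment from zero). Toy values:
import numpy as np

signal_1 = np.array([1., 2., 4.])
signal_2 = np.array([1., 3., 3.])
diffs_1 = np.ediff1d(signal_1, to_begin=signal_1[0])   # [1. 1. 2.]
diffs_2 = np.ediff1d(signal_2, to_begin=signal_2[0])   # [1. 2. 0.]
print(np.sum(np.absolute(diffs_1 - diffs_2)) / diffs_1.shape[0])  # 1.0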
Beispiel #54
0
    def fullDF(self):
        """ 
        """
        # strains
        maxStrains = len(self.N.reshape(-1))
        Ns = self.allN

        timesteps = len(Ns)

        strains_mat = np.zeros([maxStrains,timesteps])

        for i,N in enumerate(Ns):
            zeros = np.zeros( maxStrains - len(N) )
            full_N = np.append(N, zeros)
            strains_mat[:,i] = full_N

        df = pd.DataFrame(None)

        for j in range(maxStrains):

            pop = strains_mat[j,:]
            dpop = np.ediff1d(pop, to_begin=0)
            name = self.strainIDS[j]
            strain = self.strains[name]
            type_ = strain.type
            spacers = strain.numSpacers()
            dpop_pop = np.where((dpop==0) & (pop==0), 0, dpop/pop)

            df_j = pd.DataFrame(
                {
                    'timestep': list( range(timesteps) ),
                    'name': self.strainIDS[j],
                    'pop': pop,
                    'dpop': dpop,
                    'dpop_pop': dpop_pop,
                    'type':type_,
                    'spacers': spacers
                }
            )

            df = pd.concat([df, df_j], ignore_index=True)

        # phage
        maxPhages = len(self.P.reshape(-1))
        Ps = self.allP

        phages_mat = np.zeros([maxPhages,timesteps])

        for i,P in enumerate(Ps):
            zeros = np.zeros( maxPhages - len(P) )
            full_P = np.append(P, zeros)
            phages_mat[:,i] = full_P

        for j in range(maxPhages):

            pop = phages_mat[j,:]
            dpop = np.ediff1d(pop, to_begin=0)
            name = self.phageIDS[j]
            phage = self.phages[name]
            spacers = phage.numProto()
            dpop_pop = np.where((dpop==0) & (pop==0), 0, dpop/pop)

            df_j = pd.DataFrame(
                {
                    'timestep': list( range(timesteps) ),
                    'name': self.phageIDS[j],
                    'pop': pop,
                    'dpop': dpop,
                    'dpop_pop': dpop_pop,
                    'type':'phage',
                    'spacers': spacers
                }
            )

            df = pd.concat([df, df_j], ignore_index=True)

        return df
Beispiel #55
0
def raw_chunkify_with_remap_main(args):
    """ Main function for `chunkify.py raw_remap` producing batch file for model training
    """
    if not args.overwrite:
        if os.path.exists(args.output):
            print("Cowardly refusing to overwrite {}".format(args.output))
            sys.exit(1)
        if os.path.exists(args.output_strand_list):
            print("Cowardly refusing to overwrite {}".format(args.output_strand_list))
            sys.exit(2)

    fast5_files = iterate_fast5(args.input_folder, paths=True, limit=args.limit,
                                strand_list=args.input_strand_list)

    references = util.fasta_file_to_dict(args.references)

    print('* Processing data using', args.jobs, 'threads')

    kwarg_names = ['trim', 'min_prob', 'kmer_len', 'min_length',
                   'prior', 'slip', 'chunk_len', 'normalisation', 'downsample_factor',
                   'interpolation', 'open_pore_fraction']
    kwargs = util.get_kwargs(args, kwarg_names)
    kwargs['references'] = references

    i = 0
    compiled_file = helpers.compile_model(args.model, args.compile)
    output_strand_list_entries = []
    bad_list = []
    chunk_list = []
    label_list = []
    with open(args.output_strand_list, 'w') as slfh:
        slfh.write(u'\t'.join(['filename', 'nblocks', 'score', 'nstay', 'seqlen', 'start', 'end']) + u'\n')
        for res in imap_mp(raw_chunk_remap_worker, fast5_files, threads=args.jobs,
                        fix_kwargs=kwargs, unordered=True, init=batch.init_chunk_remap_worker,
                        initargs=[compiled_file, args.kmer_len, args.alphabet]):
            if res is not None:
                i = util.progress_report(i)

                read, score, nblocks, path, seq, chunks, labels, bad_ev = res

                chunk_list.append(chunks)
                label_list.append(labels)
                bad_list.append(bad_ev)
                strand_data = [read, nblocks, -score / nblocks,
                               np.sum(np.ediff1d(path, to_begin=1) == 0),
                               len(seq), min(path), max(path)]
                slfh.write('\t'.join([str(x) for x in strand_data]) + '\n')

    if compiled_file != args.compile:
        os.remove(compiled_file)

    if chunk_list == []:
        print("no chunks were produced", file=sys.stderr)
        sys.exit(1)
    else:
        print('\n* Writing out to HDF5')
        hdf5_attributes = {
            'chunk': args.chunk_len,
            'downsample_factor': args.downsample_factor,
            'input_type': 'raw',
            'interpolation': args.interpolation,
            'kmer': args.kmer_len,
            'normalisation': args.normalisation,
            'section': 'template',
            'trim': args.trim,
            'alphabet': args.alphabet,
        }
        blanks_per_chunk = np.concatenate([(l == 0).mean(1) for l in label_list])
        blanks = np.percentile(blanks_per_chunk, args.blanks_percentile)
        util.create_labelled_chunks_hdf5(args.output, blanks, hdf5_attributes, chunk_list, label_list, bad_list)
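# The "nstay" term written to the strand list above counts repeated positions in
# the remapped path: wherever the path does not advance, the difference is 0.
# A minimal illustration on a toy path:
import numpy as np

path = np.array([0, 1, 1, 2, 2, 2, 3])
print(np.sum(np.ediff1d(path, to_begin=1) == 0))  # 3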
Beispiel #56
0
    def fit(self, X, y=None):
        """
        Fit the estimator.
        Parameters
        ----------
        X : numeric array-like, shape (n_samples, n_features)
            Data to be discretized.
        y : None
            Ignored. This parameter exists only for compatibility with
            :class:`sklearn.pipeline.Pipeline`.
        Returns
        -------
        self
        """
        self._infer_numerical_type(X)
        self._header = X.columns

        # X = self._validate_data(X, dtype='numeric')
        X = check_array(X, dtype='numeric', force_all_finite='allow-nan')

        valid_strategy = ('uniform', 'quantile')
        if self.strategy not in valid_strategy:
            raise ValueError("Valid options for 'strategy' are {}. "
                             "Got strategy={!r} instead.".format(
                                 valid_strategy, self.strategy))

        n_features = X.shape[1]
        n_bins = self._validate_n_bins(n_features)

        bin_edges = np.zeros(n_features, dtype=object)
        for jj in range(n_features):
            column = X[:, jj]
            missing_idx = self._get_missing_idx(column)
            column = column[~missing_idx]
            col_min, col_max = column.min(), column.max()

            if col_min == col_max:
                warnings.warn("Feature %d is constant and will be "
                              "replaced with 0." % jj)
                n_bins[jj] = 1
                bin_edges[jj] = np.array([-np.inf, np.inf])
                continue

            if self.strategy == 'uniform':
                bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)

            elif self.strategy == 'quantile':
                quantiles = np.linspace(0, 100, n_bins[jj] + 1)
                bin_edges[jj] = np.asarray(np.percentile(column, quantiles))

            # Remove bins whose width are too small (i.e., <= 1e-8)
            if self.strategy == 'quantile':
                mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8
                bin_edges[jj] = bin_edges[jj][mask]
                if len(bin_edges[jj]) - 1 != n_bins[jj]:
                    warnings.warn('Bins whose width are too small (i.e., <= '
                                  '1e-8) in feature %d are removed. Consider '
                                  'decreasing the number of bins.' % jj)
                    n_bins[jj] = len(bin_edges[jj]) - 1

        self.bin_edges_ = bin_edges
        self.n_bins_ = n_bins

        return self
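# The degenerate-bin removal above in isolation: quantile edges from a skewed
# column can collapse onto each other, and only edges strictly above their
# predecessor (by more than 1e-8) are kept. Toy edges:
import numpy as np

bin_edges = np.array([0.0, 0.0, 0.0, 1.5, 7.2])
# to_begin=np.inf always keeps the first edge.
mask = np.ediff1d(bin_edges, to_begin=np.inf) > 1e-8
print(bin_edges[mask])  # [0.  1.5 7.2]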
Beispiel #57
0
def diff(timestamps):
    """
    Returns differences between consecutive elements
    """
    return np.ediff1d(timestamps)
        parallelizeKNN=False,
        allow_weighting=True,
        resume_from_saved=True,
        n_cases=n_cases,
        n_random_starts=n_random_starts)

    if input_flags.run_baselines:

        pool = multiprocessing.Pool(processes=3, maxtasksperchild=1)
        pool.map(hyperparameter_search_collaborative_partial,
                 collaborative_algorithm_list)

        pool.close()
        pool.join()

        n_test_users = np.sum(np.ediff1d(URM_test.indptr) >= 1)
        file_name = "{}..//{}_{}_".format(result_baselines_folder_path,
                                          ALGORITHM_NAME,
                                          input_flags.dataset_name)

        KNN_similarity_to_report_list = [
            "cosine", "dice", "jaccard", "asymmetric", "tversky"
        ]

        # Put results for the CNN algorithm in the baseline folder for it to be subsequently loaded
        dataIO = DataIO(folder_path=output_folder_path +
                        "fit_ablation_all_map/all_map_0/")
        search_metadata = dataIO.load_data(
            ConvNCF_RecommenderWrapper.RECOMMENDER_NAME + "_metadata")
        dataIO = DataIO(folder_path=result_baselines_folder_path)
        dataIO.save_data(
Beispiel #59
0
def refine_grid_1d(g: pp.Grid, ratio: int = 2) -> pp.Grid:
    """Refine cells in a 1d grid.

    Parameters:
        g (pp.Grid): A 1d grid, to be refined.
        ratio (int):

    Returns:
        grid: New grid, with finer cells.

    """

    # Implementation note: The main part of the function is the construction of
    # the new cell-face relation. Since the grid is 1d, nodes and faces are
    # equivalent, and notation used mostly refers to nodes instead of faces.

    # Cell-node relation
    cell_nodes = g.cell_nodes()
    nodes, cells, _ = sps.find(cell_nodes)

    # Every cell will contribute (ratio - 1) new nodes
    num_new_nodes = (ratio - 1) * g.num_cells + g.num_nodes
    x = np.zeros((3, num_new_nodes))
    # Coordinates for splitting of cells
    theta = np.arange(1, ratio) / float(ratio)
    pos = 0
    shift = 0

    # Array that indicates whether an item in the cell-node relation represents
    # a node not listed before (i.e. whether this is the first or second
    # occurrence of the node)
    if_add = np.r_[1, np.ediff1d(cell_nodes.indices)].astype(bool)

    indices = np.empty(0, dtype=int)
    # Template array of node indices for refined cells
    ind = np.vstack((np.arange(ratio), np.arange(ratio) + 1)).flatten("F")
    nd = np.r_[np.diff(cell_nodes.indices)[1::2], 0]

    # Loop over all old cells and refine them.
    for c in np.arange(g.num_cells):
        # Find start and end nodes of the old cell
        loc = slice(cell_nodes.indptr[c], cell_nodes.indptr[c + 1])
        start, end = cell_nodes.indices[loc]

        # Flags for whether this is the first occurrence of the nodes of
        # the old cell. If so, they should be added to the new node array
        if_add_loc = if_add[loc]

        # Local cell-node (thus cell-face) relations of the new grid
        indices = np.r_[indices, shift + ind]

        # Add coordinate of the startpoint to the node array if relevant
        if if_add_loc[0]:
            x[:, pos:(pos + 1)] = g.nodes[:, start, np.newaxis]
            pos += 1

        # Add coordinates of the internal nodes
        x[:, pos:(pos + ratio - 1)] = (
            g.nodes[:, start, np.newaxis] * theta
            + g.nodes[:, end, np.newaxis] * (1 - theta)
        )
        pos += ratio - 1
        shift += ratio + (2 - np.sum(if_add_loc) * (1 - nd[c])) - nd[c]

        # Add coordinate to the endpoint, if relevant
        if if_add_loc[1]:
            x[:, pos:(pos + 1)] = g.nodes[:, end, np.newaxis]
            pos += 1

    # For 1d grids, there is a 1-1 relation between faces and nodes
    face_nodes = sps.identity(x.shape[1], format="csc")
    cell_faces = sps.csc_matrix((
        np.ones(indices.size, dtype=bool),
        indices,
        np.arange(0, indices.size + 1, 2),
    ))
    g = Grid(1, x, face_nodes, cell_faces, "Refined 1d grid")
    g.compute_geometry()

    return g
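# The `if_add` flag above in isolation: in a 1d cell-node relation each interior
# node appears twice (once per neighbouring cell), and prepending a 1 to the index
# differences marks exactly the first occurrence of every node. Toy indices:
import numpy as np

indices = np.array([0, 1, 1, 2, 2, 3])
if_add = np.r_[1, np.ediff1d(indices)].astype(bool)
print(if_add)  # [ True  True False  True False  True]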
Beispiel #60
0
    def _equally_spaced_path(self, path, point):
        continuous_variables = [
            "ref_wp_positions_x",
            "ref_wp_positions_y",
            "ref_wp_headings",
            "ref_wp_lane_width",
            "ref_wp_speed_limit",
        ]

        discrete_variables = ["ref_wp_lane_id", "ref_wp_lane_index"]

        ref_waypoints_coordinates = {
            parameter: [] for parameter in (continuous_variables + discrete_variables)
        }
        for idx, waypoint in enumerate(path):
            if not waypoint.is_shape_wp and 0 < idx < len(path) - 1:
                continue
            ref_waypoints_coordinates["ref_wp_positions_x"].append(waypoint.wp.pos[0])
            ref_waypoints_coordinates["ref_wp_positions_y"].append(waypoint.wp.pos[1])
            ref_waypoints_coordinates["ref_wp_headings"].append(
                waypoint.wp.heading.as_bullet
            )
            ref_waypoints_coordinates["ref_wp_lane_id"].append(waypoint.wp.lane_id)
            ref_waypoints_coordinates["ref_wp_lane_index"].append(
                waypoint.wp.lane_index
            )
            ref_waypoints_coordinates["ref_wp_lane_width"].append(
                waypoint.wp.lane_width
            )
            ref_waypoints_coordinates["ref_wp_speed_limit"].append(
                waypoint.wp.speed_limit
            )

        ref_waypoints_coordinates["ref_wp_headings"] = np.unwrap(
            ref_waypoints_coordinates["ref_wp_headings"]
        )
        first_wp_heading = ref_waypoints_coordinates["ref_wp_headings"][0]
        wp_position = np.array([*path[0].wp.pos, 0])
        vehicle_pos = np.array([point[0], point[1], 0])
        heading_vector = np.array([*radians_to_vec(first_wp_heading), 0,])
        projected_distant_wp_vehicle = np.inner(
            (vehicle_pos - wp_position), heading_vector
        )

        ref_waypoints_coordinates["ref_wp_positions_x"][0] = (
            wp_position[0] + projected_distant_wp_vehicle * heading_vector[0]
        )
        ref_waypoints_coordinates["ref_wp_positions_y"][0] = (
            wp_position[1] + projected_distant_wp_vehicle * heading_vector[1]
        )
        # To ensure that the distance between waypoints are equal, we used
        # interpolation approach inspired by:
        # https://stackoverflow.com/a/51515357
        cumulative_path_dist = np.cumsum(
            np.sqrt(
                np.ediff1d(ref_waypoints_coordinates["ref_wp_positions_x"], to_begin=0)
                ** 2
                + np.ediff1d(
                    ref_waypoints_coordinates["ref_wp_positions_y"], to_begin=0
                )
                ** 2
            )
        )

        if len(cumulative_path_dist) <= 1:
            return [path[0].wp]

        evenly_spaced_cumulative_path_dist = np.linspace(
            0, cumulative_path_dist[-1], len(path)
        )

        evenly_spaced_coordinates = {}
        for variable in continuous_variables:
            evenly_spaced_coordinates[variable] = interp1d(
                cumulative_path_dist, ref_waypoints_coordinates[variable]
            )(evenly_spaced_cumulative_path_dist)

        for variable in discrete_variables:
            ref_coordinates = ref_waypoints_coordinates[variable]
            evenly_spaced_coordinates[variable] = []
            jdx = 0
            for idx in range(len(path)):
                while (
                    jdx + 1 < len(cumulative_path_dist)
                    and evenly_spaced_cumulative_path_dist[idx]
                    > cumulative_path_dist[jdx + 1]
                ):
                    jdx += 1

                evenly_spaced_coordinates[variable].append(ref_coordinates[jdx])
            evenly_spaced_coordinates[variable].append(ref_coordinates[-1])

        equally_spaced_path = []
        for idx, waypoint in enumerate(path):
            equally_spaced_path.append(
                Waypoint(
                    pos=np.array(
                        [
                            evenly_spaced_coordinates["ref_wp_positions_x"][idx],
                            evenly_spaced_coordinates["ref_wp_positions_y"][idx],
                        ]
                    ),
                    heading=Heading(evenly_spaced_coordinates["ref_wp_headings"][idx]),
                    lane_width=evenly_spaced_coordinates["ref_wp_lane_width"][idx],
                    speed_limit=evenly_spaced_coordinates["ref_wp_speed_limit"][idx],
                    lane_id=evenly_spaced_coordinates["ref_wp_lane_id"][idx],
                    lane_index=evenly_spaced_coordinates["ref_wp_lane_index"][idx],
                )
            )

        return equally_spaced_path
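# The equal-spacing trick referenced above (cumulative arc length, then linear
# interpolation onto an evenly spaced distance grid), sketched in isolation on a
# toy 2-D path; all names here are illustrative, not part of the original API.
import numpy as np
from scipy.interpolate import interp1d

xs = np.array([0.0, 0.2, 1.0, 3.0])
ys = np.array([0.0, 0.0, 0.0, 0.0])

# Cumulative distance along the path (first entry 0).
dist = np.cumsum(np.sqrt(np.ediff1d(xs, to_begin=0.0) ** 2 +
                         np.ediff1d(ys, to_begin=0.0) ** 2))

# Resample both coordinates at equal arc-length intervals.
even_dist = np.linspace(0, dist[-1], len(xs))
xs_even = interp1d(dist, xs)(even_dist)
ys_even = interp1d(dist, ys)(even_dist)
print(xs_even)  # [0. 1. 2. 3.]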