def getCircularBounds(fitCloud=None,width=64,height=64,smoothing=0.01): circumference = 2*(width+height) if not fitCloud is None: cx = np.mean(fitCloud[:,0]) cy = np.mean(fitCloud[:,1]) r = 0.5* max( np.max(fitCloud[:,0])- np.min(fitCloud[:,0]),np.max(fitCloud[:,1])- np.min(fitCloud[:,1])) else: r = circumference /(2.0*math.pi) cx = cy = r perimeterPoints = np.zeros((circumference,2),dtype=float) for i in range(circumference): angle = (2.0*math.pi)*float(i) / circumference - math.pi * 0.5 perimeterPoints[i][0] = cx + r * math.cos(angle) perimeterPoints[i][1] = cy + r * math.sin(angle) bounds = {'top':perimeterPoints[0:width], 'right':perimeterPoints[width-1:width+height-1], 'bottom':perimeterPoints[width+height-2:2*width+height-2], 'left':perimeterPoints[2*width+height-3:]} bounds['s_top'],u = interpolate.splprep([bounds['top'][:,0], bounds['top'][:,1]],s=smoothing) bounds['s_right'],u = interpolate.splprep([bounds['right'][:,0],bounds['right'][:,1]],s=smoothing) bounds['s_bottom'],u = interpolate.splprep([bounds['bottom'][:,0],bounds['bottom'][:,1]],s=smoothing) bounds['s_left'],u = interpolate.splprep([bounds['left'][:,0],bounds['left'][:,1]],s=smoothing) return bounds
def interpolation_polynom(path,grad): # data=np.ndarray(shape=(len(path),3),dtype=float) #create an array of float type for the input points # #fill the array with the Pathdata # a=path[0] # b=path[1] # c=path[2] # for i in range(len(a)): # data[i,0]=a[i] # data[i,1]=b[i] # data[i,2]=c[i] # #arrange the data to use the function # data = data.transpose() #interpolate polynom degree 1 if grad==1: tck, u= interpolate.splprep(path,k=1,s=10) path = interpolate.splev(np.linspace(0,1,200), tck) #interpolate polynom degree 2 if grad==2: tck, u= interpolate.splprep(path,k=2,s=10) path = interpolate.splev(np.linspace(0,1,200), tck) #interpolate polynom degree 3 if grad==3: tck, u= interpolate.splprep(path, w=None, u=None, ub=None, ue=None, k=3, task=0, s=0.3, t=None, full_output=0, nest=None, per=0, quiet=1) path = interpolate.splev(np.linspace(0,1,200), tck) return path
def joinJoints(j1, j2): assert(j1.dtype==j2.dtype) atype = j1.dtype param = np.array([0,0.33, 0.66, 1]) j1_r = np.concatenate([j1['r_arm'], [j1['r_gripper']]]) j2_r = np.concatenate([j2['r_arm'], [j2['r_gripper']]]) j1_l = np.concatenate([j1['l_arm'], [j1['l_gripper']]]) j2_l = np.concatenate([j2['l_arm'], [j2['l_gripper']]]) combined_r = np.concatenate([[j1_r], [0.67*j1_r + 0.33*j2_r], [0.33*j1_r + 0.67*j2_r], [j2_r]]) combined_l = np.concatenate([[j1_l], [0.67*j1_l + 0.33*j2_l], [0.33*j1_l + 0.67*j2_l], [j2_l]]) N = 100 (r_tck, _) = si.splprep(combined_r.T, s=0.3, u=param, k=3) smooth_r = np.r_[si.splev(np.linspace(0,1,N), r_tck, der=0)].T (l_tck, _) = si.splprep(combined_l.T, s=0.3, u=param, k=3) smooth_l = np.r_[si.splev(np.linspace(0,1,N), l_tck, der=0)].T smooth = np.zeros(N, dtype=atype) for i in xrange(0,N): smooth[i]['r_arm'][:] = smooth_r[i][:-1] smooth[i]['r_gripper'] = smooth_r[i][-1] smooth[i]['l_arm'][:] = smooth_l[i][:-1] smooth[i]['l_gripper'] = smooth_l[i][-1] return smooth
def makeSpline(pointList,smPnts): x = [p[0] for p in pointList] y = [p[1] for p in pointList] xRed = [p[0] for p in smPnts] yRed = [p[1] for p in smPnts] # print xRed # print yRed tck,uout = splprep([xRed,yRed],s=0.,k=2,per=False) tckOri, uout = splprep([x,y],s=0.,k=2,per=False) N=300 uout = list((float(i) / N for i in xrange(N + 1))) xOri, yOri = splev(uout,tckOri) xSp,ySp = splev(uout,tck) import dtw diff = dtw.dynamicTimeWarp(zip(xOri,yOri), zip(xSp,ySp)) err = diff/len(xSp) return tck,err
def muDDot(time,mu): tck,uout = interpolate.splprep([time,mu],s=0.,k=2,per=False) dx,dy = interpolate.splev(uout,tck,der=1) mudot = dy/dx tck,uout = interpolate.splprep([time,mudot],s=0.,k=2,per=False) ddx,ddy = interpolate.splev(uout,tck,der=1) muddot = ddy/ddx return muddot
def midcrossings(a, b=None, thresh=1e-3, k=5): """usage res = midcrossings([x,] y) returns fwhm of y (a function with a single maximum value) from spline-interpolated midpoint crossings""" if b == None: y = array(a) x = arange(y) else: x = array(a) y = array(b) try: assert x.shape[0] == y.shape[0] except AssertionError: print "x and y must be same length" return None maxind = where(y == y.max())[0].flatten()[0] # uses only first max pt (y1, y2) = y[:maxind], y[maxind:] (x1, x2) = x[:maxind], x[maxind:] s = 1.0 nest = -1 interpvals = linspace(0, 1, 251) # 251 simply gives enough points to give one point close to 0.5 print "thresholding to:", thresh # lower half nob1 = where(y1 > thresh) # ^ need to ignore baseline values when fitting splines y1tofit = y1[nob1] / y1[nob1].max() tckp, u = splprep([y1tofit, x1[nob1]], s=s, k=k, nest=nest) y1p, x1p = splev(interpvals, tckp) dtohalf = abs(y1p - 0.5) # 0.5 because want width at _half_ max closest = where(dtohalf == dtohalf.min()) lowval = x1p[closest] # upper half nob2 = where(y2 > thresh) y2tofit = y2[nob2] / y2[nob2].max() tckp, u = splprep([y2tofit, x2[nob2]], s=s, k=k, nest=nest) y2p, x2p = splev(interpvals, tckp) dtohalf = abs(y2p - 0.5) closest = where(dtohalf == dtohalf.min()) hival = x2p[closest] if graphic: fwhm = hival - lowval figure(2) clf() plot(x, y / y.max(), "bx-", label="original") plot(x1p, y1p, "r-", x2p, y2p, "r-", label="spline fit") plot((lowval, hival), (0.5, 0.5), "k-", label="width") return fwhm
def __init__(self, pts, V, dV=None, V_spline_samples=100, extend_to_minima=False, reeval_distances=True): assert len(pts) > 1 # 1. Find derivs dpts = _pathDeriv(pts) # 2. Extend the path if extend_to_minima: def V_lin(x, p0, dp0, V): return V(p0+x*dp0) # extend at the front of the path xmin = optimize.fmin(V_lin, 0.0, args=(pts[0], dpts[0], V), xtol=1e-6, disp=0)[0] if xmin > 0.0: xmin = 0.0 nx = np.ceil(abs(xmin)-.5) + 1 x = np.linspace(xmin, 0, nx)[:, np.newaxis] pt_ext = pts[0] + x*dpts[0] pts = np.append(pt_ext, pts[1:], axis=0) # extend at the end of the path xmin = optimize.fmin(V_lin, 0.0, args=(pts[-1], dpts[-1], V), xtol=1e-6, disp=0)[0] if xmin < 0.0: xmin = 0.0 nx = np.ceil(abs(xmin)-.5) + 1 x = np.linspace(xmin, 0, nx)[::-1, np.newaxis] pt_ext = pts[-1] + x*dpts[-1] pts = np.append(pts[:-1], pt_ext, axis=0) # Recalculate the derivative dpts = _pathDeriv(pts) # 3. Find knot positions and fit the spline. pdist = integrate.cumtrapz(np.sqrt(np.sum(dpts*dpts, axis=1)), initial=0.0) self.L = pdist[-1] k = min(len(pts)-1, 3) # degree of the spline self._path_tck = interpolate.splprep(pts.T, u=pdist, s=0, k=k)[0] # 4. Re-evaluate the distance to each point. if reeval_distances: def dpdx(_, x): dp = np.array(interpolate.splev(x, self._path_tck, der=1)) return np.sqrt(np.sum(dp*dp)) pdist = integrate.odeint(dpdx, 0., pdist, rtol=0, atol=pdist[-1]*1e-8)[:,0] self.L = pdist[-1] self._path_tck = interpolate.splprep(pts.T, u=pdist, s=0, k=k)[0] # Now make the potential spline. self._V = V self._dV = dV self._V_tck = None if V_spline_samples is not None: x = np.linspace(0,self.L,V_spline_samples) # extend 20% beyond this so that we more accurately model the # path end points x_ext = np.arange(x[1], self.L*.2, x[1]) x = np.append(-x_ext[::-1], x) x = np.append(x, self.L+x_ext) y = self.V(x) self._V_tck = interpolate.splrep(x,y,s=0)
def correct_te(tck, k): """Corrects the trailing edge of a flatback airfoil. This corrections will make the trailing edge of the normalized flatback airfoil align with the y-axis. Args: tck (tuple): A tuple (t,c,k) containing the vector of knots, the B-spline coefficients, and the degree of the spline. k (int): The degree of the returned bspline Return: tuple: A tuple (t,c,k) containing the vector of knots, the B-spline coefficients, and the degree of the spline. """ try: u0_x = bspl_find_x(x_loc=1.0, start=0.0, end=0.1, tck=tck) except ValueError: u0_x = None try: u1_x = bspl_find_x(x_loc=1.0, start=0.9, end=1.0, tck=tck) except ValueError: u1_x = None if u0_x is not None and u1_x is not None: u = np.linspace(u0_x, u1_x, 1000) points = interpolate.splev(u, tck, der=0) tck_norm_mod = interpolate.splprep(points, s=0.0, k=k) elif u0_x is None and u1_x is not None: u = np.linspace(0.0, u1_x, 1000) points = interpolate.splev(u, tck, der=0) p_u0 = [points[0][0], points[1][0]] u0_grad = interpolate.splev(0.0, tck, der=1) dx = 1.0 - p_u0[0] dy = dx * u0_grad[1] / u0_grad[0] p_new = [1.0, p_u0[1] + dy] x_pts = np.insert(points[0], 0, p_new[0]) y_pts = np.insert(points[1], 0, p_new[1]) tck_norm_mod, _ = interpolate.splprep([x_pts, y_pts], s=0.0, k=k) elif u0_x is not None and u1_x is None: u = np.linspace(u0_x, 1.0, 1000) points = interpolate.splev(u, tck, der=0) p_u1 = [points[0][-1], points[1][-1]] u1_grad = interpolate.splev(1.0, tck, der=1) dx = 1.0 - p_u1[0] dy = dx * u1_grad[1] / u1_grad[0] p_new = [1.0, p_u1[1] + dy] x_pts = np.append(points[0], p_new[0]) y_pts = np.append(points[1], p_new[1]) tck_norm_mod, _ = interpolate.splprep([x_pts, y_pts], s=0.0, k=k) else: raise ValueError('Something is wrong with the bspline!') return tck_norm_mod
def interpolate(self, fit_x, fit_y, smoothing = None, newu = None): if smoothing is not None: tck, u = interpolate.splprep([fit_x, fit_y], k=4, s=smoothing) else: tck, u = interpolate.splprep([fit_x, fit_y], k=4) if (newu is not None): u = newu out = interpolate.splev(u, tck) return out, u, tck
def test_splprep_errors(self): # test that both "old" and "new" code paths raise for x.ndim > 2 x = np.arange(3*4*5).reshape((3, 4, 5)) with assert_raises(ValueError, message="too many values to unpack"): splprep(x) with assert_raises(ValueError, message="too many values to unpack"): _impl.splprep(x) # input below minimum size x = np.linspace(0, 40, num=3) with assert_raises(TypeError, message="m > k must hold"): splprep([x]) with assert_raises(TypeError, message="m > k must hold"): _impl.splprep([x]) # automatically calculated parameters are non-increasing # see gh-7589 x = [-50.49072266, -50.49072266, -54.49072266, -54.49072266] with assert_raises(ValueError, message="Invalid inputs"): splprep([x]) with assert_raises(ValueError, message="Invalid inputs"): _impl.splprep([x]) # given non-increasing parameter values u x = [1, 3, 2, 4] u = [0, 0.3, 0.2, 1] with assert_raises(ValueError, message="Invalid inputs"): splprep(*[[x], None, u])
def test_splprep(self): x = np.arange(15).reshape((3, 5)) b, u = splprep(x) tck, u1 = _impl.splprep(x) # test the roundtrip with splev for both "old" and "new" output assert_allclose(u, u1, atol=1e-15) assert_allclose(splev(u, b), x, atol=1e-15) assert_allclose(splev(u, tck), x, atol=1e-15) # cover the ``full_output=True`` branch (b_f, u_f), _, _, _ = splprep(x, s=0, full_output=True) assert_allclose(u, u_f, atol=1e-15) assert_allclose(splev(u_f, b_f), x, atol=1e-15)
def _smooth_segs(self,seg,kind,avg=False): useg,per=unique_vec(seg) if len(useg)>self.k: #check if long enough to smooth tck,u=splprep(useg.T,s=self.smooth,k=self.k,per=per) seg=np.vstack(splev(self.unew,tck)).T if (self.smooth>0) and (avg): #when smooth>0 you get different results if you reverse the order of the points #do it both ways and take the average of the two solutions #and return that instead (needed for filled contours to line up correctly) tck2,u2=splprep(useg[::-1].T,s=self.smooth,k=self.k,per=per) seg2=np.vstack(splev(self.unew[::-1],tck2)).T seg=np.dstack([seg,seg2]).mean(axis=2) if kind is not None: kind=self.kinds_fill return seg,kind
def spline_fitted_magnitudes_brute(times, magnitudes, errors, requested_times): # Spline parameters: s=len(times)/100. # smoothness parameter k=5 # spline order nest=-1 # estimate of number of knots needed (-1 = maximal) # Find the knot points. tckp, u = splprep([times, magnitudes],s=s,k=k,nest=-1) # Evaluate spline, including interpolated points. new_times, new_magnitudes = splev(linspace(0,1,len(times)),tckp) # Define an interpolating function along the spline fit. interpolating_function = interp1d(new_times, new_magnitudes, kind = "linear") # Interpolate linerarly along the spline at the requested times. fitted_magnitudes = interpolating_function(requested_times) fitted_errors = array([]) for n in range(len(requested_times)): for m in range(len(times)-1): if (requested_times[n] > times[m]) and (requested_times[n] < times[m+1]): error = (errors[m] + errors[m+1])*0.5 fitted_errors = append(fitted_errors, error) return fitted_magnitudes, fitted_errors
def spline_fitted_magnitudes(times, magnitudes, errors, requested_times, mindiff = None): # Spline parameters: k=5 # spline order nest=-1 # estimate of number of knots needed (-1 = maximal) fitted_errors = empty(0) fitted_magnitudes = empty(0) for rtime, window in window_creator(times, requested_times, mindiff = mindiff): wtimes = times[window] wmagnitudes = magnitudes[window] werrors = errors[window] # spline parameter: s=len(wtimes)/100. # smoothness parameter # Find the knot points. tckp, u = splprep([wtimes, wmagnitudes],s=s,k=k,nest=-1) # Evaluate spline, including interpolated points. new_times, new_magnitudes = splev(linspace(0,1,len(wtimes)),tckp) # Define an interpolating function along the spline fit. interpolating_function = interp1d(new_times, new_magnitudes, kind = "linear") # Interpolate linerarly along the spline at the requested times. fitted_magnitude = interpolating_function(rtime) fitted_magnitudes = append(fitted_magnitudes, fitted_magnitude) for m in range(len(wtimes)-1): if (rtime > wtimes[m]) and (rtime < wtimes[m+1]): error = (werrors[m] + werrors[m+1])*0.5 fitted_errors = append(fitted_errors, error) return fitted_magnitudes, fitted_errors
def densify_list_xy(x, y, n=500, per=True): from scipy import interpolate if per and (x[0] != x[-1]) and (x[0] != x[-1]): x.append(x[0]) y.append(y[0]) tck, u = interpolate.splprep([x, y], s=0, k=1, per=per) return interpolate.splev(np.linspace(0, 1, n), tck)
def b_spline_python(x, y, z, s=0, k=3, nest=-1): """see http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splprep.html for full input information""" from scipy.interpolate import splprep, splev tckp, u = splprep([x, y, z], s=s, k=k, nest=nest) xnew, ynew, znew = splev(u, tckp) return xnew, ynew, znew
def test_get_point(self): # Stationary trajectory traj = Trajectory(self.control_points, pixel_size=1 * q.m, furthest_point=1 * q.m) np.testing.assert_equal(traj.get_point(1 * q.s), traj.control_points[0]) tck = interp.splprep(zip(*self.control_points), s=0)[0] def evaluate_point(t): if t > 1: t = 1 return interp.splev(t, tck) * q.m # Create velocity profile which goes until the trajectory end. # We need to scale the sine amplitude in order to # max(sin(x)) = trajectory.length times = np.linspace(0, 2 * np.pi, self.n) * q.s # Normalize for not going below zero. dist = (self.traj.length + self.traj.length * np.sin(times.magnitude)) * q.m traj = Trajectory(self.control_points, pixel_size=1 * q.m, furthest_point=1 * q.m, time_dist=zip(times, dist)) for i in range(len(times)): np.testing.assert_almost_equal(traj.get_point(times[i]), evaluate_point(dist[i] / traj.length), decimal=4)
def make_joint_trajectory_with_limits(positions, vel_limits, acc_limits): positions = np.asarray(positions) vel_limits = np.asarray(vel_limits) acc_limits = np.asarray(acc_limits) n_waypoints, n_joints = positions.shape # estimate the time, to pick a reasonable number of samples t_est = (abs(positions[1:] - positions[:-1]).sum(axis=0) / vel_limits).max() # samples is an integer multiple of n_waypoints, so that we hit all waypoints upsample_ratio = max(1, int(np.ceil(t_est * 10 / n_waypoints))) n_samples = n_waypoints * upsample_ratio # upsample and smooth a little bit k = min(3, n_waypoints - 1) (tck, _) = si.splprep(positions.T, s = .001**2*n_waypoints, u=linspace(0,1,n_waypoints), k=k) # todo: is s total or per waypoint? sampled_positions = r_[si.splev(linspace(0,1,n_samples),tck)].T velocities = np.zeros((n_samples, n_joints)) times = np.zeros(n_samples) for i in xrange(1,n_samples): dpos = (sampled_positions[i] - sampled_positions[i-1]) # amount of time if we're at velocity speed limit: dt_vel = norm(dpos / vel_limits, inf) # search for minimal dt that satisfies velocity and acceleration limits f = lambda dt: (abs(dpos/dt - velocities[i-1])/dt < acc_limits).all() dt = line_search(f, dt_vel) times[i] = times[i-1]+dt velocities[i] = dpos/dt return sampled_positions, velocities, times
def rotate_te(self, alpha, nsamples, te_smooth=1, smoothing=0.0, degree=5, ins_pt=None, plot=False): """Splits the airfoil in two parts at the leading edge, rotates both parts by alpha/2.0 around the trailing edge and than reconnects both parts again.""" te_point = self.get_te_point() u_le, le_point = self.get_le_point() te_smoothing = 1.0*te_smooth/100.0 u0 = 0.0 u1 = u_le - te_smoothing u2 = u_le + te_smoothing u3 = 1.0 ss_pts = self._rotate_around_point(alpha=-alpha/2.0, u0=u0, u1=u1, rot_pt=te_point, nsamples=nsamples) ps_pts = self._rotate_around_point(alpha=alpha/2.0, u0=u2, u1=u3, rot_pt=te_point, nsamples=nsamples) if ins_pt is None: new_pts = np.vstack((ss_pts, ps_pts)) else: new_pts = np.vstack((ss_pts, ins_pt, ps_pts)) if plot: plt.figure('Leading edge rotation') plt.plot(new_pts[:, 0], new_pts[:, 1], 'or', label='fit points') plt.axis('equal') plt.grid(True) plt.legend() plt.show() x = [new_pts[:, 0], new_pts[:, 1]] self.tck, _ = interpolate.splprep(x, s=smoothing, k=degree) return ss_pts, ps_pts
def normal_transform(self, tck_transf, nsamples, s=0.0, k=3, plot=False): """bla.""" nvecs, pvecs, u = self.get_normal_vecs(nsamples=nsamples) amps = interpolate.splev(u, tck_transf) pts = [] _, vec_len = nvecs.shape for i in range(vec_len): x = pvecs[0, i] y = pvecs[1, i] dirx = nvecs[0, i] diry = nvecs[1, i] amp = amps[i] lx = x+dirx*amp ly = y+diry*amp pts.append([lx, ly]) pts = np.array(pts) if plot: plt.figure('Comparison org. airfoil vs mod. airfoil') plt.plot(pvecs[0, :], pvecs[1, :], label='org. airfoil') plt.plot(pts[:, 0], pts[:, 1], '-r', label='mod. airfoil') plt.axis('equal') plt.grid(True) plt.legend() plt.show() # Create bspline representation x = [pts[:, 0], pts[:, 1]] self.tck, _ = interpolate.splprep(x, s=s, k=k)
def interpolate(self, distance, spline=False): if spline: spline = 2 else: spline = 1 orig_num = self.getPointNumber() x = [] y = [] x.append(self.getLat()) y.append(self.getLon()) while self.goToNext(): x.append(self.getLat()) y.append(self.getLon()) tck, u = interpolate.splprep([x, y], s=0, k=spline) num = int(self.getDistance() / distance) x, y = interpolate.splev(np.linspace(0, 1, num), tck) self.points = [] for j in range(0, len(x)): self.points.append((x[j], y[j])) self.interpolated = True self.len = len(self.points) self.rewind()
def unif_resample(traj, max_diff, wt = None): """ Resample a trajectory so steps have same length in joint space """ import scipy.interpolate as si tol = .005 if wt is not None: wt = np.atleast_2d(wt) traj = traj*wt dl = mu.norms(traj[1:] - traj[:-1],1) l = np.cumsum(np.r_[0,dl]) goodinds = np.r_[True, dl > 1e-8] deg = min(3, sum(goodinds) - 1) if deg < 1: return traj, np.arange(len(traj)) nsteps = max(int(np.ceil(float(l[-1])/max_diff)),2) newl = np.linspace(0,l[-1],nsteps) ncols = traj.shape[1] colstep = 10 traj_rs = np.empty((nsteps,ncols)) for istart in xrange(0, traj.shape[1], colstep): (tck,_) = si.splprep(traj[goodinds, istart:istart+colstep].T,k=deg,s = tol**2*len(traj),u=l[goodinds]) traj_rs[:,istart:istart+colstep] = np.array(si.splev(newl,tck)).T if wt is not None: traj_rs = traj_rs/wt newt = np.interp(newl, l, np.arange(len(traj))) return traj_rs, newt
def second_derivative(xdata, inds, gt=False, s=0): ''' The second derivative of d^2 xdata / d inds^2 why inds for interpolation, not log l? if not using something like model number instead of log l, the tmin will get hidden by data with t < tmin but different log l. This is only a problem for very low Z. If I find the arg min of teff to be very close to MS_BEG it probably means the MS_BEG is at a lower Teff than Tmin. ''' tckp, _ = splprep([inds, xdata], s=s, k=3) arb_arr = np.arange(0, 1, 1e-2) xnew, ynew = splev(arb_arr, tckp) # second derivative, bitches. ddxnew, ddynew = splev(arb_arr, tckp, der=2) ddyddx = ddynew / ddxnew # not just argmin, but must be actual min... try: if gt: aind = [a for a in np.argsort(ddyddx) if ddyddx[a-1] < 0][0] else: aind = [a for a in np.argsort(ddyddx) if ddyddx[a-1] > 0][0] except IndexError: return -1 tmin_ind, _ = closest_match2d(aind, inds, xdata, xnew, ynew) return inds[tmin_ind]
def tract_prototype_mean(tractography, smooth_order, file_output=None): from .tract_obb import prototype_tract tracts = tractography.tracts() prototype_ix, leave_centers = prototype_tract(tracts, return_leave_centers=True) median_tract = tracts[prototype_ix] mean_tract = numpy.empty_like(median_tract) centers_used = set() for point in median_tract: closest_leave_center_ix = ( ((leave_centers - point[None, :]) ** 2).sum(1) ).argmin() if closest_leave_center_ix in centers_used: continue mean_tract[len(centers_used)] = leave_centers[closest_leave_center_ix] centers_used.add(closest_leave_center_ix) mean_tract = mean_tract[:len(centers_used)] if smooth_order > 0: try: from scipy import interpolate tck, u = interpolate.splprep(mean_tract.T) mean_tract = numpy.transpose(interpolate.splev(u, tck)) except ImportError: warn("A smooth order larger than 0 needs scipy installed") return Tractography([mean_tract], {}, **tractography.extra_args)
def testMVCgetDerivWpt(W): Ndim = W.shape[0] Nwaypoints = W.shape[1] dW = np.zeros((W.shape)) ddW = np.zeros((W.shape)) traj, tmp = splprep(W, k=5, s=0.01) # L = getLengthWpt(W) d = 0.0 for i in range(0, Nwaypoints - 1): dW[:, i] = splev(d, traj, der=1) ddW[:, i] = splev(d, traj, der=2) # dW[:,i] = dW[:,i]/np.linalg.norm(dW[:,i]) ds = np.linalg.norm(W[:, i + 1] - W[:, i]) dv = np.linalg.norm(dW[:, i]) dt = ds / dv # ddW[:,i] = ddW[:,i]/np.linalg.norm(ddW[:,i]) print d d = d + dt dW[:, Nwaypoints - 1] = splev(d, traj, der=1) ddW[:, Nwaypoints - 1] = splev(d, traj, der=2) return [dW, ddW]
def linear_interp(pos1, pos2, n): """ interpolate n points between pos1 and pos2 pos1 and pose2 should be a list of triplets will return: a list of list of triplets with n+2 items (including end points) a list of values between 0 and 1, the parametric parameter """ import numpy as np from scipy import interpolate n_atoms = len(pos1) pos = np.array([pos1,pos2]) #this will transform the shape of coord #to a list of triplets, describing motion of one atom over entire path pos = np.reshape(np.transpose(np.reshape(pos, (2,-1))),(-1,3,2)) new_t = np.linspace(0,1,n+2) new_pos = [] for cord in pos: tck = interpolate.splprep(u = [0,1], x=cord, k=1, s=0)[0] new_pos.append(interpolate.splev(new_t, tck)) new_pos = np.array(new_pos) new_pos = np.reshape(np.transpose(np.reshape(new_pos,(3*n_atoms,-1))), (n+2, n_atoms, 3)) return np.array(new_pos), new_t
def interpolation_polynom(path,grad): (x,y)=path.shape anzahl=y*40 #interpolate polynom degree 1 if grad==1: tck, u= interpolate.splprep(path,k=1,s=0.2) path = interpolate.splev(np.linspace(0,1,anzahl), tck) #interpolate polynom degree 2 if grad==2: tck, u= interpolate.splprep(path,k=2,s=0.2) path = interpolate.splev(np.linspace(0,1,anzahl), tck) #interpolate polynom degree 3 if grad==3: tck, u= interpolate.splprep(path, w=None, u=None, ub=None, ue=None, k=3, task=0, s=0.2, t=None, full_output=0, nest=None, per=0, quiet=1) path = interpolate.splev(np.linspace(0,1,anzahl), tck) return path
def unif_resample(x,n=None,tol=0,deg=None, seg_len = .02): if deg is None: deg = min(3, len(x) - 1) x = np.atleast_2d(x) x = remove_duplicate_rows(x) (tck,_) = si.splprep(x.T,k=deg,s = tol**2*len(x),u=np.linspace(0,1,len(x))) xup = np.array(si.splev(np.linspace(0,1, 10*len(x),.1),tck)).T dl = norms(xup[1:] - xup[:-1],1) l = np.cumsum(np.r_[0,dl]) (tck,_) = si.splprep(xup.T,k=deg,s = tol**2*len(xup),u=l) if n is not None: newu = np.linspace(0,l[-1],n) else: newu = np.linspace(0, l[-1], l[-1]//seg_len) return np.array(si.splev(newu,tck)).T
def get_measurement_lines(self, tail): """ determines the measurement segments that are used for the line scan """ f_c = self.params['measurement/line_offset'] f_o = 1 - f_c centerline = tail.centerline result = [] for side in tail.sides: # find the line between the centerline and the ventral line points = [] for p_c in centerline: p_o = curves.get_projection_point(side, p_c) #< outer line points.append((f_c*p_c[0] + f_o*p_o[0], f_c*p_c[1] + f_o*p_o[1])) # do spline fitting to smooth the line smoothing = self.params['measurement/spline_smoothing']*len(points) tck, _ = interpolate.splprep(np.transpose(points), k=2, s=smoothing) points = interpolate.splev(np.linspace(-0.5, .8, 100), tck) points = zip(*points) #< transpose list # restrict centerline to object mline = geometry.LineString(points).intersection(tail.polygon) # pick longest line if there are many due to strange geometries if isinstance(mline, geometry.MultiLineString): mline = mline[np.argmax([l.length for l in mline])] result.append(np.array(mline.coords)) return result
def predict_number(self): all_pts = [] for w in self.canvas.children: if isinstance(w, Line): wpts = np.array(w.points) xs = wpts[::2] ys = wpts[1::2] try: tck, u = interpolate.splprep([xs, ys], s=0) except Exception as e: print('warn', e) continue u_new = np.arange(np.min(u), np.max(u), 0.01) out = interpolate.splev(u_new, tck) all_pts += out img = np.zeros((560+10, 560+10)).astype(np.uint8) polyx = np.array(all_pts[::2]).astype(np.int) polyy = np.array(all_pts[1::2]).astype(np.int) R = 10 from itertools import product for xs, ys in zip(polyx, polyy): for x, y in zip(xs, ys): circle = [(x+i, y+j) for (i, j) in product(range(-R, R+1), repeat=2) if i**2+j**2 < R**2] for c in circle: img[c[0], c[1]] = 128 img = imresize(np.rot90(img), (28, 28), interp='bilinear').astype('f')/np.max(img) plt.imshow(img, cmap='gray') plt.show() predict(img)
def __init__(self, *args, **kwargs): """FRD(d, w) Construct an FRD object The default constructor is FRD(d, w), where w is an iterable of frequency points, and d is the matching frequency data. If d is a single list, 1d array, or tuple, a SISO system description is assumed. d can also be To call the copy constructor, call FRD(sys), where sys is a FRD object. To construct frequency response data for an existing LTI object, other than an FRD, call FRD(sys, omega) """ smooth = kwargs.get('smooth', False) if len(args) == 2: if not isinstance(args[0], FRD) and isinstance(args[0], LTI): # not an FRD, but still a system, second argument should be # the frequency range otherlti = args[0] self.omega = array(args[1], dtype=float) self.omega.sort() numfreq = len(self.omega) # calculate frequency response at my points self.fresp = empty( (otherlti.outputs, otherlti.inputs, numfreq), dtype=complex) for k, w in enumerate(self.omega): self.fresp[:, :, k] = otherlti._evalfr(w) else: # The user provided a response and a freq vector self.fresp = array(args[0], dtype=complex) if len(self.fresp.shape) == 1: self.fresp = self.fresp.reshape(1, 1, len(args[0])) self.omega = array(args[1], dtype=float) if len(self.fresp.shape) != 3 or \ self.fresp.shape[-1] != self.omega.shape[-1] or \ len(self.omega.shape) != 1: raise TypeError( "The frequency data constructor needs a 1-d or 3-d" " response data array and a matching frequency vector" " size") elif len(args) == 1: # Use the copy constructor. if not isinstance(args[0], FRD): raise TypeError("The one-argument constructor can only take in" " an FRD object. Received %s." % type(args[0])) self.omega = args[0].omega self.fresp = args[0].fresp else: raise ValueError("Needs 1 or 2 arguments; receivd %i." % len(args)) # create interpolation functions if smooth: self.ifunc = empty((self.fresp.shape[0], self.fresp.shape[1]), dtype=tuple) for i in range(self.fresp.shape[0]): for j in range(self.fresp.shape[1]): self.ifunc[i, j], u = splprep( u=self.omega, x=[ real(self.fresp[i, j, :]), imag(self.fresp[i, j, :]) ], w=1.0 / (absolute(self.fresp[i, j, :]) + 0.001), s=0.0) else: self.ifunc = None LTI.__init__(self, self.fresp.shape[1], self.fresp.shape[0])
def lane_detection(self, state_image_full): ''' ##### TODO ##### This function should perform the road detection args: state_image_full [96, 96, 3] out: lane_boundary1 spline lane_boundary2 spline ''' # to gray # print(f'calculating spline') gray_state = self.cut_gray(state_image_full) # edge detection via gradient sum and thresholding gradient_sum = self.edge_detection(gray_state) maxima = self.find_maxima_gradient_rowwise(gradient_sum) # first lane_boundary points lane_boundary1_points, lane_boundary2_points, lane_found = \ self.find_first_lane_point(gradient_sum) lane1_prev = lane_boundary1_points lane2_prev = lane_boundary2_points # if no lane was found,use lane_boundaries of the preceding step if lane_found: ##### TODO ##### # in every iteration: # 1- find maximum/edge with the lowest distance to the last lane boundary point # 2- append maxium to lane_boundary1_points or lane_boundary2_points # 3- delete maximum from maxima # 4- stop loop if there is no maximum left # or if the distance to the next one is too big (>=100) row = 1 while row < 68: row_max = maxima[row] if len(row_max) < 2: break # sort the points according to their distence from previous lane predictions # also, argsort returns tha indices that can be iterated insorted order # hence we use A[0] and B[0] after sorting A = np.argsort((row_max - lane1_prev[0][0])**2) # print(f'A shape: {A.shape}') B = np.argsort((row_max - lane2_prev[0][0])**2) point_1 = np.array([[row_max[A[0]], row]]) point_2 = np.array([[row_max[B[0]], row]]) # print(f'point shape: {point_1.shape}') lane_boundary1_points = np.append(lane_boundary1_points, point_1, axis=0) lane_boundary2_points = np.append(lane_boundary2_points, point_2, axis=0) lane1_prev = point_1 lane2_prev = point_2 row += 1 # lane_boundary 1 lane_boundary1 = [] # lane_boundary 2 lane_boundary2 = [] ################ ##### TODO ##### # spline fitting using scipy.interpolate.splprep # and the arguments self.spline_smoothness # # if there are more lane_boundary points points than spline parameters # else use perceding spline if lane_boundary1_points.shape[ 0] > 4 and lane_boundary2_points.shape[0] > 4: # Pay attention: the first lane_boundary point might occur twice # lane_boundary 1 lane_boundary1, _ = splprep(lane_boundary1_points.T, k=3, s=self.spline_smoothness) # lane_boundary 2 lane_boundary2, _ = splprep(lane_boundary2_points.T, k=3, s=self.spline_smoothness) # print("temp") else: lane_boundary1 = self.lane_boundary1_old lane_boundary2 = self.lane_boundary2_old ################ else: lane_boundary1 = self.lane_boundary1_old lane_boundary2 = self.lane_boundary2_old self.lane_boundary1_old = lane_boundary1 self.lane_boundary2_old = lane_boundary2 # output the spline # print(f'returned spline') return lane_boundary1, lane_boundary2
def main(): # Initialize the spline c = np.array([100, 100]) R = 50 ctrl_points_nb = 9 phi = np.linspace(0, 2 * np.pi, ctrl_points_nb + 1) x_init = c[0] + R * np.cos(phi) y_init = c[1] + R * np.sin(phi) x_init[ctrl_points_nb] = x_init[0] y_init[ctrl_points_nb] = y_init[0] dat = np.array([x_init, y_init]) tck, _ = splprep(dat, s=0, per=1, k=3) knots = tck[0] ctrl_pts = np.transpose(tck[1]) deg = tck[2] spl = BSplineCLG(knots, ctrl_pts, deg, is_periodic=True) go_on = True while go_on: # Choose the control point that you want to move spl = BSplineCLG(knots, ctrl_pts, deg, is_periodic=True) nb_dct_total = 1000 [x, y] = np.transpose(spl(np.linspace(0, 1, nb_dct_total))) plt.plot(x, y) nth = raw_input('Enter the nth point: ') nth = int(nth) nbr_lib = len(spl.get_ctrl_pts(only_free=True)) if nth > nbr_lib: raise ValueError('Too big') lib = np.transpose(spl.get_ctrl_pts(only_free=True)) all = plt.scatter(lib[0], lib[1], s=40) # Decide the range of influence of the choosen point and plot the figure if knots[nth] < 0: # For these point satisfying this condition, each of them affect two # internal parts in the curve ust1 = 0 ued1 = knots[nth + deg + 1] ust2 = knots[nth + nbr_lib] ued2 = 1 nb_dct_1 = (ued1 - ust1) * nb_dct_total nb_dct_2 = (ued2 - ust2) * nb_dct_total [xth1, yth1] = np.transpose(spl(np.linspace(ust1, ued1, nb_dct_1))) [xth2, yth2] = np.transpose(spl(np.linspace(ust2, ued2, nb_dct_2))) plt.plot(xth1, yth1, 'r') plt.plot(xth2, yth2, 'r') lib_pts = spl.get_ctrl_pts(only_free=True) old = plt.scatter(lib_pts[nth, 0], lib_pts[nth, 1], s=40, color='y') lib_pts[nth] = [110, 250] new = plt.scatter(lib_pts[nth, 0], lib_pts[nth, 1], s=40, color='r') spl.set_ctrl_pts(lib_pts, only_free=True) [xth_a1, yth_a1] = np.transpose(spl(np.linspace(ust1, ued1, nb_dct_1))) [xth_a2, yth_a2] = np.transpose(spl(np.linspace(ust2, ued2, nb_dct_2))) plt.plot(xth_a1, yth_a1, 'g') plt.plot(xth_a2, yth_a2, 'g') plt.scatter(spl(ust2)[0], spl(ust2)[1], s=40, color='c', marker="*") plt.scatter(spl(ued1)[0], spl(ued1)[1], s=40, color='m', marker="*") else: ust = knots[nth] ued = knots[nth + deg + 1] nb_dct = (ued - ust) * nb_dct_total [xth, yth] = np.transpose(spl(np.linspace(ust, ued, nb_dct))) plt.plot(xth, yth, 'r') lib_pts = spl.get_ctrl_pts(only_free=True) old = plt.scatter(lib_pts[nth, 0], lib_pts[nth, 1], s=40, color='y') lib_pts[nth] = [110, 250] new = plt.scatter(lib_pts[nth, 0], lib_pts[nth, 1], s=40, color='r') spl.set_ctrl_pts(lib_pts, only_free=True) [xth_a, yth_a] = np.transpose(spl(np.linspace(ust, ued, nb_dct))) plt.plot(xth_a, yth_a, 'g') plt.scatter(spl(ust)[0], spl(ust)[1], s=70, color='c', marker="*") plt.scatter(spl(ued)[0], spl(ued)[1], s=70, color='m', marker="*") plt.legend((all, old, new), ('All control point', 'Old moving point', 'New moving point')) plt.savefig(r'\\bkshiva\LCshareNFS002\500.14-Litho\CLG\projects\2017\MB-METRO\MS-1703-ACTIVE_CONTOURS\Figure\MovingControlPoint.png') plt.show() exit = raw_input('Quitter ou pas ?: ') if exit == 'y' or exit == 'Y': go_on = False
def _get_tck( self ): t = sort(self.points.keys()) xp = array([self.points[i] for i in t]).T k = min(3, len(self.points)-1) tcku = splprep(xp, u=t, s=0, k=k) return tcku[0]
def boomerang(theta=pi/4):#out[0] ed out[1] sono rispettivamente ascisse e ordinate della mia curva x=[cos(theta),0,-cos(theta),0,cos(theta)] y=[sin(theta)-1.,1/sin(theta)-1,sin(theta)-1.,0,sin(theta)-1.] tck, u = scint.splprep([x, y],s=0,per=1) tnew = np.arange(0, 1.01, 0.01) out = scint.splev(tnew,tck) d1 = scint.splev(tnew,tck,der=1) d2 = scint.splev(tnew,tck,der=2) '''figure() plot(x, y, 'x', out[0], out[1], a*np.sin(2*np.pi*unew), b*np.cos(2*np.pi*unew), x, y, 'b') legend(['Linear', 'Cubic Spline', 'True']) axis([min(out[0])-0.5,max(out[0])+0.5,min(out[1])-0.5,max(out[1])+0.5 ]) title('Spline of parametrically-defined curve') ''' plot(out[0],out[1],x,y) #plot(tnew,k,tnew,a*b/(((a**2-b**2)*sin(2*pi*tnew)**2+b**2)**1.5)) legend(['spline', 'teorica']) figure(2) sign=input("press 1 if inside, -1 if outside") title("curvatura vs t") k = sign * curv(d1[0],d1[1],d2[0],d2[1]) plot(tnew,k) somma=[[],[]] normal=[[],[]] tangenzial=[[],[]] figure(1) modulo=sqrt(d1[0]**2+d1[1]**2) dt=diff(tnew)[0] arc_lenght=sum(modulo*dt) superficie=area(out[0],out[1]) D=.1 tau=.5 for i in arange (0,len(out[0])): #s=sqrt(out[0][i]**2+out[1][i]**2) tangenzial[0]=d1[0][i]/modulo[i] tangenzial[1]=d1[1][i]/modulo[i] normal[0]=np.array(normal[0]) normal[1]=np.array(normal[1]) normal[0]=sign*d1[1]/modulo normal[1]=-sign*d1[0]/modulo somma[0]=modulo*dt*normal[0]*D*abs(sqrt(D*tau)*k+1) somma[1]=modulo*dt*normal[1]*D*abs(sqrt(D*tau)*k+1) #return somma quiver(out[0],out[1],normal[0],normal[1]) print("arc lenght="+str(arc_lenght)+" area= "+str(superficie)) np.save("curvatura",k) kmin = k.min() kmax = k.max() figure(3) normalizzazione= superficie + sqrt(D*tau)*(abs(arc_lenght+sqrt(D*tau)*sum(k*modulo)*dt)) print("UCNA theoretical results: \n normalization= "+str(normalizzazione)) print("F_x risultante"+str(sum(somma[0])/normalizzazione)+" F_y risultante"+str(sum(somma[1])/normalizzazione)) particle_on_boundary=(normalizzazione-superficie)/normalizzazione print(" frazione di particelle sul bordo = "+str(particle_on_boundary)) avg_module=abs(sum(modulo*dt*D*abs(sqrt(D*tau)*k+1)))/normalizzazione print(" average of the force module(respect to all particle) = "+str(avg_module)) cond_avg_module=abs(sum(modulo*dt*D*abs(sqrt(D*tau)*k+1)))/(normalizzazione-superficie) print(" conditional average of the force module(respect to boundary) = "+str(cond_avg_module)) for i in arange(0,len(out[0])-1): myk = (k[i]+k[i+1])/2. myi = (myk-kmin)/(kmax-kmin) myc = (myi,0,1.-myi) myc = cm.jet(myi) #myc = cm.seismic(myi) plot(out[0][i:i+2],out[1][i:i+2],'-',lw=7,color=myc) xmx = 1.1*max(out[0]) ymx = 1.1*max(out[1]) #plot(out[0],out[1],"-o") savetxt("input.dat",transpose(array([out[0][:-1],out[1][:-1]])).astype(float32)) xlim(-xmx,xmx) ylim(-ymx,ymx) ax = gca() ax.set_aspect(1) figure(4) title("x(t),y(t)") plot(tnew,out[0],"-o") plot(tnew,out[1],"-o")
corners_gt = reader_gt.__next__() corners_dwt = reader_dwt.__next__() bb = reader_bb.__next__() except: print('Buildings loaded: ' + str(i) + ', total: ' + str(total_count), flush=True) break # Get GT polygons num_points = np.int32(corners_gt[0]) poly = np.zeros([num_points, 2]) for c in range(num_points): poly[c, 0] = np.float(corners_gt[1 + 2 * c]) * out_size / im_size poly[c, 1] = np.float(corners_gt[2 + 2 * c]) * out_size / im_size [tck, u] = interpolate.splprep([poly[:, 0], poly[:, 1]], s=2, k=1, per=1) [allGT[:, 0, total_count], allGT[:, 1, total_count]] = interpolate.splev(np.linspace(0, 1, L), tck) # Get DWT polygons num_points = np.int32(corners_dwt[0]) poly = np.zeros([num_points, 2]) for c in range(num_points): poly[c, 0] = np.float(corners_dwt[1 + 2 * c]) * out_size / im_size poly[c, 1] = np.float(corners_dwt[2 + 2 * c]) * out_size / im_size [tck, u] = interpolate.splprep([poly[:, 0], poly[:, 1]], s=2, k=1, per=1) [allDWT[:, 0, total_count],
def fit(filename, display=True): "Do fitting on midpoints data in filename" midpoints = load_midpoints(filename) splines = [] curvatures = [] norms = [] maxcurvs = [] meancurvs = [] if display: fig = plt.figure(figsize=(10, 10)) for iframe in tqdm(range(3100, len(midpoints))): line = midpoints[iframe] x = line[:, 0] y = line[:, 1] # Delete the duplicates okay = np.where(np.abs(np.diff(x)) + np.abs(np.diff(y)) > 0) x = x[okay] y = y[okay] # Fit the splines tck, u = interpolate.splprep([x, y]) xnew, ynew = interpolate.splev(np.linspace(0, 1, 200), tck) # xnew, ynew = interpolate.splev(np.linspace(0, 1, 100), tck) splines.append([xnew, ynew]) # Calculate the curvatures and norms ka, no = curvatures_of_line(xnew, ynew) curvatures.append(ka) norms.append(no) maxcur = np.max(np.abs(ka)) meancur = np.mean(np.abs(ka)) maxcurvs.append(maxcur) meancurvs.append(meancur) # Plot if display: plt.clf() plt.plot(x, y, 'r.') plt.plot(xnew, ynew, 'b') gap = 2 plt.quiver(xnew[1:len(xnew) - 2:gap], ynew[1:len(ynew) - 2:gap], (ka * no[:, 0])[::gap], (ka * no[:, 1])[::gap], width=0.002, headwidth=3, headlength=5, scale=1.5) plt.text(280, 380, "Max Abs. Curv. = " + str(round(maxcur, 4)), color='k', fontsize=10) plt.text(280, 360, "Avg Abs. Curv. = " + str(round(meancur, 4)), color='k', fontsize=10) plt.xlim([0, 600]) plt.ylim([0, 600]) # plt.xlim([-3, 3]) # plt.ylim([-3, 3]) # plt.savefig('./results/'+ 'Control-EGCaMP_exp1_a1_30x10fps' +'/frames/img' + str(iframe) + '.jpg', dpi=200) # , orientation='landscape') plt.pause(0.00001) input() # Return results return (np.array(maxcurvs), np.array(meancurvs))
def cartoon(point, li0, d, n = 2): #print self.get_backbone() if d[point][3] != 'backbone': return None ind = li0.index(point) fli = [] count = 0 c1, c2 = 0, 0 while len(fli) < n+1: if d[li0[ind+count]][2] == 'C': fli.append(li0[ind+count]) count += 1 if ind+count >= len(li0): break c1 = count count = 1 while len(fli) < 2*n+1: d[li0[ind-count]][2] if d[li0[ind-count]][2] == 'C': fli.append(li0[ind-count]) count+=1 if ind-count <= 0: c2 = 2*n+1 - len(fli) break if c2: count = c1 while len(fli) < 2*n+1: #print ind+count, len(fli) #print li0[ind+count] if d[li0[ind+count]][2] == 'C': fli.append(li0[ind+count]) count += 1 if ind+count >= len(li0): break fli.sort() #print fli coord = [list(map(float,d[i][-3:])) for i in fli] coord = np.array(coord) x,y,z = coord[:,0],coord[:,1],coord[:,2] try: tck, u = interpolate.splprep([x,y,z], s=3) except ValueError: return None x_knots, y_knots, z_knots = interpolate.splev(tck[0], tck) u_fine = np.linspace(0,1,len(coord)*10) x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck) coord_fine = [] for i in range (len(x_fine)): coord_fine.append([x_fine[i],y_fine[i],z_fine[i]]) coord_fine = np.array(coord_fine) ''' fig2 = plt.figure(2) ax3d = fig2.add_subplot(111, projection='3d') ax3d.plot(x, y, z, 'r*') #ax3d.plot(x_knots, y_knots, z_knots, 'go') ax3d.plot(x_fine, y_fine, z_fine, 'g-') fig2.show() plt.show() ''' #print fli #self.check_cartoon(fli) dis = 0 for i in range (1,len(coord_fine)-1): #print self.d[fli[i]][-3:],self.d[fli[i-1]][-3:] dis += distance(coord_fine[i],coord_fine[i-1]) #print dis, distance(d[fli[0]][-3:], d[fli[-1]][-3:]) res = dis/distance(d[fli[0]][-3:], d[fli[-1]][-3:]) return res
def build_profile(self, pts_mask = None, samp_int=3, bins = None, shift = True, wrap = False, cut = True, cutdist=None): """ Build the filament profile using the inputted or recently created filament spine Parameters ---------- self: An instance of the radfil_class pts_mask: numpy.ndarray A 2D array masking out any regions from image array you don't want to sample; must be of boolean type and the same shape as the image array. The spine points within the masked out region will then be excluded from the list of cuts and the master profile. samp_int: integer (default=3) An integer indicating how frequently you'd like to make sample cuts across the filament. Very roughly corresponds to sampling frequency in pixels bins: int or 1D numpy.ndarray, optional The number of bins (int) or the actual bin edges (numpy array) you'd like to divide the profile into. If entered as an integer "n", the profile will be divided into n bins, from the minimum radial distance found in any cut to the maximum radial distance found in any cut. If an array (i.e. np.linspace(-2,2,100)). the array values will represent the bin edges (i.e. 100 bins evenly distributed between -2 and 2). If entered, the profile will be averaged in each bin, and the fit_profile method will only consider the bin-averaged data shift: boolean (default = True) Indicates whether to shift the profile to center at the peak value. The peak value is determined by searching for the peak value along each cut, either confined within the filament mask, or confined within some value cutdist from the spine (if no mask is entered) wrap: boolean (default = False) Indicates whether to wrap around the central pixel, so that the final profile will be a "half profile" with the peak near/at the center (depending on whether it's shifted). make_cut: boolean (default = True) Indicates whether to perform cuts when extracting the profile. Since the original spine found by `fil_finder_2D` is not likely differentiable everywhere, setting `cut = True` necessitates a spline fit to smoothe the spine. Setting `cut = False` will make `radfil` calculate a distance and a height/value for every pixel inside the mask. cutdist: float or int If using a pre-computed spine, and you would like to shift to the peak column density value (shift=True), you must enter a cutdist, which indicates the radial distance from the spine you'd like to search for the peak column density along each cut. This will create a mask whose outer boundary is defined by all points equidistant from the spine at the value of cutdist. Attributes ---------- xall, yall: 1D numpy.ndarray (list-like) All data points (with or without cutting). xbeforespline, ybeforespline: 1D numpy.ndarray (list-like) Positions of the "filament" identified by `fil_finder_2D`, in pixel units. This is before smoothing done with `spline`. xspline, yspline: 1D numpy.ndarray (list-like) Positions of the spline points used for cuts, in pixel units. masterx, mastery: 1D numpy.ndarray (list-like) The profile (radial distances and height/column density/intensity) obtained by `profile_builder`. dictionary_cuts: Python dictionary A dictionary containing the profile (radian distances and height) for each cut along the spline, as two lists--one for the distance, and the other for the height. """ # Read shift, wrap, cut, and samp_int ## shift if isinstance(shift, bool): self.shift = shift else: raise TypeError("shift has to be a boolean value. 
See documentation.") ## wrap if isinstance(wrap, bool): self.wrap = wrap else: raise TypeError("wrap has to be a boolean value. See documentation.") ## cut if isinstance(cut, bool): self.cutting = cut else: raise TypeError("cut has to be a boolean value. See documentation.") ## samp_int if isinstance(samp_int, int): self.samp_int = samp_int else: self.samp_int = None warnings.warn("samp_int has to be an integer; ignored for now. See documentation.") # Read the pts_mask if isinstance(pts_mask, np.ndarray) and (pts_mask.ndim == 2): self.pts_mask = pts_mask.astype(bool) else: self.pts_mask = None #extract x and y coordinates of filament spine pixcrd = np.where(self.filspine) # Sort these points by distance along the spine x, y = profile_tools.curveorder(pixcrd[1], pixcrd[0]) self.xbeforespline, self.ybeforespline = x, y # If cut if self.cutting: # Filter out wrong samp_int if self.samp_int is None: raise TypeError("samp_int has to be an integer, when cut is True.") # Spline calculation: ##set the spline parameters k = 3 nest = -1 # estimate of number of knots needed (-1 = maximal) ## find the knot points tckp, up, = splprep([x,y], k = k, nest = -1) ## evaluate spline xspline, yspline = splev(up, tckp) xprime, yprime = splev(up, tckp, der=1) ## Notice that the result containt points on the spline that are not ## evenly sampled. This might introduce biases when using a single ## number `samp_int`. #Make sure no-mask case works. If they want to shift and have no mask, need to enter cutdist; otherwise raise warning #If everything checks out, create the new mask for them using their inputted cutdist if shift is True and self.mask is None: if isinstance(cutdist, numbers.Number): try: from descartes import PolygonPatch except ImportError: raise ImportError("You must install the descartes package to continue") self.cutdist = float(cutdist) * self.imgscale.unit spine=LineString([(i[0], i[1]) for i in zip(xspline,yspline)]) boundary = spine.buffer(self.cutdist.value/self.imgscale.value) boundarypatch=PolygonPatch(boundary) boundaryline=boundarypatch.get_verts() #green boundary of MST filament # calculate the x and y points possibly within the image y_int = np.arange(0, self.image.shape[0]) x_int = np.arange(0, self.image.shape[1]) # create a list of possible coordinates (inspired by https://stackoverflow.com/questions/25145931/extract-coordinates-enclosed-by-a-matplotlib-patch) g = np.meshgrid(x_int, y_int) coords = list(zip(*(c.flat for c in g))) # create the list of valid coordinates inside contours newmaskpoints = np.vstack([p for p in coords if boundarypatch.contains_point(p, radius=0)]) self.mask=np.zeros(self.image.shape) self.mask[newmaskpoints[:,1],newmaskpoints[:,0]]=1 self.mask=self.mask.astype(bool) else: raise TypeError("If shift=True and no mask is provided, you need to enter a valid cutdist in pc, which indicates \ the radial distance from the spine along which to search for the peak column density pixel") ## Plot the results ########## ## prepare vmin, vmax = np.min(self.image[self.mask]), np.nanpercentile(self.image[self.mask], 98.) 
xmin, xmax = np.where(self.mask)[1].min(), np.where(self.mask)[1].max() ymin, ymax = np.where(self.mask)[0].min(), np.where(self.mask)[0].max() ## plot fig=plt.figure(figsize=(10,5)) ax=plt.gca() ax.imshow(self.image, origin='lower', cmap='gray', interpolation='none', norm = colors.LogNorm(vmin = vmin, vmax = vmax)) ax.contourf(self.mask, levels = [0., .5], colors = 'w') ax.plot(xspline, yspline, 'r', label='fit', lw=3, alpha=1.0) ax.set_xlim(max(0., xmin-.1*(xmax-xmin)), min(self.mask.shape[1]-.5, xmax+.1*(xmax-xmin))) ax.set_ylim(max(0., ymin-.1*(ymax-ymin)), min(self.mask.shape[0]-.5, ymax+.1*(ymax-ymin))) ax.set_xticklabels([]) ax.set_yticklabels([]) self.fig, self.ax = fig, ax # Only points within pts_mask AND the original mask are used. if (self.pts_mask is not None): pts_mask = ((self.pts_mask[np.round(yspline[1:-1:self.samp_int]).astype(int), np.round(xspline[1:-1:self.samp_int]).astype(int)]) &\ (self.mask[np.round(yspline[1:-1:self.samp_int]).astype(int), np.round(xspline[1:-1:self.samp_int]).astype(int)])) else: pts_mask = (self.mask[np.round(yspline[1:-1:self.samp_int]).astype(int), np.round(xspline[1:-1:self.samp_int]).astype(int)]) # Prepare for extracting the profiles self.xspline = xspline[1:-1:self.samp_int][pts_mask] self.yspline = yspline[1:-1:self.samp_int][pts_mask] self.points = np.asarray(zip(self.xspline, self.yspline)) self.fprime = np.asarray(zip(xprime[1:-1:self.samp_int][pts_mask], yprime[1:-1:self.samp_int][pts_mask])) # Extract the profiles dictionary_cuts = defaultdict(list) if (self.imgscale.unit == u.pc): for n in range(len(self.points)): profile = profile_tools.profile_builder(self, self.points[n], self.fprime[n], shift = self.shift, wrap = self.wrap) cut_distance = profile[0]*self.imgscale.to(u.pc).value dictionary_cuts['distance'].append(cut_distance) ## in pc dictionary_cuts['profile'].append(profile[1]) dictionary_cuts['plot_peaks'].append(profile[2]) dictionary_cuts['plot_cuts'].append(profile[3]) dictionary_cuts['mask_width'].append(geometry.LineString(profile[3]).length*self.imgscale.value) elif (self.imgscale.unit == u.pix): for n in range(len(self.points)): profile = profile_tools.profile_builder(self, self.points[n], self.fprime[n], shift = self.shift, wrap = self.wrap) cut_distance = profile[0]*self.imgscale.to(u.pix).value ## in pix dictionary_cuts['distance'].append(cut_distance) dictionary_cuts['profile'].append(profile[1]) dictionary_cuts['plot_peaks'].append(profile[2]) dictionary_cuts['plot_cuts'].append(profile[3]) dictionary_cuts['mask_width'].append(geometry.LineString(profile[3]).length) # Return the complete set of cuts. Including those outside `cutdist`. self.dictionary_cuts = dictionary_cuts ## Plot the peak positions if shift if self.shift: self.ax.plot(np.asarray(dictionary_cuts['plot_peaks'])[:, 0], np.asarray(dictionary_cuts['plot_peaks'])[:, 1], 'b.', markersize = 10.,alpha=0.75) # if no cutting else: warnings.warn("The profile builder when cut=False is currently under development, and may fail with large images. Use at your own risk!!!") ## warnings.warn if samp_int exists. if (self.samp_int is not None): self.samp_int = None warnings.warn("samp_int is not used. cut is False.") ## warnings.warn if shift and/or wrap is True. if (self.shift or (not self.wrap)): warnings.warn("shift and/or wrap are not used. cut is False.") self.shift, self.wrap = False, True # Only points within pts_mask AND the original mask are used. 
if (self.pts_mask is not None): pts_mask = ((self.pts_mask[np.round(self.ybeforespline).astype(int), np.round(self.xbeforespline).astype(int)]) &\ (self.mask[np.round(self.ybeforespline).astype(int), np.round(self.xbeforespline).astype(int)])) else: pts_mask = (self.mask[np.round(self.ybeforespline).astype(int), np.round(self.xbeforespline).astype(int)]) # Make the line object with Shapely self.points = np.asarray(zip(self.xbeforespline[pts_mask], self.ybeforespline[pts_mask])) line = geometry.LineString(self.points) self.xspline, self.yspline, self.fprime = None, None, None # Make the mask to use for cutdist selection ## (masking out the pixels that are closest to the head or the tail) xspine, yspine = self.xbeforespline, self.ybeforespline xgrid, ygrid = np.meshgrid(np.arange(self.filspine.shape[1]), np.arange(self.filspine.shape[0])) agrid = np.argmin(np.array([np.hypot(xgrid-xspine[i], ygrid-yspine[i]) for i in range(len(xspine))]), axis = 0) mask_agrid = (agrid != agrid.max()) & (agrid != 0) ## Plot the results ##### ## prepare vmin, vmax = np.min(self.image[self.mask]), np.nanpercentile(self.image[self.mask], 98.) xmin, xmax = np.where(self.mask)[1].min(), np.where(self.mask)[1].max() ymin, ymax = np.where(self.mask)[0].min(), np.where(self.mask)[0].max() ## plot fig=plt.figure(figsize=(10, 5)) ax=plt.gca() ax.imshow(self.image, origin='lower', cmap='gray', interpolation='none', norm = colors.LogNorm(vmin = vmin, vmax = vmax)) ax.contourf(self.mask, levels = [0., .5], colors = 'w') ax.plot(line.xy[0], line.xy[1], 'r', label='fit', lw=2, alpha=0.25) ax.set_xlim(max(0., xmin-.1*(xmax-xmin)), min(self.mask.shape[1]-.5, xmax+.1*(xmax-xmin))) ax.set_ylim(max(0., ymin-.1*(ymax-ymin)), min(self.mask.shape[0]-.5, ymax+.1*(ymax-ymin))) ax.set_xticklabels([]) ax.set_yticklabels([]) self.fig, self.ax = fig, ax # Extract the distances and the heights dictionary_cuts = {} if (self.imgscale.unit == u.pc): dictionary_cuts['distance'] = [[line.distance(geometry.Point(coord))*self.imgscale.to(u.pc).value for coord in zip(np.where(mask_agrid)[1], np.where(mask_agrid)[0])]] dictionary_cuts['profile'] = [[self.image[coord[1], coord[0]] for coord in zip(np.where(mask_agrid)[1], np.where(mask_agrid)[0])]] dictionary_cuts['plot_peaks'] = None dictionary_cuts['plot_cuts'] = None elif (self.imgscale.unit == u.pix): dictionary_cuts['distance'] = [[line.distance(geometry.Point(coord))*self.imgscale.to(u.pix).value for coord in zip(np.where(mask_agrid)[1], np.where(mask_agrid)[0])]] dictionary_cuts['profile'] = [[self.image[coord[1], coord[0]] for coord in zip(np.where(mask_agrid)[1], np.where(mask_agrid)[0])]] dictionary_cuts['plot_peaks'] = None dictionary_cuts['plot_cuts'] = None self.dictionary_cuts = dictionary_cuts xall, yall = np.concatenate(self.dictionary_cuts['distance']),\ np.concatenate(self.dictionary_cuts['profile']) ## Store the values. self.xall = xall ## in pc self.yall = yall ### the following operations, including binning and fitting, should be done on self.xall and self.yall. # Bin the profiles (if nobins=False) or stack the profiles (if nobins=True) ## This step assumes linear binning. 
## If the input is the number of bins: if isinstance(bins, numbers.Number) and (bins%1 == 0): self.binning = True bins = int(round(bins)) minR, maxR = np.min(self.xall), np.max(self.xall) bins = np.linspace(minR, maxR, bins+1) masterx = bins[:-1]+.5*np.diff(bins) mastery = np.asarray([np.nanmedian(self.yall[((self.xall >= (X-.5*np.diff(bins)[0]))&\ (self.xall < (X+.5*np.diff(bins)[0])))]) for X in masterx]) # record the number of samples in each bin masternobs = np.asarray([np.sum(((self.xall >= (X-.5*np.diff(bins)[0]))&\ (self.xall < (X+.5*np.diff(bins)[0])))) for X in masterx]) self.bins = bins ## If the input is the edges of bins: elif isinstance(bins, np.ndarray) and (bins.ndim == 1): self.binning = True bins = bins masterx = bins[:-1]+.5*np.diff(bins) ## assumes linear binning. mastery = np.asarray([np.nanmedian(self.yall[((self.xall >= (X-.5*np.diff(bins)[0]))&\ (self.xall < (X+.5*np.diff(bins)[0])))]) for X in masterx]) # record the number of samples in each bin masternobs = np.asarray([np.sum(((self.xall >= (X-.5*np.diff(bins)[0]))&\ (self.xall < (X+.5*np.diff(bins)[0])))) for X in masterx]) self.bins = bins ## If the input is not bins-like. else: self.binning = False self.bins = None masterx = self.xall mastery = self.yall masternobs = None print "No binning is applied." # Return the profile sent to `fit_profile`. self.masterx = masterx self.mastery = mastery self.masternobs = masternobs # Return a dictionary to store the key setup Parameters self._params['__init__']['image'] = self.image self._params['__init__']['mask'] = self.mask ## This is the intersection between all the masks params = {'cutting': self.cutting, 'binning': self.binning, 'shift': self.shift, 'wrap': self.wrap, 'bins': self.bins, 'samp_int': self.samp_int} self._params['build_profile'] = params # Return a dictionary to store the results ## "points" are the spline points used for the cuts or ## the point collection of the original spine in the ## "no-cutting" case. ## "dictionary_cuts" are for plotting, mainly. results = {'points': self.points, 'xall': self.xall, 'yall': self.yall, 'masterx': self.masterx, 'mastery': self.mastery, 'dictionary_cuts': self.dictionary_cuts} self._results['build_profile'] = results return self
def __init__(self, r, k=3, s=None, u=None, force=None): """ Interpolate vector r's path in N-dimensional space (from M points {r}). r=array([[vec1], [vec2], [vec3], ..., [vecM]]) (r[:,i] is the trajectory of component i.) k = order of the spline (cubic (3) by default to get curvature) s = smoothing parameter ala scipy (s=0 -> points r given exactly) u = node points (for internal u-parameter) force = force field at the nodes of r (forces evaluated at each M point) Usage: fr=VectorSplineFunction(r) (now r=fr(t) with t [0,1]) tan=fr.normalized_tangent(0.2) ... The trajectory is parametrized with variable 't' ([0,1]) where t=l/L (L is the total length of the trajectory and l length so far). Internally for technical reasons the path is internally parametrized by 'u' ([0,1]), where the path is taken to be _linear_ between the points. Thus, for k=1, u is equal to t. r=r(t) and r=r(u); r(t=0)=r(u=0) and r(t=1)=r(u=1) The length along the path l(u) and l(t). t has the property dl(t)/dr=L/1=L = constant (l(t=0)=0 and l(t=1)=L). dl dl(t) dt dt l'(u) -- = -----*--=L*-- --> dt=-----du --> t(u)=l(u)/L (with correct bound.cond.) du dt du du L /b /b l(a,b)= | ds = | |dr(u)/du|du (for a,b=u) /u=a /u=a /b /b l(a,b)= | ds = | |dr(u)/du*du/dt|dt (for a,b=t) /t=a /t=a where |...| is the Eucledian norm. Energy slope E'(t)=F(t)*r'(t) """ self.k = k self.s = s self.r = r.copy() self.u = u self.N = len(r[0, :]) self.M = len(r[:, 0]) self.M2 = self.M * 10 #finer grid (used for plotting etc) self.M3 = self.M * 50 #finest grid (used in integration etc) self.w = [1] * self.M self.w[0] = 1E6 self.w[-1] = 1E6 if self.s == None: self.s = self.M - np.sqrt(2.0 * self.M) if self.u != None: self.u = u.copy() self.tck, self.u = splprep(self.r.transpose(), u=self.u, k=self.k, s=self.s, w=self.w) u, l = self.line_integral(a=0, b=1, parameter='u', full_out=True) self.length = l[-1] t = l / self.length self.u_of_t = SplineFunction(t, u, k=3, s=0) self.t_of_u = SplineFunction(u, t, k=3, s=0) self.t = [self.t_of_u(u) for u in self.u] if self.k != 1: self.linear = VectorSplineFunction(self.r, k=1, s=0, u=self.u) if force != None: self.force = VectorSplineFunction(r=force, k=self.k, s=self.s, u=self.u)
def spline(points, steps): t = np.linspace(0.0, 1.0, steps * 10) tck, u = interpolate.splprep(points.T, k=2, s=100.0) points = np.stack(interpolate.splev(t, tck, der=0), 1) return subsample(points, steps)
def __init__(self, tr_model, arch_proto): ShowBase.__init__(self) self.taskMgr.add(self.renderNtestTask, "renderNtestTask") #changing camera poses # self.taskMgr.add( self.putAxesTask, "putAxesTask" ) #draw co-ordinate axis # Misc Setup self.render.setAntialias(AntialiasAttrib.MAuto) self.setFrameRateMeter(True) self.tcolor = TerminalColors.bcolors() # # Set up Mesh (including load, position, orient, scale) self.setupMesh() self.positionMesh() # Custom Render # Important Note: self.render displays the low_res and self.scene0 is the images to retrive self.scene0 = NodePath("scene0") # cytX = copy.deepcopy( cyt ) self.low_res.reparentTo(self.render) self.cyt.reparentTo(self.scene0) self.cyt2.reparentTo(self.scene0) # # Make Buffering Window bufferProp = FrameBufferProperties().getDefault() props = WindowProperties() # props.setSize(1280, 960) props.setSize(320, 240) #@# win2 = self.graphicsEngine.makeOutput( pipe=self.pipe, name='wine1', sort=-1, fb_prop=bufferProp, win_prop=props, flags=GraphicsPipe.BFRequireWindow) #flags=GraphicsPipe.BFRefuseWindow) # self.window = win2#self.win #dr.getWindow() self.win2 = win2 # self.win2.setupCopyTexture() # Adopted from : https://www.panda3d.org/forums/viewtopic.php?t=3880 # # Set Multiple Cameras self.cameraList = [] # for i in range(4*4): for i in range(1 * 1): #@# print 'Create camera#', i self.cameraList.append(self.customCamera(str(i))) # Disable default camera # dr = self.camNode.getDisplayRegion(0) # dr.setActive(0) # # Set Display Regions (4x4) dr_list = self.customDisplayRegion(1, 1) # # Setup each camera for i in range(len(dr_list)): dr_list[i].setCamera(self.cameraList[i]) # # Set buffered Queues (to hold rendered images and their positions) # each queue element will be an RGB image of size 240x320x3 self.q_imStack = Queue.Queue() self.q_labelStack = Queue.Queue() # Caffe # Caffe init was here, now removed # Setup TensorFLow #TODO Get tensorflow model file info from command line (need to edit constructor) puf_obj = puf.PlutoFlow(trainable_on_device='/gpu:0') # Setup placeholders (need just 1 placeholder, ie. 
input image) #TODO Try `1` instead of `None` self.tf_x = tf.placeholder('float', [None, 240, 320, 3], name='x') # Set the ResNet inference_op with tf.device('/gpu:0'): self.tf_infer_op = puf_obj.resnet50_inference(self.tf_x, is_training=False) # Print all Trainable Variables var_list = tf.trainable_variables() print '--Trainable Variables--', 'length= ', len(var_list) total_n_nums = [] for vr in var_list: shape = vr.get_shape().as_list() n_nums = np.prod(shape) total_n_nums.append(n_nums) print self.tcolor.OKGREEN, vr.name, shape, n_nums, self.tcolor.ENDC print self.tcolor.OKGREEN, 'Total Trainable Params (floats): ', sum( total_n_nums) print 'Not counting the pop_mean and pop_varn as these were set to be non trainable', self.tcolor.ENDC # Fire up the TensorFlow-Session self.tensorflow_session = tf.Session(config=tf.ConfigProto( log_device_placement=True, allow_soft_placement=True)) # Load trainables' values from file self.tensorflow_saver = tf.train.Saver() restore_file_name = 'tf.models/model-165000' print 'Loading Model File : ', restore_file_name self.tensorflow_saver.restore(self.tensorflow_session, restore_file_name) print self.tcolor.OKGREEN, 'Loaded file : ', restore_file_name, self.tcolor.ENDC # store loss at each frame in the trajectory self.loss_ary = [] self.gt_ary = [] self.pred_ary = [] # # Setting up Splines # Note: Start interpolation at 50, # self.pathGen = PathMaker.PathMaker().path_flat_h # self.pathGen = PathMaker.PathMaker().path_smallM # self.pathGen = PathMaker.PathMaker().path_yaw_only # self.pathGen = PathMaker.PathMaker().path_bigM # self.pathGen = PathMaker.PathMaker().path_flat_spiral # self.pathGen = PathMaker.PathMaker().path_helix # self.pathGen = PathMaker.PathMaker().path_like_real self.pathGen = PathMaker.PathMaker().path_like_real2 t, X = self.pathGen() self.spl_tck, self.spl_u = interpolate.splprep(X.T, u=t.T, s=0.0, per=1)
from scipy import interpolate
import matplotlib.pyplot as plt
import numpy as np

def spline(conf, x):
    # Cubic polynomial a + b*x + c*x**2 + d*x**3 (defined but not used below).
    a, b, c, d = conf
    y = a + b * x + c * (x**2) + d * (x**3)
    return y

x = np.array([0, 1, 2, 3, 9, 8, 6, 7, 8, 9])
y = np.array([5, 2, 3, 4, 9, 3, 4, 0, 2, 1])
deg = 5
point = 100

tck, u = interpolate.splprep([x, y], k=deg, s=0)
u = np.linspace(0, 1, num=point, endpoint=True)
curve = interpolate.splev(u, tck)  # evaluated curve; a distinct name avoids shadowing the spline() helper above

# ----- drawing -----
plt.figure(1)
plt.plot(x, y, "go")
plt.plot(curve[0], curve[1], "r-")
plt.pause(0.02)
plt.show()
[+9.7652e-02, +1.6437e-02, -3.7717e-02],
[+9.9588e-02, +8.7955e-03, -3.7621e-02],
[+1.0067e-01, -1.2508e-03, -3.7747e-02],
[+1.0001e-01, -9.6434e-03, -3.7707e-02],
[+9.7509e-02, -1.8110e-02, -3.7714e-02],
[+9.4712e-02, -2.3489e-02, -3.7709e-02],
[+9.2555e-02, -2.6809e-02, -3.7705e-02],
[+8.8488e-02, -3.0517e-02, -3.7705e-02],
[+8.5550e-02, -3.2219e-02, -3.7693e-02],
[+8.0712e-02, -3.4272e-02, -3.7685e-02],
[+7.3737e-02, -3.6850e-02, -3.7739e-02],
[+6.3462e-02, -3.7464e-02, -3.7913e-02],
[+5.7212e-02, -3.7632e-02, -3.7902e-02],
[+5.0136e-02, -3.7304e-02, -3.7890e-02]])

print(leftFoot)
tck, u = splprep(leftFoot.T, u=None, s=0.0)
print(tck)
u_new = np.linspace(u.min(), u.max(), 1000)
x_new, y_new, z_new = splev(u_new, tck, der=0)
writeSpline("left_foot_contour", leftFoot, tck)

# ----------------- Working on right foot
rightFoot = leftFoot.copy()  # copy so the left-foot points are not modified in place
rightFoot[:, 1] *= -1        # mirror across the y axis
tck, u = splprep(rightFoot.T, u=None, s=0.0)
u_new = np.linspace(u.min(), u.max(), 1000)
x_new, y_new, z_new = splev(u_new, tck, der=0)
def plot_(self): self.axes.cla() data = [] knots = [] knots_uniform = [] control_point_list = [] control_point_list_uniform = [] with open("data.txt", "rt") as file: for line in file: data.append(list(map(int, line.strip().split(" ")))) with open("output_from_ui.txt", "rt") as output: for idx, line in enumerate(output.readlines()): line = line.strip('\n') if idx == 0: k = int(line.strip('\n')) elif idx == 1: num_cpoints = int(line.strip('\n')) elif idx == 3: knot = line.strip('\n').split(' ') knot.pop() knot = list(map(float, knot)) elif idx >= 5: control_point = line.strip("\n").split(' ') control_point = list(map(float, control_point)) control_point_list.append(control_point) control_point_list = np.array(control_point_list) control_point_x = control_point_list[:, 0] control_point_y = control_point_list[:, 1] data = np.array(data) x = data[:, 0] y = data[:, 1] tck, u = interpolate.splprep([x, y], s=0, k=3) unew = np.arange(0, 1, 0.01) u = np.linspace(0, 1, num=100, endpoint=True) out = interpolate.splev(u, tck) inter_x, inter_y = np.array( interpolate.splev(unew, (knot, control_point_list.T, 3))) self.axes.set_title( 'CE7453 Numerical Algorithms B-Spline interpolation', fontsize=15) self.axes.set_xlabel('x', horizontalalignment='center', fontsize=15) self.axes.set_ylabel('y', horizontalalignment='center', fontsize=15) # plt.plot(out[0], out[1], 'y',color='blue',label='Degree 3 Scipy method') self.axes.plot(control_point_x, control_point_y, color='red', marker='.', linestyle='--', label='Control Polygon(Chord)', markersize=6) self.axes.plot(inter_x, inter_y, linestyle='-', color='green', label='Degree 3 B-Spline Curve(Chord)') self.axes.plot(x, y, 'ko', label='Data Points', markersize=4) self.axes.legend() # ax.tight_layout() # plt.savefig('Comparison.png') # plt.show() self.canvas.draw()
def plot_mep(atom_pos, mep_energies, image_name=None, filename=None, show=None, plot=1, fitplot_args=None, style_dic=None): """ Used for NEB method atom_pos (list) - xcart positions of diffusing atom along the path, mep_energies (list) - full energies of the system corresponding to atom_pos image_name - deprecated, use filename style_dic - dictionary with styles 'p' - style of points 'l' - style of labels 'label' - label of points plot - if plot or not """ from analysis import determine_barrier if filename is None: filename = image_name #Create if not style_dic: style_dic = {'p': 'ro', 'l': 'b-', 'label': None} if not fitplot_args: fitplot_args = {} # print atom_pos = np.array(atom_pos) data = atom_pos.T # tck, u = interpolate.splprep( data) #now we get all the knots and info about the interpolated spline path = interpolate.splev( np.linspace(0, 1, 500), tck ) #increase the resolution by increasing the spacing, 500 in this example path = np.array(path) diffs = np.diff(path.T, axis=0) path_length = np.linalg.norm(diffs, axis=1).sum() mep_pos = np.array([p * path_length for p in u]) if 0: #plot the path in 3d fig = plt.figure() ax = Axes3D(fig) ax.plot(data[0], data[1], data[2], label='originalpoints', lw=2, c='Dodgerblue') ax.plot(path[0], path[1], path[2], label='fit', lw=2, c='red') ax.legend() plt.show() # if '_mep' not in calc: calc['_mep'] = [ atom_pos, mep_energies ] # just save in temp list to use the results in neb_wrapper if hasattr(header, 'plot_mep_invert') and header.plot_mep_invert: # for vacancy mep_energies = list(reversed(mep_energies)) mine = min(mep_energies) eners = np.array(mep_energies) - mine xnew = np.linspace(0, path_length, 1000) # ynew = spline(mep_pos, eners, xnew ) # spl = CubicSpline(mep_pos, eners, bc_type = 'natural' ) # second-derivative zero # spl = CubicSpline(mep_pos, eners,) # # spl = CubicSpline(mep_pos, eners, bc_type = 'periodic') # spl = CubicSpline(mep_pos, eners, bc_type = 'clamped' ) #first derivative zero spl = scipy.interpolate.PchipInterpolator(mep_pos, eners) ynew = spl(xnew) diff_barrier = determine_barrier(mep_pos, eners) print_and_log('plot_mep(): Diffusion barrier =', round(diff_barrier, 2), ' eV', imp='y') # sys.exit() # print() if 'fig_format' not in fitplot_args: fitplot_args['fig_format'] = 'eps' if 'xlim' not in fitplot_args: fitplot_args['xlim'] = (-0.05, None) if 'xlabel' not in fitplot_args: fitplot_args['xlabel'] = 'Reaction coordinate ($\AA$)' if 'ylabel' not in fitplot_args: fitplot_args['ylabel'] = 'Energy (eV)' path2saved = None if plot: # print(image_name) path2saved = fit_and_plot(orig=(mep_pos, eners, style_dic['p'], style_dic['label']), spline=(xnew, ynew, style_dic['l'], None), image_name=image_name, filename=filename, show=show, **fitplot_args) # print(image_name, filename) if 0: with open(filename + '.txt', 'w') as f: f.write('DFT points:\n') for m, e in zip(mep_pos, eners): f.write('{:10.5f}, {:10.5f} \n'.format(m, e)) f.write('Spline:\n') for m, e in zip(xnew, ynew): f.write('{:10.5f}, {:10.5f} \n'.format(m, e)) return path2saved, diff_barrier
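The path_length computed above from the densely resampled spline is the usual polyline approximation of the arc length of the MEP:

$$
L \;\approx\; \sum_{i=1}^{N-1} \left\lVert \mathbf{p}_{i+1} - \mathbf{p}_i \right\rVert,
\qquad \mathbf{p}_i = \big(x(u_i),\, y(u_i),\, z(u_i)\big),\ \ u_i \in [0,1],
$$

with N = 500 evaluation points in this snippet; the reaction coordinate assigned to each image is then its splprep parameter value times $L$ (the mep_pos array).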
def main(argv): indexPhaseX0 = 1 numberOfPhasesForX0 = 40 i0 = 10 nSPLUNew = 1000 vMin = -90 vMax = 15 nMin = -0.1 nMax = 0.8 integrationFilename = "results/integrationINapIKFig10_1.npz" isochronFilename = \ "results/isochronINapIKFig10_1Phase%02dOver%d.npz"%(indexPhaseX0, numberOfPhasesForX0) figFilename = \ "figures/isochronINapIKFig10_1Phase%02dOver%d.eps"%(indexPhaseX0, numberOfPhasesForX0) results = np.load(integrationFilename) times = results["times"] ys = results["ys"] spikeIndices = getPeakIndices(v=ys[0, :]) spikeTimes = times[spikeIndices] times = np.delete(times, np.arange(0, spikeIndices[0])) times = times - times[0] ys = np.delete(ys, np.arange(0, spikeIndices[0]), axis=1) spikeIndices = spikeIndices - spikeIndices[0] spikeTimes = spikeTimes - spikeTimes[0] period = spikeTimes[1] - spikeTimes[0] phases = times % period phasesForX0 = np.arange(0, period, period / numberOfPhasesForX0) indicesBtwFirstAndSecondSpike = np.arange(0, spikeIndices[1]) phasesToSearch = phases[indicesBtwFirstAndSecondSpike] indicesPhasesForX0 = np.empty(len(phasesForX0), dtype=np.int64) for i in xrange(len(phasesForX0)): phaseForX0 = phasesForX0[i] indicesPhasesForX0[i] = np.argmin(np.abs(phasesToSearch - phaseForX0)) x0 = ys[:, indicesPhasesForX0[indexPhaseX0]] results = np.load(isochronFilename) isochron = results["isochron"] validIndices = np.logical_and( np.logical_and(vMin <= isochron[0, :], isochron[0, :] <= vMax), np.logical_and(nMin <= isochron[1, :], isochron[1, :] <= nMax)).nonzero()[0] isochron = isochron[:, validIndices] sortedIsochron = sortIsochron(isochron=isochron) splTck, splU = splprep(sortedIsochron, s=5.0) splUNew = np.linspace(splU.min(), splU.max(), nSPLUNew) splXInter, splYInter = splev(splUNew, splTck, der=0) # plt.figure() # plotHighThresholdINapIKVectorField(i=i0) plt.plot(ys[0, :], ys[1, :], label="limit cycle attractor") # pdb.set_trace() plt.annotate("x0", xy=x0, color="red", size=14) def i(t): return (i0) model = INapIKModel.getHighThresholdInstance(i=i) plotINapIKNullclines(i=i0, eL=model._eL, gL=model._gL, eNa=model._eNa, gNa=model._gNa, eK=model._eK, gK=model._gK, mVOneHalf=model._mVOneHalf, mK=model._mK, nVOneHalf=model._nVOneHalf, nK=model._nK) plt.plot(isochron[0, :], isochron[1, :], marker="o", color="red", linestyle="None") plt.plot(splXInter, splYInter, color="gray", linestyle="solid") plt.legend(loc="upper left") plt.xlabel("Voltage (mv)") plt.ylabel("K activation variable, n") plt.xlim((-90, 15)) plt.ylim((-0.1, 0.8)) plt.savefig(figFilename) plt.show() pdb.set_trace()
print("luego es", type(axx)) ''' LO IDEAL axx = [[253, 212], [338, 247], [242, 347], [226, 417], [247, 513], [245, 552], [260, 640], [209, 725], [366, 809], [311, 905], [232, 958]] print("primero es",type(axx)) axx = np.asarray(axx) print("luego es",type(axx)) ''' print(axx) axx = np.array(axx.T) tck, u = interpolate.splprep(axx, s=0) unew = np.arange(0, 1.01, 0.01) out = interpolate.splev(unew, tck) print(type(axx)) print(axx) '''plt.xlim(0, ancho) plt.ylim(0, altura2) plt.xlabel('X Axis limit is (0,7)') plt.ylabel('Y Axis limit is (-0.5,4)')''' img = plt.imread("ex5.jpg") fig, ax = plt.subplots() ax.imshow(img) #plt.plot(out[0], out[1], color='orange') plt.plot(axx[0, :], axx[1, :], 'ob')
for j in range(len(d1)): for i in range(len(data)): if str(data[i]) == str(d1[j]): dic.append(i) break dic.sort() dic.append(-1) return data[dic] for fi in range(info[0]): dd = d[fi] data = dd data = uniqueArray(dd) ##spline to make curve smooth tck, u = splprep([data[:, 0], data[:, 1]], u=None, s=0, per=1) unew = np.linspace(u.min(), u.max(), num=2000) data = np.array(splev(unew, tck, der=0)) data = data.transpose() data = np.floor(data * 1000 * (900 / 173.0)) ## 173 is the size of image in microview data = data.astype(int) x, y = np.where(data > (image_XY_size)) data[x, y] = image_XY_size x, y = np.where(data < 0) data[x, y] = 0 onlycontour[fi, image_XY_size - data[:, 1], data[:, 0]] = 255
def resample_contour(contour, n=51): tck, _ = splprep([contour[0], contour[1], contour[2]], s=0, k=1) new_points = splev(np.linspace(0, 1, n), tck) return new_points
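A small, hedged usage sketch for resample_contour above; the helix data is illustrative, and splprep/splev are assumed to be imported from scipy.interpolate as in the surrounding snippets.

import numpy as np

theta = np.linspace(0.0, 2.0 * np.pi, 200, endpoint=False)
contour = np.vstack([np.cos(theta), np.sin(theta), 0.1 * theta])  # shape (3, N): x, y, z rows
x_new, y_new, z_new = resample_contour(contour, n=51)             # 51 samples, evenly spaced in u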
ax[0].plot(frames,z, label='z') ax[0].legend() ax[1].plot(frames, qx, label='qx') ax[1].plot(frames, qy, label='qy') ax[1].plot(frames, qz, label='qz') ax[1].plot(frames, qw, label='qw') ax[1].legend() ax[0].grid(True) ax[1].grid(True) plt.show() fig.savefig("trajectories/test-right-arm-motion.png") # Smoothed tck, u = interpolate.splprep([frames, x], s=0.02) #create interpolated lists of points frameXnew, xnew = interpolate.splev(u,tck) tck, u = interpolate.splprep([frames, y], s=0.02) #create interpolated lists of points frameYnew, ynew = interpolate.splev(u,tck) tck, u = interpolate.splprep([frames, z], s=0.02) #create interpolated lists of points frameZnew, znew = interpolate.splev(u,tck) tck, u = interpolate.splprep([frames, qx], s=0.02) #create interpolated lists of points frameqxnew, qxnew = interpolate.splev(u,tck) tck, u = interpolate.splprep([frames, qy], s=0.02)
print(len(coordinates_list)) # copy the coordinates list and remove x percent of entries coordinates_list_less = coordinates_list[:] # sort the lists by timestamps and unzip the triples into three seperate lists coordinates_list.sort() coordinates_list = list(zip(*coordinates_list)) timestamp_list = coordinates_list[0] latitude_list = coordinates_list[1] longitude_list = coordinates_list[2] # plt.scatter(longitude_list, latitude_list, color='blue', label='given') tck, u = interpolate.splprep([longitude_list, latitude_list], s=0.0) x_i, y_i = interpolate.splev(np.linspace(0, 1, 100), tck) # plt.plot(x_i, y_i, color='red', label='all points') # ----------------------------------------------------------------------------- # copy the coordinates list and remove x percent of entries remove_random_entries(coordinates_list_less, 0.99) coordinates_list_less.sort() print(len(coordinates_list_less)) coordinates_list_gap = [] for j in range(len(coordinates_list_less)): if not 75 < j < 100: coordinates_list_gap.append(coordinates_list_less[j])
def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.001, f3=5): """ Mimic a hand-drawn line from (x, y) data Source: http://jakevdp.github.io/blog/2012/10/07/xkcd-style-plots-in-matplotlib/ Parameters ---------- x, y : array_like arrays to be modified xlim, ylim : data range the assumed plot range for the modification. If not specified, they will be guessed from the data mag : float magnitude of distortions f1, f2, f3 : int, float, int filtering parameters. f1 gives the size of the window, f2 gives the high-frequency cutoff, f3 gives the size of the filter Returns ------- x, y : ndarrays The modified lines """ x = np.asarray(x) y = np.asarray(y) # get limits for rescaling if xlim is None: xlim = (x.min(), x.max()) if ylim is None: ylim = (y.min(), y.max()) if xlim[1] == xlim[0]: xlim = ylim if ylim[1] == ylim[0]: ylim = xlim # scale the data x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0]) y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0]) # compute the total distance along the path dx = x_scaled[1:] - x_scaled[:-1] dy = y_scaled[1:] - y_scaled[:-1] dist_tot = np.sum(np.sqrt(dx * dx + dy * dy)) # number of interpolated points is proportional to the distance Nu = int(200 * dist_tot) u = np.arange(-1, Nu + 1) * 1. / (Nu - 1) # interpolate curve at sampled points k = min(3, len(x) - 1) res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k) x_int, y_int = interpolate.splev(u, res[0]) # we'll perturb perpendicular to the drawn line dx = x_int[2:] - x_int[:-2] dy = y_int[2:] - y_int[:-2] dist = np.sqrt(dx * dx + dy * dy) # create a filtered perturbation coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2) b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3)) response = signal.lfilter(b, 1, coeffs) x_int[1:-1] += response * dy / dist y_int[1:-1] += response * dx / dist # un-scale data x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0] y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0] return x_int, y_int
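A short, hedged usage example for xkcd_line: perturb a smooth sine curve so it looks hand-drawn. It assumes numpy, matplotlib and the scipy interpolate/signal modules are imported, as the function above already requires.

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0.0, 10.0, 100)
y = np.sin(x)
x_xkcd, y_xkcd = xkcd_line(x, y, mag=1.0)   # perturbed, hand-drawn-looking version

plt.plot(x, y, label='original')
plt.plot(x_xkcd, y_xkcd, label='xkcd style')
plt.legend()
plt.show()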
def make_plot(fname, seed=None): n_quadrants = 8 out_shape = 256 # Define the arc (presumably ezdxf uses a similar convention) centerx, centery = 0, 0 radius = 1 numsegments = 1000 start_angles = np.linspace(0, 360 * ((n_quadrants - 1) / n_quadrants), n_quadrants) inc_angle = 360 / n_quadrants polys = [] for start_angle in start_angles: end_angle = start_angle + inc_angle # The coordinates of the arc theta = np.radians(np.linspace(start_angle, end_angle, numsegments)) x = centerx + radius * np.cos(theta) y = centery + radius * np.sin(theta) xy = np.array((x, y)) center = np.array((centerx, centery))[..., np.newaxis] pts = np.concatenate((center, xy), axis=1) poly = geom.Polygon([(pts[0, i], pts[1, i]) for i in range(pts.shape[1])]) polys.append(poly) fig, ax = plt.subplots(1, 2) colors = [] for poly in polys: line = ax[0].plot(*poly.exterior.xy, '--') colors.append(line[0].get_color()) # make random point on each quadrants if (seed is not None): np.random.seed(seed) rho_range = np.linspace(0.2, 1, 100) theta_range = np.linspace(0, 1, 100) # pts = [(np.random.choice(rho_range), np.random.choice(theta_range)) # for _ in range(n_quadrants)] pts = [(np.random.choice(rho_range), 0.5) for _ in range(n_quadrants)] pts_cart = [] for p, start_angle, color in zip(pts, start_angles, colors): rho = p[0] theta = np.radians(start_angle + inc_angle * p[1]) R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) x, y = rho, 0 p = np.array((x, y))[..., np.newaxis] p = np.dot(R, p) pts_cart.append(p) ax[0].plot(p[0], p[1], 'o', color=color) # fit splines to x=f(u) and y=g(u), treating both as periodic. also note that s=0 # is needed in order to force the spline fit to pass through all the input points. pts_cart = np.array(pts_cart)[..., 0] pts_cart = np.concatenate((pts_cart, pts_cart[0, ...][np.newaxis, ...]), axis=0) import pdb pdb.set_trace() tck, u = interpolate.splprep([pts_cart[:, 0], pts_cart[:, 1]], s=0, per=True) # evaluate the spline fits for 1000 evenly spaced distance values xi, yi = interpolate.splev(np.linspace(0, 1, 1000), tck) ax[0].plot(xi, yi, 'k') ax[0].set_aspect('equal') ax[0].grid() # draw full shape xi = ((xi + 1) / 2) * out_shape yi = -yi yi = ((yi + 1) / 2) * out_shape rr, cc = draw.polygon(yi, xi, (out_shape, out_shape)) img = np.zeros((out_shape, out_shape), dtype=bool) img[rr, cc] = 1 ax[1].imshow(img) fig.savefig(fname)
def upsample(self, samp_values, file_size, first_samp=0, last_samp=0, interp_tech='pchip'):
    # interp_tech is compared by value: 'step', 'pchip' or 'spline'.
    if interp_tech == 'step':
        beg_pad = int((self.noverlap) / 2)
        up_version = np.zeros((file_size))
        up_version[:beg_pad] = first_samp
        up_version[beg_pad:beg_pad + self.frame_jump * self.nframes] = \
            np.repeat(samp_values, self.frame_jump)
        up_version[beg_pad + self.frame_jump * self.nframes:] = last_samp

    elif interp_tech in ('pchip', 'spline'):
        if np.amin(samp_values) > 0:
            if interp_tech == 'pchip':
                up_version = scipy_interp.pchip(
                    self.frames_pos, samp_values)(range(file_size))
            elif interp_tech == 'spline':
                tck, u_original = scipy_interp.splprep(
                    [self.frames_pos, samp_values], u=self.frames_pos)
                up_version = scipy_interp.splev(range(file_size), tck)[1]
        else:
            beg_pad = int((self.noverlap) / 2)
            up_version = np.zeros((file_size))
            up_version[:beg_pad] = first_samp

            voiced_frames = np.nonzero(samp_values)[0]
            edges = np.nonzero(
                (voiced_frames[1:] - voiced_frames[:-1]) > 1)[0]
            edges = np.insert(edges, len(edges), len(voiced_frames) - 1)
            voiced_frames = np.split(voiced_frames, edges + 1)[:-1]

            for frame in voiced_frames:
                up_interval = self.frames_pos[frame]
                tot_interval = np.arange(
                    int(up_interval[0] - (self.frame_jump / 2)),
                    int(up_interval[-1] + (self.frame_jump / 2)))

                if interp_tech == 'pchip' and len(frame) > 2:
                    up_version[tot_interval] = scipy_interp.pchip(
                        up_interval, samp_values[frame])(tot_interval)
                elif interp_tech == 'spline' and len(frame) > 3:
                    tck, u_original = scipy_interp.splprep(
                        [up_interval, samp_values[frame]], u=up_interval)
                    up_version[tot_interval] = scipy_interp.splev(
                        tot_interval, tck)[1]
                # MD: In case len(frame)==2, the methods above fail.
                # Use linear interpolation instead.
                elif len(frame) > 1:
                    up_version[tot_interval] = scipy_interp.interp1d(
                        up_interval, samp_values[frame],
                        fill_value='extrapolate')(tot_interval)
                elif len(frame) == 1:
                    up_version[tot_interval] = samp_values[frame]

            up_version[beg_pad + self.frame_jump * self.nframes:] = last_samp

    return up_version
def __init__(self, a_Point): x = [p[0] for p in a_Point] y = [p[1] for p in a_Point] z = [p[2] for p in a_Point] self.control_polyline = Polyline3D(a_Point) self.tck, self.u = interpolate.splprep([x, y, z], s=3)
def interpolatePoints(xs, ys, length):
    '''
    Interpolates points along an x-y (or lat-lon) line into a regularly spaced array.
    Resamples the number of points based on the length of the line.

    INPUT:
        xs     - [long0, long1, long2, ...]
        ys     - [lat0, lat1, lat2, ...]
        length - float

    OUTPUT:
        Resampled xs - [newlon0, newlon1, ...]
        Resampled ys - [newlat0, newlat1, ...]

    Warnings:
        Uses a global variable called 'interpolation', which is the size of the
        interpolation in degrees.
    '''
    from scipy.interpolate import splprep, splev

    # Resolution of interpolation (in degrees)
    if 'interpolation' in globals():
        interDist = interpolation
    else:
        print('No global interpolation, setting to 1 degree')
        interDist = 1

    # The interpolation does not do well if the line crosses +-180 degrees,
    # so check whether we need to convert it to 0-360.
    # If the difference in lon values is very large, assume interpolation would fail.
    lonDiff = numpy.diff(xs)
    # Check if any difference is bigger than 180 degrees (arbitrary threshold)
    if any(abs(x) > 180.0 for x in lonDiff):
        tempxs = convert180to360(xs)
    else:
        tempxs = xs

    # Plot the original data set
    # print(max(abs(lonDiff)))
    # plt.scatter(tempxs, ys, c='b')

    # Interpolate the points
    tckp, u = splprep([tempxs, ys], k=1, s=0)
    u = numpy.arange(0, 1, interDist / length)
    [xnew, ynew] = splev(u, tckp)

    # If it was converted, change it back to -180 to 180
    if any(abs(x) > 180.0 for x in lonDiff):
        tempxnew = convert360to180(xnew)
    else:
        tempxnew = xnew

    # Plot the interpolated line
    # plt.scatter(tempxnew, ynew, s=20, c='r', marker='x')

    # TODO: if you need to interpolate data values as well
    # f = interpolate.interp2d(xs, ys, zs, kind='linear')
    # znew = f(xnew, ynew)
    # plt.show()

    return [tempxnew, ynew]
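A hedged usage sketch for interpolatePoints; the module-level global interpolation (resampling step in degrees) is assumed here, as the docstring warns, and the short lon/lat track and its length are illustrative. The function also relies on numpy being imported at module level.

import numpy

interpolation = 0.5              # degrees between resampled points (module-level global)
lons = [10.0, 12.0, 15.0, 20.0]
lats = [50.0, 51.0, 53.0, 54.0]
track_length = 11.0              # rough track length in degrees
new_lons, new_lats = interpolatePoints(lons, lats, track_length)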
def load_and_process(path): # LOAD MAP map_ = (np.load(path) * 255).astype(np.uint8) map_visu = np.zeros((map_.shape[0], map_.shape[1], 3), dtype=np.uint8) # MOVE TO CORRECT PROJECTION map_visu = cv2.rotate(map_visu, cv2.ROTATE_90_COUNTERCLOCKWISE) map_ = cv2.rotate(map_, cv2.ROTATE_90_COUNTERCLOCKWISE) map_visu = cv2.flip(map_visu, 0) map_ = cv2.flip(map_, 0) # CREATE KERNEL, AND INFLATE IMAGE (REMOVE HOLES) kernel = np.ones((3, 3), np.uint8) map_dil_k3_t5 = cv2.dilate(map_, kernel, iterations=5) inv_dil_map = ((map_dil_k3_t5 == 0) * 255).astype(np.uint8) # REMOVE THE CENTER ISLAND blobs = cv2.connectedComponents(inv_dil_map) inside = ((blobs[1] == 2) * 255).astype(np.uint8) inside_inv = ((inside == 0) * 255).astype(np.uint8) # GET THE INSIDE OF THE LAKE blobs_inside = cv2.connectedComponents(inside_inv) clean_inside = (((blobs_inside[1] == 1) == 0) * 255).astype(np.uint8) # COMPUTE CONTOUR (ON INFLATED MAP) contours, hierarchy = cv2.findContours(clean_inside, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) map_visu = cv2.drawContours(map_visu, contours, 0, (0, 255, 0), 8) map_visu[:, :, 0] = map_ # COMPUTE CONTOUR WITH COMPENSATION FOR INFLATION OFFSET fine_contour_map = (map_.copy() * 0).astype(np.uint8) fine_contour_map = cv2.drawContours(fine_contour_map, contours, 0, (255), 8) fine_contour, fine_h = cv2.findContours(fine_contour_map, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) map_visu = cv2.drawContours(map_visu, fine_contour, 0, (0, 0, 255), 1) # COMPUTE HARD-SPAWN AREA hardspawn_contour_map = (map_.copy() * 0).astype(np.uint8) hardspawn_contour_map = cv2.drawContours(hardspawn_contour_map, fine_contour, 0, (255), 110) hardspawn_contour, fine_h = cv2.findContours(hardspawn_contour_map, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) map_visu = cv2.drawContours(map_visu, hardspawn_contour, 1, (235, 52, 210), 40) # COMPUTE PERFECT NAVIGATION DISTANCE distance_contour_map = (map_.copy() * 0).astype(np.uint8) ditance_contour_map = cv2.drawContours(distance_contour_map, fine_contour, 0, (255), 180) distance_contour, fine_h = cv2.findContours(distance_contour_map, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) map_visu = cv2.drawContours(map_visu, distance_contour, 1, (255, 204, 0), 30) # MAKE SPAWN MAP spawn_area = (map_.copy() * 0).astype(np.uint8) spawn_area = cv2.drawContours(spawn_area, hardspawn_contour, 1, (255), 40) spawn_area = cv2.drawContours(spawn_area, distance_contour, 1, (255), 30) # COMPUTE DISTANCE OF ALL PIXELS TO THE PERFECT NAVIGATION LINE map_optimal_nav = np.ones_like(map_, dtype=np.uint8) * 255 map_optimal_nav = cv2.drawContours(map_optimal_nav, distance_contour, 1, (0), 1) map_dist2optimal_nav = cv2.distanceTransform(map_optimal_nav, cv2.DIST_L2, 5) # MOVE TO CORRECT PROJECTION spawn_area = cv2.rotate(spawn_area, cv2.ROTATE_90_COUNTERCLOCKWISE) map_dist2optimal_nav = cv2.rotate(map_dist2optimal_nav, cv2.ROTATE_90_COUNTERCLOCKWISE) spawn_area = cv2.flip(spawn_area, 0) map_dist2optimal_nav = cv2.flip(map_dist2optimal_nav, 0) # TAKES NAVIGATION LINE AND FITS SMOOTH SPLNE x = distance_contour[1][:, 0, 0] y = distance_contour[1][:, 0, 1] tck, u = interpolate.splprep([x, y], s=0) unew = np.arange(0, 1.001, 0.001) out = interpolate.splev(unew, tck) sx = out[0] sy = out[1] error = 1 t = np.arange(sx.shape[0]) std = error * np.ones_like(t) t2 = np.arange(sx.shape[0] * 4) / 4 fx = UnivariateSpline(t, sx, k=4, w=1 / np.sqrt(std)) fy = UnivariateSpline(t, sy, k=4, w=1 / np.sqrt(std)) # COMPUTE CURVATURE FROM SPLINE sx2 = fx(t2) sy2 = fy(t2) x1 = fx.derivative(1)(t2) x2 = fx.derivative(2)(t2) y1 = 
fy.derivative(1)(t2) y2 = fy.derivative(2)(t2) curvature = (x1 * y2 - y1 * x2) / np.power(x1**2 + y1**2, 1.5) #print(sx2.shape[0]) #print(sy2.shape) # COMPUTE RUNNING CURVATURE max_speed = 1.5 #ms ep_length = 60 #seconds lake_length = 1400. #meters window_size = int(0.25 * max_speed * ep_length / (lake_length / sx2.shape[0])) running_curvature = np.zeros_like(curvature) for i in range(sx2.shape[0]): if i < sx2.shape[0] - window_size: running_curvature[i] = np.mean(np.abs(curvature[i:i + window_size])) else: running_curvature[i] = np.mean( np.abs( (list(curvature[i:curvature.shape[0]]) + list(curvature[0:i - curvature.shape[0] + window_size])))) # DISCTRETIZE THE SHORE LINE x_shore = fine_contour[0][:, 0, 0] y_shore = fine_contour[0][:, 0, 1] tck, u = interpolate.splprep([x_shore, y_shore], s=0) unew = np.arange(0, 1.0001, 0.0001) out_shore = interpolate.splev(unew, tck) sx_shore = out_shore[0] sy_shore = out_shore[1] # APPLY POLYNOMIAL FILTER fsx_shore = savgol_filter(sx_shore, 401, 2) fsy_shore = savgol_filter(sy_shore, 401, 2) # COMPUTE DISTANCE (TO CHANGE BASED ON CEDRIC'S FEEDBACK) diff_shore = (sx_shore - fsx_shore)**2 + (sy_shore - fsy_shore)**2 # WINDOWED STANDARD DEVIATION diff_window = np.zeros_like(diff_shore) for i in range(sx_shore.shape[0]): if (i > 50) and (sx_shore.shape[0] > i + 50): diff_window[i] = np.std(diff_shore[i - 50:i + 50]) elif i < 50: diff_window[i] = np.std( list(diff_shore[0:i + 50]) + list(diff_shore[-(sx_shore.shape[0] - i + 50):])) else: diff_window[i] = np.std( list(diff_shore[-(i - 50):]) + list(diff_shore[:sx_shore.shape[0] - i + 50])) rz = np.arctan2(y1, x1) nav_line_pose = np.vstack((sx2, sy2, rz)) running_curvature_pose = np.vstack((sx2, sy2, running_curvature)) spawn_poses = np.argwhere(spawn_area[:, :] == 255) # sample_pose_curvature dictionary print("Creating dictionary for sample_pose_curvature..") dict_curv_idx = {} curvature_array = running_curvature_pose[2, :] for idx in range(len(curvature_array)): key = curvature_array[idx] if key in dict_curv_idx.keys(): dict_curv_idx[key].append(idx) else: dict_curv_idx[key] = [idx] print("Done") # sample_hard_spawn dictionary print("Creating dictionary for sample_hard_spawn..") dict_diff_idx = {} print("Done") return (nav_line_pose, running_curvature_pose, spawn_poses, map_dist2optimal_nav, dict_curv_idx, dict_diff_idx)
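The curvature array computed above is the standard signed curvature of the parametric navigation line (x(t), y(t)), which the code evaluates from the first and second derivatives of the two UnivariateSpline fits:

$$
\kappa(t) \;=\; \frac{x'(t)\,y''(t) \;-\; y'(t)\,x''(t)}{\big(x'(t)^2 + y'(t)^2\big)^{3/2}}
$$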
def update_line(self, calc_inds=True): # update line while creating of editing... if not self.line: self.lineAux, = self.ax.plot(self.x, self.y, 's', mfc='w', mec='r', picker=5, zorder=1001, ms=4) self.line, = self.ax.plot(self.x, self.y, 'r', marker='None', lw=0.5, picker=2, zorder=1000, alpha=.7) else: n = np.unique((self.x + self.y * 1j)).size if self.type == 'spline' and n > 3: from scipy import interpolate if n < self.x.size: tck, u = interpolate.splprep([self.x[:-1], self.y[:-1]], s=0) else: tck, u = interpolate.splprep([self.x, self.y], s=0) xnew = np.arange(0, 1.01, 0.01) xnew = np.linspace(0, 1, 10 * (n - 1)) out = interpolate.splev(xnew, tck, der=0) xnew, ynew = out[0], out[1] else: # broken line xnew = np.array((), 'f') ynew = np.array((), 'f') for i in range(self.x.size - 1): if self.x[i + 1] != self.x[i]: m = (self.y[i + 1] - self.y[i]) / (self.x[i + 1] - self.x[i]) xx = np.linspace(self.x[i], self.x[i + 1], 10) yy = m * (xx - self.x[i]) + self.y[i] else: yy = np.linspace(self.y[i], self.y[i + 1], 10) xx = yy * 0 + self.x[i] xnew = np.append(xnew, xx[:-1]) ynew = np.append(ynew, yy[:-1]) # add last: xnew = np.append(xnew, xx[-1]) ynew = np.append(ynew, yy[-1]) if calc_inds: # find segment number for each point: iaux = [] for i in range(self.x.size): d = (xnew - self.x[i])**2 + (ynew - self.y[i])**2 j = np.where(d == d.min())[0][0] iaux += [j] # set segment number: self.Iaux = np.zeros(xnew.shape, 'i') i = -1 for i in range(len(iaux) - 2): self.Iaux[iaux[i]:iaux[i + 1]] = i self.Iaux[iaux[i + 1]:] = i + 1 # update lines: self.line.set_data(xnew, ynew) self.lineAux.set_data(self.x, self.y) # aux points stored in self.x, self.y # also store in xx and yy the line points if spline, and a copy of x,y if broken line if self.type == 'spline': self.xx = self.line.get_xdata() self.yy = self.line.get_ydata() else: self.xx, self.yy = self.x, self.y if self.axis: self.ax.axis(self.axis) self.figure.canvas.draw()