def get_error(self, data, model):
    (center, diameter, angle) = model
    dis = npy.hypot(data[:, 0] - center[0], data[:, 1] - center[1])
    theta = npy.arctan2(data[:, 1] - center[1], data[:, 0] - center[0]) - angle
    ellipse_radius = npy.hypot(diameter[0] / 2 * npy.cos(theta),
                               diameter[1] / 2 * npy.sin(theta))
    return npy.abs(dis - ellipse_radius)
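
# Illustration (not part of the snippet above): a quick sanity check of the same
# residual formula, run stand-alone with numpy imported as npy and self ignored.
# Points sampled on a circle should give residuals near zero when the model is
# that circle, since both semi-diameters equal the radius.
import numpy as npy

center, diameter, angle = npy.array([1.0, -2.0]), (6.0, 6.0), 0.3
t = npy.linspace(0, 2 * npy.pi, 50)
data = npy.column_stack([center[0] + 3.0 * npy.cos(t),
                         center[1] + 3.0 * npy.sin(t)])  # circle of radius 3

dis = npy.hypot(data[:, 0] - center[0], data[:, 1] - center[1])
theta = npy.arctan2(data[:, 1] - center[1], data[:, 0] - center[0]) - angle
ellipse_radius = npy.hypot(diameter[0] / 2 * npy.cos(theta),
                           diameter[1] / 2 * npy.sin(theta))
print(npy.abs(dis - ellipse_radius).max())  # ~0 for a perfect circular fit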
def _dots_per_unit(self, units):
    """
    Return a scale factor for converting from units to pixels
    """
    ax = self.ax
    if units in ("x", "y", "xy"):
        if units == "x":
            dx0 = ax.viewLim.width
            dx1 = ax.bbox.width
        elif units == "y":
            dx0 = ax.viewLim.height
            dx1 = ax.bbox.height
        else:  # 'xy' is assumed
            dxx0 = ax.viewLim.width
            dxx1 = ax.bbox.width
            dyy0 = ax.viewLim.height
            dyy1 = ax.bbox.height
            dx1 = np.hypot(dxx1, dyy1)
            dx0 = np.hypot(dxx0, dyy0)
        dx = dx1 / dx0
    else:
        if units == "width":
            dx = ax.bbox.width
        elif units == "height":
            dx = ax.bbox.height
        elif units == "dots":
            dx = 1.0
        elif units == "inches":
            dx = ax.figure.dpi
        else:
            raise ValueError("unrecognized units")
    return dx
def zoomToAll(self):
    if self.m_nImgs < 1:
        return
    posA = N.array(self.m_imgPosArr)
    sizA = N.array(self.m_imgSizeArr)
    a = N.array([N.minimum.reduce(posA),
                 N.maximum.reduce(posA + sizA),
                 ])
    from .all import U
    MC = N.array([0.5, 0.5])  # mosaic viewer's center (0.5, 0.5)
    a -= MC
    hypot = N.array((N.hypot(a[0][0], a[0][1]), N.hypot(a[1][0], a[1][1])))
    theta = N.array((N.arctan2(a[0][1], a[0][0]),
                     N.arctan2(a[1][1], a[1][0])))  # radians
    phi = theta + U.deg2rad(self.m_rot)
    mimXY = N.array((hypot[0] * N.cos(phi[0]), hypot[0] * N.sin(phi[0])))
    maxXY = N.array((hypot[1] * N.cos(phi[1]), hypot[1] * N.sin(phi[1])))
    a = N.array((mimXY, maxXY))
    a.sort(0)
    if self.m_aspectRatio == -1:
        a = N.array(([a[0][0], -a[1][1]], [a[1][0], -a[0][1]]))
    self.zoomToRect(x0=a[0][0], y0=a[0][1], x1=a[-1][0], y1=a[-1][1])
def pix2deg(self, pixel_x, pixel_y, eye_distance_mm):
    """
    Stimulus positions (pixel_x, pixel_y) are defined in x and y pixel units,
    with the origin (0,0) being at the **center** of the display, to match
    the PsychoPy pix unit coord type.

    The pix2deg method is vectorized, meaning that it will perform the
    pixel to angle calculations on all elements of the provided pixel
    position numpy arrays in one numpy call.

    The conversion process can use either a fixed eye to calibration plane
    distance, or a numpy array of eye distances passed as eye_distance_mm.
    In this case the eye distance array must be the same length as the
    pixel_x, pixel_y arrays.
    """
    eye_dist_mm = self._eye_distance_mm
    if eye_distance_mm is not None:
        eye_dist_mm = eye_distance_mm

    x_mm = self.mmpp_x * pixel_x
    y_mm = self.mmpp_y * pixel_y

    Ah = np.arctan2(x_mm, np.hypot(eye_dist_mm, y_mm))
    Av = np.arctan2(y_mm, np.hypot(eye_dist_mm, x_mm))

    return np.rad2deg(Ah), np.rad2deg(Av)
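
# Illustration of the same geometry as pix2deg, stand-alone and with made-up
# display parameters (mmpp and eye distance are assumptions, not values from
# the snippet above): horizontal visual angle is atan2(x_mm, hypot(d, y_mm)).
import numpy as np

mmpp_x = mmpp_y = 0.25            # assumed mm per pixel
eye_dist_mm = 600.0               # assumed eye-to-screen distance in mm

pixel_x = np.array([0.0, 100.0, 400.0])
pixel_y = np.zeros(3)

x_mm, y_mm = mmpp_x * pixel_x, mmpp_y * pixel_y
Ah = np.degrees(np.arctan2(x_mm, np.hypot(eye_dist_mm, y_mm)))
print(Ah)  # roughly [0.0, 2.4, 9.5] degrees of horizontal visual angle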
def _dots_per_unit(self, units):
    """
    Return a scale factor for converting from units to pixels
    """
    ax = self.ax
    if units in ('x', 'y', 'xy'):
        if units == 'x':
            dx0 = ax.viewLim.width
            dx1 = ax.bbox.width
        elif units == 'y':
            dx0 = ax.viewLim.height
            dx1 = ax.bbox.height
        else:  # 'xy' is assumed
            dxx0 = ax.viewLim.width
            dxx1 = ax.bbox.width
            dyy0 = ax.viewLim.height
            dyy1 = ax.bbox.height
            dx1 = np.hypot(dxx1, dyy1)
            dx0 = np.hypot(dxx0, dyy0)
        dx = dx1 / dx0
    else:
        if units == 'width':
            dx = ax.bbox.width
        elif units == 'height':
            dx = ax.bbox.height
        elif units == 'dots':
            dx = 1.0
        elif units == 'inches':
            dx = ax.figure.dpi
        else:
            raise ValueError('unrecognized units')
    return dx
def inverse_kepler_2d(xv, m, t):
    """Compute the Keplerian parameters for the osculating orbit.

    No partial derivatives are computed (even though it would be much easier)
    because you can use the partials for kepler_2d and invert the matrix.

    The value of t0 computed is the value within one half-period of t.
    """
    mu = G * m
    # a_guess = np.hypot(xv[0], xv[1])
    h = (xv[0] * xv[3] - xv[1] * xv[2])
    r = np.hypot(xv[0], xv[1])
    eps2, eps1 = np.array([xv[3], -xv[2]]) * h / mu - xv[:2] / r
    e = np.hypot(eps1, eps2)
    p = h**2 / mu
    a = p / (1 - e**2)
    pb = 2 * np.pi * (a**3 / mu)**(0.5)
    om = np.arctan2(eps1, eps2)
    true_anomaly = np.arctan2(xv[1], xv[0]) - om
    eccentric_anomaly = np.arctan2(np.sqrt(1 - e**2) * np.sin(true_anomaly),
                                   e + np.cos(true_anomaly))
    mean_anomaly = eccentric_anomaly - e * np.sin(eccentric_anomaly)

    true_anomaly_0 = -om
    eccentric_anomaly_0 = np.arctan2(np.sqrt(1 - e**2) * np.sin(true_anomaly_0),
                                     e + np.cos(true_anomaly_0))
    mean_anomaly_0 = eccentric_anomaly_0 - e * np.sin(eccentric_anomaly_0)

    return Kepler2DParameters(a=a, pb=pb, eps1=eps1, eps2=eps2,
                              t0=t - (mean_anomaly - mean_anomaly_0) * pb / (2 * np.pi))
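
# Illustration (not part of the module above): consistency check of the
# eccentricity-vector algebra with mu set to 1 instead of G*m. A circular
# orbit of radius r should come out with e ~ 0 and a ~ r.
import numpy as np

mu, r = 1.0, 2.5
v = np.sqrt(mu / r)                    # circular orbital speed
xv = np.array([r, 0.0, 0.0, v])        # state vector (x, y, vx, vy)

h = xv[0] * xv[3] - xv[1] * xv[2]
rad = np.hypot(xv[0], xv[1])
eps2, eps1 = np.array([xv[3], -xv[2]]) * h / mu - xv[:2] / rad
e = np.hypot(eps1, eps2)
a = (h**2 / mu) / (1 - e**2)
print(e, a)                            # ~0.0 and ~2.5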
def getDr(d, IO, type_):
    """Compute distortion with given radial distance."""
    # Define radial distance range
    xp = np.linspace(0, d, d * 50)
    yp = 0

    # Compute distortion corrections
    x0 = IO["x0"]
    y0 = IO["y0"]
    xbar = xp - x0
    ybar = yp - y0
    r = np.hypot(xbar, ybar)

    if type_ == "symmetric":
        k1 = IO["k1"]
        k2 = IO["k2"]
        k3 = IO["k3"]
        dx = xbar * (r**2 * k1 + r**4 * k2 + r**6 * k3)
        dy = ybar * (r**2 * k1 + r**4 * k2 + r**6 * k3)
    elif type_ == "decentering":
        p1 = IO["p1"]
        p2 = IO["p2"]
        dx = (p1 * (r**2 + 2 * xbar**2) + 2 * p2 * xbar * ybar)
        dy = (2 * p1 * xbar * ybar + p2 * (r**2 + 2 * ybar**2))

    dr = np.hypot(dx, dy)

    return xp, dr
def __init__(self, size=None, n=None):
    n = n if n else 256
    self.size = size if size else (256, 256)
    self.order = len(self.size)

    # Generate WAY more numbers than we need
    # because we are throwing out all the numbers not inside a unit
    # sphere.  Something of a hack but statistically speaking
    # it should work fine... or crash.
    G = (random.uniform(size=2 * self.order * n) * 2 - 1).reshape(-1, self.order)

    # GAH! How do I generalize this?!
    # length = hypot(G[:,i] for i in range(self.order))
    if self.order == 1:
        length = G[:, 0]
    elif self.order == 2:
        length = hypot(G[:, 0], G[:, 1])
    elif self.order == 3:
        # hypot only takes two arguments, so chain it for three components
        length = hypot(hypot(G[:, 0], G[:, 1]), G[:, 2])

    self.G = (G[length < 1] / (length[length < 1])[:, newaxis])[:n, ]
    self.P = arange(n, dtype=int32)
    random.shuffle(self.P)

    self.idx_ar = indices(2 * ones(self.order), dtype=int8).reshape(self.order, -1).T
    self.drop = poly1d((-6, 15, -10, 0, 0, 1.0))
def _cart_to_sph(x, y, z):
    """Aux function"""
    hypotxy = np.hypot(x, y)
    r = np.hypot(hypotxy, z)
    elev = np.arctan2(z, hypotxy)
    az = np.arctan2(y, x)
    return az, elev, r
def plot_winds(m, lat, lon, image, u, v, name, step=16):
    m.pcolormesh(lon, lat, image, latlon=True, cmap='gray')

    h, w = lat.shape[:2]
    y, x = np.mgrid[step // 2:h:step, step // 2:w:step].reshape(2, -1)

    # extract data
    u = u[y, x]
    v = v[y, x]
    lon = lon[y, x]
    lat = lat[y, x]

    # calculate map positions of lat/lons
    x, y = m(lon, lat)

    # Calculate the orientation of the vectors
    x1, y1 = m(lon + u, lat + v)
    u_map, v_map = x1 - x, y1 - y

    # Rescale the magnitudes of the vectors...
    mag_scale = np.hypot(u_map, v_map) / np.hypot(u, v)
    u_map /= mag_scale
    v_map /= mag_scale

    # Draw barbs
    # m.barbs(x, y, u_map, v_map, length=5, color='red')
    m.quiver(x, y, u_map, v_map, scale=200, color='red')

    # Draw some grid lines for reference
    m.drawparallels(np.arange(-90, 90, 2), labels=[1, 1, 0, 0])
    m.drawmeridians(np.arange(0, 360, 2), labels=[0, 0, 0, 1])
    plt.savefig(name, bbox_inches='tight')
    plt.close()
def averageNSLSdata(ee, mc, mb, dc, db, n):
    i = ee.size // n    # total number of energy points
    e = ee[0::n]        # [0,:] is the first energy point

    # Reshape to put energy on one axis and readings on the other.
    mc.shape = ((i, n))
    dc.shape = ((i, n))
    mb.shape = ((i, n))
    db.shape = ((i, n))

    # Average over readings.
    mca = np.average(mc, axis=1)
    dca = np.average(dc, axis=1)
    mba = np.average(mb, axis=1)
    dba = np.average(db, axis=1)
    # Note that the average might not be best for cases where there
    # are outliers.  Perhaps a median of some kind?

    # Find the standard deviation of detector current measurements.
    mcs = np.std(mc, axis=1)
    dcs = np.std(dc, axis=1)
    mbs = np.std(mb, axis=1)
    dbs = np.std(db, axis=1)
    ms = np.hypot(mcs, mbs)
    ds = np.hypot(dcs, dbs)

    return e, mca, mba, ms, dca, dba, ds
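
# Illustration: np.hypot is used above to combine two independent standard
# deviations in quadrature, i.e. sqrt(a**2 + b**2) without an intermediate
# overflow. Stand-alone example with made-up values:
import numpy as np

sigma_current = np.array([0.10, 0.05, 0.20])
sigma_background = np.array([0.04, 0.12, 0.03])

sigma_total = np.hypot(sigma_current, sigma_background)
assert np.allclose(sigma_total, np.sqrt(sigma_current**2 + sigma_background**2))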
def get_bezier_points_at(self, at, grid=256):
    at = np.asarray(at)
    # The Bezier curve is parameterized by a value t which ranges from 0
    # to 1. However, there is a nonlinear relationship between this value
    # and arclength. We want to parameterize by t', which measures
    # normalized arclength. To do this, we have to calculate the function
    # arclength(t), and then invert it.
    t = np.linspace(0, 1, grid)
    x, y = Bezier(list(zip(self._xp, self._yp)), t).T
    x_deltas = np.diff(x)
    y_deltas = np.diff(y)
    arclength_deltas = np.empty(t.shape)
    arclength_deltas[0] = 0
    np.hypot(x_deltas, y_deltas, out=arclength_deltas[1:])
    arclength = np.cumsum(arclength_deltas)
    arclength /= arclength[-1]
    # Now (t, arclength) is a LUT describing the t -> arclength mapping.
    # Invert it to get at -> t.
    at_t = np.interp(at, arclength, t)
    # And finally look up the Bezier values at at_t
    # (Might be quicker to np.interp against x and y, but eh, doesn't
    # really matter.)
    return Bezier(list(zip(self._xp, self._yp)), at_t).T
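
# Illustration of the arclength lookup-table trick used above, stand-alone and
# without the Bezier class: build arclength(t) by summing np.hypot of the
# coordinate deltas, normalize, then invert with np.interp. On a half circle
# (constant speed) the inverted parameter equals the requested arclength.
import numpy as np

t = np.linspace(0, 1, 1024)
x, y = np.cos(np.pi * t), np.sin(np.pi * t)       # half circle, parameter t

arclength = np.concatenate(([0.0], np.cumsum(np.hypot(np.diff(x), np.diff(y)))))
arclength /= arclength[-1]                        # normalized arclength in [0, 1]

at = np.array([0.25, 0.5, 0.75])                  # requested arclength fractions
at_t = np.interp(at, arclength, t)
print(at_t)                                       # ~[0.25 0.5 0.75]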
def _cartesian_to_sphere(x, y, z):
    """Transform cartesian coordinates to spherical"""
    hypotxy = np.hypot(x, y)
    r = np.hypot(hypotxy, z)
    elev = np.arctan2(z, hypotxy)
    az = np.arctan2(y, x)
    return az, elev, r
def go(start, stores, items, price_of_gas, perishable, solved):
    if (start, str(sorted(items)), perishable) in solved:
        return solved[(start, str(sorted(items)), perishable)]
    gas_home = price_of_gas * hypot(0 - start[0], 0 - start[1])
    if len(items) == 0:
        return gas_home
    if start == (0, 0):
        perishable = 0
    costs = list()
    if perishable == 1:
        costs.append(gas_home + go((0, 0), stores, items, price_of_gas, 0, solved))
        filtered_stores = [x for x in stores if x[0] == start]
    else:
        filtered_stores = stores
    for i in range(len(items)):
        p = perishable
        if items[i][-1] == '!':
            p = 1
            item = items[i][:-1]
        else:
            item = items[i]
        for j in range(len(filtered_stores)):
            filtered_items = [x for x in filtered_stores[j][1] if item in x]
            if len(filtered_items) == 1:
                cost = int(filtered_items[0].split(":")[1])
                gas_here = price_of_gas * hypot(start[0] - filtered_stores[j][0][0],
                                                start[1] - filtered_stores[j][0][1])
                costs.append(
                    cost + gas_here + go(filtered_stores[j][0], stores,
                                         [x for x in items if item not in x],
                                         price_of_gas, p, solved))
    solved[(start, str(sorted(items)), perishable)] = min(costs)
    return min(costs)
def cart2sph(x, y, z):
    """Converts cartesian coordinates x, y, z to spherical coordinates az, el, r."""
    hxy = _np.hypot(x, y)
    r = _np.hypot(hxy, z)
    el = _np.arctan2(z, hxy)
    az = _np.arctan2(y, x)
    return az, el, r
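
# Illustration (not part of the module above, which aliases numpy as _np):
# a hypothetical inverse of cart2sph plus a round-trip check.
import numpy as np

def sph2cart(az, el, r):
    """Hypothetical inverse of cart2sph (azimuth/elevation convention)."""
    z = r * np.sin(el)
    rcos = r * np.cos(el)
    return rcos * np.cos(az), rcos * np.sin(az), z

x, y, z = 1.0, -2.0, 0.5
hxy = np.hypot(x, y)
az, el, r = np.arctan2(y, x), np.arctan2(z, hxy), np.hypot(hxy, z)
assert np.allclose(sph2cart(az, el, r), (x, y, z))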
def get_impact_parameters_cpair(cpair, ra0, dec0, cosmo, npoints=1000):
    """Returns the impact parameter between the straight line given by a
    cluster-pair and (ra0, dec0), as well as the distance along the cpair
    axis to the closest cluster of the pair.
    """
    ra1 = cpair['ra1']
    dec1 = cpair['dec1']
    z1 = cpair['z1']
    ra2 = cpair['ra2']
    dec2 = cpair['dec2']
    z2 = cpair['z2']
    zmed = cpair['redshift']

    ra_mpc, dec_mpc, sep_mpc, dv = get_line_radec_sepdv(ra1, dec1, z1, ra2, dec2, z2,
                                                        ra0, dec0, zmed, cosmo,
                                                        npoints=npoints)
    # get hypotenuse
    sep = np.hypot(ra_mpc, dec_mpc)
    # get minimum distance
    sep_min = np.min(sep)

    # lets add the distance along the cpair axis to the closest cluster
    # find the closest cluster distance first
    sep_cl_mpc1 = np.hypot(ra_mpc[0], dec_mpc[0])
    sep_cl_mpc2 = np.hypot(ra_mpc[-1], dec_mpc[-1])
    sep_cl_mpc = np.min([sep_cl_mpc1.value, sep_cl_mpc2.value])
    # then, project along the cluster axis
    sep_x_mpc = np.sqrt(sep_cl_mpc**2 - sep_min.value**2)

    return sep_min.value, sep_x_mpc
def testProperMotion(self):
    """Test proper motion correction"""
    center = make_coord(93.0, -90.0)
    loader = LoadIndexedReferenceObjectsTask(butler=self.testButler)
    references = loader.loadSkyCircle(center, self.searchRadius, filterName='a').refCat
    original = references.copy(True)

    # Zero epoch change --> no proper motion correction (except minor numerical effects)
    loader.applyProperMotions(references, self.epoch)
    self.assertFloatsAlmostEqual(references["coord_ra"], original["coord_ra"], rtol=1.0e-14)
    self.assertFloatsAlmostEqual(references["coord_dec"], original["coord_dec"], rtol=1.0e-14)
    self.assertFloatsEqual(references["coord_raErr"], original["coord_raErr"])
    self.assertFloatsEqual(references["coord_decErr"], original["coord_decErr"])

    # One year difference
    loader.applyProperMotions(references, self.epoch + 1.0*astropy.units.yr)
    self.assertFloatsEqual(references["pm_raErr"], original["pm_raErr"])
    self.assertFloatsEqual(references["pm_decErr"], original["pm_decErr"])
    for orig, ref in zip(original, references):
        self.assertAnglesAlmostEqual(orig.getCoord().separation(ref.getCoord()),
                                     self.properMotionAmt,
                                     maxDiff=1.0e-6*lsst.geom.arcseconds)
        self.assertAnglesAlmostEqual(orig.getCoord().bearingTo(ref.getCoord()),
                                     self.properMotionDir,
                                     maxDiff=1.0e-4*lsst.geom.arcseconds)
    predictedRaErr = np.hypot(original["coord_raErr"], original["pm_raErr"])
    predictedDecErr = np.hypot(original["coord_decErr"], original["pm_decErr"])
    self.assertFloatsAlmostEqual(references["coord_raErr"], predictedRaErr)
    self.assertFloatsAlmostEqual(references["coord_decErr"], predictedDecErr)
def get_dH2(lab1, lab2):
    """squared hue difference term occurring in deltaE_cmc and deltaE_ciede94

    Despite its name, "dH" is not a simple difference of hue values.  We
    avoid working directly with the hue value, since differencing angles is
    troublesome.  The hue term is usually written as:
        c1 = sqrt(a1**2 + b1**2)
        c2 = sqrt(a2**2 + b2**2)
        term = (a1-a2)**2 + (b1-b2)**2 - (c1-c2)**2
        dH = sqrt(term)

    However, this has poor roundoff properties when a or b is dominant.
    Instead, ab is a vector with elements a and b.  The same dH term can be
    re-written as:
        |ab1-ab2|**2 - (|ab1| - |ab2|)**2
    and then simplified to:
        2*|ab1|*|ab2| - 2*dot(ab1, ab2)
    """
    lab1 = np.asarray(lab1)
    lab2 = np.asarray(lab2)
    a1, b1 = np.rollaxis(lab1, -1)[1:3]
    a2, b2 = np.rollaxis(lab2, -1)[1:3]

    # magnitude of (a, b) is the chroma
    C1 = np.hypot(a1, b1)
    C2 = np.hypot(a2, b2)

    term = (C1 * C2) - (a1 * a2 + b1 * b2)
    return 2 * term
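
# Illustration (not part of the library code above): numerical check that the
# rewritten chroma form used by get_dH2 agrees with the naive hue-difference
# expression quoted in its docstring.
import numpy as np

rng = np.random.default_rng(0)
a1, b1, a2, b2 = rng.normal(scale=50.0, size=(4, 1000))

C1, C2 = np.hypot(a1, b1), np.hypot(a2, b2)
naive = (a1 - a2)**2 + (b1 - b2)**2 - (C1 - C2)**2   # docstring form
rewritten = 2 * (C1 * C2 - (a1 * a2 + b1 * b2))      # form returned by get_dH2

assert np.allclose(naive, rewritten)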
def _cartesian_to_sphere(x, y, z):
    """Convert using old function."""
    hypotxy = np.hypot(x, y)
    r = np.hypot(hypotxy, z)
    elev = np.arctan2(z, hypotxy)
    az = np.arctan2(y, x)
    return az, elev, r
def inplace_sort(self, points, i=0):
    """Sort the list of points (not actually inplace), starting at index i.
    """
    p_ = points
    N = len(points)
    sorted_points = []
    while True:
        # get the current point
        point = p_[i]
        # finish if next point is outside the solution triangle
        if not self.insolutiontriangle(point):
            break
        # add the current point to the sorted points
        sorted_points.append(point)
        # finish if exhausted points
        if len(sorted_points) == N:
            break
        # remove the current point from the points to consider
        nonzero = np.hypot(*(p_ - point).T).nonzero()
        p_ = p_[nonzero]
        # find the index of the nearest point to the current point
        distances = np.hypot(*(p_ - point).T)
        i = distances.argmin()
    return np.array(sorted_points)
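
# Illustration: the loop above is a greedy nearest-neighbour walk. The current
# point is dropped because its distance to itself is zero, so .nonzero()
# filters it out; the walk then jumps to whichever remaining point minimizes
# np.hypot of the coordinate deltas. Stripped-down sketch without the
# insolutiontriangle stopping test (stand-alone, for illustration only):
import numpy as np

def nearest_neighbour_order(points, i=0):
    p_ = np.asarray(points, dtype=float)
    ordered = []
    while len(p_):
        point = p_[i]
        ordered.append(point)
        p_ = p_[np.hypot(*(p_ - point).T).nonzero()]   # drop the current point
        if len(p_) == 0:
            break
        i = np.hypot(*(p_ - point).T).argmin()         # nearest remaining point
    return np.array(ordered)

pts = [[0, 0], [3, 0], [1, 0], [1, 1]]
print(nearest_neighbour_order(pts))   # visits (0,0) -> (1,0) -> (1,1) -> (3,0)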
def position_angle(xc, yc, x1, y1, x2, y2):
    # Compute the position angle (in radians, with respect to north) between three points in pixel space
    '''
    Points are:
        - xc, yc: x, y center of bending angle
        - x1, y1: x, y center of 1st component
        - x2, y2: x, y center of 2nd component
    '''
    r1 = np.array([x1, y1])
    r2 = np.array([x2, y2])
    center = np.array([xc, yc])
    r12sum = (r1 - center) + (r2 - center)
    r12len = np.hypot(r12sum[0], r12sum[1])
    north = np.array([0, 1])
    northlen = np.hypot(north[0], north[1])
    alpha = np.arccos(np.dot(r12sum, north) / (r12len * northlen))

    # Measure CCW from north
    if r12sum[0] > 0.:
        alpha = 2 * np.pi - alpha

    return alpha
def edgedist(pixel, shape=(512, 512)):
    x = pixel[0]
    y = pixel[1]
    circumference = 2.0 * shape[0] + 2.0 * shape[1]
    totaldist = np.sum([np.hypot(x - a, y - b)
                        for a in range(shape[0]) for b in [0, shape[1]]])
    totaldist2 = np.sum([np.hypot(x - a, y - b)
                         for a in [0, shape[0]] for b in range(shape[1])])
    return (totaldist + totaldist2) / circumference, circumference
def format_coord(self, xd, yd):
    """
    Given the 2D view coordinates attempt to guess a 3D coordinate.
    Looks for the nearest edge to the point and then assumes that
    the point is at the same z location as the nearest point on the edge.
    """
    if self.M is None:
        return ''

    if self.button_pressed == 1:
        return 'azimuth=%d deg, elevation=%d deg ' % (self.azim, self.elev)

    p = (xd, yd)
    edges = self.tunit_edges()
    ldists = [(proj3d.line2d_seg_dist(p0, p1, p), i) for
              i, (p0, p1) in enumerate(edges)]
    ldists.sort()
    edgei = ldists[0][1]

    p0, p1 = edges[edgei]
    x0, y0, z0 = p0
    x1, y1, z1 = p1

    d0 = np.hypot(x0 - xd, y0 - yd)
    d1 = np.hypot(x1 - xd, y1 - yd)
    dt = d0 + d1
    z = d1 / dt * z0 + d0 / dt * z1

    x, y, z = proj3d.inv_transform(xd, yd, z, self.M)

    xs = self.format_xdata(x)
    ys = self.format_ydata(y)
    zs = self.format_ydata(z)
    return 'x=%s, y=%s, z=%s' % (xs, ys, zs)
def sky2pix_ellipse(self, pos, a, b, pa):
    """
    Convert an ellipse from sky to pixel coords
    a/b vectors are calculated at an origin pos=(ra,dec)
    All input parameters are in degrees
    Output parameters are:
        x, y - the x, y pixels corresponding to the ra/dec position
        sx, sy - the major/minor axes (FWHM) in pixels
        theta - the position angle in degrees

    :param pos: [ra,dec] of the ellipse center
    :param a: major axis
    :param b: minor axis
    :param pa: position angle
    :return: x, y, sx, sy, theta
    """
    ra, dec = pos
    x, y = self.sky2pix(pos)

    x_off, y_off = self.sky2pix(translate(ra, dec, a, pa))
    sx = np.hypot((x - x_off), (y - y_off))
    theta = np.arctan2((y_off - y), (x_off - x))

    x_off, y_off = self.sky2pix(translate(ra, dec, b, pa - 90))
    sy = np.hypot((x - x_off), (y - y_off))
    theta2 = np.arctan2((y_off - y), (x_off - x)) - np.pi / 2

    # The a/b vectors are perpendicular in sky space, but not always in pixel space
    # so we have to account for this by calculating the angle between the two vectors
    # and modifying the minor axis length
    defect = theta - theta2
    sy *= abs(np.cos(defect))

    return x, y, sx, sy, np.degrees(theta)
def get_plotsense_dict(file, NTOP=None, NANTS=None):
    antpos = np.loadtxt(file)
    if NANTS is None:
        NANTS = antpos.shape[0]
    bl_gps = {}
    for i in range(NANTS - 1):
        a1pos = antpos[i]
        for j in range(i + 1, NANTS):
            a2pos = antpos[j]
            blx, bly, blz = a2pos - a1pos
            has_entry = False
            for key in bl_gps.keys():
                if np.hypot(key[0] - blx, key[1] - bly) < 1:
                    has_entry = True
                    bl_gps[key] = bl_gps.get(key, []) + [(i, j)]
                    break
            if not has_entry:
                bl_gps[(blx, bly)] = [(i, j)]
    n_unique = len(bl_gps.keys())
    n_total = NANTS * (NANTS - 1) // 2
    print("Found %d classes among %d total baselines" % (n_unique, n_total))
    print("Sorting dictionary")
    sorted_keys = sorted(bl_gps.keys(), key=lambda x: np.hypot(*x))
    if NTOP is None:
        NTOP = n_unique
    key_pool = np.arange(NTOP)
    top_dict = {}
    for i, key in enumerate(sorted_keys[:NTOP]):
        mult = len(bl_gps[key])
        label = str(bl_gps[key][0][0]) + '_' + str(bl_gps[key][0][1])
        top_dict[label] = ((key[0], key[1], 0.), mult)  # dict[example_bl] = ((blx, bly, blz), multiplicity)
    return top_dict, bl_gps
def pack_fnl(data):
    """Pack fnl array to fnl struct."""
    # Minimal input control
    assert isinstance(data, np.ndarray)
    assert data.ndim == 2 and data.shape[1] == FNLMeta.nb_columns

    # Pack fnl
    fnl = FNLData()
    fnl.id = data[:, FNLMeta.nl_id_col]
    fnl.eos = data[:, FNLMeta.eos_id_col]
    fnl.x = data[:, FNLMeta.x_col]
    fnl.y = data[:, FNLMeta.y_col]
    fnl.z = data[:, FNLMeta.z_col]
    fnl.vx = data[:, FNLMeta.vx_col]
    fnl.vy = data[:, FNLMeta.vy_col]
    fnl.vz = data[:, FNLMeta.vz_col]
    fnl.m = data[:, FNLMeta.m_col]
    fnl.rho = data[:, FNLMeta.rho_col]
    fnl.P = data[:, FNLMeta.P_col]
    fnl.T = data[:, FNLMeta.T_col]
    fnl.U = data[:, FNLMeta.U_col]
    fnl.hmin = data[:, FNLMeta.hmin_col]
    fnl.hmax = data[:, FNLMeta.hmax_col]
    fnl.nbNodes = len(data)
    fnl.r = np.hypot(fnl.x, np.hypot(fnl.y, fnl.z))

    # Return
    return fnl
def norm_dist(src1, src2):
    """
    Calculate the normalised distance between two sources.
    Sources are elliptical Gaussians.

    The normalised distance is calculated as the GCD distance between the centers,
    divided by quadrature sum of the radius of each ellipse along a line joining
    the two ellipses.

    For ellipses that touch at a single point, the normalized distance will be 1/sqrt(2).

    Parameters
    ----------
    src1, src2 : object
        The two positions to compare. Objects must have the following
        parameters: (ra, dec, a, b, pa).

    Returns
    -------
    dist: float
        The normalised distance.
    """
    if np.all(src1 == src2):
        return 0
    dist = gcd(src1.ra, src1.dec, src2.ra, src2.dec)  # degrees

    # the angle between the ellipse centers
    phi = bear(src1.ra, src1.dec, src2.ra, src2.dec)  # Degrees

    # Calculate the radius of each ellipse along a line that joins their centers.
    r1 = src1.a * src1.b / np.hypot(src1.a * np.sin(np.radians(phi - src1.pa)),
                                    src1.b * np.cos(np.radians(phi - src1.pa)))
    r2 = src2.a * src2.b / np.hypot(src2.a * np.sin(np.radians(180 + phi - src2.pa)),
                                    src2.b * np.cos(np.radians(180 + phi - src2.pa)))
    R = dist / (np.hypot(r1, r2) / 3600)
    return R
def bending_angle(xc, yc, x1, y1, x2, y2):
    # Compute the bending angle (in radians) between three points in pixel space
    '''
    Points are:
        - xc, yc: x, y center of IR counterpart
        - x1, y1: x, y center of 1st radio lobe
        - x2, y2: x, y center of 2nd radio lobe
    '''
    r1 = np.array([x1, y1])
    r2 = np.array([x2, y2])
    center = np.array([xc, yc])
    r1diff = r1 - center
    r2diff = r2 - center
    r1len = np.hypot(r1diff[0], r1diff[1])
    r2len = np.hypot(r2diff[0], r2diff[1])
    alpha = np.arccos(np.dot(r1diff, r2diff) / (r1len * r2len))
    return alpha
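
# Illustration (stand-alone re-evaluation of the same dot-product formula):
# a lobe directly above the centre and a lobe directly to its right should
# give a bending angle of 90 degrees.
import numpy as np

center = np.array([5.0, 5.0])
r1diff = np.array([5.0, 8.0]) - center   # straight up from the centre
r2diff = np.array([9.0, 5.0]) - center   # straight right from the centre

alpha = np.arccos(np.dot(r1diff, r2diff) /
                  (np.hypot(*r1diff) * np.hypot(*r2diff)))
print(np.degrees(alpha))                 # 90.0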
def plateASTundo(ast, p, q):
    """ Map gnomonic coordinates to theta, phi. """

    M = ast.M

    # Calculate beta and gamma
    bet = np.arcsin(np.hypot(p, q))
    gam = np.arctan2(q, p)

    if bet > np.pi:
        return False

    # Init vector v
    v = np.zeros(3)
    v[0] = np.sin(bet) * np.cos(gam)
    v[1] = np.sin(bet) * np.sin(gam)
    v[2] = np.cos(bet)

    # Calculate vector u
    u = np.zeros(3)
    u[0] = M[0, 0] * v[0] + M[0, 1] * v[1] + M[0, 2] * v[2]
    u[1] = M[1, 0] * v[0] + M[1, 1] * v[1] + M[1, 2] * v[2]
    u[2] = M[2, 0] * v[0] + M[2, 1] * v[1] + M[2, 2] * v[2]

    # Convert to theta, phi
    th = np.arctan2(np.hypot(u[0], u[1]), u[2])
    phi = np.arctan2(u[1], u[0])

    return th, phi
def solve_for_nearest(px, py, rx, ry):
    dpx = polyder(px)
    dpy = polyder(py)
    cp = polymul(dpx, px) + polymul(dpy, py)
    cp = polyadd(cp, -rx * dpx)
    cp = polyadd(cp, -ry * dpy)
    t = roots(cp)
    t = real(t[isreal(t)])
    t = t[(t >= 0) * (t <= 1)]
    ##tt = linspace(0,1,100)
    ##from pylab import plot
    ##plot( polyval(px,tt), polyval(py,tt), 'k', hold = 0 )
    ##plot( [rx],[ry], 'r.' )
    ##plot( polyval(px,t[isreal(t)*(real(t)>=0)*(real(t)<=1)]),
    ##      polyval(py,t[isreal(t)*(real(t)>=0)*(real(t)<=1)]), 'o' )
    ##pdb.set_trace()
    if len(t):
        if len(t) == 1:
            return t[0]
        else:
            ux = polyval(px, t)
            uy = polyval(py, t)
            d = hypot(ux - rx, uy - ry)
            return t[d == d.min()][0]
    else:
        t = array([0.0, 1.0])
        ux = polyval(px, t)
        uy = polyval(py, t)
        d = hypot(ux - rx, uy - ry)
        if d[0] < d[1]:
            return 0.0
        else:
            return 1.0
def center_distance(self, bubble, bubbles):
    return np.hypot(bubble[0] - bubbles[:, 0], bubble[1] - bubbles[:, 1])
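
# Illustration: np.hypot broadcasts, so the one-liner above returns one
# distance per row of `bubbles`. Stand-alone example with made-up coordinates:
import numpy as np

bubble = np.array([0.0, 0.0, 1.0])        # x, y, radius of one bubble
bubbles = np.array([[3.0, 4.0, 1.0],      # x, y, radius per row
                    [6.0, 8.0, 2.0],
                    [0.0, 1.0, 0.5]])

print(np.hypot(bubble[0] - bubbles[:, 0], bubble[1] - bubbles[:, 1]))  # [ 5. 10.  1.]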
def align_face(filepath, predictor):
    """
    :param filepath: str
    :return: PIL Image
    """
    lm = get_landmark(filepath, predictor)

    lm_chin = lm[0: 17]            # left-right
    lm_eyebrow_left = lm[17: 22]   # left-right
    lm_eyebrow_right = lm[22: 27]  # left-right
    lm_nose = lm[27: 31]           # top-down
    lm_nostrils = lm[31: 36]       # top-down
    lm_eye_left = lm[36: 42]       # left-clockwise
    lm_eye_right = lm[42: 48]      # left-clockwise
    lm_mouth_outer = lm[48: 60]    # left-clockwise
    lm_mouth_inner = lm[60: 68]    # left-clockwise

    # Calculate auxiliary vectors.
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg

    # Choose oriented crop rectangle.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2

    # read image
    img = PIL.Image.open(filepath)

    output_size = 112
    transform_size = 112
    enable_padding = True

    # Shrink.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)),
                 int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, PIL.Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink

    # Crop.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))),
            int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0),
            min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]

    # Pad.
    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))),
           int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0),
           max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
                          1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
        quad += pad[:2]

    # Transform.
    img = img.transform((transform_size, transform_size), PIL.Image.QUAD,
                        (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)

    # Save aligned image.
    return img
def run(self): mode = self.mode if mode not in ('zone', 'both', 'full'): self.log.error("Unlnowm mode '%s' . Allowed: [zone|full|both]", mode) return -1 recipe = self.recipe # self.log.debug(recipe) zone = np.array(recipe.get('zone')) products = recipe['products'] if mode in ('zone', 'both') and zone is None: self.log.error('No zone info in recipe') return -1 sigma_avg_names = recipe['channels'].get('sigma_avg', []) sigma_names = recipe['channels'].get('sigma', []) sigma_vv_names = recipe['channels'].get('sigmaVV', []) sigma_vh_names = recipe['channels'].get('sigmaVH', []) coh_avg_names = recipe['channels'].get('coh_avg', []) coh_names = recipe['channels'].get('coh', []) coh_vv_names = recipe['channels'].get('cohVV', []) coh_vh_names = recipe['channels'].get('cohVH', []) channel_names = sigma_names + sigma_avg_names + sigma_vv_names + sigma_vh_names + coh_names + coh_avg_names + coh_vv_names + coh_vh_names full_shape, _ = self.envi.read_header(channel_names[0]) self.log.info({'full shape:': full_shape}) # zone = [[0, 0], [full_shape[0], full_shape[1]]] if zone is not None: zone_shape = (zone[1][0] - zone[0][0], zone[1][1] - zone[0][1]) self.log.info({'Zone': zone, 'Shape': full_shape}) nproducts = ( (len(sigma_names) if 'sigma' in products else 0) + (1 if 'sigma_avg' in products else 0) + (len(sigma_vv_names) if 'sigma_hypot' in products else 0) + (len(sigma_vv_names) if 'sigma_pol' in products else 0) + (len(coh_names) if 'coh' in products else 0) + (1 if 'coh_avg' in products else 0) + (len(coh_vv_names) if 'coh_hypot' in products else 0) + (len(coh_vv_names) if 'coh_pol' in products else 0)) if mode in ('zone', 'both'): tnsr_zone = np.empty((zone_shape[0], zone_shape[1], nproducts), dtype=np.float32) bd_zone = np.zeros((zone_shape[0], zone_shape[1]), dtype=np.bool) if mode in ('full', 'both'): tnsr_full = np.empty((full_shape[0], full_shape[1], nproducts), dtype=np.float32) bd_full = np.zeros((full_shape[0], full_shape[1]), dtype=np.bool) product_index = 0 if ('sigma' in products): params = products['sigma'] for sn in sigma_names: self.log.debug('sigma %s', sn) s = self.envi.load(sn)[0] if mode == 'zone': s = s[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] bad_data = (s < 1e-6) | (s > 10) | (s < 1e-6) | (s > 10) s = np.clip(s, 1e-6, 10) s = np.log10(s) fix_pixels(s, bad_data) s = anisotropic_diffusion(s, params[0], params[1], 0.2, option=1) if mode == 'zone': tnsr_zone[..., product_index] = s product_index += 1 bd_zone |= bad_data elif mode == 'full': tnsr_full[..., product_index] = s product_index += 1 bd_full |= bad_data elif mode == 'both': tnsr_full[..., product_index] = s tnsr_zone[..., product_index] = s[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] product_index += 1 bd_full |= bad_data bd_zone |= bad_data[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] if ('sigma_avg' in products): params = products['sigma_avg'] if mode in ('zone', 'both'): savg_zone = np.zeros(zone_shape, dtype=np.float32) if mode in ('full', 'both'): savg_full = np.zeros(full_shape, dtype=np.float32) for sn in sigma_avg_names: self.log.debug("sigma_avg %s", sn) s = self.envi.load(sn)[0] if mode == 'zone': s = s[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] bad_data = (s < 1e-6) | (s > 10) | (s < 1e-6) | (s > 10) s = np.clip(s, 1e-6, 10) s = np.log10(s) fix_pixels(s, bad_data) if mode == 'zone': savg_zone += s bd_zone |= bad_data elif mode == 'full': savg_full += s bd_full |= bad_data elif mode == 'both': savg_full += s savg_zone += s[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] bd_full |= bad_data bd_zone |= 
bad_data[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] if mode in ('zone', 'both'): tnsr_zone[..., product_index] = anisotropic_diffusion( savg_zone / len(sigma_avg_names), params[0], params[1], 0.2, option=1) if mode in ('full', 'both'): tnsr_full[..., product_index] = anisotropic_diffusion( savg_full / len(sigma_avg_names), params[0], params[1], 0.2, option=1) product_index += 1 if ('sigma_hypot' in products) or ('sigma_pol' in products): if 'sigma_hypot' in products: params = products['sigma_hypot'] else: params = products['sigma_pol'] for svvn, svhn in zip(sigma_vv_names, sigma_vh_names): self.log.debug({'svvn': svvn, 'svhn': svhn}) svv = self.envi.load(svvn)[0] svh = self.envi.load(svhn)[0] if mode == 'zone': svv = svv[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] svh = svh[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] bad_data = (svv < 1e-6) | (svv > 10) | (svh < 1e-6) | (svh > 10) svh = np.clip(svh, 1e-6, 10) sv = np.clip(np.hypot(svv, svh), 1e-6, 10) svpol = None if 'sigma_pol' in products: svpol = np.arcsin(svh / sv) fix_pixels(svpol, bad_data) svpol = gaussian_filter(svpol, params[2]) svpol = anisotropic_diffusion(svpol, params[3], params[4], 0.2, option=1) svv = None svh = None sv = np.log10(sv) fix_pixels(sv, bad_data) sv = anisotropic_diffusion(sv, params[0], params[1], 0.2, option=1) if mode == 'zone': if 'sigma_hypot' in products: tnsr_zone[..., product_index] = sv product_index += 1 if 'sigma_pol' in products: tnsr_zone[..., product_index] = svpol product_index += 1 bd_zone |= bad_data elif mode == 'full': if 'sigma_hypot' in products: tnsr_full[..., product_index] = sv product_index += 1 if 'sigma_pol' in products: tnsr_full[..., product_index] = svpol product_index += 1 bd_full |= bad_data elif mode == 'both': if 'sigma_hypot' in products: tnsr_full[..., product_index] = sv tnsr_zone[..., product_index] = sv[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] product_index += 1 if 'sigma_pol' in products: tnsr_full[..., product_index] = svpol tnsr_zone[..., product_index] = svpol[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] product_index += 1 bd_full |= bad_data bd_zone |= bad_data[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] if ('coh' in products): params = products['coh'] for cn in coh_names: self.log.debug('coh %s', cn) c = self.envi.load(cn)[0] if mode == 'zone': c = c[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] bad_data = (c < 0) | (c > 1) | (c < 0) | (c > 1) c = np.clip(c, 0, 1) fix_pixels(c, bad_data) c = anisotropic_diffusion(c, params[0], params[1], 0.2, option=1) if mode == 'zone': tnsr_zone[..., product_index] = c product_index += 1 bd_zone |= bad_data elif mode == 'full': tnsr_full[..., product_index] = c product_index += 1 bd_full |= bad_data elif mode == 'both': tnsr_full[..., product_index] = c tnsr_zone[..., product_index] = c[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] product_index += 1 bd_full |= bad_data bd_zone |= bad_data[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] if ('coh_avg' in products): if mode in ('zone', 'both'): cavg_zone = np.zeros(zone_shape, dtype=np.float32) if mode in ('full', 'both'): cavg_full = np.zeros(full_shape, dtype=np.float32) params = products['coh_avg'] for cn in coh_avg_names: self.log.debug("coh_avg %s", cn) c = self.envi.load(cn)[0] if mode == 'zone': c = c[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] bad_data = (c < 0) | (c > 1) | (c < 0) | (c > 1) c = np.clip(c, 0, 1) fix_pixels(c, bad_data) if mode == 'zone': cavg_zone += c bd_zone |= bad_data elif mode == 'full': cavg_full += c bd_full |= bad_data elif mode == 
'both': cavg_full += c cavg_zone += c[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] bd_full |= bad_data bd_zone |= bad_data[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] if mode in ('zone', 'both'): tnsr_zone[..., product_index] = anisotropic_diffusion( cavg_zone / len(coh_avg_names), params[0], params[1], 0.2, option=1) if mode in ('full', 'both'): tnsr_full[..., product_index] = anisotropic_diffusion( cavg_full / len(coh_avg_names), params[0], params[1], 0.2, option=1) product_index += 1 if ('coh_hypot' in products) or ('coh_pol' in products): if 'coh_hypot' in products: params = products['coh_hypot'] else: params = products['coh_pol'] for cvvn, cvhn in zip(coh_vv_names, coh_vh_names): self.log.debug({'cvvn': cvvn, 'cvhn': cvhn}) cvv = self.envi.load(cvvn)[0] cvh = self.envi.load(cvhn)[0] if mode == 'zone': cvv = cvv[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] cvh = cvh[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] bad_data = (cvv < 0) | (cvv > 1) | (cvh < 0) | (cvh > 1) cvh = np.clip(cvh, 0, 1) cv = np.clip(np.hypot(cvv, cvh), 0, 2) cvpol = None if 'coh_pol' in products: cvpol = np.arcsin(cvh / cv) fix_pixels(cvpol, bad_data) cvpol = gaussian_filter(cvpol, params[2]) cvpol = anisotropic_diffusion(cvpol, params[3], params[4], 0.2, option=1) cvv = None cvh = None fix_pixels(cv, bad_data) cv = anisotropic_diffusion(cv, params[0], params[1], 0.2, option=1) if mode == 'zone': if 'coh_hypot' in products: tnsr_zone[..., product_index] = cv product_index += 1 if 'coh_pol' in products: tnsr_zone[..., product_index] = cvpol product_index += 1 bd_zone |= bad_data elif mode == 'full': if 'coh_hypot' in products: tnsr_full[..., product_index] = cv product_index += 1 if 'coh_pol' in products: tnsr_full[..., product_index] = cvpol product_index += 1 bd_full |= bad_data elif mode == 'both': if 'coh_hypot' in products: tnsr_full[..., product_index] = cv tnsr_zone[..., product_index] = cv[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] product_index += 1 if 'coh_pol' in products: tnsr_full[..., product_index] = cvpol tnsr_zone[..., product_index] = cvpol[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] product_index += 1 bd_full |= bad_data bd_zone |= bad_data[zone[0][0]:zone[1][0], zone[0][1]:zone[1][1]] if not os.path.exists(self.WORKDIR): os.makedirs(self.WORKDIR) self.log.debug("Saving tnsr and bd into %s", self.WORKDIR) if mode in ('zone', 'both'): np.save(self.WORKDIR + 'tnsr_zone.npy', tnsr_zone) np.save(self.WORKDIR + 'bd_zone.npy', bd_zone) if mode in ('full', 'both'): np.save(self.WORKDIR + 'tnsr_full.npy', tnsr_full) np.save(self.WORKDIR + 'bd_full.npy', bd_full) self.log.info('tensors processed') # system("say 'assembling complete'") return 0
def reverseUnitCycleKernel(rad, size):
    # TO USE WITH AI
    rx, ry = size / 2, size / 2
    x, y = np.indices((size, size))
    return ((np.hypot(rx - x, ry - y) - rad) > 0.5).astype(int)
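
# Illustration of the kernel builder above (assuming numpy imported as np):
# pixels farther than rad + 0.5 from the image centre are 1, the enclosed
# disk around the centre is 0.
import numpy as np

size, rad = 7, 2
rx, ry = size / 2, size / 2
x, y = np.indices((size, size))
print(((np.hypot(rx - x, ry - y) - rad) > 0.5).astype(int))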
def getBracketingIndices(evalColRow, cr):
    """
    Get the indices of `evalColRow` that bracket `cr`

    This is a special function used by TessPrf

    This function encapsulates some fairly knotty bookkeeping. Unless
    something is broken you probably want to leave this function well alone

    Inputs
    --------
    evalColRow
        (3d np array) See discussion below
    cr
        (2 element np array) The column and row to be bracketed

    Returns
    ----------
    A 4x2 numpy array. Each row represents the indices into
    `evalColRow[,,:]` representing the 4 points in `evalColRow`
    that bracket the location represented by `cr`

    Note
    -----
    The model prf is evaluated on a regular grid across the CCD. Each grid
    point can be represented in two coordinate systems; the CCD pixel
    coordinates (this PRF is evaluated at col,row=24,36), and a grid index
    (this is the second grid location in column, and the third in row).

    `evalColRow` encodes a mapping from one coord sys to the other.

    The zeroth dimension of `evalColRow` encodes the column of the grid
    location (e.g. 2 in the example above). The first dimension encodes
    the row of the grid location (3 in the example), the second dimension
    encodes whether the value represents CCD column (`evalColRow[:,:,0]`)
    or CCD row (`evalColRow[:,:,1]`). The value at each array element
    represents the CCD position (either column or row).

    The return value of this function is a list of the 4 grid locations
    that bracket the input `cr` in column and row (below left, below right,
    above left, above right).

    Example
    ---------
    `evalColRow` consists of 4 points at which the model prf is evaluated

    .. code-block:: python

        a[0,0,0] = 45
        a[0,0,1] = 1    # Zeroth prf evaluated at (col, row) = (45,1)
        a[0,1,0] = 45
        a[0,1,1] = 128
        a[1,0,0] = 183
        a[1,0,1] = 1
        a[1,1,0] = 183
        a[1,1,1] = 128

        cr = (45, 45)   # Somewhere in the middle

    The return value is

    .. code-block:: python

        [ [0,0], [1,0], [1,0], [1,1] ]

    Because these are the indices that bracket the input col,row
    """

    tmp = (evalColRow - cr)
    dist = np.hypot(tmp[:, :, 0], tmp[:, :, 1])
    wh = np.unravel_index(np.argmin(dist), dist.shape)

    nearestEval = evalColRow[wh]
    delta = cr - nearestEval

    # Find the 3 other evaluations of the PRF that bracket (col, row)
    tmp = []
    if delta[0] >= 0 and delta[1] >= 0:   # A
        tmp.append(wh)
        tmp.append(wh + np.array((+1, +0)))
        tmp.append(wh + np.array((+0, +1)))
        tmp.append(wh + np.array((+1, +1)))
    elif delta[0] < 0 and delta[1] >= 0:  # S
        tmp.append(wh + np.array((-1, +0)))
        tmp.append(wh)
        tmp.append(wh + np.array((-1, +1)))
        tmp.append(wh + np.array((+0, +1)))
    elif delta[0] < 0 and delta[1] < 0:   # T
        tmp.append(wh + np.array((-1, -1)))
        tmp.append(wh + np.array((-0, -1)))
        tmp.append(wh + np.array((-1, +0)))
        tmp.append(wh)
    else:                                 # C
        tmp.append(wh + np.array((-0, -1)))
        tmp.append(wh + np.array((+1, -1)))
        tmp.append(wh)
        tmp.append(wh + np.array((+1, +0)))
    tmp = np.array(tmp)

    # Check the order of values is correct
    c0 = tmp[:, 0]
    r0 = tmp[:, 1]
    assert c0[0] == c0[2]
    assert c0[1] == c0[3]
    assert r0[0] == r0[1]
    assert r0[2] == r0[3]

    # Bounds checking
    assert np.min(tmp) >= 0
    assert np.max(tmp[:, 0]) < evalColRow.shape[0]
    assert np.max(tmp[:, 1]) < evalColRow.shape[1]
    return tmp
levels = 15
cset = ax.contourf(X2, X1, RESULT_DEPS.T, levels=levels, inline=1, cmap='coolwarm')
cset = ax.contour(X2, X1, RESULT_DEPS.T, levels=levels, inline=1, colors='k')
ax.clabel(cset, colors='k', fmt='%2.0f')

M = np.hypot(RESULT_DX, RESULT_DY)
q = ax.quiver(X2__, X1__, -RESULT_DY, RESULT_DX, units='x', scale=120)
ax.scatter(X2__, X1__, color='0.5', s=10)

ax.grid()
plt.ylim(min(X1) - 5, max(X1) + 10)
plt.savefig('DXDYDE.png')

# %% PLOT DE
fig, ax = plt.subplots(num='DEPS')
plt.title('DEPS')
levels = 15
cset = ax.contourf(X2, X1, RESULT_DEPS.T,
from scipy import misc
import matplotlib.pyplot as plt
from scipy import ndimage
import numpy as np

face = misc.face(gray=True)

im = np.zeros((256, 256))
im[64:-64, 64:-64] = 1

facerotated = ndimage.rotate(face, 15, mode="constant")
facefiltered = ndimage.gaussian_filter(facerotated, 5)

sobelx = ndimage.sobel(im, axis=0, mode="constant")
sobely = ndimage.sobel(im, axis=1, mode="constant")
sobel = np.hypot(sobelx, sobely)

plt.figure()
plt.subplot(331)
plt.imshow(face, cmap=plt.cm.gray)
plt.subplot(332)
plt.imshow(facerotated, cmap=plt.cm.gray)
plt.subplot(333)
plt.imshow(facefiltered, cmap=plt.cm.gray)
plt.subplot(335)
plt.imshow(sobelx, cmap=plt.cm.gray)
plt.subplot(336)
plt.imshow(sobely, cmap=plt.cm.gray)
plt.subplot(338)
plt.imshow(sobel, cmap=plt.cm.gray)
plt.show()
def ecef2geodetic_old(x: float, y: float, z: float,
                      ell: Ellipsoid = None, deg: bool = True) -> Tuple[float, float, float]:
    """
    convert ECEF (meters) to geodetic coordinates

    input
    -----
    x,y,z  [meters] target ECEF location                         [0,Infinity)
    ell    reference ellipsoid
    deg    degrees input/output  (False: radians in/out)

    output
    ------
    lat,lon   (degrees/radians)
    alt  (meters)

    Algorithm is based on
    http://www.astro.uni.torun.pl/~kb/Papers/geod/Geod-BG.htm
    This algorithm provides a converging solution to the latitude equation
    in terms of the parametric or reduced latitude form (v).
    This algorithm provides a uniform solution over all latitudes as it does
    not involve division by cos(phi) or sin(phi).
    """
    if ell is None:
        ell = Ellipsoid()

    ea = ell.a
    eb = ell.b
    rad = hypot(x, y)
    # Constant required for Latitude equation
    rho = arctan2(eb * z, ea * rad)
    # Constant required for latitude equation
    c = (ea**2 - eb**2) / hypot(ea * rad, eb * z)
    # Starter for the Newtons Iteration Method
    vnew = arctan2(ea * z, eb * rad)
    # Initializing the parametric latitude
    v = 0
    for _ in range(5):
        v = deepcopy(vnew)
        # %% Newtons Method for computing iterations
        vnew = v - ((2 * sin(v - rho) - c * sin(2 * v)) /
                    (2 * (cos(v - rho) - c * cos(2 * v))))

        if allclose(v, vnew):
            break
    # %% Computing latitude from the root of the latitude equation
    lat = arctan2(ea * tan(vnew), eb)
    # by inspection
    lon = arctan2(y, x)

    alt = (((rad - ea * cos(vnew)) * cos(lat)) +
           ((z - eb * sin(vnew)) * sin(lat)))

    with np.errstate(invalid='ignore'):
        # NOTE: need np.any() to handle scalar and array cases
        if np.any((lat < -pi / 2) | (lat > pi / 2)):
            raise ValueError('-90 <= lat <= 90')

        if np.any((lon < -pi) | (lon > 2 * pi)):
            raise ValueError('-180 <= lon <= 360')

    if deg:
        return degrees(lat), degrees(lon), alt
    else:
        return lat, lon, alt  # radians
selem.astype(int)

a = np.zeros((7, 7), dtype=int)
a[1:6, 2:5] = 1
print(a)
print(ndimage.binary_erosion(a).astype(a.dtype))
print(ndimage.binary_erosion(a, structure=np.ones((5, 5))).astype(a.dtype))

# Feature Extraction
im = np.zeros((256, 256))
im[64:-64, 64:-64] = 1
im = ndimage.rotate(im, 15, mode='constant')
im = ndimage.gaussian_filter(im, 8)
plt.imshow(im, cmap=plt.cm.gray)

sx = ndimage.sobel(im, axis=0, mode='constant')
sy = ndimage.sobel(im, axis=1, mode='constant')
sob = np.hypot(sx, sy)
plt.imshow(sob)

# Segmentation
# Histogram-based
n = 10
l = 256
im = np.zeros((l, l))
np.random.seed(1)
points = l * np.random.random((2, n**2))
im[(points[0]).astype(int), (points[1]).astype(int)] = 1
im = ndimage.gaussian_filter(im, sigma=l / (4. * n))
mask = (im > im.mean()).astype(float)
mask += 0.1 * im
img = mask + 0.2 * np.random.randn(*mask.shape)
q = np.dot(scipy.linalg.inv(innerMat), np.linalg.multi_dot([bPolyMat.T, W, z]))

# this is just for visual
controlPoints = ConstructControlPoints(q, order)
print(controlPoints)

# Evaluate it on a grid...
startIt = time.time()
xx, yy, zz = GenerateSurfaceMesh(pGridsize, controlPoints[:, 2], u, v,
                                 xDataMin, yDataMin, xDataRange, yDataRange, order)

##############
# image filter idea, should pull this to separate class & file
sobX = ndimage.sobel(zz, axis=1)
sobY = ndimage.sobel(zz, axis=0)
sobG = np.hypot(sobX, sobY)

sobXX = ndimage.sobel(sobX, axis=1)
sobYY = ndimage.sobel(sobY, axis=0)
sobXY = ndimage.sobel(sobX, axis=0)
hessDet = sobXX * sobYY - sobXY * sobXY  # note element-wise multiplication

# remove extra pad from computation (maybe need more)
cut = 2
sobG = sobG[cut:-cut, cut:-cut]
sobXX = sobXX[cut:-cut, cut:-cut]
hessDet = hessDet[cut:-cut, cut:-cut]

# filter test
maskedG = np.zeros(sobG.shape)
zDepth = zz.max() - zz.min()
def detect_edges(self, im, thr=0):
    sx = ndimage.sobel(im, axis=0, mode='constant')
    sy = ndimage.sobel(im, axis=1, mode='constant')
    sob = np.hypot(sx, sy)
    return sob > thr
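
# Illustration of the same Sobel-magnitude idea, self-contained (the class
# wrapper above is irrelevant here): the edges of a filled square show up as
# a thin band of True pixels in the thresholded gradient magnitude.
import numpy as np
from scipy import ndimage

im = np.zeros((64, 64))
im[16:-16, 16:-16] = 1.0                        # a filled square

sx = ndimage.sobel(im, axis=0, mode='constant')
sy = ndimage.sobel(im, axis=1, mode='constant')
edges = np.hypot(sx, sy) > 0                    # True only near the outline

print(edges.sum(), im.size)                     # a few hundred edge pixels out of 4096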
def runSim(self): self.makeModelDirs() self.getTimeParams() self.getBrainModelParams() self.getSeedParams() for seed, seedParams in enumerate(self.seedsParamsList): print('\nSeed {}'.format(seed)) self.brainModelParams = copy.copy(seedParams) self.makeSeedDir(seed) self.getBrainElementsInitialActivities() self.integrateBrainElementsActivities() self.plotBrainElementsActivities() # self.getHealtySteadyStateMAPE() # if self.saveData: # self.saveEvents() # self.saveBrainElementsActivities() self.CX = self.y[:, 3].copy() self.CXHealthyMean = self.CX[self.healtyRange[0] * 100:self.healtyRange[1] * 100].mean() self.CXHealthyRange = [0, self.CXHealthyMean * 2] self.efPosHistory = np.zeros([self.nStep, 2]) self.DA = self.y[:, 5].copy() self.DAHealthyMean = self.DA[self.healtyRange[0] * 100:self.healtyRange[1] * 100].mean() self.DADamaged1Mean = self.DA[self.damagedRange1[0] * 100:self.damagedRange1[1] * 100].mean() self.DADamaged2Mean = self.DA[self.damagedRange2[0] * 100:self.damagedRange2[1] * 100].mean() self.DADamaged3Mean = self.DA[self.damagedRange3[0] * 100:self.damagedRange3[1] * 100].mean() self.shoulderRange = np.deg2rad(np.array([-60.0, 150.0])) self.elbowRange = np.deg2rad(np.array([0.0, 180.0])) self.arm = Arm(self.shoulderRange, self.elbowRange) self.desiredAngles = np.ones(2) * 0.5 self.Kp1 = 20.0 self.Kd1 = 1.5 self.Kp2 = 10.0 self.Kd2 = 1.0 if self.armPlot == True: self.simFig = plt.figure("ARM SIMULATION", dpi=300) self.simPlot = self.simFig.add_subplot(111) self.simPlot.set_xlim([-1.0, 1.0]) self.simPlot.set_ylim([0.0, 1.0]) self.text1 = plt.figtext(.02, .72, "sec = %s" % (0.00), style='italic', bbox={'facecolor': 'yellow'}) self.armPlot, = self.simPlot.plot( [0, self.arm.xElbow, self.arm.xEndEf], [0, self.arm.yElbow, self.arm.yEndEf], 'k-', color='black', linewidth=5) self.xDesAng = self.arm.L1*np.cos(self.desiredAngles[0]) + \ self.arm.L2*np.cos(self.desiredAngles[0]+self.desiredAngles[1]) self.yDesAng = self.arm.L1*np.sin(self.desiredAngles[0]) + \ self.arm.L2*np.sin(self.desiredAngles[0]+self.desiredAngles[1]) self.desEnd, = self.simPlot.plot([self.xDesAng], [self.yDesAng], 'o', color='green', markersize=10) for t in range(self.nStep): self.desiredAngles[0] = utils.changeRange( self.CX[t], self.CXHealthyRange[0], self.CXHealthyRange[1], self.shoulderRange[0], self.shoulderRange[1]) self.desiredAngles[1] = utils.changeRange( self.CX[t], self.CXHealthyRange[0], self.CXHealthyRange[1], self.elbowRange[0], self.elbowRange[1]) self.Torque = self.arm.PD_controller( [self.desiredAngles[0], self.desiredAngles[1]], self.Kp1, self.Kp2, self.Kd1, self.Kd2) # compute torques self.arm.SolveDirectDynamics(self.Torque[0], self.Torque[1]) # move the arm self.efPosHistory[t, :] = np.array( [self.arm.xEndEf, self.arm.yEndEf]) if self.armPlot and t > self.startArmPlot: self.text1.set_text("sec = {}".format(t / 100.0)) self.xDesAng = self.arm.L1*np.cos(self.desiredAngles[0]) +\ self.arm.L2*np.cos(self.desiredAngles[0]+self.desiredAngles[1]) self.yDesAng = self.arm.L1*np.sin(self.desiredAngles[0]) +\ self.arm.L2*np.sin(self.desiredAngles[0]+self.desiredAngles[1]) self.desEnd.set_data([self.xDesAng], [self.yDesAng]) self.armPlot.set_data( [0, self.arm.xElbow, self.arm.xEndEf], [0, self.arm.yElbow, self.arm.yEndEf]) plt.pause(self.dt) if self.saveData: efColNames = ['X', 'Y'] self.efDataFrame = pd.DataFrame(data=self.efPosHistory, columns=efColNames) efPath = os.path.join(self.armDir, 'efPos.csv') self.efDataFrame.to_csv(efPath) self.efX = self.efPosHistory[:, 0].copy() self.efY = 
self.efPosHistory[:, 1].copy() self.efdX = np.ediff1d(self.efX, to_begin=np.array([0])) self.efdY = np.ediff1d(self.efY, to_begin=np.array([0])) self.euclDist = np.hypot(self.efdX, self.efdY) self.meanPhys, self.stdPhys = \ self.getTremorOscillationAmplitude(self.healtyRange) if self.modelType == 'DRNDamage': self.meanDmg1, self.stdDmg1 = \ self.getTremorOscillationAmplitude(self.damagedRange1) self.meanDmg2, self.stdDmg2 = \ self.getTremorOscillationAmplitude(self.damagedRange2) self.meanDmg3, self.stdDmg3 = \ self.getTremorOscillationAmplitude(self.damagedRange3) self.seedTremorData[seed, 0] = self.meanPhys if self.modelType == 'DRNDamage': self.seedTremorData[seed, 1] = self.meanDmg1 self.seedTremorData[seed, 2] = self.meanDmg2 self.seedTremorData[seed, 3] = self.meanDmg3 self.seedDA5HTData[seed, 0] = self.DAHealthyMean self.seedDA5HTData[seed, 1] = self.DADamaged1Mean self.seedDA5HTData[seed, 2] = self.DADamaged2Mean self.seedDA5HTData[seed, 3] = self.DADamaged3Mean self.dataColumns = ['healthy', 'damaged 1', 'damaged 2', 'damaged 3'] self.tremorResults = pd.DataFrame(data=self.seedTremorData, columns=self.dataColumns) if self.tremorPlot: self.healthyLabel = 'HEALTHY' self.xTicksLabels = [ self.healthyLabel, 'DAMAGE 1', 'DAMAGE 2', 'DAMAGE 3', ] self.oscillationAmpFig = \ plt.figure( "oscillation amplitude", dpi=300 ) self.oscillationAmpPlot = self.oscillationAmpFig.add_subplot(111) self.oscillationAmpPlot.set_ylabel("Oscillation amplitude (m)", fontsize=10, fontweight='bold') self.oscillationAmpPlot.set_xlabel("DRN Lesion", fontsize=10, fontweight='bold') self.oscillationAmpPlot.set_ylim(0.0, 0.01) self.oscillationAmpPlot.set_xticks(np.arange(5)) self.oscillationAmpPlot.set_xticklabels(self.xTicksLabels) self.oscillationAmpPlot.tick_params(axis='both', which='major', labelsize=7) self.oscillationAmpPlot.tick_params(axis='both', which='minor', labelsize=7) self.oscillationAmpPlot.errorbar([0], self.seedTremorData[:, 0].mean(), yerr=self.seedTremorData[:, 0].std(), marker='s', markersize='7', mec='black', mfc='white', ecolor='black', fmt='', linestyle='solid') self.oscillationAmpPlot.errorbar([1], self.seedTremorData[:, 1].mean(), yerr=self.seedTremorData[:, 1].std(), marker='s', markersize='7', mec='black', mfc='white', ecolor='black', fmt='', linestyle='solid') self.oscillationAmpPlot.errorbar([2], self.seedTremorData[:, 2].mean(), yerr=self.seedTremorData[:, 2].std(), marker='s', markersize='7', mec='black', mfc='white', ecolor='black', fmt='', linestyle='solid') self.oscillationAmpPlot.errorbar([3], self.seedTremorData[:, 3].mean(), yerr=self.seedTremorData[:, 3].std(), marker='s', markersize='7', mec='black', mfc='white', ecolor='black', fmt='', linestyle='solid') self.oscillationAmpPlot.plot(list(range(4)), [ self.seedTremorData[:, 0].mean(), self.seedTremorData[:, 1].mean(), self.seedTremorData[:, 2].mean(), self.seedTremorData[:, 3].mean() ], color='black') if self.saveData: self.dataColumns = [ 'healthy', 'damaged1', 'damaged2', 'damaged3' ] self.tremorResults = pd.DataFrame(data=self.seedTremorData, columns=self.dataColumns) tremorPath = os.path.join(self.modelDir, 'tremors.csv') self.tremorResults.to_csv(tremorPath) self.DAConcFig = \ plt.figure( "DA concentration reduction", dpi=300 ) self.DAConcPlot = self.DAConcFig.add_subplot(111) self.DAConcPlot.set_ylabel("DA concentration (nM)", fontsize=10, fontweight='bold') self.DAConcPlot.set_xlabel("DRN Lesion", fontsize=10, fontweight='bold') # self.DAConcPlot.set_ylim(0.0, 3.0) self.DAConcPlot.set_xticks(np.arange(5)) 
self.DAConcPlot.set_xticklabels(self.xTicksLabels) self.DAConcPlot.tick_params( axis='both', which='major', labelsize=7, # fontweight= 'bold' ) self.DAConcPlot.tick_params( axis='both', which='minor', labelsize=7, # fontweight= 'bold' ) self.DAConcPlot.errorbar([0], self.seedDA5HTData[:, 0].mean(), yerr=self.seedDA5HTData[:, 0].std(), marker='s', markersize='7', mec='black', mfc='white', ecolor='black', fmt='', linestyle='none') self.DAConcPlot.errorbar([1], self.seedDA5HTData[:, 1].mean(), yerr=self.seedDA5HTData[:, 1].std(), marker='s', markersize='7', mec='black', mfc='white', ecolor='black', fmt='', linestyle='none') self.DAConcPlot.errorbar([2], self.seedDA5HTData[:, 2].mean(), yerr=self.seedDA5HTData[:, 2].std(), marker='s', markersize='7', mec='black', mfc='white', ecolor='black', fmt='', linestyle='none') self.DAConcPlot.errorbar([3], self.seedDA5HTData[:, 3].mean(), yerr=self.seedDA5HTData[:, 3].std(), marker='s', markersize='7', mec='black', mfc='white', ecolor='black', fmt='', linestyle='none') self.DAConcPlot.plot(list(range(4)), [ self.seedDA5HTData[:, 0].mean(), self.seedDA5HTData[:, 1].mean(), self.seedDA5HTData[:, 2].mean(), self.seedDA5HTData[:, 3].mean() ], color='black') if self.saveData: self.dataColumns = [ 'healthy', 'damaged1', 'damaged2', 'damaged3' ] self.DAResults = pd.DataFrame(data=self.seedDA5HTData, columns=self.dataColumns) DAPath = os.path.join(self.modelDir, 'DAconc.csv') self.DAResults.to_csv(DAPath)
# fig.savefig('interp_data_cut_%s_%i.png' % (filename.split('.')[0], number))

# Load data
data = np.loadtxt(filename, delimiter=',')
x = data[:, 0]
y = data[:, 1]
z = data[:, 2]
v = data[:, 3]

plt.style.use('./PaperFig_0.8_textwidth.mplstyle')
fig2, ax2 = plt.subplots()
fig3, ax3 = plt.subplots()

for xx, yy in [[250, 0], [0, 0], [-250, 0]]:
    distances = np.hypot(x - xx, y - yy)
    axon_i = np.where(distances == distances.min())
    # Modify xx and yy so that they match the real ones
    xx = x[axon_i][0]
    yy = y[axon_i][0]
    ax2.plot(z[axon_i], v[axon_i], label=r'$x = %i$ ' % xx + r'$\rm \mu m$')
    ax3.plot(z[axon_i], np.abs(v[axon_i]), label=r'$x = %i$ ' % xx + r'$\rm \mu m$')

ax2.set_xlim(0.2875e4, 0.7125e4)
ax2.set_xlabel(r'$\rm z$' + ' (' + r'$\rm \mu m$' + ')')
ax2.set_ylabel(r'$\rm v_{E}$' + ' (' + r'$\rm mV$' + ')')
# ax2.legend(loc='best', fontsize=8)
def hog(image, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), block_norm='L2-Hys', visualize=False, visualise=None, transform_sqrt=False, feature_vector=True, multichannel=None): """Extract Histogram of Oriented Gradients (HOG) for a given image. Compute a Histogram of Oriented Gradients (HOG) by 1. (optional) global image normalization 2. computing the gradient image in `row` and `col` 3. computing gradient histograms 4. normalizing across blocks 5. flattening into a feature vector Parameters ---------- image : (M, N[, C]) ndarray Input image. orientations : int, optional Number of orientation bins. pixels_per_cell : 2-tuple (int, int), optional Size (in pixels) of a cell. cells_per_block : 2-tuple (int, int), optional Number of cells in each block. block_norm : str {'L1', 'L1-sqrt', 'L2', 'L2-Hys'}, optional Block normalization method: ``L1`` Normalization using L1-norm. ``L1-sqrt`` Normalization using L1-norm, followed by square root. ``L2`` Normalization using L2-norm. ``L2-Hys`` Normalization using L2-norm, followed by limiting the maximum values to 0.2 (`Hys` stands for `hysteresis`) and renormalization using L2-norm. (default) For details, see [3]_, [4]_. visualize : bool, optional Also return an image of the HOG. For each cell and orientation bin, the image contains a line segment that is centered at the cell center, is perpendicular to the midpoint of the range of angles spanned by the orientation bin, and has intensity proportional to the corresponding histogram value. transform_sqrt : bool, optional Apply power law compression to normalize the image before processing. DO NOT use this if the image contains negative values. Also see `notes` section below. feature_vector : bool, optional Return the data as a feature vector by calling .ravel() on the result just before returning. multichannel : boolean, optional If True, the last `image` dimension is considered as a color channel, otherwise as spatial. Returns ------- out : (n_blocks_row, n_blocks_col, n_cells_row, n_cells_col, n_orient) ndarray HOG descriptor for the image. If `feature_vector` is True, a 1D (flattened) array is returned. hog_image : (M, N) ndarray, optional A visualisation of the HOG image. Only provided if `visualize` is True. References ---------- .. [1] https://en.wikipedia.org/wiki/Histogram_of_oriented_gradients .. [2] Dalal, N and Triggs, B, Histograms of Oriented Gradients for Human Detection, IEEE Computer Society Conference on Computer Vision and Pattern Recognition 2005 San Diego, CA, USA, https://lear.inrialpes.fr/people/triggs/pubs/Dalal-cvpr05.pdf, :DOI:`10.1109/CVPR.2005.177` .. [3] Lowe, D.G., Distinctive image features from scale-invatiant keypoints, International Journal of Computer Vision (2004) 60: 91, http://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf, :DOI:`10.1023/B:VISI.0000029664.99615.94` .. [4] Dalal, N, Finding People in Images and Videos, Human-Computer Interaction [cs.HC], Institut National Polytechnique de Grenoble - INPG, 2006, https://tel.archives-ouvertes.fr/tel-00390303/file/NavneetDalalThesis.pdf Notes ----- The presented code implements the HOG extraction method from [2]_ with the following changes: (I) blocks of (3, 3) cells are used ((2, 2) in the paper; (II) no smoothing within cells (Gaussian spatial window with sigma=8pix in the paper); (III) L1 block normalization is used (L2-Hys in the paper). Power law compression, also known as Gamma correction, is used to reduce the effects of shadowing and illumination variations. 
The compression makes the dark regions lighter. When the kwarg `transform_sqrt` is set to ``True``, the function computes the square root of each color channel and then applies the hog algorithm to the image. """ image = np.atleast_2d(image) if multichannel is None: multichannel = (image.ndim == 3) ndim_spatial = image.ndim - 1 if multichannel else image.ndim if ndim_spatial != 2: raise ValueError('Only images with 2 spatial dimensions are ' 'supported. If using with color/multichannel ' 'images, specify `multichannel=True`.') """ The first stage applies an optional global image normalization equalisation that is designed to reduce the influence of illumination effects. In practice we use gamma (power law) compression, either computing the square root or the log of each color channel. Image texture strength is typically proportional to the local surface illumination so this compression helps to reduce the effects of local shadowing and illumination variations. """ if transform_sqrt: image = np.sqrt(image) """ The second stage computes first order image gradients. These capture contour, silhouette and some texture information, while providing further resistance to illumination variations. The locally dominant color channel is used, which provides color invariance to a large extent. Variant methods may also include second order image derivatives, which act as primitive bar detectors - a useful feature for capturing, e.g. bar like structures in bicycles and limbs in humans. """ if image.dtype.kind == 'u': # convert uint image to float # to avoid problems with subtracting unsigned numbers image = image.astype('float') if multichannel: g_row_by_ch = np.empty_like(image, dtype=np.double) g_col_by_ch = np.empty_like(image, dtype=np.double) g_magn = np.empty_like(image, dtype=np.double) for idx_ch in range(image.shape[2]): g_row_by_ch[:, :, idx_ch], g_col_by_ch[:, :, idx_ch] = \ _hog_channel_gradient(image[:, :, idx_ch]) g_magn[:, :, idx_ch] = np.hypot(g_row_by_ch[:, :, idx_ch], g_col_by_ch[:, :, idx_ch]) # For each pixel select the channel with the highest gradient magnitude idcs_max = g_magn.argmax(axis=2) rr, cc = np.meshgrid(np.arange(image.shape[0]), np.arange(image.shape[1]), indexing='ij', sparse=True) g_row = g_row_by_ch[rr, cc, idcs_max] g_col = g_col_by_ch[rr, cc, idcs_max] else: g_row, g_col = _hog_channel_gradient(image) """ The third stage aims to produce an encoding that is sensitive to local image content while remaining resistant to small changes in pose or appearance. The adopted method pools gradient orientation information locally in the same way as the SIFT [Lowe 2004] feature. The image window is divided into small spatial regions, called "cells". For each cell we accumulate a local 1-D histogram of gradient or edge orientations over all the pixels in the cell. This combined cell-level 1-D histogram forms the basic "orientation histogram" representation. Each orientation histogram divides the gradient angle range into a fixed number of predetermined bins. The gradient magnitudes of the pixels in the cell are used to vote into the orientation histogram. 
""" s_row, s_col = image.shape[:2] c_row, c_col = pixels_per_cell b_row, b_col = cells_per_block n_cells_row = int(s_row // c_row) # number of cells along row-axis n_cells_col = int(s_col // c_col) # number of cells along col-axis # compute orientations integral images orientation_histogram = np.zeros((n_cells_row, n_cells_col, orientations)) _hoghistogram.hog_histograms(g_col, g_row, c_col, c_row, s_col, s_row, n_cells_col, n_cells_row, orientations, orientation_histogram) # now compute the histogram for each cell hog_image = None if visualise is not None: visualize = visualise warn('Argument `visualise` is deprecated and will ' 'be changed to `visualize` in v0.16', skimage_deprecation) if visualize: from .. import draw radius = min(c_row, c_col) // 2 - 1 orientations_arr = np.arange(orientations) # set dr_arr, dc_arr to correspond to midpoints of orientation bins orientation_bin_midpoints = ( np.pi * (orientations_arr + .5) / orientations) dr_arr = radius * np.sin(orientation_bin_midpoints) dc_arr = radius * np.cos(orientation_bin_midpoints) hog_image = np.zeros((s_row, s_col), dtype=float) for r in range(n_cells_row): for c in range(n_cells_col): for o, dr, dc in zip(orientations_arr, dr_arr, dc_arr): centre = tuple([r * c_row + c_row // 2, c * c_col + c_col // 2]) rr, cc = draw.line(int(centre[0] - dc), int(centre[1] + dr), int(centre[0] + dc), int(centre[1] - dr)) hog_image[rr, cc] += orientation_histogram[r, c, o] """ The fourth stage computes normalization, which takes local groups of cells and contrast normalizes their overall responses before passing to next stage. Normalization introduces better invariance to illumination, shadowing, and edge contrast. It is performed by accumulating a measure of local histogram "energy" over local groups of cells that we call "blocks". The result is used to normalize each cell in the block. Typically each individual cell is shared between several blocks, but its normalizations are block dependent and thus different. The cell thus appears several times in the final output vector with different normalizations. This may seem redundant but it improves the performance. We refer to the normalized block descriptors as Histogram of Oriented Gradient (HOG) descriptors. """ n_blocks_row = (n_cells_row - b_row) + 1 n_blocks_col = (n_cells_col - b_col) + 1 normalized_blocks = np.zeros((n_blocks_row, n_blocks_col, b_row, b_col, orientations)) for r in range(n_blocks_row): for c in range(n_blocks_col): block = orientation_histogram[r:r + b_row, c:c + b_col, :] normalized_blocks[r, c, :] = \ _hog_normalize_block(block, method=block_norm) """ The final step collects the HOG descriptors from all blocks of a dense overlapping grid of blocks covering the detection window into a combined feature vector for use in the window classifier. """ if feature_vector: normalized_blocks = normalized_blocks.ravel() if visualize: return normalized_blocks, hog_image else: return normalized_blocks
def cart2pol(x, y): theta = np.arctan2(y, x) rho = np.hypot(x, y) return theta, rho
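# Complementary helper (an addition, not in the original source): the inverse
# transform, so that cart2pol/pol2cart round-trip to within floating-point
# error. It assumes numpy is already imported as np, as in cart2pol above.
def pol2cart(theta, rho):
    x = rho * np.cos(theta)
    y = rho * np.sin(theta)
    return x, y

# Example round trip:
# theta, rho = cart2pol(3.0, 4.0)   # -> (0.927..., 5.0)
# pol2cart(theta, rho)              # -> (3.0, 4.0) up to rounding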
def calibrate_objects(obj, cat, sr=15.0 / 3600, smooth=False,
                      correct_brightness=False, smoothr=100, smoothmin=20,
                      retval='std'):
    # htm (esutil), sm (statsmodels.api) and cKDTree (scipy.spatial) are
    # expected to be imported at module level.
    h = htm.HTM(10)
    m = h.match(obj['ra'], obj['dec'], cat['ra'], cat['dec'], sr, maxmatch=0)
    oidx = m[0]
    cidx = m[1]
    dist = m[2] * 3600

    if len(oidx) < 300:
        return None

    # Normalized detector coordinates in [-1, 1]
    x, y = obj['x'][oidx], obj['y'][oidx]
    x = (x - 2560 / 2) * 2 / 2560
    y = (y - 2160 / 2) * 2 / 2160

    omag = obj['mag'][oidx]
    omagerr = obj['magerr'][oidx]
    oflux = obj['flux'][oidx]
    ofluxerr = obj['fluxerr'][oidx]
    oflags = obj['flags'][oidx]
    ochisq = obj['chisq'][oidx]
    # Only used by the optional brightness terms below (currently disabled)
    omag0 = (omag - np.min(omag)) / (np.max(omag) - np.min(omag))

    # Catalogue magnitudes and errors. np.hypot is strictly binary (a third
    # positional argument is interpreted as the output array), so the error
    # terms have to be combined with nested calls.
    try:
        cb = cat['g'][cidx]
        cv = cat['r'][cidx]
        cr = cat['i'][cidx]
        cmagerr = np.hypot(np.hypot(cat['gerr'][cidx], cat['rerr'][cidx]),
                           cat['ierr'][cidx])
    except Exception:
        cb = cat['b'][cidx]
        cv = cat['v'][cidx]
        cr = cat['r'][cidx]
        try:
            cmagerr = np.hypot(np.hypot(cat['berr'][cidx], cat['verr'][cidx]),
                               cat['rerr'][cidx])
        except Exception:
            cmagerr = np.hypot(cat['ebt'][cidx], cat['evt'][cidx])

    cmag = cv
    tmagerr = np.hypot(cmagerr, omagerr)

    delta_mag = cmag - omag
    weights = 1.0 / tmagerr

    def poly_terms(px, py, degree):
        """2D polynomial basis [1, x, y, x^2, x*y, y^2, ...] up to `degree`."""
        return [px ** (n - k) * py ** k
                for n in range(degree + 1)
                for k in range(n + 1)]

    X = poly_terms(x, y, 6)                               # 28 spatial terms
    # X += [omag0, omag0**2, omag0**3, omag0**4, omag0**5, omag0**6]
    X += [(cb - cv) * t for t in poly_terms(x, y, 3)]     # 10 colour terms in (b - v)
    X += [(cv - cr) * t for t in poly_terms(x, y, 3)]     # 10 colour terms in (v - r)
    X = np.vstack(X).T
    Y = delta_mag

    idx = (oflags == 0) & (ochisq < 2.0)

    # Iterative weighted fit with 2-sigma clipping of outliers
    for it in range(5):
        if len(X[idx]) < 100:
            print("Fit failed - %d objects" % len(X[idx]))
            return None
        C = sm.WLS(Y[idx], X[idx], weights=weights[idx]).fit()
        YY = np.sum(X * C.params, axis=1)
        idx = (np.abs(Y - YY) < 2.0 * np.std(Y - YY)) & (oflags == 0) & (ochisq < 2.0)

    print(np.std((Y - YY)[idx]), np.std(Y - YY), '-',
          np.std((Y - YY) / tmagerr), np.std(((Y - YY) / tmagerr)[idx]), '-',
          len(idx[idx]))

    mag0 = YY
    mag = omag + mag0

    # Evaluate the fitted zero point and colour coefficients for every object
    x = (obj['x'] - 2560 / 2) * 2 / 2560
    y = (obj['y'] - 2160 / 2) * 2 / 2160
    omag0 = (obj['mag'] - np.min(omag)) / (np.max(omag) - np.min(omag))

    S6 = np.vstack(poly_terms(x, y, 6)).T    # 28-term spatial basis (columns 0..27)
    S3 = np.vstack(poly_terms(x, y, 3)).T    # 10-term basis for the colour maps
    obj['mag0'] = np.dot(S6, C.params[:28])
    # obj['mag0'] += C.params[28]*omag0 + ... + C.params[33]*omag0**6
    obj['cmag'] = obj['mag'] + obj['mag0']
    obj['Cbv'] = np.dot(S3, C.params[-20:-10])
    obj['Cvr'] = np.dot(S3, C.params[-10:])

    # Correct residual brightness-dependent variations
    obj['bcorr'] = np.zeros_like(obj['cmag'])
    if correct_brightness:
        step = 0.5
        omag0 = omag + np.median(cmag - omag)
        for vmin in np.arange(5, 16, step):
            idx0 = (omag0[idx] >= vmin) & (omag0[idx] < vmin + step)
            if len(omag0[idx][idx0]) > 20:
                omag1 = obj['mag'] + np.median(cmag - omag)
                idx1 = (omag1 >= vmin) & (omag1 < vmin + step)
                obj['bcorr'][idx1] = np.average((Y - YY)[idx][idx0],
                                                weights=1.0 / tmagerr[idx][idx0])

    # Local smoothing of the residuals
    if smooth:
        kd0 = cKDTree(np.array([obj['x'][oidx][idx], obj['y'][oidx][idx]]).T)
        kd = cKDTree(np.array([obj['x'], obj['y']]).T)
        m = kd.query_ball_tree(kd0, smoothr)
        nm = np.array([len(_) for _ in m])
        corr = np.array([np.average((Y - YY)[idx][_], weights=weights[idx][_])
                         if len(_) > smoothmin else 0 for _ in m])
        icorr = nm > smoothmin
        obj['corr'] = corr
        obj['corrected'] = icorr
        obj['ncorr'] = nm
    else:
        obj['corr'] = np.zeros_like(obj['mag'])
        obj['corrected'] = np.ones_like(obj['mag'], dtype=bool)
        obj['ncorr'] = np.zeros_like(obj['mag'])

    if retval == 'std':
        return np.std(Y - YY)
    elif retval == 'map':
        return Y
    elif retval == 'fit':
        return YY
    elif retval == 'diff':
        return Y - YY
    elif retval == 'all':
        return Y, YY, obj['corr'], oidx
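# Minimal sketch of the fitting pattern used by calibrate_objects() above:
# an iteratively sigma-clipped, weighted least-squares fit of a low-order
# polynomial zero point with statsmodels. The data below are synthetic and
# the 2nd-order basis is a simplified stand-in for the full 6th-order model.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = rng.uniform(-1, 1, 500)
y = rng.uniform(-1, 1, 500)
dmag = 0.3 + 0.05 * x - 0.02 * y * y + rng.normal(0, 0.02, 500)  # fake residuals
err = np.full(500, 0.02)

X = np.vstack([np.ones_like(x), x, y, x * x, x * y, y * y]).T
idx = np.ones(len(x), dtype=bool)
for _ in range(5):
    fit = sm.WLS(dmag[idx], X[idx], weights=1.0 / err[idx]).fit()
    model = X @ fit.params
    idx = np.abs(dmag - model) < 2.0 * np.std(dmag - model)  # 2-sigma clipping
print(fit.params)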
def _integrate_rk12(x0, y0, dmap, f, maxlength): """ 2nd-order Runge-Kutta algorithm with adaptive step size. This method is also referred to as the improved Euler's method, or Heun's method. This method is favored over higher-order methods because: 1. To get decent looking trajectories and to sample every mask cell on the trajectory we need a small timestep, so a lower order solver doesn't hurt us unless the data is *very* high resolution. In fact, for cases where the user inputs data smaller or of similar grid size to the mask grid, the higher order corrections are negligible because of the very fast linear interpolation used in `interpgrid`. 2. For high resolution input data (i.e. beyond the mask resolution), we must reduce the timestep. Therefore, an adaptive timestep is more suited to the problem as this would be very hard to judge automatically otherwise. This integrator is about 1.5 - 2x as fast as RK4 and RK45 solvers (using similar Python implementations) in most setups. """ # This error is below that needed to match the RK4 integrator. It # is set for visual reasons -- too low and corners start # appearing ugly and jagged. Can be tuned. maxerror = 0.003 # This limit is important (for all integrators) to avoid the # trajectory skipping some mask cells. We could relax this # condition if we use the code which is commented out below to # increment the location gradually. However, due to the efficient # nature of the interpolation, this doesn't boost speed by much # for quite a bit of complexity. maxds = min(1. / dmap.mask.nx, 1. / dmap.mask.ny, 0.1) ds = maxds stotal = 0 xi = x0 yi = y0 xf_traj = [] yf_traj = [] while True: try: if dmap.grid.within_grid(xi, yi): xf_traj.append(xi) yf_traj.append(yi) else: raise OutOfBounds # Compute the two intermediate gradients. # f should raise OutOfBounds if the locations given are # outside the grid. k1x, k1y = f(xi, yi) k2x, k2y = f(xi + ds * k1x, yi + ds * k1y) except OutOfBounds: # Out of the domain during this step. # Take an Euler step to the boundary to improve neatness # unless the trajectory is currently empty. if xf_traj: ds, xf_traj, yf_traj = _euler_step(xf_traj, yf_traj, dmap, f) stotal += ds break except TerminateTrajectory: break dx1 = ds * k1x dy1 = ds * k1y dx2 = ds * 0.5 * (k1x + k2x) dy2 = ds * 0.5 * (k1y + k2y) nx, ny = dmap.grid.shape # Error is normalized to the axes coordinates error = np.hypot((dx2 - dx1) / (nx - 1), (dy2 - dy1) / (ny - 1)) # Only save step if within error tolerance if error < maxerror: xi += dx2 yi += dy2 try: dmap.update_trajectory(xi, yi) except InvalidIndexError: break if stotal + ds > maxlength: break stotal += ds # recalculate stepsize based on step error if error == 0: ds = maxds else: ds = min(maxds, 0.85 * ds * (maxerror / error)**0.5) return stotal, xf_traj, yf_traj
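# Standalone illustration (not from the original module) of the adaptive
# 2nd-order Runge-Kutta (Heun) step-size control used by _integrate_rk12:
# take a trial step, estimate the error from the difference between the Euler
# and Heun updates, and shrink or grow ds with the 0.85 * sqrt(maxerror/error)
# rule. The ODE and tolerances below are arbitrary demo values.
import numpy as np

def heun_adaptive(f, y0, t_end, maxerror=1e-4, maxds=0.1):
    t, y, ds = 0.0, y0, maxds
    while t_end - t > 1e-12:
        ds = min(ds, t_end - t)            # do not step past the end point
        k1 = f(t, y)
        k2 = f(t + ds, y + ds * k1)
        dy1 = ds * k1                      # Euler update
        dy2 = ds * 0.5 * (k1 + k2)         # Heun (improved Euler) update
        error = abs(dy2 - dy1)
        if error < maxerror:               # accept the step
            y += dy2
            t += ds
        # recalculate the step size based on the step error
        ds = maxds if error == 0 else min(maxds, 0.85 * ds * (maxerror / error) ** 0.5)
    return y

# dy/dt = -y with y(0) = 1, so y(1) should be close to exp(-1)
print(heun_adaptive(lambda t, y: -y, 1.0, 1.0), np.exp(-1.0))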
def transect_yx(z, atr, start_yx, end_yx, interpolation='nearest'):
    """Extract 2D matrix (z) value along the line [x0,y0;x1,y1]
    Ref link: http://stackoverflow.com/questions/7878398/how-to-extract-an-arbitrary-line-of-values-from-a-numpy-array

    Parameters: z : (np.array) 2D data matrix
                atr : (dict) attribute
                start_yx : (list) y,x coordinate of start point
                end_yx : (list) y,x coordinate of end point
                interpolation : str, sampling/interpolation method, including:
                    'nearest'  - nearest neighbour, by default
                    'cubic'    - cubic interpolation
                    'bilinear' - bilinear interpolation

    Returns:    transect: (dict) containing 1D matrix:
                    'X'        - 1D np.array for X/column coordinates in float32
                    'Y'        - 1D np.array for Y/row coordinates in float32
                    'value'    - 1D np.array for z value in float32
                    'distance' - 1D np.array for distance in float32

    Example: transect = transect_yx(dem, demRsc, [10,15], [100,115])
    """
    [y0, x0] = start_yx
    [y1, x1] = end_yx

    # check
    length, width = int(atr['LENGTH']), int(atr['WIDTH'])
    if not all(0 <= i < width and 0 <= j < length
               for i, j in zip([x0, x1], [y0, y1])):
        msg = 'input start/end point is out of data coverage'
        msg += '\nstart_yx: {}'.format(start_yx)
        msg += '\nend_yx:{}'.format(end_yx)
        msg += '\ndata size: ({}, {})'.format(length, width)
        raise ValueError(msg)

    # Determine points coordinates along the line
    num_pts = int(np.hypot(x1 - x0, y1 - y0))
    ys = np.linspace(y0, y1, num_pts, dtype=np.float32)
    xs = np.linspace(x0, x1, num_pts, dtype=np.float32)

    # Extract z value along the line
    # (np.int is deprecated; use the builtin int for index casting)
    if interpolation.lower() == 'nearest':
        z_line = z[np.rint(ys).astype(int), np.rint(xs).astype(int)]
    elif interpolation.lower() == 'cubic':
        z_line = map_coordinates(z, np.vstack((ys, xs)), order=3)
    elif interpolation.lower() == 'bilinear':
        # bilinear interpolation corresponds to spline order 1
        z_line = map_coordinates(z, np.vstack((ys, xs)), order=1)
    else:
        print('Unrecognized interpolation method: ' + interpolation)
        print('Continue with nearest ...')
        z_line = z[np.rint(ys).astype(int), np.rint(xs).astype(int)]  # nearest neighbour

    # Calculate distance along the line
    earth_radius = 6.3781e6    # in meter
    if 'Y_FIRST' in atr.keys():
        [lat0, lat1] = coordinate(atr).yx2lalo([y0, y1], coord_type='y')
        lat_c = (lat0 + lat1) / 2.
        x_step = float(atr['X_STEP']) * np.pi / 180.0 * earth_radius * np.cos(lat_c * np.pi / 180)
        y_step = float(atr['Y_STEP']) * np.pi / 180.0 * earth_radius
    else:
        x_step = range_ground_resolution(atr)
        y_step = azimuth_ground_resolution(atr)
    dist_line = np.hypot((xs - x0) * x_step,
                         (ys - y0) * y_step)

    # remove points in masked out areas
    mask = ~np.isnan(z_line)
    mask *= z_line != 0.0

    # prepare output
    transect = {}
    transect['Y'] = ys[mask]
    transect['X'] = xs[mask]
    transect['value'] = z_line[mask]
    transect['distance'] = dist_line[mask]
    return transect
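# Minimal sketch of the line-profile extraction technique used in
# transect_yx(), stripped of the metadata handling: sample num_pts points
# between two pixel coordinates and interpolate the 2D array along them with
# scipy.ndimage.map_coordinates. The input array is synthetic.
import numpy as np
from scipy.ndimage import map_coordinates

z = np.fromfunction(lambda r, c: np.hypot(r - 50, c - 50), (100, 100))
(y0, x0), (y1, x1) = (10, 15), (90, 80)

num_pts = int(np.hypot(x1 - x0, y1 - y0))
ys = np.linspace(y0, y1, num_pts)
xs = np.linspace(x0, x1, num_pts)

profile = map_coordinates(z, np.vstack((ys, xs)), order=3)   # cubic sampling
print(profile.shape, profile[:3])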
def get_euclidean_dist(a, b): dx = a[0] - b[0] dy = a[1] - b[1] dist = np.hypot(dx, dy) return dist
def sobel_filter(im): sx = sp.ndimage.sobel(im, axis=0, mode='constant') sy = sp.ndimage.sobel(im, axis=1, mode='constant') sob = np.hypot(sx, sy) return sob
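# Quick usage sketch for sobel_filter() above; the square test image is
# synthetic and only meant to show the expected input and output.
import numpy as np
import scipy as sp
import scipy.ndimage   # sobel_filter() relies on sp.ndimage being importable

im = np.zeros((64, 64))
im[16:48, 16:48] = 1.0          # a bright square produces strong edges
edges = sobel_filter(im)
print(edges.shape, edges.max())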
# from mpl_toolkits.mplot3d import Axes3D
# from matplotlib import cm
# from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np

fig = plt.figure()
# fig.gca(projection='3d') is deprecated/removed in recent matplotlib;
# request the 3D projection through add_subplot instead.
ax = fig.add_subplot(projection='3d')

n = 20
x, y = 8 * np.pi * (np.indices((n, n)) - n // 2) / n
d = 0.5
z = np.sin(np.hypot(x, y - d)) + np.sin(np.hypot(x, y + d))
surf = ax.plot_surface(x, y, z, antialiased=True, color='#3BBDF7',
                       linewidth=0)
# , cmap=cm.coolwarm, antialiased=False)

# Customize the z axis.
# ax.set_zlim(0., 2.)
# ax.zaxis.set_major_locator(LinearLocator(10))
# ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))

# Add a color bar which maps values to colors.
# fig.colorbar(surf, shrink=0.5, aspect=5)

plt.show()
def streamplot(axes, x, y, u, v, density=1, linewidth=None, color=None, cmap=None, norm=None, arrowsize=1, arrowstyle='-|>', minlength=0.1, transform=None, zorder=None, start_points=None, maxlength=4.0, integration_direction='both'): """ Draw streamlines of a vector flow. Parameters ---------- x, y : 1D/2D arrays Evenly spaced strictly increasing arrays to make a grid. u, v : 2D arrays *x* and *y*-velocities. The number of rows and columns must match the length of *y* and *x*, respectively. density : float or (float, float) Controls the closeness of streamlines. When ``density = 1``, the domain is divided into a 30x30 grid. *density* linearly scales this grid. Each cell in the grid can have, at most, one traversing streamline. For different densities in each direction, use a tuple (density_x, density_y). linewidth : float or 2D array The width of the stream lines. With a 2D array the line width can be varied across the grid. The array must have the same shape as *u* and *v*. color : color or 2D array The streamline color. If given an array, its values are converted to colors using *cmap* and *norm*. The array must have the same shape as *u* and *v*. cmap : `~matplotlib.colors.Colormap` Colormap used to plot streamlines and arrows. This is only used if *color* is an array. norm : `~matplotlib.colors.Normalize` Normalize object used to scale luminance data to 0, 1. If ``None``, stretch (min, max) to (0, 1). This is only used if *color* is an array. arrowsize : float Scaling factor for the arrow size. arrowstyle : str Arrow style specification. See `~matplotlib.patches.FancyArrowPatch`. minlength : float Minimum length of streamline in axes coordinates. start_points : Nx2 array Coordinates of starting points for the streamlines in data coordinates (the same coordinates as the *x* and *y* arrays). zorder : int The zorder of the stream lines and arrows. Artists with lower zorder values are drawn first. maxlength : float Maximum length of streamline in axes coordinates. integration_direction : {'forward', 'backward', 'both'}, default: 'both' Integrate the streamline in forward, backward or both directions. Returns ------- StreamplotSet Container object with attributes - ``lines``: `.LineCollection` of streamlines - ``arrows``: `.PatchCollection` containing `.FancyArrowPatch` objects representing the arrows half-way along stream lines. This container will probably change in the future to allow changes to the colormap, alpha, etc. for both lines and arrows, but these changes should be backward compatible. """ grid = Grid(x, y) mask = StreamMask(density) dmap = DomainMap(grid, mask) if zorder is None: zorder = mlines.Line2D.zorder # default to data coordinates if transform is None: transform = axes.transData if color is None: color = axes._get_lines.get_next_color() if linewidth is None: linewidth = matplotlib.rcParams['lines.linewidth'] line_kw = {} arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize) _api.check_in_list(['both', 'forward', 'backward'], integration_direction=integration_direction) if integration_direction == 'both': maxlength /= 2. 
use_multicolor_lines = isinstance(color, np.ndarray) if use_multicolor_lines: if color.shape != grid.shape: raise ValueError("If 'color' is given, it must match the shape of " "'Grid(x, y)'") line_colors = [] color = np.ma.masked_invalid(color) else: line_kw['color'] = color arrow_kw['color'] = color if isinstance(linewidth, np.ndarray): if linewidth.shape != grid.shape: raise ValueError("If 'linewidth' is given, it must match the " "shape of 'Grid(x, y)'") line_kw['linewidth'] = [] else: line_kw['linewidth'] = linewidth arrow_kw['linewidth'] = linewidth line_kw['zorder'] = zorder arrow_kw['zorder'] = zorder # Sanity checks. if u.shape != grid.shape or v.shape != grid.shape: raise ValueError("'u' and 'v' must match the shape of 'Grid(x, y)'") u = np.ma.masked_invalid(u) v = np.ma.masked_invalid(v) integrate = get_integrator(u, v, dmap, minlength, maxlength, integration_direction) trajectories = [] if start_points is None: for xm, ym in _gen_starting_points(mask.shape): if mask[ym, xm] == 0: xg, yg = dmap.mask2grid(xm, ym) t = integrate(xg, yg) if t is not None: trajectories.append(t) else: sp2 = np.asanyarray(start_points, dtype=float).copy() # Check if start_points are outside the data boundaries for xs, ys in sp2: if not (grid.x_origin <= xs <= grid.x_origin + grid.width and grid.y_origin <= ys <= grid.y_origin + grid.height): raise ValueError("Starting point ({}, {}) outside of data " "boundaries".format(xs, ys)) # Convert start_points from data to array coords # Shift the seed points from the bottom left of the data so that # data2grid works properly. sp2[:, 0] -= grid.x_origin sp2[:, 1] -= grid.y_origin for xs, ys in sp2: xg, yg = dmap.data2grid(xs, ys) t = integrate(xg, yg) if t is not None: trajectories.append(t) if use_multicolor_lines: if norm is None: norm = mcolors.Normalize(color.min(), color.max()) if cmap is None: cmap = cm.get_cmap(matplotlib.rcParams['image.cmap']) else: cmap = cm.get_cmap(cmap) streamlines = [] arrows = [] for t in trajectories: tgx = np.array(t[0]) tgy = np.array(t[1]) # Rescale from grid-coordinates to data-coordinates. tx, ty = dmap.grid2data(*np.array(t)) tx += grid.x_origin ty += grid.y_origin points = np.transpose([tx, ty]).reshape(-1, 1, 2) streamlines.extend(np.hstack([points[:-1], points[1:]])) # Add arrows half way along each trajectory. s = np.cumsum(np.hypot(np.diff(tx), np.diff(ty))) n = np.searchsorted(s, s[-1] / 2.) arrow_tail = (tx[n], ty[n]) arrow_head = (np.mean(tx[n:n + 2]), np.mean(ty[n:n + 2])) if isinstance(linewidth, np.ndarray): line_widths = interpgrid(linewidth, tgx, tgy)[:-1] line_kw['linewidth'].extend(line_widths) arrow_kw['linewidth'] = line_widths[n] if use_multicolor_lines: color_values = interpgrid(color, tgx, tgy)[:-1] line_colors.append(color_values) arrow_kw['color'] = cmap(norm(color_values[n])) p = patches.FancyArrowPatch(arrow_tail, arrow_head, transform=transform, **arrow_kw) axes.add_patch(p) arrows.append(p) lc = mcollections.LineCollection(streamlines, transform=transform, **line_kw) lc.sticky_edges.x[:] = [grid.x_origin, grid.x_origin + grid.width] lc.sticky_edges.y[:] = [grid.y_origin, grid.y_origin + grid.height] if use_multicolor_lines: lc.set_array(np.ma.hstack(line_colors)) lc.set_cmap(cmap) lc.set_norm(norm) axes.add_collection(lc) axes.autoscale_view() ac = matplotlib.collections.PatchCollection(arrows) stream_container = StreamplotSet(lc, ac) return stream_container
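# Usage sketch for the Axes-level streamplot() above via the public
# matplotlib API (ax.streamplot wraps an equivalent implementation).
# The flow field below is synthetic and purely illustrative.
import numpy as np
import matplotlib.pyplot as plt

y_grid, x_grid = np.mgrid[-3:3:100j, -3:3:100j]
u = -1 - x_grid**2 + y_grid
v = 1 + x_grid - y_grid**2
speed = np.hypot(u, v)

fig, ax = plt.subplots()
strm = ax.streamplot(x_grid[0], y_grid[:, 0], u, v,
                     color=speed, cmap='viridis', density=1.2,
                     linewidth=1, arrowsize=1.0)
fig.colorbar(strm.lines)   # colour encodes the local flow speed
plt.show()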
def process_func(idx): # Load original image. orig_idx = fields['orig_idx'][idx] orig_file = fields['orig_file'][idx] orig_path = os.path.join(celeba_dir, 'img_celeba', orig_file) img = PIL.Image.open(orig_path) # Choose oriented crop rectangle. lm = landmarks[orig_idx] eye_avg = (lm[0] + lm[1]) * 0.5 + 0.5 mouth_avg = (lm[3] + lm[4]) * 0.5 + 0.5 eye_to_eye = lm[1] - lm[0] eye_to_mouth = mouth_avg - eye_avg x = eye_to_eye - rot90(eye_to_mouth) x /= np.hypot(*x) x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) y = rot90(x) c = eye_avg + eye_to_mouth * 0.1 quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) zoom = 1024 / (np.hypot(*x) * 2) # Shrink. shrink = int(np.floor(0.5 / zoom)) if shrink > 1: size = (int(np.round(float(img.size[0]) / shrink)), int(np.round(float(img.size[1]) / shrink))) img = img.resize(size, PIL.Image.ANTIALIAS) quad /= shrink zoom *= shrink # Crop. border = max(int(np.round(1024 * 0.1 / zoom)), 3) crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1])))) crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1])) if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]: img = img.crop(crop) quad -= crop[0:2] # Simulate super-resolution. superres = int(np.exp2(np.ceil(np.log2(zoom)))) if superres > 1: img = img.resize((img.size[0] * superres, img.size[1] * superres), PIL.Image.ANTIALIAS) quad *= superres zoom /= superres # Pad. pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1])))) pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0)) if max(pad) > border - 4: pad = np.maximum(pad, int(np.round(1024 * 0.3 / zoom))) img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect') h, w, _ = img.shape y, x, _ = np.mgrid[:h, :w, :1] mask = 1.0 - np.minimum( np.minimum(np.float32(x) / pad[0], np.float32(y) / pad[1]), np.minimum( np.float32(w - 1 - x) / pad[2], np.float32(h - 1 - y) / pad[3])) blur = 1024 * 0.02 / zoom img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0) img += (np.median(img, axis=(0, 1)) - img) * np.clip( mask, 0.0, 1.0) img = PIL.Image.fromarray(np.uint8(np.clip(np.round(img), 0, 255)), 'RGB') quad += pad[0:2] # Transform. img = img.transform((4096, 4096), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR) img = img.resize((1024, 1024), PIL.Image.ANTIALIAS) img = np.asarray(img).transpose(2, 0, 1) # Verify MD5. md5 = hashlib.md5() md5.update(img.tobytes()) assert md5.hexdigest() == fields['proc_md5'][idx] # Load delta image and original JPG. with zipfile.ZipFile( os.path.join(delta_dir, 'deltas%05d.zip' % (idx - idx % 1000)), 'r') as zip: delta_bytes = zip.read('delta%05d.dat' % idx) with open(orig_path, 'rb') as file: orig_bytes = file.read() # Decrypt delta image, using original JPG data as decryption key. 
algorithm = cryptography.hazmat.primitives.hashes.SHA256() backend = cryptography.hazmat.backends.default_backend() salt = bytes(orig_file, 'ascii') kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC( algorithm=algorithm, length=32, salt=salt, iterations=100000, backend=backend) key = base64.urlsafe_b64encode(kdf.derive(orig_bytes)) delta = np.frombuffer(bz2.decompress( cryptography.fernet.Fernet(key).decrypt(delta_bytes)), dtype=np.uint8).reshape(3, 1024, 1024) # Apply delta image. img = img + delta # Verify MD5. md5 = hashlib.md5() md5.update(img.tobytes()) assert md5.hexdigest() == fields['final_md5'][idx] return img
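# Stand-alone sketch (with made-up inputs) of the key-derivation/decryption
# pattern used above: derive a 32-byte key from some secret bytes with
# PBKDF2-HMAC-SHA256, base64-encode it for Fernet, and decrypt a token.
# The `secret` and `salt` values are placeholders, not the real pipeline data.
import base64
import cryptography.fernet
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.primitives.kdf.pbkdf2
from cryptography.hazmat.backends import default_backend

secret = b'original JPG bytes would go here'   # placeholder key material
salt = b'000001.jpg'                           # placeholder salt

kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC(
    algorithm=cryptography.hazmat.primitives.hashes.SHA256(),
    length=32, salt=salt, iterations=100000, backend=default_backend())
key = base64.urlsafe_b64encode(kdf.derive(secret))

f = cryptography.fernet.Fernet(key)
token = f.encrypt(b'payload')                  # round trip for illustration
assert f.decrypt(token) == b'payload'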
def streamplot(axes, x, y, u, v, density=1, linewidth=None, color=None, cmap=None, norm=None, arrowsize=1, arrowstyle='-|>', minlength=0.1, transform=None, zorder=None, start_points=None, maxlength=4.0, integration_direction='both'): """Draw streamlines of a vector flow. *x*, *y* : 1d arrays an *evenly spaced* grid. *u*, *v* : 2d arrays x and y-velocities. Number of rows should match length of y, and the number of columns should match x. *density* : float or 2-tuple Controls the closeness of streamlines. When `density = 1`, the domain is divided into a 30x30 grid---*density* linearly scales this grid. Each cell in the grid can have, at most, one traversing streamline. For different densities in each direction, use [density_x, density_y]. *linewidth* : numeric or 2d array vary linewidth when given a 2d array with the same shape as velocities. *color* : matplotlib color code, or 2d array Streamline color. When given an array with the same shape as velocities, *color* values are converted to colors using *cmap*. *cmap* : :class:`~matplotlib.colors.Colormap` Colormap used to plot streamlines and arrows. Only necessary when using an array input for *color*. *norm* : :class:`~matplotlib.colors.Normalize` Normalize object used to scale luminance data to 0, 1. If None, stretch (min, max) to (0, 1). Only necessary when *color* is an array. *arrowsize* : float Factor scale arrow size. *arrowstyle* : str Arrow style specification. See :class:`~matplotlib.patches.FancyArrowPatch`. *minlength* : float Minimum length of streamline in axes coordinates. *start_points*: Nx2 array Coordinates of starting points for the streamlines. In data coordinates, the same as the ``x`` and ``y`` arrays. *zorder* : int any number *maxlength* : float Maximum length of streamline in axes coordinates. *integration_direction* : ['forward', 'backward', 'both'] Integrate the streamline in forward, backward or both directions. Returns: *stream_container* : StreamplotSet Container object with attributes - lines: `matplotlib.collections.LineCollection` of streamlines - arrows: collection of `matplotlib.patches.FancyArrowPatch` objects representing arrows half-way along stream lines. This container will probably change in the future to allow changes to the colormap, alpha, etc. for both lines and arrows, but these changes should be backward compatible. """ grid = Grid(x, y) mask = StreamMask(density) dmap = DomainMap(grid, mask) if zorder is None: zorder = mlines.Line2D.zorder # default to data coordinates if transform is None: transform = axes.transData if color is None: color = axes._get_lines.get_next_color() if linewidth is None: linewidth = matplotlib.rcParams['lines.linewidth'] line_kw = {} arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize) if integration_direction not in ['both', 'forward', 'backward']: errstr = ("Integration direction '%s' not recognised. " "Expected 'both', 'forward' or 'backward'." % integration_direction) raise ValueError(errstr) if integration_direction == 'both': maxlength /= 2. 
use_multicolor_lines = isinstance(color, np.ndarray) if use_multicolor_lines: if color.shape != grid.shape: raise ValueError( "If 'color' is given, must have the shape of 'Grid(x,y)'") line_colors = [] color = np.ma.masked_invalid(color) else: line_kw['color'] = color arrow_kw['color'] = color if isinstance(linewidth, np.ndarray): if linewidth.shape != grid.shape: raise ValueError( "If 'linewidth' is given, must have the shape of 'Grid(x,y)'") line_kw['linewidth'] = [] else: line_kw['linewidth'] = linewidth arrow_kw['linewidth'] = linewidth line_kw['zorder'] = zorder arrow_kw['zorder'] = zorder ## Sanity checks. if u.shape != grid.shape or v.shape != grid.shape: raise ValueError("'u' and 'v' must be of shape 'Grid(x,y)'") u = np.ma.masked_invalid(u) v = np.ma.masked_invalid(v) integrate = get_integrator(u, v, dmap, minlength, maxlength, integration_direction) trajectories = [] if start_points is None: for xm, ym in _gen_starting_points(mask.shape): if mask[ym, xm] == 0: xg, yg = dmap.mask2grid(xm, ym) t = integrate(xg, yg) if t is not None: trajectories.append(t) else: sp2 = np.asanyarray(start_points, dtype=float).copy() # Check if start_points are outside the data boundaries for xs, ys in sp2: if not (grid.x_origin <= xs <= grid.x_origin + grid.width and grid.y_origin <= ys <= grid.y_origin + grid.height): raise ValueError("Starting point ({}, {}) outside of data " "boundaries".format(xs, ys)) # Convert start_points from data to array coords # Shift the seed points from the bottom left of the data so that # data2grid works properly. sp2[:, 0] -= grid.x_origin sp2[:, 1] -= grid.y_origin for xs, ys in sp2: xg, yg = dmap.data2grid(xs, ys) t = integrate(xg, yg) if t is not None: trajectories.append(t) if use_multicolor_lines: if norm is None: norm = mcolors.Normalize(color.min(), color.max()) if cmap is None: cmap = cm.get_cmap(matplotlib.rcParams['image.cmap']) else: cmap = cm.get_cmap(cmap) streamlines = [] arrows = [] for t in trajectories: tgx = np.array(t[0]) tgy = np.array(t[1]) # Rescale from grid-coordinates to data-coordinates. tx, ty = dmap.grid2data(*np.array(t)) tx += grid.x_origin ty += grid.y_origin points = np.transpose([tx, ty]).reshape(-1, 1, 2) streamlines.extend(np.hstack([points[:-1], points[1:]])) # Add arrows half way along each trajectory. s = np.cumsum(np.hypot(np.diff(tx), np.diff(ty))) n = np.searchsorted(s, s[-1] / 2.) arrow_tail = (tx[n], ty[n]) arrow_head = (np.mean(tx[n:n + 2]), np.mean(ty[n:n + 2])) if isinstance(linewidth, np.ndarray): line_widths = interpgrid(linewidth, tgx, tgy)[:-1] line_kw['linewidth'].extend(line_widths) arrow_kw['linewidth'] = line_widths[n] if use_multicolor_lines: color_values = interpgrid(color, tgx, tgy)[:-1] line_colors.append(color_values) arrow_kw['color'] = cmap(norm(color_values[n])) p = patches.FancyArrowPatch(arrow_tail, arrow_head, transform=transform, **arrow_kw) axes.add_patch(p) arrows.append(p) lc = mcollections.LineCollection(streamlines, transform=transform, **line_kw) lc.sticky_edges.x[:] = [grid.x_origin, grid.x_origin + grid.width] lc.sticky_edges.y[:] = [grid.y_origin, grid.y_origin + grid.height] if use_multicolor_lines: lc.set_array(np.ma.hstack(line_colors)) lc.set_cmap(cmap) lc.set_norm(norm) axes.add_collection(lc) axes.autoscale_view() ac = matplotlib.collections.PatchCollection(arrows) stream_container = StreamplotSet(lc, ac) return stream_container
from platescale_ss import Tpsfunc, Rps, Mps, Sps, Tps from petal_metrology import get_petal for petal_id in [4, #5, 6, 3, #8, 10, #11, 2, 7, #9 ]: print('Petal', petal_id) petal = get_petal(petal_id) g1x,g1y = petal.gfa_mm_to_focal_mm(petal.gfa.gif_1_mm_x, petal.gfa.gif_1_mm_y) g2x,g2y = petal.gfa_mm_to_focal_mm(petal.gfa.gif_2_mm_x, petal.gfa.gif_2_mm_y) print('Scatter in GIF1 fit positions: %.3f mm' % (np.mean(np.hypot(petal.gif1.x - g1x, petal.gif1.y - g1y)))) print('Scatter in GIF2 fit positions: %.3f mm' % (np.mean(np.hypot(petal.gif2.x - g2x, petal.gif2.y - g2y)))) # Now fit a SIP polynomial distortion model for how the echo22 optics projects onto GFA CCD pixels. # # Evaluate a grid of points in CCD space and in RA,Dec, using the metrology transformations to go from GFA CCD pixels # to focal plane coordinates, and from there to Theta and RA,Dec. x0 = min([min(petal.gfa.gif_1_pix_x), min(petal.gfa.gif_2_pix_x), 0]) - 100 y0 = min([min(petal.gfa.gif_1_pix_y), min(petal.gfa.gif_2_pix_y), 0]) - 100 x1 = max([max(petal.gfa.gif_1_pix_x), max(petal.gfa.gif_2_pix_x), petal.ccdw]) + 100 y1 = max([max(petal.gfa.gif_1_pix_y), max(petal.gfa.gif_2_pix_y), petal.ccdh]) + 100 ccdgridpx, ccdgridpy = np.meshgrid(np.linspace(x0, x1, 20), np.linspace(y0, y1, 20)) ccdgridpx = ccdgridpx.ravel() ccdgridpy = ccdgridpy.ravel()
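# Hedged sketch of the distortion-fitting step described in the comments
# above: given matched CCD pixel positions and focal-plane positions (here
# synthetic placeholders, not the real metrology), fit a low-order 2D
# polynomial mapping with ordinary least squares. The real pipeline fits a
# SIP polynomial model on top of the echo22 projection instead.
import numpy as np

rng = np.random.default_rng(1)
px = rng.uniform(0, 2048, 400)            # placeholder CCD x pixels
py = rng.uniform(0, 1032, 400)            # placeholder CCD y pixels
fx = 0.015 * px + 1e-6 * px * py + 100.0  # placeholder focal-plane x [mm]
fy = 0.015 * py - 1e-6 * px * px - 50.0   # placeholder focal-plane y [mm]

A = np.vstack([np.ones_like(px), px, py, px * px, px * py, py * py]).T
coef_x, *_ = np.linalg.lstsq(A, fx, rcond=None)
coef_y, *_ = np.linalg.lstsq(A, fy, rcond=None)
resid = np.hypot(A @ coef_x - fx, A @ coef_y - fy)
print('rms residual: %.4f mm' % np.sqrt(np.mean(resid**2)))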
def cost(self, from_node, to_node): a = from_node b = to_node v = (b[0] - a[0], b[1] - a[1]) return np.hypot(v[0], v[1])
# load image
yuv_frame = Image.open("../images/output-161.png").convert("YCbCr")
# create an array from the image
frame_asArray = np.array(yuv_frame)
# extract the Y channel:
y_frame_asArray = frame_asArray[:, :, Y]
# create an image from the Y channel for display
y_frame = Image.fromarray(y_frame_asArray, "L")
# convert the array to a signed type before filtering
y_frame_asArray = y_frame_asArray.astype('int32')

# apply Sobel filter to the Y channel
# apply Sobel for the x and y directions
sobel_frame_x = ndimage.sobel(y_frame_asArray, axis=0, mode='constant')
sobel_frame_y = ndimage.sobel(y_frame_asArray, axis=1, mode='constant')
# combine the x and y derivatives
sobel_frame = np.hypot(sobel_frame_x, sobel_frame_y)

# plot the original image, the Y component, and the Sobel filtered Y component
plt.subplot(1, 3, 1), plt.imshow(yuv_frame)
plt.title('Original Frame'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 3, 2), plt.imshow(y_frame)
plt.title('Y channel of Frame'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 3, 3), plt.imshow(sobel_frame, cmap='gray')
plt.title('Sobel filtered Y channel of Frame'), plt.xticks([]), plt.yticks([])

plt.savefig("../figures/q1_b-c-scipy.png")
plt.show()
def __init__(self, nbi=None, nshot=None, runid=''): # distances: cm; angles: rad # op: distance box-torus axis # phi_box: absolute angle of NBI box (does not matter in poloidal cross section) # inj_tor_box: toroidal angle between mid source and torus axis # inj: toroidal angle between source and box aligned # src_slit: distance ion source - aperture, for the center of the beamlines # src_hw: horizontal semi-displacement of sources # xybsca: source vertical elevation w.r.t. midplane # rabsca: source horizontal half width w.r.t. mid-point # RTCENA: R_tang # vert_incl: vertical inclination # XLBAPA: length = midpl1 / COS(vert_incl) # midpl2: Grid - P(RTCENA) projected on horizont. midplane # XLBTNA: length = midpl2 / COS(vert_incl) # XYBAPA: elevation at slit # Rectangular half aperture in point P self.rapedga = 16 self.xzpedga = 19 self.phi_box = np.array(4 * [np.radians(33.75)] + 4 * [np.radians(209)]) # Read from namelist import tr_path from parsenml import parsenml tr = tr_path.TR_PATH(runid) print('Namelist: %s' % tr.fnml) self.rtcena = parsenml(tr.fnml, 'RTCENA', fmt=5) self.xlbapa = parsenml(tr.fnml, 'XLBAPA', fmt=5) self.xybapa = parsenml(tr.fnml, 'XYBAPA', fmt=5) self.xybsca = parsenml(tr.fnml, 'XYBSCA', fmt=5) self.xlbtna = parsenml(tr.fnml, 'XLBTNA', fmt=5) self.xbzeta = parsenml(tr.fnml, 'XBZETA', fmt=5) self.src_slit = parsenml(tr.fnml, 'FOCLRA', fmt=5) self.ffulla = parsenml(tr.fnml, 'FFULLA', fmt=5) self.fhalfa = parsenml(tr.fnml, 'FHALFA', fmt=5) self.einja = parsenml(tr.fnml, 'EINJA', fmt=5) # Derived TRANSP variables self.theta_los = np.arcsin((self.xybapa - self.xybsca) / self.xlbapa) self.vert_incl = np.abs(self.theta_los) self.midpl2 = self.xlbtna * np.cos(self.vert_incl) self.midpl1 = self.xlbapa * np.cos(self.vert_incl) self.op = np.hypot((self.midpl2 - self.midpl1), self.rtcena) self.alpha = np.arcsin(self.rtcena / self.op) self.xsrc, self.ysrc, self.phi_los = tr2xy(self.rtcena, self.midpl2, \ self.alpha, self.phi_box) self.magic_angle = np.arccos(self.src_slit / self.midpl1) # missing sign self.rabsca = self.midpl1 * np.sin(self.magic_angle) # missing sign self.inj_tor_box = self.alpha - self.magic_angle
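# Small numeric check (with invented numbers, not actual TRANSP namelist
# values) that simply mirrors the op/alpha relations used above: op from the
# two midplane-projected distances and the tangency radius, and alpha from
# arcsin(R_tang / op).
import numpy as np

rtcena = 93.0          # hypothetical tangency radius R_tang [cm]
midpl1 = 650.0         # hypothetical aperture distance projected on the midplane [cm]
midpl2 = 840.0         # hypothetical grid - P(R_tang) distance on the midplane [cm]

op = np.hypot(midpl2 - midpl1, rtcena)
alpha = np.arcsin(rtcena / op)
print('op = %.1f cm, alpha = %.2f deg' % (op, np.degrees(alpha)))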
def f(x, y): s = np.hypot(x, y) phi = np.arctan2(y, x) tau = s + s * (1 - s) / 5 * np.sin(6 * phi) return 5 * (1 - tau) + tau
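# Usage sketch: f(x, y) above is the kind of smooth test function commonly
# used to demonstrate scattered-data interpolation. Here it is sampled at
# random points and regridded with scipy.interpolate.griddata; the point
# count and grid resolution are arbitrary demo values.
import numpy as np
from scipy.interpolate import griddata

rng = np.random.default_rng(42)
points = rng.random((1000, 2))
values = f(points[:, 0], points[:, 1])

grid_x, grid_y = np.mgrid[0:1:100j, 0:1:100j]
grid_z = griddata(points, values, (grid_x, grid_y), method='cubic')
print(grid_z.shape)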
gc.collect() ## Make Gaia subset of useful objects: need_srcs = 3 useful_ids = [kk for kk,vv in gcounter.items() if vv>need_srcs] use_gaia = gm._srcdata[gm._srcdata.source_id.isin(useful_ids)] n_useful = len(use_gaia) sys.stderr.write("Found possible matches to %d of %d Gaia sources.\n" % (n_useful, len(gm._srcdata))) gc.collect() if n_useful < 5: sys.stderr.write("Gaia match error: found %d useful objects\n" % n_useful) sys.exit(1) ## Total Gaia-detected PM in surviving object set: use_gaia = use_gaia.assign(pmtot=np.hypot(use_gaia.pmra, use_gaia.pmdec)) gaia_pmsrt = use_gaia.sort_values(by='pmtot', ascending=False) #for nmin in range(100): # passing = [kk for kk,vv in gcounter.items() if vv>nmin] # nkept = len(passing) # sys.stderr.write("Kept %d sources for nmin=%d.\n" % (nkept, nmin)) ## Robust (non-double-counted) matching of Gaia sources using slimmed list: sys.stderr.write("Associating catalog objects with Gaia sources:\n") tik = time.time() gmatches = {x:[] for x in use_gaia.source_id} for ci,extcat in enumerate(cdata, 1): #for ci,extcat in enumerate(cdata[:10], 1): #sys.stderr.write("\n------------------------------\n") sys.stderr.write("\rChecking image %d of %d ... " % (ci, len(cdata)))