def gint(self, axis=-1):
    """ Area integral of a field over a given axis. """
    # If axis size is one, return itself
    sh = self.data.shape
    if sh[axis] == 1:
        return self
    r = 6380e3
    X = sp.deg2rad(self.grid['lon'])
    Y = sp.deg2rad(self.grid['lat'])
    X *= sp.cos(Y)
    x = [dates.date2num(self.time), self.grid['lev'],
         r * Y[sp.newaxis, sp.newaxis, :], r * X[sp.newaxis, sp.newaxis, :]]
    y = self.data.view(sp.ma.MaskedArray)
    # New dimensions
    newsh = list(sh)
    newsh[axis] = 1
    # New grid and time
    ind = [slice(None)] * 4
    ind[axis] = slice(0, 1)
    time = self.time[ind[0]]
    g = self.grid.subset(kind=ind[1], jind=ind[2], iind=ind[3])
    data = utl.gint(y, x[axis], axis)
    data = data.reshape(newsh)
    return Field(data, time, g, self.name)
def test_calc_kernel_f2_plot(self):
    f2 = self.brdf.calc_kernel_f2(scipy.deg2rad(45), self.sensor_zenith,
                                  self.relative_azimuth)
    pylab.plot(self.relative_azimuth, f2)
    f2 = self.brdf.calc_kernel_f2(scipy.deg2rad(30), self.sensor_zenith,
                                  self.relative_azimuth)
    pylab.plot(self.relative_azimuth, f2)
    f2 = self.brdf.calc_kernel_f2(scipy.deg2rad(60), self.sensor_zenith,
                                  self.relative_azimuth)
    pylab.plot(self.relative_azimuth, f2)
    pylab.show()
def Hillshade_Smooth(RasterData, altitude, azimuth, z_factor):
    """Plots a Hillshade a la LSDRaster"""
    zenith_rad = sp.deg2rad(altitude)
    azimuth_rad = sp.deg2rad(azimuth)
def getangle(A, B):
    """
    When A and B are two angles around the clock, returns the angle of the
    line that connects them.
    """
    x = array([cos(deg2rad(A)), sin(deg2rad(A))])
    y = array([cos(deg2rad(B)), sin(deg2rad(B))])
    d = y - x
    return rad2deg(math.atan2(d[1], d[0]))
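# Usage sketch for getangle (an assumption: the snippet's star imports, e.g.
# `from numpy import array, cos, sin, deg2rad, rad2deg` plus `import math`).
# The chord direction stays well defined even when the angles wrap past 0/360:
# for A=350 and B=10 the connecting line points straight "up" the circle.
print(getangle(350, 10))  # ~90.0
print(getangle(0, 90))    # ~135.0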
def cylinders(shape: List[int], radius: int, nfibers: int,
              phi_max: float = 0, theta_max: float = 90):
    r"""
    Generates a binary image of overlapping cylinders.  This is a good
    approximation of a fibrous mat.

    Parameters
    ----------
    phi_max : scalar
        A value between 0 and 90 that controls the amount that the fibers
        lie out of the XY plane, with 0 meaning all fibers lie in the XY
        plane, and 90 meaning that fibers are randomly oriented out of the
        plane by as much as +/- 90 degrees.
    theta_max : scalar
        A value between 0 and 90 that controls the amount of rotation in the
        XY plane, with 0 meaning all fibers point in the X-direction, and
        90 meaning they are randomly rotated about the Z axis by as much
        as +/- 90 degrees.

    Returns
    -------
    image : ND-array
        A boolean array with ``True`` values denoting the pore space
    """
    shape = sp.array(shape)
    if sp.size(shape) == 1:
        shape = sp.full((3, ), int(shape))
    elif sp.size(shape) == 2:
        raise Exception("2D fibers don't make sense")
    im = sp.zeros(shape)
    R = sp.sqrt(sp.sum(sp.square(shape)))
    n = 0
    while n < nfibers:
        x = sp.rand(3) * shape
        phi = sp.deg2rad(90 + 90 * (0.5 - sp.rand()) * phi_max / 90)
        theta = sp.deg2rad(180 - 90 * (0.5 - sp.rand()) * 2 * theta_max / 90)
        X0 = R * sp.array([sp.sin(theta) * sp.cos(phi),
                           sp.sin(theta) * sp.sin(phi),
                           sp.cos(theta)])
        [X0, X1] = [X0 + x, -X0 + x]
        crds = line_segment(X0, X1)
        lower = ~sp.any(sp.vstack(crds).T < [0, 0, 0], axis=1)
        upper = ~sp.any(sp.vstack(crds).T >= shape, axis=1)
        valid = upper * lower
        if sp.any(valid):
            im[crds[0][valid], crds[1][valid], crds[2][valid]] = 1
            n += 1
    im = sp.array(im, dtype=bool)
    dt = spim.distance_transform_edt(~im) < radius
    return ~dt
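# Hypothetical usage sketch for cylinders. It assumes the same context as the
# snippet above: a `line_segment` helper, a legacy `scipy as sp` that exposes
# the numpy namespace, and `scipy.ndimage as spim`.
im = cylinders(shape=[100, 100, 100], radius=3, nfibers=20, phi_max=5)
porosity = im.sum() / im.size  # fraction of True (pore) voxels
print(porosity)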
def decodeMessageSensorUDP(self, msg):
    """ This is used to decode messages from the sensorUDP application from
    the Android market. The orientation field was used first, but its
    conventions were unclear, so now the acceleration and magnetic vectors
    should be used."""
    data = msg.split(', ')
    if data[0] == 'G':
        # This is a GPS message
        time = decimalstr2float(data[2])
        latitude_deg = decimalstr2float(data[3])
        longitude_deg = decimalstr2float(data[4])
        altitude = decimalstr2float(data[5])
        hdop = decimalstr2float(data[7])  # Horizontal dilution of precision
        vdop = decimalstr2float(data[8])  # Vertical dilution of precision
        print time, latitude_deg, longitude_deg, altitude, hdop, vdop
    if data[0] == 'O':
        # \note This is no longer used, as the orientation conventions were unclear
        # 'O, 146, 1366575961732, 230,1182404, -075,2031250, 001,7968750'
        [u, u,           # data not used
         heading_deg,    # pointing direction of top of phone
         roll_deg,       # around horizontal axis, positive clockwise [-180:180]
         pitch_deg] = decimalstr2float(data[1:])  # around vertical axis [-90:90]
        elevation_deg = -sp.rad2deg(sp.arctan2(
            sp.cos(sp.deg2rad(pitch_deg)) * sp.cos(sp.deg2rad(roll_deg)),
            sp.sqrt(1 + sp.cos(sp.deg2rad(roll_deg))**2
                    * (sp.sin(sp.deg2rad(pitch_deg))**2 - 1))))  # positive up
        inclinaison_deg = pitch_deg  # positive clockwise
        print heading_deg, roll_deg, pitch_deg, elevation_deg, inclinaison_deg
    if data[0] == 'A':
        # Accelerometer data
        # Index and sign are adjusted to obtain x through the screen, and z down
        deltaT = decimalstr2float(data[2]) / 1000 - self.time_acceleration
        if self.filterTimeConstant == 0.0:
            alpha = 1
        else:
            alpha = 1 - sp.exp(-deltaT / self.filterTimeConstant)
        self.time_acceleration = decimalstr2float(data[2]) / 1000
        self.acceleration_raw[0] = decimalstr2float(data[3])
        self.acceleration_raw[1] = decimalstr2float(data[4])
        self.acceleration_raw[2] = decimalstr2float(data[5])
        # Filter the data
        self.acceleration_filtered += alpha * (sp.array(self.acceleration_raw)
                                               - self.acceleration_filtered)
    if data[0] == 'M':
        # Magnetometer data
        # Index and sign are adjusted to obtain x through the screen, and z down
        deltaT = decimalstr2float(data[2]) / 1000 - self.time_magnetic
        if self.filterTimeConstant == 0.0:
            alpha = 1
        else:
            alpha = 1 - sp.exp(-deltaT / self.filterTimeConstant)
        self.time_magnetic = decimalstr2float(data[2]) / 1000
        self.magnetic_raw[0] = decimalstr2float(data[3])
        self.magnetic_raw[1] = decimalstr2float(data[4])
        self.magnetic_raw[2] = -decimalstr2float(data[5])  # Adapt to a bug in sensorUDP?
        # Filter the data
        self.magnetic_filtered += alpha * (sp.array(self.magnetic_raw)
                                           - self.magnetic_filtered)
def setUp(self):
    self.test_file = '/home/marrabld/projects/DIMITRI_2.0/Input/Site_Libya4/MERIS/Proc_3rd_Reprocessing/MERIS_TOA_REF.dat'
    tmp_dict = libdimitripy.ingest.DimitriFiles.read_dimitri_sav_file(
        self.test_file, 'MERIS')
    # Test that we can use the dictionary to make a DimitriObject
    self.test_object = libdimitripy.base.DimitriObject(tmp_dict)
    self.test_band = 4
    self.brdf = libdimitripy.brdf.RoujeanBRDF()
    self.relative_azimuth = scipy.deg2rad(scipy.linspace(-90, 90, 180))
    self.sensor_zenith = scipy.deg2rad(30)
def geoidheight(lat, lon):
    """
    Calculate geoid height using the EGM96 Geopotential Model.

    Parameters
    ----------
    lat : array_like
        Lateral coordinates [degrees]. Values must be -90 <= lat <= 90.
    lon : array_like
        Longitudinal coordinates [degrees]. Values must be 0 <= lon <= 360.

    Returns
    -------
    out : array_like
        Geoidheight [meters]

    Examples
    --------
    >>> geoidheight(30, 20)
    25.829999999999995

    >>> geoidheight([30, 20], [40, 20])
    [9.800000000000002, 14.43]
    """
    global _EGM96

    # convert the input values to arrays
    itype, lat = to_ndarray(lat)
    itype, lon = to_ndarray(lon)

    if lat.shape != lon.shape:
        raise AerotbxValueError("Inputs must contain equal number of values.")

    if (lat < -90).any() or (lat > 90).any() or not sp.isreal(lat).all():
        raise AerotbxValueError("Lateral coordinates must be real numbers "
                                "between -90 and 90 degrees.")

    if (lon < 0).any() or (lon > 360).any() or not sp.isreal(lon).all():
        raise AerotbxValueError("Longitudinal coordinates must be real numbers "
                                "between 0 and 360 degrees.")

    # if the model is not loaded, do so
    if _EGM96 is None:
        _EGM96 = _loadEGM96()

    # shift lateral values to the right reference and flatten coordinates
    lats = sp.deg2rad(-lat + 90).ravel()
    lons = sp.deg2rad(lon).ravel()

    # evaluate the spline and reshape the result
    evl = _EGM96.ev(lats, lons).reshape(lat.shape)

    return from_ndarray(itype, evl)
def geodetic2ecef(lat, lon, alt, degrees=True):
    """geodetic2ecef(lat, lon, alt)  [deg][deg][m]
    Convert geodetic coordinates to ECEF."""
    if degrees:
        lat = deg2rad(lat)
        lon = deg2rad(lon)
    # Prime vertical radius of curvature; note the squared sine:
    # N = a / sqrt(1 - e^2 * sin(lat)^2)
    xi = sqrt(1 - esq * sin(lat) ** 2)
    x = (a / xi + alt) * cos(lat) * cos(lon)
    y = (a / xi + alt) * cos(lat) * sin(lon)
    z = (a / xi * (1 - esq) + alt) * sin(lat)
    return x, y, z
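# Usage sketch for geodetic2ecef. The function relies on module-level WGS84
# constants `a` (semi-major axis) and `esq` (first eccentricity squared), and
# on numpy's sqrt/sin/cos/deg2rad being in scope; both are assumptions about
# its surrounding module and are supplied here to make the sketch standalone.
# A point on the equator at sea level should map to (a, 0, 0).
from numpy import sqrt, sin, cos, deg2rad

a = 6378137.0            # WGS84 semi-major axis [m]
esq = 6.69437999014e-3   # WGS84 first eccentricity squared

print(geodetic2ecef(0.0, 0.0, 0.0))  # -> (6378137.0, 0.0, 0.0)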
def callback(msg):
    pcloud = PointCloud2()
    pcloud.header.frame_id = '/radar'
    cloud = []
    for ii in range(0, 64):
        x = msg.range[ii] * cos(deg2rad(msg.angle[ii]))
        y = msg.range[ii] * sin(deg2rad(msg.angle[ii]))
        z = 0.0
        cloud.append([x, y, z])
    pcloud = pc2.create_cloud_xyz32(pcloud.header, cloud)
    pc_pub.publish(pcloud)
def pca(self, keep=None, center=False, weight=True):
    '''
    Performs principal component analysis on the data field and stores a
    PCA object. Please remove climatology, detrend the data etc. before
    calling this method. If center=True, the PCA object will center the
    data using its mean and standard deviation. If weight=True, the data
    are multiplied by area weights.
    '''
    nt, km, jm, im = self.data.shape
    # multiply data by area factor, reshape, return matrix
    if weight:
        factor = sp.cos(sp.deg2rad(self.grid['lat']))
        factor[factor < 0.] = 0.
        factor = sp.sqrt(factor)
    else:
        factor = sp.ones(self.grid['lat'].shape)
    mask = sp.ma.getmaskarray(self.data).copy()
    self.data[mask] = 0.0
    self.data *= factor[sp.newaxis, sp.newaxis]
    X = self.data.reshape((nt, km * jm * im)).view(sp.ndarray)
    self._pc = pca.PCA(X, center=center, keep=keep)
    self.data /= factor[sp.newaxis, sp.newaxis]
    self.data[mask] = self.data.fill_value
    self.data.mask = mask
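# Why sqrt(cos(lat))? PCA works on the covariance matrix X^T X, so weighting
# each grid point by sqrt(cos(lat)) makes every covariance entry scale with
# cos(lat), i.e. with the true area of the grid cell. A minimal standalone
# check with numpy (an assumption: this mirrors the weighting in pca() above):
import numpy as np

lat = np.array([0., 45., 80.])
w = np.sqrt(np.cos(np.deg2rad(lat)))
print(w ** 2)  # -> cos(lat): the effective area weight in the covariance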
def gradient_patchset(patchset, **kwargs):
    ax = patchset[0].axes
    extremes = [rect.get_extents().transformed(ax.transData.inverted()).extents
                for rect in patchset]
    xmin_etr = min(i[0] for i in extremes)
    xmax_etr = max(i[2] for i in extremes)
    ymin_etr = min(i[1] for i in extremes)
    ymax_etr = max(i[3] for i in extremes)
    direction = multiget(kwargs, ['direction', 'dir'], 0)
    try:
        dir2rad = scipy.deg2rad(1. * direction)
        xmean = (xmax_etr + xmin_etr) / 2.
        ymean = (ymax_etr + ymin_etr) / 2.
        xampl = (xmax_etr - xmin_etr) / 2.
        yampl = (ymax_etr - ymin_etr) / 2.
        dir_func = lambda x, y: (((x - xmean) / xampl) * np.cos(dir2rad)
                                 + ((y - ymean) / yampl) * np.sin(dir2rad))
    except TypeError:
        dir_func = direction
    yy, xx = np.ogrid[ymin_etr:ymax_etr:101j, xmin_etr:xmax_etr:101j]
    z = dir_func(xx, yy)
    cmin, cmax = np.min(z), np.max(z)
    kwargs.update(direction=dir_func, cmax=cmax, cmin=cmin)
    imgs = []
    for (xmin, ymin, xmax, ymax), rect in zip(extremes, patchset):
        im = patch_gradient(rect, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax,
                            **kwargs)
        imgs.append(im)
    return imgs
def rotate(self, roll=0, tilt=0, yaw=0):
    """
    Rotate the scatterer in space.

    Parameters
    ----------
    roll, tilt, yaw : float
        Angles (in degrees) by which to rotate the scatterer. The
        rotations are applied in order (roll, then tilt, then yaw).
    """
    tilt, roll, yaw = sp.deg2rad(tilt), sp.deg2rad(roll), sp.deg2rad(yaw)
    Rx = sp.matrix([[1, 0, 0],
                    [0, sp.cos(roll), -sp.sin(roll)],
                    [0, sp.sin(roll), sp.cos(roll)]])
    Ry = sp.matrix([[sp.cos(tilt), 0, sp.sin(tilt)],
                    [0, 1, 0],
                    [-sp.sin(tilt), 0, sp.cos(tilt)]])
    Rz = sp.matrix([[sp.cos(yaw), -sp.sin(yaw), 0],
                    [sp.sin(yaw), sp.cos(yaw), 0],
                    [0, 0, 1]])
    R = Rz * Ry * Rx
    self.cum_rotation = R * self.cum_rotation
    for i in range(self.r.shape[1]):
        self.r[:, i] = R * self.r[:, i]
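# Quick sanity check for the rotation convention above; a sketch using plain
# numpy arrays instead of the deprecated sp.matrix. A rotation matrix must be
# orthonormal, so R @ R.T is the identity for any angle.
import numpy as np

def rot_z(yaw_deg):
    y = np.deg2rad(yaw_deg)
    return np.array([[np.cos(y), -np.sin(y), 0],
                     [np.sin(y),  np.cos(y), 0],
                     [0, 0, 1]])

R = rot_z(30)
print(np.allclose(R @ R.T, np.eye(3)))  # True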
def test_healpix_sphere(self):
    # Sphere parameters.
    R = 5
    # Expected outputs of healpix_sphere() applied to inputs.
    if RADIANS:
        sigma_a = sqrt(3 - 3 * sin(a[1]))
    else:
        sigma_a = sqrt(3 - 3 * sin(deg2rad(a[1])))
    ha = (pi / 4 * (1 - sigma_a), pi / 4 * (2 - sigma_a))
    hb = (ha[0], -ha[1])
    healpix_sphere_outputs = [(0, 0), (0, pi / 4), (0, -pi / 4), (pi / 2, 0),
                              (-pi / 2, 0), (-pi, 0), (-3 * pi / 4, pi / 2),
                              (-3 * pi / 4, -pi / 2), ha, hb]
    healpix_sphere_outputs = [tuple(R * array(p))
                              for p in healpix_sphere_outputs]
    # Forward projection should be correct on test points.
    f = Proj(proj='healpix', R=R)
    given = inputs
    get = [f(*p, radians=RADIANS) for p in given]
    expect = healpix_sphere_outputs
    # Fuzz to allow for rounding errors:
    error = 1e-12
    print()
    print('=' * 80)
    print('HEALPix forward projection, sphere with radius R = %s' % R)
    print('input (radians) / expected output (meters) / received output')
    print('=' * 80)
    for i in range(len(get)):
        print(given[i], expect[i], get[i])
        self.assertTrue(rel_err(get[i], expect[i]) < error)
    # Inverse of projection of a point p should yield p.
    given = get
    get = [f(*q, radians=RADIANS, inverse=True) for q in given]
    expect = inputs
    print('=' * 80)
    print('HEALPix inverse projection, sphere with radius R = %s' % R)
    print('input (meters) / expected output (radians) / received output')
    print('=' * 80)
    for i in range(len(get)):
        print(given[i], expect[i], get[i])
        self.assertTrue(rel_err(get[i], expect[i]) < error)
    # Inverse projection of p below should return a longitude of -pi.
    # Previously it was returning a number slightly less than pi
    # because of a rounding error, which got magnified by wrap_longitude().
    p = R * array((-7 * pi / 8, 3 * pi / 8))
    get = f(*p, radians=RADIANS, inverse=True)
    p1 = arcsin(1 - 1.0 / 12)
    if not RADIANS:
        p1 = rad2deg(p1)
    expect = (-PI, p1)
    self.assertTrue(rel_err(get, expect) < error)
def plot_angular_distribution():
    """
    Plot the distribution of quasars in ra and dec for real data and for
    simulated data
    """
    ra_cen = 213.704
    dec_cen = 53.083

    # real and simulated data
    real_file = helion_path + 'lya_forest/data/data_delta_transmission_RMplate.fits'
    # real_file = helion_path + 'lya_forest/saclay/v4.4/v4.4.0_delta_transmission_RM.fits.gz'
    simul_file = helion_path + 'lya_forest/london/v6.0/v6.0.0_delta_transmission_RM.fits.gz'
    # simul_file = helion_path + 'lya_forest/saclay/v4.4/v4.4.0_delta_transmission_RM.fits.gz'
    rd = tau_class.TauClass(real_file)
    sd = tau_class.TauClass(simul_file)

    tb = fitsio.read(helion_path + 'picca_run_delta/Catalogs/RM-qso.fits', 1)

    fig, ax = plt.subplots(1, figsize=(7, 6.7))
    stretch = sp.cos(sp.deg2rad(dec_cen))
    ax.scatter((tb['RA'] - ra_cen) * stretch, tb['DEC'] - dec_cen, marker='+',
               c='k', alpha=0.6, label=r'$\mathrm{All\ RM\ QSOs}$')
    ax.scatter((rd.q_loc[:, 0] - ra_cen) * stretch, rd.q_loc[:, 1] - dec_cen,
               marker='o', c='r', alpha=0.6, label=r'$\mathrm{QSOs\ used}$')
    ax.scatter((sd.q_loc[:, 0] - ra_cen) * stretch, sd.q_loc[:, 1] - dec_cen,
               marker='o', c='g', alpha=0.6, label=r'$\mathrm{Simulation\ 0}$')
    circ = Circle([0, 0], 1.5, facecolor='none', edgecolor='k')
    ax.add_patch(circ)

    title_str = (r'$\mathrm{RA}_{cen} = %.2f\ \mathrm{deg},'
                 r'\mathrm{DEC}_{cen} = %.2f\ \mathrm{deg}$')
    plt.title(title_str % (ra_cen, dec_cen))
    plt.xlabel(r'$\mathrm{RA\ offset\ [deg]}$')
    plt.ylabel(r'$\mathrm{DEC\ offset\ [deg]}$')
    plt.legend(loc='upper right')
    plt.tight_layout()
    # plt.savefig('ra_dec_dist.pdf')
    plt.show()
def make_initial_catalogue(path):
    from pywindow.catalogue import sky_to_cartesian
    rng = scipy.random.RandomState(seed=42)
    rmin, rmax = 2000., 3000.
    size = 100000
    catalogue = Catalogue()
    distance = rng.uniform(rmin, rmax, size=size)
    ramin, ramax, decmin, decmax = 0., 30., -15., 15.
    # Draw dec uniformly on the sphere: sample sin(dec) uniformly, not dec
    u1, u2 = rng.uniform(size=(2, size))
    cmin = scipy.sin(scipy.deg2rad(decmin))
    cmax = scipy.sin(scipy.deg2rad(decmax))
    ra = ramin + u1 * (ramax - ramin)
    dec = 90. - scipy.rad2deg(scipy.arccos(cmin + u2 * (cmax - cmin)))
    catalogue['Position'] = sky_to_cartesian(distance, ra, dec)
    catalogue['Weight'] = catalogue.ones()
    decfrac = scipy.diff(scipy.sin(scipy.deg2rad([decmin, decmax])), axis=0)
    rafrac = scipy.diff(scipy.deg2rad([ramin, ramax]), axis=0)
    area = decfrac * rafrac
    catalogue['NZ'] = catalogue['Weight'].sum() / (area * (rmax - rmin)) / distance**2
    catalogue.to_fits(path)
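# Why sample sin(dec)? Sky area per unit dec scales with cos(dec), so drawing
# dec uniformly would over-populate the poles. Drawing u uniformly in
# [sin(decmin), sin(decmax)] and taking dec = arcsin(u) gives points uniform
# on the sphere; this is equivalent to the arccos form above. A minimal
# standalone numpy check (an assumption: it mirrors the sampling used above):
import numpy as np

rng = np.random.default_rng(42)
u = rng.uniform(np.sin(np.deg2rad(-15.)), np.sin(np.deg2rad(15.)), 100000)
dec = np.rad2deg(np.arcsin(u))
# Histogram counts should now track cos(dec) rather than being flat.
counts, edges = np.histogram(dec, bins=10)
print(counts / counts.max())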
def get_angles(jsonresults):
    """
    Extract the viewing angle information from the JSON object returned
    from the database

    :param jsonresults: JSON formatted string result from database query

    :returns: sun zenith angle, sensor zenith angle, relative azimuth angle
        (in radians)
    """
    angles = {}
    for angle in ('SZA', 'SAA', 'VZA', 'VAA'):
        angles[angle] = np.array([entry[angle] for entry in jsonresults])

    sun_zenith = scipy.deg2rad(angles['SZA'])
    sensor_zenith = scipy.deg2rad(angles['VZA'])

    # Relative azimuth angle, should be in range 0-180
    relative_azimuth = np.abs(scipy.deg2rad(angles['SAA'])
                              - scipy.deg2rad(angles['VAA']))
    fix = relative_azimuth > scipy.pi
    relative_azimuth[fix] = 2 * scipy.pi - relative_azimuth[fix]
    return sun_zenith, sensor_zenith, relative_azimuth
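# Worked example of the azimuth wrap above: with a solar azimuth of 350 deg
# and a view azimuth of 10 deg, |350 - 10| = 340 deg, but the physical
# relative azimuth is only 20 deg, which the `> pi` branch recovers.
import numpy as np

raa = np.abs(np.deg2rad(350.0) - np.deg2rad(10.0))  # ~5.93 rad (340 deg)
if raa > np.pi:
    raa = 2 * np.pi - raa
print(np.rad2deg(raa))  # -> 20.0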
def Rotate_Face(tilt):
    """
    ##########################################################
    #  Rotate_Face                                           #
    #  ################                                      #
    #  Inputs:  tilt (elevation and azimuth angles, in deg)  #
    #                                                        #
    #  Outputs: R_GF (Trans. matrix from Geodetic to Face)   #
    #           R_FG (Trans. matrix from Face to Geodetic)   #
    #                                                        #
    #  Summary: This helper function defines a transformation#
    #           matrix to convert x,y,z coordinates in face  #
    #           relative coordinates to geodetic relative    #
    #           x,y,z coordinates, or vice versa.            #
    #                                                        #
    ##########################################################
    """
    AZ_ROT = scipy.deg2rad(tilt[1])
    E_TILT = scipy.deg2rad(tilt[0])
    # R_FG1 describes step 1 (transform xyz geodetic to a new xyz)
    R_FG1 = scipy.array([[scipy.cos(AZ_ROT), scipy.sin(AZ_ROT), 0],
                         [-scipy.sin(AZ_ROT), scipy.cos(AZ_ROT), 0],
                         [0, 0, 1]])
    # R_FG2 describes step 2 (transform new xyz to xyz face relative)
    R_FG2 = scipy.array([[scipy.cos(E_TILT), 0, scipy.sin(E_TILT)],
                         [0, 1, 0],
                         [-scipy.sin(E_TILT), 0, scipy.cos(E_TILT)]])
    # R_FG is the total transformation
    R_FG = R_FG1.dot(R_FG2)
    # R_FG is the inverse of the R_GF transformation and
    # defines a face to geodetic transformation
    R_GF = scipy.linalg.inv(R_FG)
    return R_GF, R_FG
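# Usage sketch for Rotate_Face (an assumption: `import scipy` with the legacy
# numpy namespace plus `import scipy.linalg`, as in the snippet above). Since
# both matrices are rotations, R_GF and R_FG must be inverses: their product
# is the identity.
import numpy as np

R_GF, R_FG = Rotate_Face(tilt=(30.0, 45.0))  # (elevation, azimuth) in deg
print(np.allclose(R_GF.dot(R_FG), np.eye(3)))  # True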
def rotation_transform_center(image, angle, center_xy=None):
    """
    This function returns the transformation matrix for a rotation of a
    given image by a given angle around a given center.
    The operation is implemented by the following steps to avoid unwanted
    translational side effects:
        1.) Translate the image to the rotation center
        2.) Rotate the image by the given angle
        3.) Translate the image back to its original translatory position

    image.......ndarray
    angle.......angle in degrees
    center_xy...coordinates of the rotation center in image coordinates
    """
    # If no rotation center is defined, use the center of the image
    if center_xy is None:
        cols, rows = image.shape[:2]
        center_xy = sp.array((rows, cols)) / 2. - 0.5
    # Calculate transformation matrices
    tform1 = transform.AffineTransform(translation=-center_xy)
    tform2 = transform.AffineTransform(rotation=sp.deg2rad(angle))
    tform3 = transform.AffineTransform(translation=center_xy)
    # Return transformation matrix
    return tform1 + tform2 + tform3
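# Usage sketch: scikit-image AffineTransforms compose with `+`, and the result
# can be handed to transform.warp, which expects the inverse map (hence
# `.inverse`). Assumes `import scipy as sp` and scikit-image as in the
# snippet above.
from skimage import data, transform

img = data.camera()
tf = rotation_transform_center(img, 30)
rotated = transform.warp(img, tf.inverse)  # rotated 30 deg about the center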
def get_avg_sep():
    real = tau_class.TauClass('../../data/delta-2.fits.gz')
    real.get_data(skewers_perc=1., ylabel='DELTA')
    avg_r = real.pixel_data[:, 2].mean()
    print("Average distance is {}".format(avg_r))

    # q_loc of the real data is assumed to be in radians already, while the
    # simulation catalogue below stores degrees (hence the deg2rad there)
    ra, dec = real.q_loc[:, 0], real.q_loc[:, 1]
    delta_ra = sp.abs(ra - ra[:, None])
    delta_dec = sp.abs(dec - dec[:, None])
    # haversine formula for the angular separation
    angle = 2 * sp.arcsin(
        sp.sqrt(
            sp.sin(delta_dec / 2.)**2 +
            sp.cos(dec) * sp.cos(dec[:, None]) * sp.sin(delta_ra / 2.)**2))
    # remove self-distances
    sp.fill_diagonal(angle, sp.inf)
    min_angle = angle.min(0) * 180 / sp.pi
    avg_angle = min_angle.mean()

    p_cmd = "Average distance is {} h^-1 Mpc \n".format(avg_r)
    p_cmd += "Average angle is {} degree \n".format(avg_angle)
    avg_sep = avg_angle * avg_r * sp.pi / 180
    p_cmd += "Average transverse separation is {} h^-1 Mpc \n".format(avg_sep)
    print(p_cmd)

    # *************************************************************************
    simul = tau_class.TauClass('../../data/simulations/'
                               'v6.0.1_delta_transmission_RMplate.fits')
    simul.get_data(skewers_perc=1.)
    avg_r = simul.pixel_data[:, 2].mean()
    print("Average distance is {}".format(avg_r))

    ra, dec = sp.deg2rad(simul.q_loc[:, 0]), sp.deg2rad(simul.q_loc[:, 1])
    delta_ra = sp.abs(ra - ra[:, None])
    delta_dec = sp.abs(dec - dec[:, None])
    angle = 2 * sp.arcsin(
        sp.sqrt(
            sp.sin(delta_dec / 2.)**2 +
            sp.cos(dec) * sp.cos(dec[:, None]) * sp.sin(delta_ra / 2.)**2))
    # remove self-distances
    sp.fill_diagonal(angle, sp.inf)
    min_angle = angle.min(0) * 180 / sp.pi
    avg_angle = min_angle.mean()

    p_cmd = "Average distance is {} h^-1 Mpc \n".format(avg_r)
    p_cmd += "Average angle is {} degree \n".format(avg_angle)
    avg_sep = avg_angle * avg_r * sp.pi / 180
    p_cmd += "Average transverse separation is {} h^-1 Mpc \n".format(avg_sep)
    print(p_cmd)
def flowprandtlmeyer(**flow):
    """
    Prandtl-Meyer function for expansion waves.

    This function accepts a given set of specific heat ratios and
    an input of either Mach number, Mach angle or Prandtl-Meyer angle.
    Inputs can be a single scalar or an array_like data structure.

    Parameters
    ----------
    gamma : array_like, optional
        Specific heat ratio. Values must be greater than 1.
    M : array_like
        Mach number. Values must be greater than or equal to 1.
    nu : array_like
        Prandtl-Meyer angle [degrees]. Values must be
        0 <= nu <= 90*(sqrt((g+1)/(g-1))-1).
    mu : array_like
        Mach angle [degrees]. Values must be 0 <= mu <= 90.

    Returns
    -------
    out : (M, nu, mu)
        Tuple of Mach number, Prandtl-Meyer angle, Mach angle.

    Examples
    --------
    >>> flowprandtlmeyer(M=5)
    (5.0, 76.920215508538789, 11.536959032815489)
    """
    # parse the input
    gamma, flow, mtype, itype = _flowinput(flow)

    # calculate gamma-ratios for use in the equations
    l = sp.sqrt((gamma - 1) / (gamma + 1))

    # preshape mach array
    M = sp.empty(flow.shape, sp.float64)

    # use the prandtl-meyer relation to solve for the mach number
    if mtype in ["mach", "m"]:
        if (flow < 1).any():
            raise Exception("Mach number inputs must be real numbers"
                            " greater than or equal to 1.")
        M = flow
    elif mtype in ["mu", "machangle"]:
        if (flow < 0).any() or (flow > 90).any():
            raise Exception("Mach angle inputs must be real numbers"
                            " 0 <= mu <= 90.")
        M = 1 / sp.sin(sp.deg2rad(flow))
    elif mtype in ["nu", "pm", "pmangle"]:
        if (flow < 0).any() or (flow > 90*((1/l)-1)).any():
            raise Exception("Prandtl-Meyer angle inputs must be real"
                            " numbers 0 <= nu <= 90*(sqrt((g+1)/(g-1))-1).")
        M[:] = 2  # initial guess for the solution
        for _ in xrange(_AETB_iternum):
            b = sp.sqrt(M**2 - 1)
            f = -sp.deg2rad(flow) + (1/l) * sp.arctan(l*b) - sp.arctan(b)
            g = b*(1 - l**2) / (M*(1 + (l**2)*(b**2)))  # derivative
            M = M - (f / g)  # Newton-Raphson
    else:
        raise Exception("Keyword input must be an acceptable string to"
                        " select input parameter.")

    # normal shock relations
    b = sp.sqrt(M**2 - 1)
    V = (1/l) * sp.arctan(l*b) - sp.arctan(b)
    U = sp.arcsin(1 / M)

    return from_ndarray(itype, M, sp.rad2deg(V), sp.rad2deg(U))
def test_calc_kernel_f1(self):
    f1 = self.brdf.calc_kernel_f1(
        scipy.deg2rad(self.test_object.sun_zenith),
        scipy.deg2rad(self.test_object.sensor_zenith),
        scipy.deg2rad(self.test_object.relative_azimuth()))
    self.assertIsInstance(f1, scipy.ndarray)
def cal_qx(self, tth, th, energy):
    return 2.0 * sp.pi / self.cal_wave(energy) * (
        sp.cos(sp.deg2rad(tth - th)) - sp.cos(sp.deg2rad(th)))
def patch_gradient(patch, direction=lambda x, y: x, **kwargs):
    """
    take a patch and apply a gradient to it.

    :patch: the patch to be decorated
    :direction: if a number, indicates the direction of the linear gradient;
        otherwise it should be a callable

    the function takes several optional arguments:
        colormap: the colormap used for the gradient
            [any valid colormap, default cm.jet]

    BUG: if the clipping patch is not a rectangle the alpha value gets lost
    """
    ax = plt.gca()
    # loading of the default keywords
    colormap = multiget(kwargs, ['colormap', 'cmap', 'cm'], cm.jet)
    colormap = plt.get_cmap(colormap)
    resolution = multiget(kwargs, ['resolution', 'res'], 101j)
    alpha = multiget(kwargs, ['alpha'], 1)
    x_min = multiget(kwargs, ['x_min', 'xmin'], -1)
    x_max = multiget(kwargs, ['x_max', 'xmax'], 1)
    y_min = multiget(kwargs, ['y_min', 'ymin'], -1)
    y_max = multiget(kwargs, ['y_max', 'ymax'], 1)
    c_min = multiget(kwargs, ['c_min', 'cmin'], None)
    c_max = multiget(kwargs, ['c_max', 'cmax'], None)
    edgecolor = multiget(kwargs, ['edgecolor', 'ec'], None)
    linestyle = multiget(kwargs, ['linestyle', 'ls'], None)
    linewidth = multiget(kwargs, ['linewidth', 'lw'], None)
    # set the function of the gradient
    try:
        dir2rad = scipy.deg2rad(1. * direction)
        xmean = (x_max + x_min) / 2.
        ymean = (y_max + y_min) / 2.
        xampl = (x_max - x_min) / 2.
        yampl = (y_max - y_min) / 2.
        dir_func = lambda x, y: (((x - xmean) / xampl) * np.cos(dir2rad)
                                 + ((y - ymean) / yampl) * np.sin(dir2rad))
    except TypeError:
        dir_func = direction
    # get the extent of the patch
    extent = patch.get_extents().transformed(ax.transData.inverted()).extents
    extent[1], extent[2] = extent[2], extent[1]
    # create the grid on which the function will be evaluated
    yy, xx = np.ogrid[y_min:y_max:resolution, x_min:x_max:resolution]
    data = dir_func(xx, yy)
    # temporarily disable the autoscale to avoid problems with the imshow
    autoscale = ax.get_autoscale_on()
    ax.set_autoscale_on(False)
    # create the image on the patch
    props = dict(extent=extent, origin='lower', cmap=colormap, alpha=alpha,
                 aspect='auto')
    if c_min is not None:
        props.update(vmin=c_min)
    if c_max is not None:
        props.update(vmax=c_max)
    im = ax.imshow(data, **props)
    im.set_alpha(alpha)
    # remove the foreground from the patch and set the line properties
    patch.set_fc('none')
    patch.set_alpha(alpha)
    if edgecolor is not None:
        patch.set_edgecolor(edgecolor)
    if linestyle is not None:
        patch.set_linestyle(linestyle)
    if linewidth is not None:
        patch.set_linewidth(linewidth)
    # apply the clipping and restore the original autoscale setting
    im.set_clip_path(patch)
    ax.set_autoscale_on(autoscale)
    return im
operating_frequency = 2.477e9
wavelength = scipy.constants.c / operating_frequency

randarray = sp.loadtxt("arrays/randarray.dat") / wavelength
ourlinarray = sp.loadtxt("arrays/linarray.dat") / wavelength
ourcircarray = sp.loadtxt("arrays/circarray.dat") / wavelength

# Test Parameters
ants = randarray
nsamp = 2000
snr = -30
s1_aoa_deg = [80, 0]
s2_aoa_deg = [120, 0]
s1_aoa = (sp.deg2rad(s1_aoa_deg[0]), sp.deg2rad(s1_aoa_deg[1]))
#s2_aoa = (pi/2 + sp.randn()/2, sp.randn()/2)
s2_aoa = (sp.deg2rad(s2_aoa_deg[0]), sp.deg2rad(s2_aoa_deg[1]))

s1 = util.makesamples(ants, s1_aoa[0], s1_aoa[1], nsamp)
s2 = util.makesamples(ants, s2_aoa[0], s2_aoa[1], nsamp)
samples = s2 + s1
samples = util.awgn(samples, snr)
# add noise to s1 and s2
s1 = util.awgn(s1, snr)
s2 = util.awgn(s2, snr)

R = music.covar(samples)
est = music.Estimator(ants, R, nsignals=2)
def purcell_bi(physics, phase, network, r_toroid,
               surface_tension='pore.surface_tension',
               contact_angle='pore.contact_angle',
               diameter='throat.diameter',
               h_max='pore.diameter',
               max_dist=True, **kwargs):
    r"""
    Computes the throat capillary entry pressure assuming the throat is a
    toroid. Pressure is a function of both pore and throat diameters, so it
    becomes a bi-directional calculation.

    Parameters
    ----------
    network : OpenPNM Network Object
        The Network on which to apply the calculation
    sigma : dict key (string)
        The dictionary key containing the surface tension values to be used.
        If a pore property is given, it is interpolated to a throat list.
    theta : dict key (string)
        The dictionary key containing the contact angle values to be used.
        If a pore property is given, it is interpolated to a throat list.
    throat_diameter : dict key (string)
        The dictionary key containing the throat diameter values to be used.
    r_toroid : float or array_like
        The radius of the toroid surrounding the pore

    Notes
    -----
    This approach accounts for the converging-diverging nature of many throat
    types. Advancing the meniscus beyond the apex of the toroid requires an
    increase in capillary pressure beyond that for a cylindrical tube of the
    same radius. The details of this equation are described by Mason and
    Morrow [1]_, and explored by Gostick [2]_ in the context of a pore
    network model.
    """
    entity = diameter.split('.')[0]
    if surface_tension.split('.')[0] == 'pore' and entity == 'throat':
        sigma = phase[surface_tension]
        sigma = phase.interpolate_data(data=sigma)
    else:
        sigma = phase[surface_tension]
    if contact_angle.split('.')[0] == 'pore' and entity == 'throat':
        theta = phase[contact_angle]
        theta = phase.interpolate_data(data=theta)
    else:
        theta = phase[contact_angle]
    # Mason and Morrow have the definitions switched
    theta = 180 - theta
    th = _sp.deg2rad(theta)
    rt = network[diameter]/2
    R = r_toroid
    a_max = th - np.arcsin((np.sin(th))/(1+rt/R))
    if max_dist and entity == 'throat':
        # Perform analysis for entry into both pores
        r_max = np.zeros([network.Nt, 2])
        for j in range(2):
            Pj = network['throat.conns'][:, j]
            dj = network[h_max][Pj]
            max_reached = np.zeros(network.Nt, dtype=bool)
            alpha_reached = np.zeros(network.Nt)
            # With increasing filling angle, assess whether the interface has
            # passed the critical distance and record the critical angle
            a_space = _sp.linspace(1e-3, _sp.pi, 181)
            nudge = 0.001
            for a_test in a_space:
                nudgers = network.throats()[np.around(th - a_test, 0) == 90]
                if len(nudgers) > 0:
                    th[nudgers] += nudge
                r = R*(1 + (rt/R) - _sp.cos(a_test))/_sp.cos(th - a_test)
                # Vertical adjustment for centre of circle
                y_off = R*np.sin(a_test)
                # Angle between contact point - centre - vertical
                zeta = (th - a_test - (np.pi/2))
                c = y_off - r*np.cos(zeta)
                y_max = c + r
                ts = network.throats()[(y_max > dj) * (~max_reached)]
                if len(ts) > 0:
                    max_reached[ts] = True
                    alpha_reached[ts] = a_test
                if len(nudgers) > 0:
                    th[nudgers] -= nudge
            # Any interfaces that never reach a wall are ok
            alpha_reached[~max_reached] = a_max[~max_reached]
            temp = max_reached[np.abs(a_max) > np.abs(alpha_reached)]
            temp_sum = np.around(100*np.sum(temp)/len(a_max), 2)
            logger.info("Percentage max before BT " + str(temp_sum))
            # Any interfaces that can expand to maximum curvature before
            # hitting a pore wall are ok
            mask = np.abs(a_max) < np.abs(alpha_reached)
            alpha_reached[mask] = a_max[mask]
            r_max[:, j] = (R*(1 + (rt/R) - _sp.cos(alpha_reached)) /
                           _sp.cos(th - alpha_reached))
        value = (2*np.vstack((sigma, sigma)).T) / r_max
    else:
        r_max = R*(1 + (rt/R) - _sp.cos(a_max))/_sp.cos(th - a_max)
        value = 2*sigma/r_max
    if entity == 'throat':
        value = value[phase.throats(physics.name)]
    else:
        value = value[phase.pores(physics.name)]
    return value
def toroidal(target, mode='max', r_toroid=5e-6, target_Pc=None,
             surface_tension='pore.surface_tension',
             contact_angle='pore.contact_angle',
             diameter='throat.diameter'):
    r"""
    Calculate the filling angle (alpha) for a given capillary pressure

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated.  This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.
    mode : string (Default is 'max')
        Determines what information to send back. Options are:
        'max' : the maximum capillary pressure along the throat axis
        'men' : return the meniscus info for a target pressure
    r_toroid : float or array_like
        The radius of the toroid surrounding the pore
    target_Pc : float
        The target capillary pressure
    surface_tension : dict key (string)
        The dictionary key containing the surface tension values to be used.
        If a pore property is given, it is interpolated to a throat list.
    contact_angle : dict key (string)
        The dictionary key containing the contact angle values to be used.
        If a pore property is given, it is interpolated to a throat list.
    diameter : dict key (string)
        The dictionary key containing the throat diameter values to be used.

    Notes
    -----
    This approach accounts for the converging-diverging nature of many throat
    types. Advancing the meniscus beyond the apex of the toroid requires an
    increase in capillary pressure beyond that for a cylindrical tube of the
    same radius. The details of this equation are described by Mason and
    Morrow [1]_, and explored by Gostick [2]_ in the context of a pore
    network model.
    """
    network = target.project.network
    phase = target.project.find_phase(target)
    element, sigma, theta = _get_key_props(phase=phase,
                                           diameter=diameter,
                                           surface_tension=surface_tension,
                                           contact_angle=contact_angle)
    # Mason and Morrow have the definitions switched
    theta = _sp.deg2rad(theta)
    rt = network[diameter]/2
    Rf = r_toroid
    a, r, R, s, t, p = syp.symbols('a, r, R, s, t, p')
    rhs = -2*s/(R*(1 + (r/R) - syp.cos(a))/syp.cos(t - a))
    # lhs = p
    # roots = syp.solve(lhs-rhs, a)
    # _, r1, r2, _ = roots
    eit = syp.exp(syp.I*t)
    # There are 2 non-trivial solutions when rearranging the Purcell Pc eqn
    # to solve for alpha (a): r1 and r2.  r2 is outside a_min and a_max;
    # r1 is inside the range.  It takes sympy a while to solve the equations,
    # so the root inside the range is provided below, but it can be verified
    # by running the commented-out lines above.
    r1 = -syp.I*syp.log((R*p*eit + p*r*eit -
                         syp.sqrt((2*R*p**2*r*eit +
                                   2*R*p*s*syp.exp(2*syp.I*t) +
                                   2*R*p*s +
                                   p**2*r**2*eit -
                                   4*s**2*eit)*eit))/(R*p*eit - 2*s))
    r2 = -syp.I*syp.log((R*p*eit + p*r*eit +
                         syp.sqrt((2*R*p**2*r*eit +
                                   2*R*p*s*syp.exp(2*syp.I*t) +
                                   2*R*p*s +
                                   p**2*r**2*eit -
                                   4*s**2*eit)*eit))/(R*p*eit - 2*s))
    a_min = t - syp.asin((syp.sin(t))/(1 + r/R))
    a_max = t - syp.pi + syp.asin((syp.sin(t))/(1 + r/R))
    # alpha at given Pc
    fa_Pc = syp.lambdify((p, r, R, s, t), r1, 'numpy')
    # Pc at given alpha
    fPc = syp.lambdify((a, r, R, s, t), rhs, 'numpy')
    # alphas where max and min Pc occur
    fa_max = syp.lambdify((r, R, t), a_max, 'numpy')
    fa_min = syp.lambdify((r, R, t), a_min, 'numpy')
    # Values at min and max
    a_maxs = fa_max(rt, Rf, theta)
    pc_max = fPc(a_maxs, rt, Rf, sigma, theta)
    a_mins = fa_min(rt, Rf, theta)
    pc_min = fPc(a_mins, rt, Rf, sigma, theta)
    if mode == 'max':
        return pc_max
    elif target_Pc is None:
        logger.exception(msg='Please supply a target capillary pressure'
                             ' when mode is not max')
    if np.abs(target_Pc) < 1.0:
        target_Pc = 1.0
    # Masks to determine which throats to actually calculate alpha for.
    # Outside the valid range of pressures, the min or max values are used.
    over_range = target_Pc > pc_max
    undr_range = target_Pc < pc_min
    in_range = ~over_range * ~undr_range
    alpha = np.zeros(len(rt))
    if np.any(in_range):
        alpha[in_range] = np.real(fa_Pc(target_Pc, rt[in_range], Rf,
                                        sigma[in_range], theta[in_range]))
    if np.any(over_range):
        alpha[over_range] = a_maxs[over_range]
    if np.any(undr_range):
        alpha[undr_range] = a_mins[undr_range]
    logger.info('Filling angles calculated for Pc: ' + str(target_Pc))
    men_data = {}
    f = theta - alpha
    # Handle potential divide by zero
    f[np.abs(f) == np.pi/2] = f[np.abs(f) == np.pi/2]*(1 - 1e-12)
    # Meniscus radius
    r_men = Rf*(1 + rt/Rf - np.cos(alpha)) / np.cos(f)
    # Vertical adjustment for centre of circle
    y_off = Rf*np.sin(alpha)
    # Angle between contact point - centre - vertical
    zeta = (theta - alpha - np.pi/2)
    # Distance that the centre of the meniscus sits below the plane of the throat
    center = y_off - r_men*np.cos(zeta)
    men_data['alpha'] = alpha
    men_data['alpha_max'] = a_maxs
    men_data['alpha_min'] = a_mins
    men_data['radius'] = r_men
    men_data['center'] = center
    men_data['zeta'] = zeta
    return men_data
def track(self):
    print "Press right mouse button to pause or play"
    print "Use left mouse button to select target"
    print "Target color must be different from background"
    print "Target must have width larger than height"
    print "Target can be upside down"

    # Parameters
    isUDPConnection = False  # Currently switched manually in the code
    display = True
    displayDebug = True
    useBasemap = False
    maxRelativeMotionPerFrame = 2  # How much the target can move between two successive frames
    pixelPerRadians = 320
    radius = pixelPerRadians
    referenceImage = '../ObjectTracking/kite_detail.jpg'
    scaleFactor = 0.5
    isVirtualCamera = True
    useHDF5 = False

    # Open reference image: this is used at initialisation
    target_detail = Image(referenceImage)
    # Get RGB color palette of target (was found to work better than using hue)
    pal = target_detail.getPalette(bins=2, hue=False)

    # Open video to analyse or live stream
    #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')  # 640 * 480
    if isVirtualCamera:
        #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi', 'video')
        #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video')
        #cam = VirtualCamera('output.avi', 'video')
        cam = VirtualCamera('../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.flv', 'video')
        virtualCameraFPS = 25
    else:
        cam = JpegStreamCamera('http://192.168.43.1:8080/videofeed')  # 640 * 480
        #cam = Camera()

    # Get a sample image to initialize the display at the same size
    img = cam.getImage().scale(scaleFactor)
    print img.width, img.height
    # Create a pygame display
    if display:
        if img.width > img.height:
            disp = Display((27*640/10, 25*400/10))  # (int(2*img.width/scaleFactor), int(2*img.height/scaleFactor)))
        else:
            disp = Display((810, 1080))
    #js = JpegStreamer()

    # Initialize variables
    previous_angle = 0  # target has to be upright when starting. Target width has to be larger than target height.
    previous_coord_px = (0, 0)  # Initialized to top left corner, which always exists
    previous_dCoord = previous_coord_px
    previous_dAngle = previous_angle
    angles = []
    coords_px = []
    coord_px = [0, 0]
    angle = 0
    target_elevations = []
    target_bearings = []
    times = []
    wasTargetFoundInPreviousFrame = False
    i_frame = 0
    isPaused = False
    selectionInProgress = False
    th = [100, 100, 100]
    skycolor = Color.BLUE
    timeLastTarget = 0

    # Prepare recording
    recordFilename = datetime.datetime.utcnow().strftime("%Y%m%d_%Hh%M_") + 'simpleTrack'
    if useHDF5:
        try:
            os.remove(recordFilename + '.hdf5')
        except:
            print('Creating file ' + recordFilename + '.hdf5')
        """ The following line is used to silence the following error (according to
        http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error)
        #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file
        major: File accessability
        minor: Unable to open file"""
        h5py._errors.silence_errors()
        recordFile = h5py.File(recordFilename + '.hdf5', 'a')
        hdfSize = 0
        dset = recordFile.create_dataset('kite', (2, 2), maxshape=(None, 7))
        imset = recordFile.create_dataset('image', (2, img.width, img.height, 3),
                                          maxshape=(None, img.width, img.height, 3))
    else:
        try:
            os.remove(recordFilename + '.csv')
        except:
            print('Creating file ' + recordFilename + '.csv')
        recordFile = file(recordFilename + '.csv', 'a')
        csv_writer = csv.writer(recordFile)
        csv_writer.writerow(['Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)',
                             'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)'])

    # Launch a thread to get UDP messages with the orientation of the camera
    mobile = mobileState.mobileState()
    if isUDPConnection:
        a = threading.Thread(None, mobileState.mobileState.checkUpdate, None, (mobile,))
        a.start()

    # Loop while not cancelled by the user
    t0 = time.time()
    previousTime = t0
    while not(display) or disp.isNotDone():
        t = time.time()
        deltaT = (t - previousTime)
        FPS = 1.0 / deltaT
        #print 'FPS =', FPS
        if isVirtualCamera:
            deltaT = 1.0 / virtualCameraFPS
        previousTime = t
        i_frame = i_frame + 1
        timestamp = datetime.datetime.utcnow()

        # Receive the orientation of the camera
        if isUDPConnection:
            mobile.computeRPY([2, 0, 1], [-1, 1, 1])
        ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)],
                        [sp.sin(mobile.roll), sp.cos(mobile.roll)]])  # Coordinate transform matrix

        if useBasemap:
            # Warning: this really slows down the computation
            m = Basemap(width=img.width, height=img.height, projection='aeqd',
                        lat_0=sp.rad2deg(mobile.pitch),
                        lon_0=sp.rad2deg(mobile.yaw),
                        rsphere=radius)

        # Get an image from the camera
        if not isPaused:
            img = cam.getImage()
            img = img.resize(int(scaleFactor*img.width), int(scaleFactor*img.height))

        if display:
            # Pause image when right button is pressed
            dwn = disp.rightButtonDownPosition()
            if dwn is not None:
                isPaused = not(isPaused)
                dwn = None

        if display:
            # Create a layer to enable user to make a selection of the target
            selectionLayer = DrawingLayer((img.width, img.height))

        if img:
            if display:
                # Create a new layer to host information retrieved from video
                layer = DrawingLayer((img.width, img.height))
                # Selection is a rectangle drawn while holding mouse left button down
                if disp.leftButtonDown:
                    corner1 = (disp.mouseX, disp.mouseY)
                    selectionInProgress = True
                if selectionInProgress:
                    corner2 = (disp.mouseX, disp.mouseY)
                    bb = disp.pointsToBoundingBox(corner1, corner2)  # Display the temporary selection
                    if disp.leftButtonUp:
                        # User has finished his selection
                        selectionInProgress = False
                        selection = img.crop(bb[0], bb[1], bb[2], bb[3])
                        if selection != None:
                            # The 3 main colors in the area selected are considered.
                            # Note that the selection should be included in the
                            # target and not contain background
                            try:
                                selection.save('../ObjectTracking/' + 'kite_detail_tmp.jpg')
                                img0 = Image("kite_detail_tmp.jpg")  # For unknown reason I have to reload the image...
                                pal = img0.getPalette(bins=2, hue=False)
                            except:
                                # getPalette is sometimes bugging and raising
                                # LinalgError because matrix not positive definite
                                pal = pal
                            wasTargetFoundInPreviousFrame = False
                            previous_coord_px = (bb[0] + bb[2]/2, bb[1] + bb[3]/2)
                    if corner1 != corner2:
                        selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]),
                                                 width=5, color=Color.YELLOW)

            # If the target was already found, we can save computation time by
            # reducing the Region Of Interest around the predicted position
            if wasTargetFoundInPreviousFrame:
                ROITopLeftCorner = (max(0, previous_coord_px[0] - maxRelativeMotionPerFrame/2*width),
                                    max(0, previous_coord_px[1] - height*maxRelativeMotionPerFrame/2))
                ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1],
                               maxRelativeMotionPerFrame*width,
                               maxRelativeMotionPerFrame*height,
                               centered=False)
                if display:
                    # Draw the rectangle corresponding to the ROI on the complete image
                    layer.rectangle((previous_coord_px[0] - maxRelativeMotionPerFrame/2*width,
                                     previous_coord_px[1] - maxRelativeMotionPerFrame/2*height),
                                    (maxRelativeMotionPerFrame*width,
                                     maxRelativeMotionPerFrame*height),
                                    color=Color.GREEN, width=2)
            else:
                # Search on the whole image if no clue of where the target is
                ROITopLeftCorner = (0, 0)
                ROI = img

            '''#Option 1
            target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150)
            target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150)
            target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150)
            target_raw_img = target_part0+target_part1+target_part2
            target_img = target_raw_img.erode(5).dilate(5)

            #Option 2
            target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)'''

            # Find the sky color
            sky = (img - img.binarize()).findBlobs(minsize=10000)
            if sky:
                skycolor = sky[0].meanColor()
            # Option 3
            target_img = ROI - ROI  # Black image

            # Loop through the palette of target colors
            if display and displayDebug:
                decomposition = []
            i_col = 0
            for col in pal:
                c = tuple([int(col[i]) for i in range(0, 3)])
                # Search the target based on color
                ROI.save('../ObjectTracking/' + 'ROI_tmp.jpg')
                img1 = Image('../ObjectTracking/' + 'ROI_tmp.jpg')
                filter_img = img1.colorDistance(color=c)
                h = filter_img.histogram(numbins=256)
                cs = np.cumsum(h)
                thmax = np.argmin(abs(cs - 0.02*img.width*img.height))   # threshold to have 2% of the pixels in the expected color
                thmin = np.argmin(abs(cs - 0.005*img.width*img.height))  # threshold to have 0.5% of the pixels in the expected color
                if thmin == thmax:
                    newth = thmin
                else:
                    newth = np.argmin(h[thmin:thmax]) + thmin
                alpha = 0.5
                th[i_col] = alpha*th[i_col] + (1 - alpha)*newth
                filter_img = filter_img.threshold(max(40, min(200, th[i_col]))).invert()
                target_img = target_img + filter_img
                #print th
                i_col = i_col + 1
                if display and displayDebug:
                    [R, G, B] = filter_img.splitChannels()
                    white = (R - R).invert()
                    r = R*1.0/255*c[0]
                    g = G*1.0/255*c[1]
                    b = B*1.0/255*c[2]
                    tmp = white.mergeChannels(r, g, b)
                    decomposition.append(tmp)

            # Get a black background with a white target foreground
            target_img = target_img.threshold(150)
            target_img = target_img - ROI.colorDistance(color=skycolor).threshold(80).invert()

            if display and displayDebug:
                small_ini = target_img.resize(int(img.width/(len(pal)+1)),
                                              int(img.height/(len(pal)+1)))
                for tmp in decomposition:
                    small_ini = small_ini.sideBySide(
                        tmp.resize(int(img.width/(len(pal)+1)),
                                   int(img.height/(len(pal)+1))),
                        side='bottom')
                small_ini = small_ini.adaptiveScale((int(img.width), int(img.height)))
                toDisplay = img.sideBySide(small_ini)
            else:
                toDisplay = img
            #target_img = ROI.hueDistance(color=Color.RED).threshold(10).invert()

            # Search for binary large objects representing potential targets
            target = target_img.findBlobs(minsize=500)

            if target:  # If a target was found
                if wasTargetFoundInPreviousFrame:
                    # Target will most likely be close to the center of the ROI
                    predictedTargetPosition = (width*maxRelativeMotionPerFrame/2,
                                               height*maxRelativeMotionPerFrame/2)
                else:
                    predictedTargetPosition = previous_coord_px
                # If there are several targets in the image, take the one
                # closest to the predicted position
                target = target.sortDistance(predictedTargetPosition)

                # Get target coordinates according to minimal bounding rectangle or centroid.
                coordMinRect = ROITopLeftCorner + np.array((target[0].minRectX(), target[0].minRectY()))
                coord_px = ROITopLeftCorner + np.array(target[0].centroid())

                # Rotate the coordinates by the roll angle around the middle of the screen
                rot_coord_px = np.dot(ctm, coord_px - np.array([img.width/2, img.height/2])) \
                               + np.array([img.width/2, img.height/2])
                if useBasemap:
                    coord = sp.deg2rad(m(rot_coord_px[0], img.height - rot_coord_px[1], inverse=True))
                else:
                    coord = localProjection(rot_coord_px[0] - img.width/2,
                                            img.height/2 - rot_coord_px[1],
                                            radius, mobile.yaw, mobile.pitch,
                                            inverse=True)
                target_bearing, target_elevation = coord

                # Get the minimum bounding rectangle for display purposes
                minR = ROITopLeftCorner + np.array(target[0].minRect())
                contours = target[0].contour()
                contours = [ROITopLeftCorner + np.array(contour) for contour in contours]

                # Get target features
                angle = sp.deg2rad(target[0].angle()) + mobile.roll
                angle = sp.deg2rad(unwrap180(sp.rad2deg(angle), sp.rad2deg(previous_angle)))
                width = target[0].width()
                height = target[0].height()

                # Check if the kite is upside down
                # First rotate the kite
                ctm2 = np.array([[sp.cos(-angle + mobile.roll), -sp.sin(-angle + mobile.roll)],
                                 [sp.sin(-angle + mobile.roll), sp.cos(-angle + mobile.roll)]])  # Coordinate transform matrix
                rotated_contours = [np.dot(ctm2, contour - coordMinRect) for contour in contours]
                y = [-tmp[1] for tmp in rotated_contours]
                itop = np.argmax(y)     # Then look at the point at the top
                ibottom = np.argmin(y)  # and the point at the bottom
                # The most off-centre point is at the bottom
                if abs(rotated_contours[itop][0]) > abs(rotated_contours[ibottom][0]):
                    isInverted = True
                else:
                    isInverted = False

                if isInverted:
                    angle = angle + sp.pi

                # Filter the data
                alpha = 1 - sp.exp(-deltaT/self.filterTimeConstant)
                if not(isPaused):
                    dCoord = np.array(previous_dCoord)*(1 - alpha) \
                             + alpha*(np.array(coord_px) - previous_coord_px)  # related to the speed only if cam is fixed
                    dAngle = np.array(previous_dAngle)*(1 - alpha) \
                             + alpha*(np.array(angle) - previous_angle)
                else:
                    dCoord = np.array([0, 0])
                    dAngle = np.array([0])
                #print coord_px, angle, width, height, dCoord

                # Record important data
                times.append(timestamp)
                coords_px.append(coord_px)
                angles.append(angle)
                target_elevations.append(target_elevation)
                target_bearings.append(target_bearing)

                # Export data to the controller
                self.elevation = target_elevation
                self.bearing = target_bearing
                self.orientation = angle
                dt = time.time() - timeLastTarget
                self.ROT = dAngle/dt
                self.lastUpdateTime = t

                # Save for the initialisation of the next step
                previous_dCoord = dCoord
                previous_angle = angle
                previous_coord_px = (int(coord_px[0]), int(coord_px[1]))
                wasTargetFoundInPreviousFrame = True
                timeLastTarget = time.time()
            else:
                wasTargetFoundInPreviousFrame = False

            if useHDF5:
                hdfSize = hdfSize + 1
                dset.resize((hdfSize, 7))
                imset.resize((hdfSize, img.width, img.height, 3))
                dset[hdfSize-1, :] = [time.time(), coord_px[0], coord_px[1], angle,
                                      self.elevation, self.bearing, self.ROT]
                imset[hdfSize-1, :, :, :] = img.getNumpy()
                recordFile.flush()
            else:
                csv_writer.writerow([time.time(), coord_px[0], coord_px[1], angle,
                                     self.elevation, self.bearing, self.ROT])

            if display:
                if target:
                    # Add target features to layer
                    # Minimal rectangle and its center in RED
                    layer.polygon(minR[(0, 1, 3, 2), :], color=Color.RED, width=5)
                    layer.circle((int(coordMinRect[0]), int(coordMinRect[1])), 10,
                                 filled=True, color=Color.RED)
                    # Target contour and centroid in BLUE
                    layer.circle((int(coord_px[0]), int(coord_px[1])), 10,
                                 filled=True, color=Color.BLUE)
                    layer.polygon(contours, color=Color.BLUE, width=5)
                    # Speed vector in BLACK
                    layer.line((int(coord_px[0]), int(coord_px[1])),
                               (int(coord_px[0] + 20*dCoord[0]), int(coord_px[1] + 20*dCoord[1])),
                               width=3)
                    # Line giving angle
                    layer.line((int(coord_px[0] + 200*sp.cos(angle)), int(coord_px[1] + 200*sp.sin(angle))),
                               (int(coord_px[0] - 200*sp.cos(angle)), int(coord_px[1] - 200*sp.sin(angle))),
                               color=Color.RED)
                    # Line giving rate of turn
                    #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle+dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10))))

                # Add the layer to the raw image
                toDisplay.addDrawingLayer(layer)
                toDisplay.addDrawingLayer(selectionLayer)

                # Add time metadata
                toDisplay.drawText(str(i_frame) + " " + str(timestamp), x=0, y=0, fontsize=20)

                # Add Line giving horizon
                #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)), (img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width=3, color=Color.RED)

                # Plot parallels
                for lat in range(-90, 90, 15):
                    r = range(0, 361, 10)
                    if useBasemap:
                        # \todo improve for high roll
                        l = m(r, [lat]*len(r))
                        pix = [np.array(l[0]), img.height - np.array(l[1])]
                    else:
                        l = localProjection(sp.deg2rad(r),
                                            sp.deg2rad([lat]*len(r)),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        pix = [np.array(l[0]) + img.width/2, img.height/2 - np.array(l[1])]
                    for i in range(len(r) - 1):
                        if isPixelInImage((pix[0][i], pix[1][i]), img) or \
                           isPixelInImage((pix[0][i+1], pix[1][i+1]), img):
                            layer.line((pix[0][i], pix[1][i]), (pix[0][i+1], pix[1][i+1]),
                                       color=Color.WHITE, width=2)

                # Plot meridians
                for lon in range(0, 360, 15):
                    r = range(-90, 91, 10)
                    if useBasemap:
                        # \todo improve for high roll
                        l = m([lon]*len(r), r)
                        pix = [np.array(l[0]), img.height - np.array(l[1])]
                    else:
                        l = localProjection(sp.deg2rad([lon]*len(r)),
                                            sp.deg2rad(r),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        pix = [np.array(l[0]) + img.width/2, img.height/2 - np.array(l[1])]
                    for i in range(len(r) - 1):
                        if isPixelInImage((pix[0][i], pix[1][i]), img) or \
                           isPixelInImage((pix[0][i+1], pix[1][i+1]), img):
                            layer.line((pix[0][i], pix[1][i]), (pix[0][i+1], pix[1][i+1]),
                                       color=Color.WHITE, width=2)

                # Text giving bearing
                # \todo improve for high roll
                for bearing_deg in range(0, 360, 30):
                    l = localProjection(sp.deg2rad(bearing_deg), sp.deg2rad(0), radius,
                                        lon_0=mobile.yaw, lat_0=mobile.pitch, inverse=False)
                    l = np.dot(ctm, l)
                    layer.text(str(bearing_deg), (img.width/2 + int(l[0]), img.height - 20),
                               color=Color.RED)

                # Text giving elevation
                # \todo improve for high roll
                for elevation_deg in range(-60, 91, 30):
                    l = localProjection(0, sp.deg2rad(elevation_deg), radius,
                                        lon_0=mobile.yaw, lat_0=mobile.pitch, inverse=False)
                    l = np.dot(ctm, l)
                    layer.text(str(elevation_deg), (img.width/2, img.height/2 - int(l[1])),
                               color=Color.RED)

                #toDisplay.save(js)
                toDisplay.save(disp)
            if display:
                toDisplay.removeDrawingLayer(1)
                toDisplay.removeDrawingLayer(0)
    recordFile.close()
def cal_qz(self, tth, th, energy):
    return 2.0 * sp.pi / self.cal_wave(energy) * (
        sp.sin(sp.deg2rad(tth - th)) + sp.sin(sp.deg2rad(th)))
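# Sanity check for cal_qx/cal_qz above: in the specular condition th = tth/2,
# the incidence and exit angles are equal, so the in-plane component qx
# vanishes and the momentum transfer is purely along qz. A standalone numpy
# sketch; the wavelength stands in for the (unshown) cal_wave(energy) and is
# a hypothetical value.
import numpy as np

wave = 1.0  # hypothetical wavelength returned by cal_wave(energy)
tth, th = 30.0, 15.0  # specular: th = tth / 2
qx = 2*np.pi/wave * (np.cos(np.deg2rad(tth - th)) - np.cos(np.deg2rad(th)))
qz = 2*np.pi/wave * (np.sin(np.deg2rad(tth - th)) + np.sin(np.deg2rad(th)))
print(qx, qz)  # qx -> 0.0, qz > 0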
def llz2xyz(lat, lon, depth):
    """
    convert latitude, longitude, and altitude to earth-centered, earth-fixed
    (ECEF) cartesian. The output coordinates will be normalized to the radius
    of interest for the displayed sphere, as defined by the rad parameter
    below.

    code is based on:
    http://www.mathworks.com/matlabcentral/fileexchange/7942-covert-lat--lon--alt-to-ecef-cartesian/content/lla2ecef.m
    latitude, longitude, altitude to ECEF ("Earth-Centered, Earth-Fixed")
    http://www.gmat.unsw.edu.au/snap/gps/clynch_pdfs/coordcvt.pdf

    calculations based on:
    http://rbrundritt.wordpress.com/2008/10/14/conversion-between-spherical-and-cartesian-coordinates-systems/
    http://stackoverflow.com/questions/10473852/convert-latitude-and-longitude-to-point-in-3d-space
    http://www.oosa.unvienna.org/pdf/icg/2012/template/WGS_84.pdf

    computation verified using:
    http://www.sysense.com/products/ecef_lla_converter/index.html

    Parameters
    ----------
    lat: float
        latitude (deg)
    lon: float
        longitude (deg)
    depth: float
        depth (km)

    Returns
    -------
    x: float
        x-coordinate normalized to the radius of Earth
    y: float
        y-coordinate normalized to the radius of Earth
    z: float
        z-coordinate normalized to the radius of Earth
    """
    import numpy as np
    from scipy import deg2rad, rad2deg

    alt = -1.0 * 1000.0 * depth  # height above WGS84 ellipsoid (m)
    lat = deg2rad(lat)
    cosLat = np.cos(lat)
    sinLat = np.sin(lat)

    # World Geodetic System 1984 (WGS 84)
    erad = np.float64(6378137.0)  # Radius of the Earth in meters (equatorial radius, WGS84)
    rad = 1  # sphere radius
    e = np.float64(8.1819190842622e-2)
    n = erad / np.sqrt(1.0 - e * e * sinLat * sinLat)  # prime vertical radius of curvature

    lon = deg2rad(lon)
    cosLon = np.cos(lon)
    sinLon = np.sin(lon)

    x = (n + alt) * cosLat * cosLon  # meters
    y = (n + alt) * cosLat * sinLon  # meters
    z = ((1 - e * e) * n + alt) * sinLat  # meters

    x = x * rad / erad  # normalize to radius of rad
    y = y * rad / erad  # normalize to radius of rad
    z = z * rad / erad  # normalize to radius of rad

    return x, y, z
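# Usage sketch for llz2xyz: a point on the equator at zero depth should land
# on the unit sphere along the x axis, while a pole lands slightly inside it
# because of the WGS84 flattening.
x, y, z = llz2xyz(0.0, 0.0, 0.0)
print(x, y, z)  # -> (1.0, 0.0, 0.0)
x, y, z = llz2xyz(90.0, 0.0, 0.0)
print(z)        # ~0.9966, the polar/equatorial radius ratio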
def find_pattern_rotated(PF, pattern, image, rescale=1.0, rotate=(-60, 61, 120),
                         roi_center=None, roi_size=(41, 41), plot=False):
    #Get current time to determine runtime of search
    start_time = time.time()

    #Initialize values needed later on
    result = []
    vmax = 0.0
    vmin = sp.Inf

    #Set region of interest
    if roi_center is None:
        roi_center = sp.array(image.shape[:2])/2.0 - 0.5
    roi = center_roi_around(roi_center*rescale, roi_size)

    #Give user some feedback on what is happening
    print("Rescaling image and target by scale={rescale}.\n"
          "   image {0}x{1} px to {2:.2f}x{3:.2f} px."
          .format(image.shape[0], image.shape[1],
                  image.shape[0]*rescale, image.shape[1]*rescale,
                  rescale=rescale), flush=True)
    print("ROI: center={0}, {1}, in unscaled image.\n"
          "   height={2}, width={3} in scaled image"
          .format(roi_center[0], roi_center[1], roi_size[0], roi_size[1]))
    if rotate[2] > 1:
        print("Now correlating rotations from {0}° to {1}° in {2} steps:"
              .format(*rotate))
    else:
        print("Rotation is kept constant at {0}°".format(rotate[0]))

    # Create rescaled copies of image and pattern, determine center coordinates of both
    pattern_scaled = transform.rescale(pattern, rescale)
    image_scaled = transform.rescale(image, rescale)
    PF.set_image(image_scaled)
    rows_scaled, cols_scaled = pattern_scaled.shape[:2]  # shape is (rows, cols)
    pattern_scaled_center = sp.array((cols_scaled, rows_scaled))/2. - 0.5  # center as (x, y)
    rows, cols = pattern.shape[:2]
    pattern_center = sp.array((cols, rows))/2. - 0.5

    # Launch PatternFinder for all rotations defined in function input
    rotations = sp.linspace(*rotate)
    for r in rotations:
        # Calculate transformation matrix for rotation around center of scaled pattern
        rotation_matrix = rotation_transform_center(pattern_scaled, r, center_xy=pattern_scaled_center)
        # Launch PatternFinder
        out, min_coords, value = PF.find(transform.warp(pattern_scaled, rotation_matrix),
                                         image=None, roi=roi)
        # Collect min and max values for plotting later on
        outmax = out.max()
        outmin = out.min()
        if outmax > vmax: vmax = outmax
        if outmin < vmin: vmin = outmin
        # Undo the rescale for the coordinates
        min_coords = min_coords.astype(sp.float64) / rescale
        # Create a list of results for all rotations
        result.append([r, min_coords, value, out])
        # Progress bar... kind of :)
        print(".", end="", flush=True)
    print("")
    print("took {0} seconds.".format(time.time()-start_time))

    #Select the best result from the result list and extract its parameters
    best_param_set = result[sp.argmin([r[2] for r in result])]
    best_angle = best_param_set[0]  # the rotation angle is element 0 of each result
    best_coord = best_param_set[1]  # the coordinates are element 1
    best_value = best_param_set[2]  # the match value is element 2

    # Calculate transformation to transform image onto pattern
    move_to_center = transform.AffineTransform(translation=-(best_coord)[::-1])
    move_back = transform.AffineTransform(translation=(best_coord[::-1]))
    rotation = transform.AffineTransform(rotation=-sp.deg2rad(best_angle))
    translation = transform.AffineTransform(translation=sp.asmatrix((best_coord-pattern_center)[::-1]))
    T = translation + move_to_center + rotation + move_back

    #Create a plot showing error over angle
    if plot and rotate[2] > 1:
        fig, ax = plt.subplots(1)
        ax.plot([a[0] for a in result], [a[2] for a in result])
        ax.set_xlabel('Angle (rotation)')
        ax.set_ylabel('difference image-target')
        plt.show()

    #Create heat plot of where target is in image
    if plot == 'all':
        n_rows = int(sp.sqrt(len(result)))
        n_cols = int(sp.ceil(len(result)/n_rows))
        fig, ax = plt.subplots(n_rows, n_cols, squeeze=False,
                               figsize=(2 * n_cols, 2 * n_rows))
        fig.tight_layout(rect=[0, 0.03, 1, 0.97])
        fig.suptitle("Correlation map of where target is in image\n", size=16)
        n = 0
        for i in range(n_rows):
            for j in range(n_cols):
                ax[i, j].axis("off")
                if n < len(result):
                    ax[i, j].imshow(result[n][3], interpolation="nearest",
                                    cmap='cubehelix', vmin=vmin, vmax=vmax)
                    ax[i, j].annotate('Angle:{0:.2f}; Value:{1:.2f}'
                                      .format(result[n][0], result[n][2]), [0, 0])
                n += 1
        plt.show()

    return T, best_value
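A hypothetical invocation, sketching how the search is typically driven (PF, pattern and image are assumptions, not defined in this excerpt: PF is a configured PatternFinder instance, pattern and image are 2-D arrays):

    T, err = find_pattern_rotated(PF, pattern, image,
                                  rescale=0.5,
                                  rotate=(-10, 10, 41),  # -10° to +10° in 0.5° steps
                                  plot=False)
    # T is a skimage AffineTransform composing the best-fit rotation and
    # translation; err is the residual difference at the best match.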
def mason_model(network, phase, physics, f=0.6667, **kwargs):
    r"""
    Computes a Washburn-type capillary entry pressure for each throat,
    with the contact angle scaled by the correction factor ``f``.
    """
    Dt = network['throat.diameter']
    theta = phase['throat.contact_angle']  # degrees
    sigma = phase['throat.surface_tension']
    Pc = 4*sigma*sp.cos(f*sp.deg2rad(theta))/Dt
    return Pc[network.throats(physics.name)]
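For reference, the relation above is the Washburn equation with the contact angle scaled by f. A scalar sketch (the sigma, theta and Dt values below are illustrative assumptions, not from the source):

    import scipy as sp
    sigma = 0.072   # N/m, water-air surface tension (assumed)
    theta = 120.0   # degrees, contact angle (assumed)
    Dt = 1e-5       # m, throat diameter (assumed)
    f = 0.6667
    Pc = 4 * sigma * sp.cos(f * sp.deg2rad(theta)) / Dt  # ~5.0e3 Pa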
def purcell_filling_angle(physics, phase, network, r_toroid,
                          surface_tension='pore.surface_tension',
                          contact_angle='pore.contact_angle',
                          diameter='throat.diameter',
                          Pc=1e3, **kwargs):
    r"""
    Calculate the filling angle (alpha) for a given capillary pressure

    Parameters
    ----------
    network : OpenPNM Network Object
        The Network on which to apply the calculation
    surface_tension : dict key (string)
        The dictionary key containing the surface tension values to be
        used. If a pore property is given, it is interpolated to a
        throat list.
    contact_angle : dict key (string)
        The dictionary key containing the contact angle values to be
        used. If a pore property is given, it is interpolated to a
        throat list.
    diameter : dict key (string)
        The dictionary key containing the throat diameter values to be used.
    r_toroid : float or array_like
        The radius of the toroid surrounding the pore
    Pc : float
        The capillary pressure for which the filling angle is sought

    Notes
    -----
    This approach accounts for the converging-diverging nature of many throat
    types. Advancing the meniscus beyond the apex of the toroid requires an
    increase in capillary pressure beyond that for a cylindrical tube of the
    same radius. The details of this equation are described by Mason and
    Morrow [1]_, and explored by Gostick [2]_ in the context of a pore
    network model.

    !!! Takes mean contact angle and surface tension !!!
    """
    from scipy import ndimage
    entity = diameter.split('.')[0]
    if surface_tension.split('.')[0] == 'pore' and entity == 'throat':
        sigma = phase[surface_tension]
        sigma = phase.interpolate_data(data=sigma)
    else:
        sigma = phase[surface_tension]
    if contact_angle.split('.')[0] == 'pore' and entity == 'throat':
        theta = phase[contact_angle]
        theta = phase.interpolate_data(data=theta)
    else:
        theta = phase[contact_angle]
    # Mason and Morrow have the definitions switched
    theta = 180 - theta
    theta = _sp.deg2rad(theta)
    rt = network[diameter]/2
    R = r_toroid
    ratios = rt/R
    a_max = theta - np.arcsin((np.sin(theta))/(1 + ratios))

    def purcell_pressure(ratio, fill_angle, theta, sigma, R):
        # Helper function: capillary pressure at a given filling angle
        a_max = theta - np.arcsin((np.sin(theta))/(1+ratio))
        fill_angle[fill_angle > a_max] = a_max
        r_men = R*(1+(ratio)-_sp.cos(fill_angle))/_sp.cos(theta-fill_angle)
        Pc = 2*sigma/r_men
        return Pc

    fill_angle = _sp.deg2rad(np.linspace(-30, 150, 1001))
    alpha = np.zeros_like(ratios)
    for T, ratio in enumerate(ratios):
        mask = np.zeros_like(fill_angle, dtype=bool)
        nudge = 100
        all_Pc = purcell_pressure(ratio, fill_angle, theta[T], sigma[T], R)
        if Pc > all_Pc.max():
            # Target Pc out of range
            lowest = fill_angle[np.argwhere(all_Pc == all_Pc.max())[0][0]]
        else:
            # Widen the pressure window around the target Pc until it
            # captures at least one candidate filling angle
            while np.sum(mask) == 0:
                plus_mask = all_Pc < Pc + nudge
                minus_mask = all_Pc > Pc - nudge
                mask = np.logical_and(plus_mask, minus_mask)
                if np.sum(mask) == 0:
                    nudge += 10
            regions = ndimage.find_objects(ndimage.label(mask)[0])
            rx = [np.mean(fill_angle[regions[r]]) for r in range(len(regions))]
            root_x = np.asarray(rx)
            lowest = np.min(root_x)
        alpha[T] = lowest
    logger.info('Filling angles calculated for Pc: ' + str(Pc))
    physics['throat.alpha_max'] = a_max
    return _sp.rad2deg(alpha)
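In symbols, the helper above evaluates, for throat radius r_t, toroid radius R, the (switched) contact angle theta as used in the code, and filling angle alpha (a restatement of the code, not an addition to it):

    alpha_max = theta - arcsin( sin(theta) / (1 + r_t/R) )
    r_men     = R * (1 + r_t/R - cos(alpha)) / cos(theta - alpha)
    Pc        = 2 * sigma / r_men

The loop then scans alpha over [-30°, 150°] and keeps the smallest filling angle whose pressure falls within a progressively widened window around the target Pc.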
import scipy as sp
import matplotlib
from mpl_toolkits.mplot3d import axes3d
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib import cm
from scipy import linalg
import SU2_interp
from shutil import copyfile
import os
import sys

#Z = lambda x: sp.cos(x[0]-0.02) + sp.cos(x[1]+0.01)
Z = lambda x: (x[0]) + (x[1])

InternalAngle = sp.deg2rad(80)
quad_x = sp.array([10.0, 11.0, 11 + sp.cos(InternalAngle), 10 + sp.cos(InternalAngle)])
quad_y = sp.array([10.0, 10.0, 10 + sp.sin(InternalAngle), 10 + sp.sin(InternalAngle)])
quad_z = Z([quad_x, quad_y])

x_samples = sp.zeros((100, 100))
y_samples = sp.zeros((100, 100))
for i in range(100):
    y_samples[:, i] = sp.linspace(10, 10 + sp.sin(InternalAngle), 100)
    x_lim = 10 + (y_samples[i, 0] - 10) / sp.tan(InternalAngle)
    x_samples[i, :] = sp.linspace(x_lim, 1 + x_lim, 100)

# The correct values
z_samples = Z([x_samples, y_samples])
# The SciPy values
def track(self):
    print "Press right mouse button to pause or play"
    print "Use left mouse button to select target"
    print "Target color must be different from background"
    print "Target must have width larger than height"
    print "Target can be upside down"

    # Parameters
    isUDPConnection = False  # Currently switched manually in the code
    display = True
    displayDebug = True
    useBasemap = False
    maxRelativeMotionPerFrame = 2  # How much the target can move between two successive frames
    pixelPerRadians = 320
    radius = pixelPerRadians
    referenceImage = '../ObjectTracking/kite_detail.jpg'
    scaleFactor = 0.5
    isVirtualCamera = True
    useHDF5 = False

    # Open reference image: this is used at initialisation
    target_detail = Image(referenceImage)
    # Get RGB color palette of target (was found to work better than using hue)
    pal = target_detail.getPalette(bins=2, hue=False)

    # Open video to analyse or live stream
    #cam = JpegStreamCamera('http://192.168.1.29:8080/videofeed')#640 * 480
    if isVirtualCamera:
        #cam = VirtualCamera('../../zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi','video')
        #cam = VirtualCamera('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking/00095.MTS', 'video')
        #cam = VirtualCamera('output.avi', 'video')
        cam = VirtualCamera(
            '../Recording/Videos/Flying kite images (for kite steering unit development)-YTMgX1bvrTo.mp4',
            'video')
        virtualCameraFPS = 25
    else:
        cam = JpegStreamCamera('http://192.168.43.1:8080/videofeed')  # 640 * 480
        #cam = Camera()

    # Get a sample image to initialize the display at the same size
    img = cam.getImage().scale(scaleFactor)
    print img.width, img.height
    # Create a pygame display
    if display:
        if img.width > img.height:
            disp = Display((27 * 640 / 10, 25 * 400 / 10))  #(int(2*img.width/scaleFactor), int(2*img.height/scaleFactor)))
        else:
            disp = Display((810, 1080))
    #js = JpegStreamer()

    # Initialize variables
    previous_angle = 0  # target has to be upright when starting; target width has to be larger than target height
    previous_coord_px = (0, 0)  # Initialized to top left corner, which always exists
    previous_dCoord = previous_coord_px
    previous_dAngle = previous_angle
    angles = []
    coords_px = []
    coord_px = [0, 0]
    angle = 0
    target_elevations = []
    target_bearings = []
    times = []
    wasTargetFoundInPreviousFrame = False
    i_frame = 0
    isPaused = False
    selectionInProgress = False
    th = [100, 100, 100]
    skycolor = Color.BLUE
    timeLastTarget = 0

    # Prepare recording
    recordFilename = datetime.datetime.utcnow().strftime("%Y%m%d_%Hh%M_") + 'simpleTrack'
    if useHDF5:
        try:
            os.remove(recordFilename + '.hdf5')
        except:
            print('Creating file ' + recordFilename + '.hdf5')
        """ The following line is used to silence the following error (according to
        http://stackoverflow.com/questions/15117128/h5py-in-memory-file-and-multiprocessing-error)
        #000: ../../../src/H5F.c line 1526 in H5Fopen(): unable to open file
        major: File accessability
        minor: Unable to open file"""
        h5py._errors.silence_errors()
        recordFile = h5py.File(os.path.join(os.getcwd(), 'log', recordFilename + '.hdf5'), 'a')
        hdfSize = 0
        dset = recordFile.create_dataset('kite', (2, 2), maxshape=(None, 7))
        imset = recordFile.create_dataset('image', (2, img.width, img.height, 3),
                                          maxshape=(None, img.width, img.height, 3))
    else:
        try:
            os.remove(recordFilename + '.csv')
        except:
            print('Creating file ' + recordFilename + '.csv')
        recordFile = file(os.path.join(os.getcwd(), 'log', recordFilename + '.csv'), 'a')
        csv_writer = csv.writer(recordFile)
        csv_writer.writerow(['Time (s)', 'x (px)', 'y (px)', 'Orientation (rad)',
                             'Elevation (rad)', 'Bearing (rad)', 'ROT (rad/s)'])

    # Launch a thread to get UDP messages with the orientation of the camera
    mobile = mobileState.mobileState()
    if isUDPConnection:
        mobile.open()

    # Loop while not canceled by user
    t0 = time.time()
    previousTime = t0
    while not (display) or disp.isNotDone():
        t = time.time()
        deltaT = (t - previousTime)
        FPS = 1.0 / deltaT
        #print 'FPS =', FPS
        if isVirtualCamera:
            deltaT = 1.0 / virtualCameraFPS
        previousTime = t
        i_frame = i_frame + 1
        timestamp = datetime.datetime.utcnow()

        # Receive orientation of the camera
        if isUDPConnection:
            mobile.computeRPY([2, 0, 1], [-1, 1, 1])
        ctm = np.array([[sp.cos(mobile.roll), -sp.sin(mobile.roll)],
                        [sp.sin(mobile.roll), sp.cos(mobile.roll)]])  # Coordinate transform matrix
        if useBasemap:
            # Warning: this really slows down the computation
            m = Basemap(width=img.width, height=img.height,
                        projection='aeqd',
                        lat_0=sp.rad2deg(mobile.pitch),
                        lon_0=sp.rad2deg(mobile.yaw),
                        rsphere=radius)

        # Get an image from camera
        if not isPaused:
            img = cam.getImage()
            img = img.resize(int(scaleFactor * img.width), int(scaleFactor * img.height))

        if display:
            # Pause image when right button is pressed
            dwn = disp.rightButtonDownPosition()
            if dwn is not None:
                isPaused = not (isPaused)
                dwn = None

        if display:
            # Create a layer to enable user to make a selection of the target
            selectionLayer = DrawingLayer((img.width, img.height))

        if img:
            if display:
                # Create a new layer to host information retrieved from video
                layer = DrawingLayer((img.width, img.height))
                # Selection is a rectangle drawn while holding mouse left button down
                if disp.leftButtonDown:
                    corner1 = (disp.mouseX, disp.mouseY)
                    selectionInProgress = True
                if selectionInProgress:
                    corner2 = (disp.mouseX, disp.mouseY)
                    bb = disp.pointsToBoundingBox(corner1, corner2)  # Display the temporary selection
                    if disp.leftButtonUp:  # User has finished his selection
                        selectionInProgress = False
                        selection = img.crop(bb[0], bb[1], bb[2], bb[3])
                        if selection != None:
                            # The 3 main colors in the area selected are considered.
                            # Note that the selection should be included in the
                            # target and not contain background
                            try:
                                selection.save('../ObjectTracking/' + 'kite_detail_tmp.jpg')
                                img0 = Image("kite_detail_tmp.jpg")  # For unknown reason I have to reload the image...
                                pal = img0.getPalette(bins=2, hue=False)
                            except:  # getPalette sometimes raises a LinAlgError because the matrix is not positive definite
                                pal = pal
                            wasTargetFoundInPreviousFrame = False
                            previous_coord_px = (bb[0] + bb[2] / 2, bb[1] + bb[3] / 2)
                    if corner1 != corner2:
                        selectionLayer.rectangle((bb[0], bb[1]), (bb[2], bb[3]),
                                                 width=5, color=Color.YELLOW)

            # If the target was already found, we can save computation time by
            # reducing the Region Of Interest around the predicted position
            if wasTargetFoundInPreviousFrame:
                ROITopLeftCorner = (max(0, previous_coord_px[0] - maxRelativeMotionPerFrame / 2 * width),
                                    max(0, previous_coord_px[1] - height * maxRelativeMotionPerFrame / 2))
                ROI = img.crop(ROITopLeftCorner[0], ROITopLeftCorner[1],
                               maxRelativeMotionPerFrame * width,
                               maxRelativeMotionPerFrame * height,
                               centered=False)
                if display:
                    # Draw the rectangle corresponding to the ROI on the complete image
                    layer.rectangle((previous_coord_px[0] - maxRelativeMotionPerFrame / 2 * width,
                                     previous_coord_px[1] - maxRelativeMotionPerFrame / 2 * height),
                                    (maxRelativeMotionPerFrame * width,
                                     maxRelativeMotionPerFrame * height),
                                    color=Color.GREEN, width=2)
            else:
                # Search on the whole image if there is no clue of where the target is
                ROITopLeftCorner = (0, 0)
                ROI = img
                '''#Option 1
                target_part0 = ROI.hueDistance(color=(142,50,65)).invert().threshold(150)
                target_part1 = ROI.hueDistance(color=(93,16,28)).invert().threshold(150)
                target_part2 = ROI.hueDistance(color=(223,135,170)).invert().threshold(150)
                target_raw_img = target_part0+target_part1+target_part2
                target_img = target_raw_img.erode(5).dilate(5)

                #Option 2
                target_img = ROI.hueDistance(imgModel.getPixel(10,10)).binarize().invert().erode(2).dilate(2)'''

            # Find sky color
            sky = (img - img.binarize()).findBlobs(minsize=10000)
            if sky:
                skycolor = sky[0].meanColor()
            # Option 3
            target_img = ROI - ROI  # Black image
            # Loop through palette of target colors
            if display and displayDebug:
                decomposition = []
            i_col = 0
            for col in pal:
                c = tuple([int(col[i]) for i in range(0, 3)])
                # Search the target based on color
                ROI.save('../ObjectTracking/' + 'ROI_tmp.jpg')
                img1 = Image('../ObjectTracking/' + 'ROI_tmp.jpg')
                filter_img = img1.colorDistance(color=c)
                h = filter_img.histogram(numbins=256)
                cs = np.cumsum(h)
                thmax = np.argmin(abs(cs - 0.02 * img.width * img.height))   # threshold keeping 2% of the pixels in the expected color
                thmin = np.argmin(abs(cs - 0.005 * img.width * img.height))  # threshold keeping 0.5% of the pixels in the expected color
                if thmin == thmax:
                    newth = thmin
                else:
                    newth = np.argmin(h[thmin:thmax]) + thmin
                alpha = 0.5
                th[i_col] = alpha * th[i_col] + (1 - alpha) * newth
                filter_img = filter_img.threshold(max(40, min(200, th[i_col]))).invert()
                target_img = target_img + filter_img
                #print th
                i_col = i_col + 1
                if display and displayDebug:
                    [R, G, B] = filter_img.splitChannels()
                    white = (R - R).invert()
                    r = R * 1.0 / 255 * c[0]
                    g = G * 1.0 / 255 * c[1]
                    b = B * 1.0 / 255 * c[2]
                    tmp = white.mergeChannels(r, g, b)
                    decomposition.append(tmp)

            # Get a black background with white target foreground
            target_img = target_img.threshold(150)
            target_img = target_img - ROI.colorDistance(color=skycolor).threshold(80).invert()

            if display and displayDebug:
                small_ini = target_img.resize(int(img.width / (len(pal) + 1)),
                                              int(img.height / (len(pal) + 1)))
                for tmp in decomposition:
                    small_ini = small_ini.sideBySide(
                        tmp.resize(int(img.width / (len(pal) + 1)),
                                   int(img.height / (len(pal) + 1))),
                        side='bottom')
                small_ini = small_ini.adaptiveScale((int(img.width), int(img.height)))
                toDisplay = img.sideBySide(small_ini)
            else:
                toDisplay = img
            #target_img = ROI.hueDistance(color = Color.RED).threshold(10).invert()

            # Search for binary large objects representing potential target
            target = target_img.findBlobs(minsize=500)

            if target:  # If a target was found
                if wasTargetFoundInPreviousFrame:
                    predictedTargetPosition = (width * maxRelativeMotionPerFrame / 2,
                                               height * maxRelativeMotionPerFrame / 2)  # Target will most likely be close to the center of the ROI
                else:
                    predictedTargetPosition = previous_coord_px
                # If there are several targets in the image, take the one closest to the predicted position
                target = target.sortDistance(predictedTargetPosition)

                # Get target coordinates according to minimal bounding rectangle or centroid
                coordMinRect = ROITopLeftCorner + np.array((target[0].minRectX(), target[0].minRectY()))
                coord_px = ROITopLeftCorner + np.array(target[0].centroid())

                # Rotate the coordinates by the roll angle around the middle of the screen
                rot_coord_px = np.dot(ctm, coord_px - np.array([img.width / 2, img.height / 2])) \
                               + np.array([img.width / 2, img.height / 2])
                if useBasemap:
                    coord = sp.deg2rad(m(rot_coord_px[0], img.height - rot_coord_px[1], inverse=True))
                else:
                    coord = localProjection(rot_coord_px[0] - img.width / 2,
                                            img.height / 2 - rot_coord_px[1],
                                            radius, mobile.yaw, mobile.pitch,
                                            inverse=True)
                target_bearing, target_elevation = coord

                # Get minimum bounding rectangle for display purpose
                minR = ROITopLeftCorner + np.array(target[0].minRect())
                contours = target[0].contour()
                contours = [ROITopLeftCorner + np.array(contour) for contour in contours]

                # Get target features
                angle = sp.deg2rad(target[0].angle()) + mobile.roll
                angle = sp.deg2rad(unwrap180(sp.rad2deg(angle), sp.rad2deg(previous_angle)))
                width = target[0].width()
                height = target[0].height()

                # Check if the kite is upside down
                # First rotate the kite
                ctm2 = np.array([[sp.cos(-angle + mobile.roll), -sp.sin(-angle + mobile.roll)],
                                 [sp.sin(-angle + mobile.roll), sp.cos(-angle + mobile.roll)]])  # Coordinate transform matrix
                rotated_contours = [np.dot(ctm2, contour - coordMinRect) for contour in contours]
                y = [-tmp[1] for tmp in rotated_contours]
                itop = np.argmax(y)     # Then look at the point at the top
                ibottom = np.argmin(y)  # and the point at the bottom
                # The most off-center point is at the bottom
                if abs(rotated_contours[itop][0]) > abs(rotated_contours[ibottom][0]):
                    isInverted = True
                else:
                    isInverted = False
                if isInverted:
                    angle = angle + sp.pi

                # Filter the data
                alpha = 1 - sp.exp(-deltaT / self.filterTimeConstant)
                if not (isPaused):
                    dCoord = np.array(previous_dCoord) * (1 - alpha) + alpha * (
                        np.array(coord_px) - previous_coord_px)  # related to the speed only if cam is fixed
                    dAngle = np.array(previous_dAngle) * (1 - alpha) + alpha * (
                        np.array(angle) - previous_angle)
                else:
                    dCoord = np.array([0, 0])
                    dAngle = np.array([0])
                #print coord_px, angle, width, height, dCoord

                # Record important data
                times.append(timestamp)
                coords_px.append(coord_px)
                angles.append(angle)
                target_elevations.append(target_elevation)
                target_bearings.append(target_bearing)

                # Export data to controller
                self.elevation = target_elevation
                self.bearing = target_bearing
                self.orientation = angle
                dt = time.time() - timeLastTarget
                self.ROT = dAngle / dt
                self.lastUpdateTime = t

                # Save for initialisation of next step
                previous_dCoord = dCoord
                previous_angle = angle
                previous_coord_px = (int(coord_px[0]), int(coord_px[1]))
                wasTargetFoundInPreviousFrame = True
                timeLastTarget = time.time()
            else:
                wasTargetFoundInPreviousFrame = False

            if useHDF5:
                hdfSize = hdfSize + 1
                dset.resize((hdfSize, 7))
                imset.resize((hdfSize, img.width, img.height, 3))
                dset[hdfSize - 1, :] = [time.time(), coord_px[0], coord_px[1],
                                        angle, self.elevation, self.bearing, self.ROT]
                imset[hdfSize - 1, :, :, :] = img.getNumpy()
                recordFile.flush()
            else:
                csv_writer.writerow([time.time(), coord_px[0], coord_px[1],
                                     angle, self.elevation, self.bearing, self.ROT])

            if display:
                if target:
                    # Add target features to layer
                    # Minimal rectangle and its center in RED
                    layer.polygon(minR[(0, 1, 3, 2), :], color=Color.RED, width=5)
                    layer.circle((int(coordMinRect[0]), int(coordMinRect[1])), 10,
                                 filled=True, color=Color.RED)
                    # Target contour and centroid in BLUE
                    layer.circle((int(coord_px[0]), int(coord_px[1])), 10,
                                 filled=True, color=Color.BLUE)
                    layer.polygon(contours, color=Color.BLUE, width=5)
                    # Speed vector in BLACK
                    layer.line((int(coord_px[0]), int(coord_px[1])),
                               (int(coord_px[0] + 20 * dCoord[0]),
                                int(coord_px[1] + 20 * dCoord[1])), width=3)
                    # Line giving angle
                    layer.line((int(coord_px[0] + 200 * sp.cos(angle)),
                                int(coord_px[1] + 200 * sp.sin(angle))),
                               (int(coord_px[0] - 200 * sp.cos(angle)),
                                int(coord_px[1] - 200 * sp.sin(angle))),
                               color=Color.RED)
                    # Line giving rate of turn
                    #layer.line((int(coord_px[0]+200*sp.cos(angle+dAngle*10)), int(coord_px[1]+200*sp.sin(angle+dAngle*10))), (int(coord_px[0]-200*sp.cos(angle + dAngle*10)), int(coord_px[1]-200*sp.sin(angle+dAngle*10))))

                # Add the layer to the raw image
                toDisplay.addDrawingLayer(layer)
                toDisplay.addDrawingLayer(selectionLayer)
                # Add time metadata
                toDisplay.drawText(str(i_frame) + " " + str(timestamp), x=0, y=0, fontsize=20)
                # Add line giving horizon
                #layer.line((0, int(img.height/2 + mobile.pitch*pixelPerRadians)),(img.width, int(img.height/2 + mobile.pitch*pixelPerRadians)), width = 3, color = Color.RED)

                # Plot parallels
                for lat in range(-90, 90, 15):
                    r = range(0, 361, 10)
                    if useBasemap:
                        # \todo improve for high roll
                        l = m(r, [lat] * len(r))
                        pix = [np.array(l[0]), img.height - np.array(l[1])]
                    else:
                        l = localProjection(sp.deg2rad(r),
                                            sp.deg2rad([lat] * len(r)),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        pix = [np.array(l[0]) + img.width / 2,
                               img.height / 2 - np.array(l[1])]
                    for i in range(len(r) - 1):
                        if isPixelInImage((pix[0][i], pix[1][i]), img) or \
                           isPixelInImage((pix[0][i + 1], pix[1][i + 1]), img):
                            layer.line((pix[0][i], pix[1][i]),
                                       (pix[0][i + 1], pix[1][i + 1]),
                                       color=Color.WHITE, width=2)

                # Plot meridians
                for lon in range(0, 360, 15):
                    r = range(-90, 91, 10)
                    if useBasemap:
                        # \todo improve for high roll
                        l = m([lon] * len(r), r)
                        pix = [np.array(l[0]), img.height - np.array(l[1])]
                    else:
                        l = localProjection(sp.deg2rad([lon] * len(r)),
                                            sp.deg2rad(r),
                                            radius,
                                            lon_0=mobile.yaw,
                                            lat_0=mobile.pitch,
                                            inverse=False)
                        l = np.dot(ctm, l)
                        pix = [np.array(l[0]) + img.width / 2,
                               img.height / 2 - np.array(l[1])]
                    for i in range(len(r) - 1):
                        if isPixelInImage((pix[0][i], pix[1][i]), img) or \
                           isPixelInImage((pix[0][i + 1], pix[1][i + 1]), img):
                            layer.line((pix[0][i], pix[1][i]),
                                       (pix[0][i + 1], pix[1][i + 1]),
                                       color=Color.WHITE, width=2)

                # Text giving bearing
                # \todo improve for high roll
                for bearing_deg in range(0, 360, 30):
                    l = localProjection(sp.deg2rad(bearing_deg), sp.deg2rad(0),
                                        radius, lon_0=mobile.yaw,
                                        lat_0=mobile.pitch, inverse=False)
                    l = np.dot(ctm, l)
                    layer.text(str(bearing_deg),
                               (img.width / 2 + int(l[0]), img.height - 20),
                               color=Color.RED)

                # Text giving elevation
                # \todo improve for high roll
                for elevation_deg in range(-60, 91, 30):
                    l = localProjection(0, sp.deg2rad(elevation_deg), radius,
                                        lon_0=mobile.yaw, lat_0=mobile.pitch,
                                        inverse=False)
                    l = np.dot(ctm, l)
                    layer.text(str(elevation_deg),
                               (img.width / 2, img.height / 2 - int(l[1])),
                               color=Color.RED)

            #toDisplay.save(js)
            toDisplay.save(disp)
        if display:
            toDisplay.removeDrawingLayer(1)
            toDisplay.removeDrawingLayer(0)
    recordFile.close()
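The smoothing applied to dCoord and dAngle above is a first-order low-pass filter. A standalone sketch (the function name and arguments are mine, for illustration):

    import scipy as sp

    def lowpass(previous, new, deltaT, tau):
        """First-order low-pass filter.

        alpha -> 1 when deltaT >> tau (follow the new measurement);
        alpha -> 0 when deltaT << tau (heavy smoothing).
        tau = 0 disables filtering entirely.
        """
        alpha = 1.0 if tau == 0.0 else 1.0 - sp.exp(-deltaT / tau)
        return previous + alpha * (sp.asarray(new) - sp.asarray(previous))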
def flowprandtlmeyer(**flow):
    """
    Prandtl-Meyer function for expansion waves.

    This function accepts a given set of specific heat ratios and
    an input of either Mach number, Mach angle or Prandtl-Meyer angle.
    Inputs can be a single scalar or an array_like data structure.

    Parameters
    ----------
    gamma : array_like, optional
        Specific heat ratio. Values must be greater than 1.
    M : array_like
        Mach number. Values must be greater than or equal to 1.
    nu : array_like
        Prandtl-Meyer angle [degrees]. Values must satisfy
        0 <= nu <= 90*(sqrt((g+1)/(g-1))-1).
    mu : array_like
        Mach angle [degrees]. Values must satisfy 0 <= mu <= 90.

    Returns
    -------
    out : (M, nu, mu)
        Tuple of Mach number, Prandtl-Meyer angle, Mach angle.

    Examples
    --------
    >>> flowprandtlmeyer(M=5)
    (5.0, 76.920215508538789, 11.536959032815489)
    """
    #parse the input
    gamma, flow, mtype, itype = _flowinput(flow)

    #calculate gamma-ratios for use in the equations
    l = sp.sqrt((gamma - 1) / (gamma + 1))

    #preshape mach array
    M = sp.empty(flow.shape, sp.float64)

    #use prandtl-meyer relation to solve for the mach number
    if mtype in ["mach", "m"]:
        if (flow < 1).any():
            raise Exception("Mach number inputs must be real numbers greater"
                            " than or equal to 1.")
        M = flow
    elif mtype in ["mu", "machangle"]:
        if (flow < 0).any() or (flow > 90).any():
            raise Exception("Mach angle inputs must be real numbers"
                            " 0 <= mu <= 90.")
        M = 1 / sp.sin(sp.deg2rad(flow))
    elif mtype in ["nu", "pm", "pmangle"]:
        if (flow < 0).any() or (flow > 90 * ((1 / l) - 1)).any():
            raise Exception("Prandtl-Meyer angle inputs must be real"
                            " numbers 0 <= nu <= 90*(sqrt((g+1)/(g-1))-1).")
        M[:] = 2  #initial guess for the solution
        for _ in xrange(_AETB_iternum):
            b = sp.sqrt(M**2 - 1)
            f = -sp.deg2rad(flow) + (1 / l) * sp.arctan(l * b) - sp.arctan(b)
            g = b * (1 - l**2) / (M * (1 + (l**2) * (b**2)))  #derivative
            M = M - (f / g)  #Newton-Raphson
    else:
        raise Exception("Keyword input must be an acceptable string to"
                        " select input parameter.")

    #compute the Prandtl-Meyer angle and Mach angle from the Mach number
    b = sp.sqrt(M**2 - 1)
    V = (1 / l) * sp.arctan(l * b) - sp.arctan(b)
    U = sp.arcsin(1 / M)

    return from_ndarray(itype, M, sp.rad2deg(V), sp.rad2deg(U))
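In symbols, with lambda = sqrt((gamma - 1)/(gamma + 1)), the two angles returned above (in radians, before the final rad2deg) are:

    nu(M) = (1/lambda) * arctan(lambda * sqrt(M**2 - 1)) - arctan(sqrt(M**2 - 1))
    mu(M) = arcsin(1 / M)

When nu is the input, the code inverts the first relation numerically with Newton-Raphson, iterating M <- M - f(M)/f'(M) from the initial guess M = 2.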