def gen():
    """Generate one randomized multilayer-stack sample and its simulated
    optical measurements, flattened into a single row vector.

    The layer materials are fixed (a repeating 3-material stack); only the
    layer thicknesses are randomized, drawn uniformly from per-material
    ranges.  Gaussian noise of scale ``sig`` is added to every simulated
    measurement.

    Relies on module-level state: ``np``, ``rng``, ``tmm``, ``ang``,
    ``wvl``, ``l``, ``materials``, ``n_gl``, ``sig`` and the
    ``al_range``/``ag_range``/``ge_range`` thickness bounds.
    NOTE(review): mutates the module-level thickness array ``l`` in place.

    Returns:
        numpy.ndarray of shape
        ``(1, ang.size + l.size + 6 * ang.size * wvl.size)`` holding
        ``[ang, l, rp, rs, tp, ts, psi, delta]`` concatenated.
    """
    # Number of (angle, wavelength) measurement points; every simulated
    # quantity below is a flat vector of this length.  The original code
    # preallocated rp/rs/tp/ts/psi/delta via chained assignments
    # (rp = rs = tp = ts = psi = np.zeros_like(psi)), which aliased them
    # all to ONE shared array -- a bug-prone idiom -- and then only ever
    # used them for .size.  A single count replaces all of that.
    n_pts = ang.size * wvl.size

    # Refractive-index table: rows are layers, columns are wavelengths.
    # Materials repeat with period 3 down the stack; thicknesses are
    # randomized in place with per-material uniform ranges.
    n = np.zeros((l.size, wvl.size), dtype=complex)
    n[::3] = materials[0]
    n[1::3] = materials[1]
    n[2::3] = materials[2]
    l[::3] = rng.uniform(low=al_range[0], high=al_range[1], size=l[::3].size)
    l[1::3] = rng.uniform(low=ag_range[0], high=ag_range[1], size=l[1::3].size)
    l[2::3] = rng.uniform(low=ge_range[0], high=ge_range[1], size=l[2::3].size)

    # Simulated "experimental" reflection/transmission amplitudes for
    # p-polarization (first arg 1) and s-polarization (first arg 0),
    # with additive Gaussian noise.  Ordering: angle-major, wavelength-minor.
    rp = np.array([tmm.reflect_amp(1, ang[j], wvl[i], n[:, i], l, 1., n_gl[i])
                   for j in range(len(ang)) for i in range(len(wvl))
                   ]) + rng.normal(scale=sig, size=n_pts)
    tp = np.array([tmm.trans_amp(1, ang[j], wvl[i], n[:, i], l, 1., n_gl[i])
                   for j in range(len(ang)) for i in range(len(wvl))
                   ]) + rng.normal(scale=sig, size=n_pts)
    rs = np.array([tmm.reflect_amp(0, ang[j], wvl[i], n[:, i], l, 1., n_gl[i])
                   for j in range(len(ang)) for i in range(len(wvl))
                   ]) + rng.normal(scale=sig, size=n_pts)
    ts = np.array([tmm.trans_amp(0, ang[j], wvl[i], n[:, i], l, 1., n_gl[i])
                   for j in range(len(ang)) for i in range(len(wvl))
                   ]) + rng.normal(scale=sig, size=n_pts)

    # Ellipsometry: tmm.ellips returns a (psi, delta) pair per point, so the
    # stacked array has shape (n_pts, 2); split the columns and add noise.
    ellips = np.array([tmm.ellips(ang[j], wvl[i], n[:, i], l, 1., n_gl[i])
                       for j in range(len(ang)) for i in range(len(wvl))])
    psi = ellips[:, 0] + rng.normal(scale=sig, size=n_pts)
    delta = ellips[:, 1] + rng.normal(scale=sig, size=n_pts)

    # Flatten everything into a single row so samples can be stacked rowwise.
    data = np.reshape(np.concatenate((ang, l, rp, rs, tp, ts, psi, delta)),
                      (1, ang.size + l.size + 6 * n_pts))
    return data
def main():
    """Plot the p-polarized reflectance of a 1 um film on a lossy substrate,
    normalized by the bare-interface reflectance, versus wavelength.

    Thicknesses are in microns (um).  Relies on module-level helpers
    ``putil.getEps`` (material permittivity lookup), ``tm.solvestack``
    (transfer-matrix solver returning (R, T, A)), plus ``pl`` (pylab),
    ``np``, ``sqrt`` and ``pi``.
    """
    # NOTE(review): debug short-circuit left in the code -- everything
    # below these three lines is unreachable until they are removed.
    eps = putil.getEps('al', 1500.)
    print(eps)  # fixed: was a Python-2-only print statement
    exit()

    # Layer indices: incident medium, film, and substrate (large negative
    # permittivity, i.e. metallic).
    ni = 1.
    nf = 4.
    nt = sqrt(-100 + 1j)
    ns_film = np.array([nf])
    ns_bare = np.array([ni])

    # Layer thicknesses (um).
    ds = np.array([1])

    # Excitation properties: wavelength sweep at a single 45-degree angle.
    wls = np.linspace(1., 2., 100)  # um
    # ths = np.linspace(0.0, 90, 1000) * pi/180.
    ths = np.array([45. * pi / 180])
    pol = 'p'

    # Collect data: reflectance with and without the film; the first value
    # returned by solvestack is R.
    R_bare = pl.zeros((len(wls), len(ths)), dtype='float')
    R_film = pl.zeros((len(wls), len(ths)), dtype='float')
    for ith, th in enumerate(ths):
        for iwl, wl in enumerate(wls):
            R_film[iwl, ith] = tm.solvestack(ni, nt, ns_film, ds, wl, pol, th)[0]
            R_bare[iwl, ith] = tm.solvestack(ni, nt, ns_bare, ds, wl, pol, th)[0]
    R = R_film / R_bare

    # Plot the normalized reflectance.
    pl.figure(figsize=(10, 7.5))
    pl.plot(wls, R[:, 0], 'r', lw=2, label='R')
    # pl.xlim(ths[0], ths[-1])
    # pl.ylim(0, 1)
    pl.ylabel(r'$R$', fontsize=18)
    pl.xlabel(r'$\theta$', fontsize=18)
    pl.show()
    return
def main():
    """Plot the angle-averaged, aluminum-normalized reflectance of a GST-225
    film on Al for the amorphous and crystalline phases.

    Wavelengths are in nm.  Relies on module-level helpers ``putil.getEps``
    (material permittivity lookup), ``tm.solvestack`` (transfer-matrix
    solver returning (R, T, A)), plus ``plt``, ``np``, ``sqrt`` and ``pi``.
    """
    # Removed unused unit constants nm/um/cm from the original; note that
    # cm was mis-defined there as 1e-3 (that is mm), so dropping the dead
    # definitions also removes a latent trap.
    wls = np.linspace(1500, 6000, 100)  # nm

    # Material indices from permittivities (GeTe lookups kept for reference).
    # e_agst = putil.getEps('a-gete', wls)
    # e_cgst = putil.getEps('c-gete', wls)
    e_agst = putil.getEps('a-gst225', wls)
    e_cgst = putil.getEps('c-gst225', wls)

    n_agst = sqrt(e_agst)
    # NOTE(review): re-assembling from .real/.imag is a no-op as written;
    # possibly intended to enforce a sign convention on the imaginary part.
    # Preserved verbatim to keep behavior identical -- confirm intent.
    n_agst = n_agst.real + 1j * n_agst.imag
    n_cgst = sqrt(e_cgst)
    n_cgst = n_cgst.real + 1j * n_cgst.imag

    e_al = putil.getEps('al', wls)
    n_al = sqrt(e_al)
    n_al = n_al.real + 1j * n_al.imag

    # Incidence angles to average over, and the GST film thickness.
    Nths = 10
    ths = np.linspace(10, 40, Nths) * pi / 180.
    pol = 'p'  # NOTE(review): unused -- the loop below sweeps both 'p' and 's'
    ds = np.array([175])  # nm

    # R/T/A indexed by (wavelength, angle, polarization).
    R = np.zeros((len(wls), Nths, 2), dtype='float')
    T = np.zeros((len(wls), Nths, 2), dtype='float')
    A = np.zeros((len(wls), Nths, 2), dtype='float')

    plt.figure(figsize=(5, 4))

    for phase in ['a', 'c']:
        for ip, p in enumerate(['p', 's']):
            for ith, th in enumerate(ths):
                for iwl, wl in enumerate(wls):
                    ni = 1 + 0j
                    ns_a = np.array([n_agst[iwl]])
                    ns_c = np.array([n_cgst[iwl]])
                    nt = n_al[iwl]

                    # Reference: reflection from aluminum with no GST.
                    ref = tm.solvestack(ni, nt, np.array([1]), ds, wl, p, th)[0]

                    if phase == 'a':
                        R[iwl, ith, ip], T[iwl, ith, ip], A[iwl, ith, ip] = \
                            tm.solvestack(ni, nt, ns_a, ds, wl, p, th)
                    else:
                        R[iwl, ith, ip], T[iwl, ith, ip], A[iwl, ith, ip] = \
                            tm.solvestack(ni, nt, ns_c, ds, wl, p, th)

                    R[iwl, ith, ip] /= ref

        # Average over angles and both polarizations; one curve per phase
        # (blue = amorphous, red = crystalline), wavelength axis in um.
        Rav = np.sum(np.sum(R, axis=1), axis=1) / (2 * Nths)
        c = 'b' if phase == 'a' else 'r'
        plt.plot(wls / 1e3, Rav, c, lw=2, label='R')

    plt.ylabel(r'$R$', fontsize=18)
    plt.xlabel(r'$\lambda$ ($\mu m$)', fontsize=18)
    plt.legend(('amor.', 'crys.'), loc='best')
    plt.tight_layout()
    plt.show()
updated versions of the batchin CSVs used to construct the transit network for the Transit Modernization Model. A new network can then be constructed using the altered batchin files to model the transit improvements. ''' import os import sys import arcpy import csv import TMM # ----------------------------------------------------------------------------- # Set parameters. # ----------------------------------------------------------------------------- input_dir = TMM.input_dir output_dir = TMM.ensure_dir(TMM.output_dir) scen = 100 # Year 2010 tod_periods = range(1, 9) # 1-8 tline_table = os.path.join(TMM.gdb, 'extra_attr_tlines') node_table = os.path.join(TMM.gdb, 'extra_attr_nodes') bus_node_attr_csv_in = os.path.join(input_dir, 'bus_node_extra_attributes.csv') rail_node_attr_csv_in = os.path.join(input_dir, 'rail_node_extra_attributes.csv') tline_easeb_csv_in = os.path.join(input_dir, 'boarding_ease_by_line_id.csv') tline_prof_csv_in = os.path.join(input_dir, 'productivity_bonus_by_line_id.csv') tline_relim_csv_in = os.path.join(input_dir, 'relim_by_line_id.csv') bus_node_attr_csv_out = bus_node_attr_csv_in.replace(input_dir, output_dir) rail_node_attr_csv_out = rail_node_attr_csv_in.replace(input_dir, output_dir)
stores updated policies for all bus stops and train stations, for any that are currently selected. Must be run from within ArcMap. ''' import os import sys import arcpy import TMM # Set parameters: nodes_lyr = arcpy.GetParameterAsText(0) policy_values = [arcpy.GetParameter(i+1) for i in xrange(len(TMM.node_fields))] ignore_zeroes = arcpy.GetParameter(len(policy_values)+1) # Verify some features are selected, otherwise fail: if not TMM.check_selection(nodes_lyr): TMM.die('You must select at least one feature from "{0}" before continuing. (If you want the policy changes to be regionwide, please select all features.)'.format(nodes_lyr)) # Iterate through extra_attr_nodes table, updating rows for selected features: selected_nodes = [str(row[0]) for row in arcpy.da.SearchCursor(nodes_lyr, [TMM.node_id_int_field])] node_table = os.path.join(TMM.gdb, 'extra_attr_nodes') with arcpy.da.UpdateCursor(node_table, TMM.node_fields, ''' "NODE_ID" IN ({0}) '''.format(",".join(selected_nodes))) as cursor: for row in cursor: for i in xrange(len(row)): if policy_values[i] != 0 or not ignore_zeroes: row[i] = policy_values[i] cursor.updateRow(row)
creating tables to store specific policy-based extra attributes for each unique feature. ''' import os import sys import arcpy import TMM # Set parameters: shp_root_dir = arcpy.GetParameterAsText(0) # 'C:\\WorkSpace\\TransitModernizationModel\\TMM_Test\\Media' # Create geodatabase: arcpy.AddMessage('\nCreating geodatabase {0}...\n'.format(TMM.gdb)) TMM.delete_if_exists(TMM.gdb) arcpy.CreateFileGDB_management(TMM.gdb_dir, TMM.gdb_name) # Create TOD-specific FDs and FCs from shapefiles and identify unique node/tline IDs: unique_nodes = set() unique_tlines = set() for tod in (1, 2, 3, 4, 5, 6, 7, 8): arcpy.AddMessage('TOD {0}:'.format(tod)) shp_dir = os.path.join(shp_root_dir, 'Scenario_10{0}'.format(tod)) tod_fd_name = 'tod_{0}'.format(tod) tod_fd = os.path.join(TMM.gdb, tod_fd_name) day_fd_name = 'tod_all' day_fd = os.path.join(TMM.gdb, day_fd_name)