def import_to_csv():
    """Re-export the three raw import dumps as their matching CSV files.

    Reads all three dumps first, then writes all three outputs, mirroring
    the read/write ordering of the original implementation.
    """
    person_rows = read_csv('personImport.txt')
    post_rows = read_csv('postImport.txt')
    friend_rows = read_csv('friendImport.txt')
    write_csv('person.csv', person_rows)
    write_csv('post.csv', post_rows)
    write_csv('friend.csv', friend_rows)
def lookup(title):
    """Print the title and body of every article whose title matches.

    Parameters:
        title: exact title string to search for in article.csv.

    Returns:
        The full list of article rows (original interface preserved: the
        complete table is returned whether or not a match was printed).
    """
    articles = read_csv('article.csv')
    for row in articles:
        # Column layout inferred from the original index arithmetic:
        # row[1] is the title, row[2] is the body -- TODO confirm schema.
        # The len() guard fixes a latent IndexError the original had when a
        # row contained a title (index 1) but no body field (index 2).
        if len(row) > 2 and row[1] == title:
            print("Title: " + row[1] + " \nBody: " + row[2])
    return articles
def test_all_users_from(path):
    """Create and exercise a user for every row of the CSV at `path`.

    Each row is expected to carry (first name, last name, email) in its
    first three fields.
    """
    for row in read_csv(path):
        candidate = create_user(row[0], row[1], row[2])
        test_user(candidate)
def change_body(title, new_body):
    """Replace the body of every article titled `title` and persist the table.

    Parameters:
        title: exact title string to match (field 1 of each row).
        new_body: replacement body text (written into field 2).

    Returns:
        The updated list of article rows (also printed, as the original did).
    """
    articles = read_csv('article.csv')
    for row in articles:
        # row[1] = title, row[2] = body (same layout the original indexed).
        # Guard fixes a latent IndexError on rows without a body field.
        if len(row) > 2 and row[1] == title:
            row[2] = new_body
    write_csv('article.csv', articles)
    print(articles)
    return articles
def select_articles():
    """Print and return the article titles as a comma-separated string.

    Returns:
        A string of the form "t1, t2, " -- the trailing ", " after the last
        title is preserved from the original concatenation loop.
    """
    articles = read_csv('article.csv')
    # join() replaces the original quadratic string += loop; rows without a
    # title field (len < 2) are skipped, exactly as the b == 1 check did.
    arts = "".join(row[1] + ", " for row in articles if len(row) > 1)
    print("Available Articles: " + arts)
    return arts
def display_author_list():
    """Print and return author name fields (everything that is not an email).

    A field is treated as a name when it does not contain '@'. Returns a
    string of the form "name1, name2, " (trailing separator preserved).
    """
    authors = read_csv('author.csv')
    # join() replaces the original quadratic string += loop.
    z = "".join(field + ', '
                for row in authors
                for field in row
                if '@' not in field)
    print(z)
    return z
def print_author_email():
    """Print and return author email fields (everything containing '@').

    Returns a string of the form "a@x, b@y, " (trailing separator
    preserved from the original loop).
    """
    authors = read_csv('author.csv')
    # join() replaces the original quadratic string += loop; mirrors
    # display_author_list with the '@' test inverted.
    z = "".join(field + ', '
                for row in authors
                for field in row
                if '@' in field)
    print(z)
    return z
def change_email(old_email, new_email):
    """Replace every occurrence of `old_email` with `new_email` and persist.

    Parameters:
        old_email: exact field value to replace (searched in every field).
        new_email: replacement value.

    Returns:
        The updated list of author rows (also printed, as the original did).
    """
    authors = read_csv('author.csv')
    for row in authors:
        # enumerate replaces the original range(len(...)) index loop; the
        # index is still needed because the row is mutated in place.
        for idx, field in enumerate(row):
            if field == old_email:
                row[idx] = new_email
    print(authors)
    write_csv('author.csv', authors)
    return authors
def delete_author(name):
    """Blank out an author's name field and the field following it.

    Rows are blanked, not removed -- behavior preserved from the original.
    The field after the name is presumably the email; confirm schema.

    Parameters:
        name: exact name field value to blank.

    Returns:
        The updated list of author rows (also printed, as the original did).
    """
    authors = read_csv('author.csv')
    for row in authors:
        for idx, field in enumerate(row):
            if field == name:
                row[idx] = ""
                # Guard fixes the original's unconditional row[idx + 1]
                # write, which raised IndexError when the matched name was
                # the last field of its row.
                if idx + 1 < len(row):
                    row[idx + 1] = ""
    print(authors)
    write_csv('author.csv', authors)
    return authors
def author_exists(name):
    """Report whether `name` appears in any field of author.csv.

    Parameters:
        name: exact field value to look for.

    Returns:
        "<name> is in the list!" on a match, otherwise "Name not in list"
        (the message is also printed, as the original did).
    """
    authors = read_csv('author.csv')
    # any() replaces the original flag-variable loop; the two message
    # strings are preserved byte-for-byte. Dead commented-out code from the
    # original ("Failing Test" block) has been removed.
    if any(field == name for row in authors for field in row):
        z = name + " is in the list!"
    else:
        z = "Name not in list"
    print(z)
    return z
] inner_cut = [0.0170, 0.0800, 0.0500, 0.0, 0.057, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] outer_cut = [ 0.0700, 0.2500, 0.1400, 0.3000, 0.180, 0.2100, 0.2400, 0.1350, 0.1900, 0.1400, 0.0870 ] background_cut = [ 0.0800, 0.3000, 0.1700, 0.4000, 0.2500, 0.2800, 0.3300, 0.2000, 0.2500, 0.1800, 0.1200 ] outer_back_cut = 10.0 for i in range(len(datafile)): if i == 7: continue if skip[i] == 1: continue data = f.read_csv(datafile[i]) clus_data = gce.make_cut(data, center_ra[i], center_dec[i], inner_cut[i], outer_cut[i]) back_data = gce.make_cut(data, center_ra[i], center_dec[i], background_cut[i], outer_back_cut) clus_out = datafile[i][:-4] + '_cluster.csv' back_out = datafile[i][:-4] + '_background.csv' if (f.write_csv(clus_data, clus_out)) == 1: print '#-cluster file', datafile[ i], 'successfully cut and saved as', clus_out else: print '!!!AN ERROR OCCURED - FILE NOT CUT CORRECTLY!!!' if (f.write_csv(back_data, back_out)) == 1: print '#-data file', datafile[ i], 'successfully cut and saved as', back_out else:
#! /usr/bin/env python #'Bang' line - modify as needed import files as f import GCplot_enc as gcp """This script quickly loads data and calls plotting functions. Matthew Newby (RPI), Jan 11, 2011 """ main_data = f.read_csv('HR_NGC_5053_cluster.csv') back_data = f.read_csv('HR_NGC_5053_background.csv') gcp.plot_infiles(main_data, back_data, GCname='5053', to_file=1)
def read_dist(csv_file):
    """Load a (degree, count) CSV into a defaultdict(int) keyed by degree.

    Parameters:
        csv_file: path passed straight to read_csv (values parsed as int).

    Returns:
        defaultdict(int) mapping degree -> count; missing degrees read as 0.
    """
    # defaultdict accepts an iterable of pairs directly, replacing the
    # original element-by-element assignment loop.
    return defaultdict(int, read_csv(csv_file, int))
def user_list():
    """Return every row of user.csv."""
    return read_csv('user.csv')

# Module-level demo: load the table once and echo it.
userList = user_list()
print(userList)
# NOTE(review): excerpt of a longer script -- NAME, SKIP, CONVOLVE, DISTANCE,
# AREA_IN, AREA_BACK, HIGH_CUTS, datafile, backfile, f, gce, and sc are
# defined earlier in the full file, and the inner loop body continues past
# the end of this excerpt.
plot_file = 1
# One outer pass per cluster (clusters flagged in SKIP are ignored), one
# inner pass per convolution distance configured for that cluster.
for cluster in range(len(NAME)):
    if (SKIP[cluster] == 1): continue
    for run in range(len(CONVOLVE[cluster])):
        gc_name = NAME[cluster] +'_'+ str(CONVOLVE[cluster][run])
        con_dist = CONVOLVE[cluster][run]
        real_dist = DISTANCE[cluster]
        in_area = AREA_IN[cluster]
        back_area = AREA_BACK[cluster]
        if (HIGH_CUTS != []): high_cut = HIGH_CUTS[run]
        print '***** Starting run', gc_name, '*****'
        main_data = f.read_csv(datafile[cluster])
        back_data = f.read_csv(backfile[cluster])
        con_clus = gce.convolve(main_data, real_dist, con_dist)
        #don't convolve background! Background is static!!!
        #Actually, DO convolve background! need to accurately remove it from cluster!
        # If cluster moves before subtraction, background needs to move, too!
        con_back = gce.convolve(back_data, real_dist, con_dist)
        # NOTE(review): the background selection below uses the UNconvolved
        # back_data while con_back is computed but never read in this
        # excerpt -- possibly a leftover; confirm intent.
        con_clus_f = gce.select_stars(con_clus, low_limit = 0.1)
        con_back_f = gce.select_stars(back_data, low_limit = 0.1)
        lc,wc = con_clus_f.shape
        lb,wb = con_back_f.shape
        cluster_x = sc.zeros(lc)
        background_x = sc.zeros(lb)
r[i] = data_in[i][3] m = sc.zeros((length,4)) for i in range(length): m[i,0] = data_in[i][0] m[i,1] = data_in[i][1] m[i,2] = g[i] m[i,3] = r[i] return m data_out = [] for cluster in range(len(NAME)): if (SKIP[cluster] == 1): continue con_dist = DISTANCE[cluster] real_dist = DISTANCE[cluster] gc_name = NAME[cluster] main_data = f.read_csv(datafile[cluster]) l, w = main_data.shape print '#-Starting run', gc_name print '#-Total number of stars in cluster:', l ''' Sort the Data ''' blue, yellow, red = [], [], [] for i in range(l): """Remove stars from outside interesting range""" if ( (main_data[i,2] - 5.*(m.log10(real_dist*1000) - 1.) ) < low_cut ): continue if ( (main_data[i,2] - 5.*(m.log10(real_dist*1000) - 1.) ) > high_cut ): continue g_minus_r = (main_data[i,2] - main_data[i,3]) if g_minus_r < BLUE_LIMIT: blue.append(main_data[i,:]) elif g_minus_r > RED_LIMIT: red.append(main_data[i,:]) else:
subs = [] """Plot Initializations""" fig = plt.figure() plt.subplots_adjust(hspace=0.001, wspace=0.001) #plt.title(save_name) #plt.xlabel('g-r') #plt.ylabel('Mg') for i in range(len(gc_names)): name = gc_names[i] distance = gc_distances[i] #metal = gc_metals[i] HB_limit = cutoff[i] """Get Data""" if (i == 10): data_iso = f.read_data(('Iso_new_Pal5.dat')) data_clus = f.read_csv('noU_Pal5_cluster.csv') #'HR_Pal_5_cluster.csv' data_fid = f.read_data(('Pal5_2_fiducial_out.txt')) else: data_iso = f.read_data(('Iso_new_' + name + '.dat')) #data_iso = f.read_data( ('Iso_series_'+name+'_A.dat') ) data_clus = f.read_csv('noU_NGC_' + name + '_cluster.csv') #'HR_NGC_'+name+'_cluster.csv' data_fid = f.read_data( ('NGC_' + name + '_' + str(fid_res[i]) + '_fiducial_out.txt')) """Setup Data for Plotting""" mm = -0.01463023 bb = 0.08928602 iso_x_list, iso_y_list = [], [] for j in range(len(data_iso[:, 8])): if (data_iso[j, 8] > HB_limit): iso_x_list.append((data_iso[j, 8] - data_iso[j, 9]) +
def person_list():
    """Return every row of person.csv."""
    rows = read_csv('person.csv')
    return rows
import math as m
import numpy as np
import scipy as sc
import files as f
'''python script for finding SEGUE plate ra, dec.
Matthew Newby, Aug 16, 2010'''
data = f.read_csv('SEGUE_t2_mnewby.csv')
#plate,fiberid,ra,dec,l,b,feha,teffa,logga,alphafe,elodierv
length, width = data.shape
#finding number and ids of unique plates
# Collect the unique plate ids (column 0) in first-seen order.
plate_id = []
for i in range(length):
    if (plate_id.count(data[i,0])==0):
        plate_id.append(data[i,0])
print len(plate_id), plate_id
#finding min and max ra and dec for each plate
# For each plate, compute [ra_min, ra_max, dec_min, dec_max], with minima
# floored and maxima floored-plus-one so the box covers whole degrees.
plate_values = []
for i in range(len(plate_id)):
    # Sentinel extremes; any real coordinate will replace them.
    field = [1000, -1000, 1000, -1000]
    for j in range(length):
        if (data[j,0]==plate_id[i]):
            if (data[j,2] < field[0]): field[0]=int(data[j,2])
            if (data[j,2] > field[1]): field[1]=(int(data[j,2])+1)
            if (data[j,3] < field[2]): field[2]=int(data[j,3])
            if (data[j,3] > field[3]): field[3]=(int(data[j,3])+1)
    plate_values.append(field)
    #print '(( ra BETWEEN', field[0], 'AND', field[1], \
    #') AND ( dec BETWEEN', field[2], 'AND', field[3], ')) OR'
print 'All plates searched'
def author_list():
    """Return every row of author.csv."""
    rows = read_csv('author.csv')
    return rows
def read_friendships():
    """Read user_friends.csv and return its rows.

    Bug fixed: the original called read_csv and discarded the result,
    returning None. Returning the rows matches the sibling *_list()
    helpers and is backward-compatible (callers ignoring the old None
    return are unaffected).
    """
    return read_csv('user_friends.csv')
def friend_list():
    """Return every row of friend.csv."""
    rows = read_csv('friend.csv')
    return rows
def read_user_posts(first_name, last_name):
    """Return the (number, content) fields of the user's first logged post.

    Bug fixed: the original returned `a[0, 1]` where `a` was never defined,
    raising NameError on any non-empty log. NOTE(review): returning the
    first two fields of the first row is the closest working reading of the
    original [0, 1] indexing -- confirm intended behavior with callers.

    Returns:
        The first row's first two fields, or None when the log is empty.
    """
    for row in read_csv(first_name + '_' + last_name + '_postlogger.csv'):
        return row[:2]
    return None
#HIGH_CUTS = [6.3, 6.3, 6.5, 6.5, 7.1, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0] #7089, short series plot_file = 1 for cluster in range(len(NAME)): if (SKIP[cluster] == 1): continue for run in range(len(CONVOLVE[cluster])): gc_name = NAME[cluster] + '_' + str(CONVOLVE[cluster][run]) con_dist = CONVOLVE[cluster][run] real_dist = DISTANCE[cluster] in_area = AREA_IN[cluster] back_area = AREA_BACK[cluster] if (HIGH_CUTS != []): high_cut = HIGH_CUTS[run] print '***** Starting run', gc_name, '*****' main_data = f.read_csv(datafile[cluster]) back_data = f.read_csv(backfile[cluster]) con_clus = gce.convolve(main_data, real_dist, con_dist) #don't convolve background! Background is static!!! #Actually, DO convolve background! need to accurately remove it from cluster! # If cluster moves before subtraction, background needs to move, too! con_back = gce.convolve(back_data, real_dist, con_dist) con_clus_f = gce.select_stars(con_clus, low_limit=0.1) con_back_f = gce.select_stars(back_data, low_limit=0.1) lc, wc = con_clus_f.shape lb, wb = con_back_f.shape cluster_x = sc.zeros(lc) background_x = sc.zeros(lb)
def read_user_single_post(post_num):
    """Return the content of the post whose number field equals `post_num`.

    Bugs fixed: the original iterated `for i in posts[i]` (using `i` before
    it was ever bound -- NameError) and compared the whole row list
    `posts[0]` against post_num instead of a row's number field.

    Relies on module-level first_name/last_name, as the original did --
    TODO confirm those globals exist where this is called.

    Returns:
        The matching row's content field (index 1), or None if not found.
    """
    posts = read_csv(first_name + '_' + last_name + '_postlogger.csv')
    for row in posts:
        if row[0] == post_num:
            return row[1]
    return None
# NOTE(review): excerpt of a longer script -- NAME, CONVOLVE, f, gce, m, and
# sc come from earlier in the full file, and the loop body likely continues
# past the end of this excerpt.
datafile = 'HR_NGC_5904_cluster.csv'
backfile = 'HR_NGC_5904_background.csv'
real_dist = 7.3        # distance to NGC 5904 -- presumably kpc; confirm
in_area = 0.076        # cluster cut area -- units not shown here; confirm
back_area = 1.8537     # background cut area
cut_hist = 1
low_cut = 2.0
high_cut = 7.0
plot_file = 1
# One run per configured convolution distance.
for run in range(len(NAME)):
    gc_name = NAME[run]
    con_dist = CONVOLVE[run]
    print '***** Starting run', gc_name, '*****'
    main_data = f.read_csv(datafile)
    back_data = f.read_csv(backfile)
    # Convolve both cluster and background to the test distance, then keep
    # only the selected stars from each.
    con_clus = gce.convolve(main_data, real_dist, con_dist)
    con_back = gce.convolve(back_data, real_dist, con_dist)
    con_clus_f = gce.select_stars(con_clus)
    con_back_f = gce.select_stars(con_back)
    lc,wc = con_clus_f.shape
    lb,wb = con_back_f.shape
    cluster_x = sc.zeros(lc)
    background_x = sc.zeros(lb)
    # Column 2 minus 5*(log10(distance_pc) - 1): converts apparent to
    # absolute magnitude via the distance modulus (distance in kpc * 1000).
    for i in range(lc):
        cluster_x[i] = ( con_clus_f[i,2] - 5.*(m.log10(real_dist*1000) - 1.) )
    for i in range(lb):
        background_x[i] = ( con_back_f[i,2] - 5.*(m.log10(real_dist*1000) - 1.) )
def delete_post(post_num):
    """Remove the post whose number field equals `post_num` from the log.

    Bugs fixed: the original iterated `for i in posts[i]` (NameError --
    `i` was never bound), compared the whole row list against post_num, and
    unconditionally deleted row index 1 rather than the matched row.

    NOTE(review): like the original, the modified list is NOT written back
    to the CSV -- confirm whether a write_csv call is intended here.
    Relies on module-level first_name/last_name, as the original did.
    """
    posts = read_csv(first_name + '_' + last_name + '_postlogger.csv')
    for idx, row in enumerate(posts):
        if row[0] == post_num:
            del posts[idx]
            break
def user_list():
    """Return every row of user.csv."""
    rows = read_csv('user.csv')
    return rows
def read_users():
    """Read users.csv and return its rows.

    Bug fixed: the original called read_csv and discarded the result,
    returning None. Returning the rows is backward-compatible (callers
    ignoring the old None return are unaffected).
    """
    return read_csv('users.csv')
st_dev = 0.0 for j in range(len(in_list)): st_dev = st_dev + ((in_list[j] - mu)**2) return np.sqrt(st_dev/float(len(in_list))) #initial parameters; this would be a good point for a wrapper. data_file = 'HR_Pal_5_cluster.csv' cluster_name = 'Pal5_2' distance = 21.0 #starting distance step_size = 0.2 #smaller gives more points, but larger errors save_data = 1 #saves isocrone files as output if set to 1 #initializations plot_title = cluster_name + ', fiducial fit(green)' file_str = 'HR_fiducial_' + cluster_name + '.ps' #plot file string #load in and prepare SDSS data gc_data = f.read_csv(data_file) gc_l, gc_w = gc_data.shape gc_array = sc.zeros((gc_l,2), float) gc_array[:,0] = gc_data[:,2] - 5.*(m.log10(distance*1000) - 1.0) #'g' values gc_array[:,1] = (gc_data[:,2] - gc_data[:,3]) #'g-r' values bounds = sc.arange(1.0, 7.0, step_size) #break up data CMD into strips in Mg #reduce Mg strips - reject outliers, get mean, st_dev for each strip #Note - if distances are fitted, then this part needs to be done every iteration. fit_array = sc.zeros((len(bounds), 3),float) #average g, g-r mean, g-r st_dev for i in range(1, len(bounds)): star_list, g_list = [], [] for j in range(gc_l): if ( (gc_array[j,0] < bounds[i]) and (gc_array[j,0] >= bounds[i-1])): #Do standard deviation in g, too? g_list.append(gc_array[j,0])
def create_post(post_content):
    """Append a new [number, content] row to the user's post log and persist.

    Bug fixed: the original's `for i in posts: num = i` left `num` bound to
    the last whole ROW (a list), so the appended record was [row, content]
    instead of [number, content]. The new post is numbered last-number + 1
    (1 for an empty log) -- TODO confirm this numbering scheme against
    existing log files.

    Relies on module-level first_name/last_name, as the original did.
    """
    path = first_name + '_' + last_name + '_postlogger.csv'
    posts = read_csv(path)
    if posts:
        num = int(posts[-1][0]) + 1
    else:
        num = 1
    posts.append([num, post_content])
    write_csv(path, posts)
def post_list():
    """Return every row of post.csv."""
    rows = read_csv('post.csv')
    return rows
"""Plot Initializations""" fig = plt.figure() plt.subplots_adjust(hspace=0.001, wspace=0.001) #plt.title(save_name) #plt.xlabel('g-r') #plt.ylabel('Mg') for i in range(len(gc_names)): name = gc_names[i] distance = gc_distances[i] #metal = gc_metals[i] HB_limit = cutoff[i] """Get Data""" if (i == 10): data_iso = f.read_data( ('Iso_new_Pal5.dat') ) data_clus = f.read_csv('noU_Pal5_cluster.csv') #'HR_Pal_5_cluster.csv' data_fid = f.read_data( ('Pal5_2_fiducial_out.txt') ) else: data_iso = f.read_data( ('Iso_new_'+name+'.dat') ) #data_iso = f.read_data( ('Iso_series_'+name+'_A.dat') ) data_clus = f.read_csv('noU_NGC_'+name+'_cluster.csv') #'HR_NGC_'+name+'_cluster.csv' data_fid = f.read_data( ('NGC_'+name+'_'+str(fid_res[i])+'_fiducial_out.txt') ) """Setup Data for Plotting""" mm = -0.01463023 bb = 0.08928602 iso_x_list, iso_y_list = [], [] for j in range(len(data_iso[:,8])): if (data_iso[j,8] > HB_limit): iso_x_list.append((data_iso[j,8] - data_iso[j,9]) + mm*data_iso[j,8] + bb) iso_y_list.append( data_iso[j,8] )
""" # Get Data #Data is: ra, dec, g, r, u, flags filename = 'noU_NGC_6205.csv' name = '6205_test' center_ra, center_dec = 250.423, 36.460 #NGC 6205 #center_ra, center_dec = 229.013, -0.123 #Pal 5 radii_step = 0.04 steps = 25 suffix = filename[-4:] if suffix == '.txt': data = f.read_data(filename) elif suffix == '.csv': data = f.read_csv(filename) '''Initial Skyplot''' plt.figure(1) data_1 = data[:, 0] data_2 = data[:, 1] #Strange indices are due to phantom liast point - not in data, but added by code somehow... plt.scatter(data_1[:-1], data_2[:-1], 1, 'k', 'o') plt.xlabel('ra') plt.ylabel('dec') plt.axis('scaled') y_limits = plt.ylim() x_limits = plt.xlim() plot_file = name + '_initial_skyplot.ps' plt.savefig(plot_file, papertype='letter') plt.close('all')
'wide_7089.csv', 'wide_Pal5.csv' ] center_ra = [182.525, 198.228, 199.109, 205.545, 211.36, 229.641, 250.423, 259.1680, 322.493, 323.362, 229.013] center_dec = [18.530, 18.164, 17.697, 28.376, 28.53, 2.083, 36.460, 43.1033, 12.167, -0.826, -0.123] inner_cut = [0.0170, 0.0800, 0.0500, 0.0, 0.057, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] outer_cut = [0.0700, 0.2500, 0.1400, 0.3000, 0.180, 0.2100, 0.2400, 0.1350, 0.1900, 0.1400, 0.0870] background_cut = [0.0800, 0.3000, 0.1700, 0.4000, 0.2500, 0.2800, 0.3300, 0.2000, 0.2500, 0.1800, 0.1200] outer_back_cut = 10.0 for i in range(len(datafile)): if i == 7: continue if skip[i] == 1: continue data = f.read_csv(datafile[i]) clus_data = gce.make_cut(data, center_ra[i], center_dec[i], inner_cut[i], outer_cut[i]) back_data = gce.make_cut(data, center_ra[i], center_dec[i], background_cut[i], outer_back_cut) clus_out = datafile[i][:-4]+'_cluster.csv' back_out = datafile[i][:-4]+'_background.csv' if (f.write_csv(clus_data, clus_out)) == 1: print '#-cluster file', datafile[i], 'successfully cut and saved as', clus_out else: print '!!!AN ERROR OCCURED - FILE NOT CUT CORRECTLY!!!' if (f.write_csv(back_data, back_out)) == 1: print '#-data file', datafile[i], 'successfully cut and saved as', back_out else: print '!!!AN ERROR OCCURED - FILE NOT CUT CORRECTLY!!!' print '#---All Done'
plt.close('all') return 1 if __name__ == '__main__': print '#- number of arguments:', (len(sys.argv)-1) # Read arguments filename = sys.argv[1] column = int(sys.argv[2]) size = float(sys.argv[3]) if len(sys.argv) > 4: spread = [float(sys.argv[4]), float(sys.argv[5])] else: spread = [] if len(sys.argv) > 6: name = sys.argv[6] else: name = 'quick' # Load Data suffix = filename[-4:] if suffix == '.txt': data = f.read_data(filename) elif suffix == '.csv': data = f.read_csv(filename) # Run scripts to_bin = data[:,column] reg_hist = make_hist(to_bin, size, spread) plot_histogram(reg_hist, size, name=(name+'_normal') ) f.write_data(reg_hist, fileout=(name+'_normal.txt'), header='# Centers, Counts') cum_hist = cumulative_hist(to_bin, size, spread) plot_histogram(cum_hist, size, name=(name+'_cumulative') ) f.write_data(cum_hist, fileout=(name+'_cumulative.txt'), header='# Centers, Counts') print '#done with quick histograms'
for j in range(len(in_list)): st_dev = st_dev + ((in_list[j] - mu)**2) return np.sqrt(st_dev / float(len(in_list))) #initial parameters; this would be a good point for a wrapper. iso_file = '6205_isocrone_12.3gyr_1.41feH.dat' data_file = 'HR_NGC_6205_cluster.csv' cluster_name = 'NGC_6205_CG97_clean2' distance = 7.7 #starting distance save_data = 0 #saves isocrone files as output if set to 1 #initializations #plot_title = cluster_name + ', isocrone(blue) and fiducial fit(green)' file_str = 'HRISO_compare_' + cluster_name #plot file string #load in and prepare SDSS data gc_data = f.read_csv(data_file) gc_l, gc_w = gc_data.shape gc_array = sc.zeros((gc_l, 2), float) gc_array[:, 0] = gc_data[:, 2] - 5. * (m.log10(distance * 1000) - 1.0) #'g' values gc_array[:, 1] = (gc_data[:, 2] - gc_data[:, 3]) #'g-r' values #load in and prepare isocrone data iso_data = f.read_data(iso_file) #<--might fail due to leading whitespace? iso_l, iso_w = iso_data.shape iso_in = sc.zeros((iso_l, 2), float) iso_in[:, 0] = iso_data[:, 8] #'Mg' values iso_in[:, 1] = (iso_data[:, 8] - iso_data[:, 9]) #'g-r' values #Chop up isocrone so that the bounds are equal gminusr = 0.6 for i in range(1, iso_l):
import math as m
import numpy as np
import scipy as sc
import files as f
import gctools_enc as gce
import gauss_fit_enc as gfe
import GCplot_enc as gcp
'''python script for running convolve analysis scripts
Matthew Newby, June 6, 2010'''
# Pal 5 cluster data, convolved from its real distance out to each test
# distance in CONVOLVE; the whole sweep repeats repeat_runs times.
datafile = 'HR_Pal_5_cluster.csv'
NAME = ['Pal5_ah23', 'Pal5_ah24', 'Pal5_ah25', 'Pal5_ah26', 'Pal5_ah27',
        'Pal5_ah28']
CONVOLVE = [23.2, 24.2, 25.2, 26.2, 27.2, 28.2]
real_dist = 23.2
repeat_runs = 3
print '#', CONVOLVE
print '#-Total iterations:', repeat_runs
for i in range(repeat_runs):
    for run in range(len(NAME)):
        gc_name = NAME[run]
        con_dist = CONVOLVE[run]
        # ('distace' typo below is in a runtime string; left untouched.)
        print '#***** Starting run', gc_name, 'from distace', real_dist, \
            'to distance:', con_dist ,'*****'
        main_data = f.read_csv(datafile)
        con_clus = gce.convolve(main_data, real_dist, con_dist)
        # con_analysis returns 1 on success -- presumably writes its own
        # output files; confirm in gctools_enc.
        if gce.con_analysis(main_data, con_clus,
                            right_limit=0.3, left_limit=0.1) == 1:
            print '#*****', gc_name, '-process complete*****'
print '#---ALL RUNS COMPLETE---'