def assoc_catalogs_opt(c1, c2, tolerance=1 / 18000.):
    mag_tuples = []
    flux_tuples = []
    cat1 = asciidata.open(c1)
    cat2 = asciidata.open(c2)
    for i in range(cat2.nrows):
        cat2['ASSOC'][i] = -1
    for i in range(cat1.nrows):
        cat1['ASSOC'][i] = -1
    alpha1 = [cat1['ALPHA_SKY'][i] for i in range(cat1.nrows)]
    alpha2 = [cat2['ALPHA_SKY'][i] for i in range(cat2.nrows)]
    delta1 = [cat1['DELTA_SKY'][i] for i in range(cat1.nrows)]
    delta2 = [cat2['DELTA_SKY'][i] for i in range(cat2.nrows)]
    for i in range(cat1.nrows):
        match = [abs(alpha1[i] - alpha2[j]) < tolerance and
                 abs(delta1[i] - delta2[j]) < tolerance
                 for j in range(cat2.nrows)]
        try:
            idx = match.index(True)
            if cat2['ASSOC'][idx] == -1 \
                    and cat1['SNR'][i] > 20 \
                    and cat2['SNR'][idx] > 20 \
                    and cat1['MAG_AUTO'][i] + 21.1 < 22.5 \
                    and cat2['MAG_AUTO'][idx] + 21.1 < 22.5 \
                    and cat1['FLUX_RADIUS'][i] > 0 \
                    and cat2['FLUX_RADIUS'][idx] > 0 \
                    and cat1['IS_STAR'][i] == 0 \
                    and cat2['IS_STAR'][idx] == 0 \
                    and cat1['FLUX_RADIUS'][i] < 500 \
                    and cat2['FLUX_RADIUS'][idx] < 500:
                cat1['ASSOC'][i] = i
                cat2['ASSOC'][idx] = i
                mag_tuples.append((cat1['MAG_AUTO'][i], cat2['MAG_AUTO'][idx]))
                flux_tuples.append((cat1['FLUX_RADIUS'][i],
                                    cat2['FLUX_RADIUS'][idx]))
        except ValueError:
            # match.index(True) raised: no positional match in cat2.
            continue
    # Return the matched pairs (the lists were built but otherwise discarded).
    return mag_tuples, flux_tuples

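# A minimal usage sketch (the catalog names are hypothetical; both catalogs
# must carry the ASSOC, ALPHA_SKY, DELTA_SKY, SNR, MAG_AUTO, FLUX_RADIUS and
# IS_STAR columns referenced above). The default tolerance of 1/18000 deg
# corresponds to 0.2 arcsec:
#
#   mag_pairs, flux_pairs = assoc_catalogs_opt('visit1.cat', 'visit2.cat')
#   print len(mag_pairs), "objects matched within 0.2 arcsec"
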
def check_adjacent(base_catalog, all_catalogs, check_indices,
                   tolerance=1.0 / 18000):
    nDeleted = 0
    base = asciidata.open(base_catalog)
    base_alpha = [base['ALPHA_SKY'][i] for i in range(base.nrows)]
    base_delta = [base['DELTA_SKY'][i] for i in range(base.nrows)]
    for index in check_indices:
        nDeletedInd = 0
        print "Checking for overlaps in the base catalog", base_catalog
        print "Checking against catalog", all_catalogs[index]
        check = asciidata.open(all_catalogs[index])
        check_alpha = [check['ALPHA_SKY'][i] for i in range(check.nrows)]
        check_delta = [check['DELTA_SKY'][i] for i in range(check.nrows)]
        check_number = [check['NUMBER'][i] for i in range(check.nrows)]
        delete_numbers = []
        for j in range(check.nrows):
            if j % 1000 == 0:
                print "Checking item", j
            item_alpha = check_alpha[j]
            item_delta = check_delta[j]
            item_bools = [(abs(item_alpha - base_alpha[k]) < tolerance and
                           abs(item_delta - base_delta[k]) < tolerance)
                          for k in range(base.nrows)]
            if True in item_bools:
                delete_numbers.append(check_number[j])
                nDeleted += 1
                nDeletedInd += 1
        print nDeletedInd, "objects were deleted in this check."
        delete_items(all_catalogs[index], delete_numbers)
    return nDeleted

def SemenovMeanOpacity(Temp=None, Density=None, Type=None):
    # Type determines the mean opacity type, 0: Rosseland, 1: Planck.
    if Type == 0 or Type is None:
        Whole_Table = np.array(asc.open(Opac_Dir + RossFileName))
    else:
        Whole_Table = np.array(asc.open(Opac_Dir + PlanckFileName))
    rho = Whole_Table[1:, 0]
    T = Whole_Table[0, 1:]
    OpMatrix = Whole_Table[1:, 1:]
    LgInputTemp = np.log10(Temp)
    LgInputDens = np.log10(Density)
    pt1 = (np.abs(rho - LgInputDens)).argmin()
    pt2 = (np.abs(T - LgInputTemp)).argmin()
    if LgInputTemp > 3.87:
        # Above log10(T) = 3.87 (~7400 K) fall back to a constant gas opacity.
        interpval = ConstantGasOpacity
    else:
        outgrid = RectBivariateSpline(rho, T, OpMatrix, kx=1, ky=1)
        interpval = outgrid(rho[pt1], T[pt2])[0][0]
    return interpval

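# Example call (values are illustrative; Opac_Dir, RossFileName,
# PlanckFileName and ConstantGasOpacity must be defined at module level):
#
#   kappa_ross = SemenovMeanOpacity(Temp=1.0e3, Density=1.0e-12, Type=0)
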
def compareLabelLists(labelFile1, labelFile2, magCut=18):
    t = 2006.580

    ## Read in star labels
    tab1 = asciidata.open(labelFile1)
    name1 = [tab1[0][ss].strip() for ss in range(tab1.nrows)]
    mag1 = tab1[1].tonumpy()
    x01 = tab1[2].tonumpy()
    y01 = tab1[3].tonumpy()
    vx1 = tab1[6].tonumpy()
    vy1 = tab1[7].tonumpy()
    t01 = tab1[10].tonumpy()
    x1 = x01 + vx1 * (t - t01) / 10**3
    y1 = y01 + vy1 * (t - t01) / 10**3

    tab2 = asciidata.open(labelFile2)
    name2 = [tab2[0][ss].strip() for ss in range(tab2.nrows)]
    mag2 = tab2[1].tonumpy()
    x02 = tab2[2].tonumpy()
    y02 = tab2[3].tonumpy()
    vx2 = tab2[6].tonumpy()
    vy2 = tab2[7].tonumpy()
    t02 = tab2[10].tonumpy()
    x2 = x02 + vx2 * (t - t02) / 10**3
    y2 = y02 + vy2 * (t - t02) / 10**3

    # Image
    im = pyfits.getdata('/u/ghezgroup/data/gc/06maylgs1/combo/mag06maylgs1_dp_msc_kp.fits')
    imgsize = (im.shape)[0]

    # pixel position (0,0) is at upper left
    xpix = np.arange(0, im.shape[0], dtype=float)
    ypix = np.arange(0, im.shape[1], dtype=float)
    sgra = [1422.6, 1543.8]
    scale_jpg = 0.00995
    xim = (xpix - sgra[0]) * scale_jpg * -1.0
    yim = (ypix - sgra[1]) * scale_jpg

    py.clf()
    py.grid(True)
    py.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)
    py.imshow(np.log10(im), extent=[xim[0], xim[-1], yim[0], yim[-1]],
              aspect='equal', vmin=1.9, vmax=6.0, cmap=py.cm.gray)
    py.xlabel('X Offset from Sgr A* (arcsec)')
    py.ylabel('Y Offset from Sgr A* (arcsec)')
    py.title('UCLA/Keck Galactic Center Group', fontsize=20, fontweight='bold')
    thePlot = py.gca()
    py.axis([15, -15, -15, 15])

    idx2 = np.where(mag2 < magCut)[0]
    py.plot(x2[idx2], y2[idx2], 'ro', color='cyan', mfc='none', mec='cyan')
    for ii in idx2:
        py.text(x2[ii], y2[ii], name2[ii], color='cyan', fontsize=10)

    idx1 = np.where(mag1 < magCut)[0]
    py.plot(x1[idx1], y1[idx1], 'ro', color='orange', mfc='none', mec='orange')
    for ii in idx1:
        py.text(x1[ii], y1[ii], name1[ii], color='orange', fontsize=10)

def go(epoch, limMag=15):
    root = '/u/ghezgroup/data/gc/' + epoch + '/clean/kp/starfinder/align/'
    aln_list = root + 'align_kp_0.0.list'

    #frameList = asciidata.open(aln_list)
    f_list = open(aln_list)
    files = []
    for line in f_list:
        _line = line.split()
        fileParts = _line[0].split('/')
        files.append(fileParts[-1])
    files = files[1:]
    #frames = frameList[0].tonumarray()

    s = starset.StarSet(root + 'align_kp_0.0')
    numstars = asciidata.open(root + 'align_kp_0.0.mag').nrows
    numepochs = asciidata.open(root + 'align_kp_0.0.mag').ncols - 5

    fluxFile = root + '/sgra_all.mag'
    brtFile = root + '/sgra_brt.mag'
    dimFile = root + '/sgra_dim.mag'
    _sgraAll = open(fluxFile, 'w')
    _sgraBrt = open(brtFile, 'w')
    _sgraDim = open(dimFile, 'w')
    _sgraAll.write('#Frame' + ' Mag ' + ' Flux(mJy) ' + '\n')
    _sgraBrt.write('#Frame' + ' Mag ' + ' Flux(mJy) ' + '\n')
    _sgraDim.write('#Frame' + ' Mag ' + ' Flux(mJy) ' + '\n')

    # Find index for Sgr A* in the mag file
    for x in range(numstars):
        if (s.stars[x].name == 'SgrA'):
            sgra_idx = x

    # Loop through epochs and print frame, mags, & fluxes (in mJy)
    for i in range(numepochs):
        mag = s.stars[sgra_idx].e[i].mag
        flux = 655000. * 10**(-0.4 * mag)
        #frame = frames[i]
        frame = files[i]
        _sgraAll.write('%5s % 5.3f %7.2f\n' % (frame, mag, flux))
        if (mag < limMag):
            _sgraBrt.write('%5s % 5.3f %7.2f\n' % (frame, mag, flux))
        else:
            _sgraDim.write('%5s % 5.3f %7.2f\n' % (frame, mag, flux))

    _sgraAll.close()
    _sgraBrt.close()
    _sgraDim.close()

def plotTrans(root):
    """
    Plot the fractional change in plate scale and PA over many different
    starlists that have been aligned. You can either give align results for
    many different epochs or align results for many different cleaned frames
    in a single epoch.

    root - align output
    """
    tab = asciidata.open(root + '.trans')

    a0 = tab[3].tonumarray()
    a0e = tab[4].tonumarray()
    a1 = tab[5].tonumarray()
    a1e = tab[6].tonumarray()
    a2 = tab[7].tonumarray()
    a2e = tab[8].tonumarray()
    b0 = tab[9].tonumarray()
    b0e = tab[10].tonumarray()
    b1 = tab[11].tonumarray()
    b1e = tab[12].tonumarray()
    b2 = tab[13].tonumarray()
    b2e = tab[14].tonumarray()

    trans = []
    for ff in range(len(a0)):
        tt = objects.Transform()
        tt.a = [a0[ff], a1[ff], a2[ff]]
        tt.b = [b0[ff], b1[ff], b2[ff]]
        tt.aerr = [a0e[ff], a1e[ff], a2e[ff]]
        tt.berr = [b0e[ff], b1e[ff], b2e[ff]]
        tt.linearToSpherical(override=False)
        trans.append(tt)

    # Read epochs
    dateTab = asciidata.open(root + '.date')
    numEpochs = dateTab.ncols
    years = [dateTab[i][0] for i in range(numEpochs)]

    # Collect the spherical terms for plotting. The original plotted `scale`
    # and `angle` without ever building them; this assumes linearToSpherical()
    # sets .scale and .angle on each Transform.
    scale = np.array([tt.scale for tt in trans])
    angle = np.array([tt.angle for tt in trans])

    p.clf()
    p.subplot(211)
    p.plot(scale - 1.0, 'ko')
    p.ylabel('Fract. Plate Scale Difference')
    if (years[0] != years[1]):
        thePlot = p.gca()
        thePlot.get_xaxis().set_major_locator(p.MultipleLocator(0.1))
        thePlot.get_xaxis().set_major_formatter(p.FormatStrFormatter('%8.3f'))

    p.subplot(212)
    p.plot(angle, 'ko')
    p.ylabel('Position Angle')
    if (years[0] != years[1]):
        thePlot = p.gca()
        thePlot.get_xaxis().set_major_locator(p.MultipleLocator(0.1))
        thePlot.get_xaxis().set_major_formatter(p.FormatStrFormatter('%8.3f'))

def findBestMask(iterations=100., maxDist=1000., realProfilePath=False):
    import Deimos_SKiMS_slit__def__ as tst
    #
    if realProfilePath:
        try:
            try:
                inputData = numpy.array(asciidata.open(realProfilePath))
                R_as, mag_R, emag_R = inputData[1, :], inputData[2, :], inputData[3, :]
            except:
                inputData = asciidata.open(realProfilePath)
                R_as, mag_R, emag_R = [], [], []
                for ii in numpy.arange(len(inputData[0])):
                    R_as.append(inputData[1][ii])
                    mag_R.append(inputData[2][ii])
                    emag_R.append(inputData[3][ii])
                #
                R_as = numpy.array(R_as); mag_R = numpy.array(mag_R); emag_R = numpy.array(emag_R)
            #
            inputPar = [R_as, mag_R, emag_R]
            # Guesses for minimization:
            # b=7.67 Cai et al. 2008, Reff=47.9 arcsec from Brodie+14,
            # Sersic Index = 4 (de Vaucouleurs)
            initialGuesses = [7.67, gal_Reff, 4]
            #
            bm, Re, m = scipy.optimize.fmin(SersicFunctChi2, initialGuesses,
                                            args=(R_as, mag_R, emag_R),
                                            ftol=0.1, disp=True)
            I0 = mag_R[0]
        except:
            print "Error with SB profile. Using a default Sersic profile."
            realProfilePath = False
    #
    Mask = None  # avoid a NameError if no iteration improves on maxDist
    for ii in arange(iterations):
        t1 = time.time()
        print "\n###########"
        print "Iteration " + str(int(ii + 1)) + "/" + str(int(iterations)) + "\n"
        print "Creating mask..."
        tmpObj = tst.Mask()
        print "\r DONE!"
        print "Creating slits..."
        if realProfilePath:
            tmpObj.createSlits(sersicPar=[bm, Re, m, I0])
        else:
            tmpObj.createSlits()
        print "\r DONE!"
        print "Finding largest empty space between the slits."
        tmpDist = tmpObj.getMaxEmptySpace()
        print "\r DONE!"
        if tmpDist < maxDist:
            maxDist = tmpDist
            ## Adding Sky Slits
            tmpObj.createSkySlits()
            #
            Mask = tmpObj
        t2 = time.time()
        print "Elapsed time: " + str(t2 - t1)
        print "###########\n"
        tmpObj.__del__()
    return Mask, maxDist

def make_SNR(self, out_name):
    catalog = asciidata.open(self.class_catalog)
    for i in range(catalog.nrows):
        catalog['SNR'][i] = catalog['FLUX_AUTO'][i] / catalog['FLUXERR_AUTO'][i]
    catalog['SNR'].set_colcomment("Signal to Noise Ratio")
    catalog.writeto(out_name)

def make_segmentation_map(self, out_name, enlarge=20):
    hdulist = pyfits.open(self.file)
    x_dim = int(hdulist[0].header['NAXIS1'])
    y_dim = int(hdulist[0].header['NAXIS2'])
    hdulist.close()

    positions = []
    catalog = asciidata.open(self.bright_catalog)
    for i in range(catalog.nrows):
        x_min = catalog['XMIN_IMAGE'][i]
        y_min = catalog['YMIN_IMAGE'][i]
        x_max = catalog['XMAX_IMAGE'][i]
        y_max = catalog['YMAX_IMAGE'][i]
        pos_tuple = (x_min, x_max, y_min, y_max)
        positions.append(pos_tuple)

    # Make an empty numpy array of zeros in those dimensions.
    # Rows are y (NAXIS2) and columns are x (NAXIS1) to match the
    # [y, x] indexing below (the original used (x_dim, y_dim), which
    # only works for square images).
    segmentation_map_array = np.zeros((y_dim, x_dim))

    # Iterate through position tuples and switch flagged areas to 1's in the array
    for j in range(len(positions)):
        x_min = (positions[j])[0]
        x_max = (positions[j])[1]
        y_min = (positions[j])[2]
        y_max = (positions[j])[3]
        # Clamp at zero so the enlargement can't wrap to negative indices.
        for x in range(max(x_min - enlarge, 0), x_max + enlarge):
            for y in range(max(y_min - enlarge, 0), y_max + enlarge):
                try:
                    segmentation_map_array[y, x] = 1
                except IndexError:
                    continue

    # Write out to a fits file
    hdu_out = pyfits.PrimaryHDU(segmentation_map_array)
    hdu_out.writeto(out_name + "_seg_map.fits", clobber=True)

def plotKeyword(keyword1, keyword2, imgList):
    """
    Pass in a file containing a list of images. For each of these images,
    read out the values of the header keywords specified. Then plot each
    of the keywords against each other.
    """
    tab = asciidata.open(imgList)

    files = [tab[0][i].strip() for i in range(tab.nrows)]
    value1 = zeros(tab.nrows, dtype=float)
    value2 = zeros(tab.nrows, dtype=float)

    print keyword1, keyword2
    for ff in range(len(files)):
        hdr = pyfits.getheader(files[ff], ignore_missing_end=True)
        value1[ff] = hdr[keyword1]
        value2[ff] = hdr[keyword2]

    import pylab as py
    py.clf()
    py.plot(value1, value2, 'k.')
    py.xlabel(keyword1)
    py.ylabel(keyword2)

    return (value1, value2)

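# Example usage (the list file and keywords are illustrative; the file must
# have one FITS path per row in its first column):
#
#   airmass, itime = plotKeyword('AIRMASS', 'ITIME', 'image_list.txt')
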
def manual_mask(catalog, x_vertices, y_vertices, clean=True):
    orig_name = catalog
    catalog = asciidata.open(catalog)

    delete_numbers = []
    for i in range(catalog.nrows):
        if (i + 1) % 1000 == 0:
            print "Working on object", i + 1, "out of", catalog.nrows
        number = catalog['NUMBER'][i]
        is_star = catalog['IS_STAR'][i]
        if is_star == 1:
            continue
        x_min, y_min = catalog['XMIN_IMAGE'][i], catalog['YMIN_IMAGE'][i]
        x_max, y_max = catalog['XMAX_IMAGE'][i], catalog['YMAX_IMAGE'][i]
        # Walk the bounding-box perimeter and flag the object if any
        # perimeter pixel falls inside the mask polygon.
        bottom_pixels = [(x, y_min) for x in range(x_min, x_max)]
        left_pixels = [(x_min, y) for y in range(y_min, y_max)]
        top_pixels = [(x, y_max) for x in range(x_min, x_max)]
        right_pixels = [(x_max, y) for y in range(y_min, y_max)]
        pixels = bottom_pixels + left_pixels + top_pixels + right_pixels
        bools = [inpoly(pixel[0], pixel[1], x_vertices, y_vertices)
                 for pixel in pixels]
        if max(bools) == 1:
            delete_numbers.append(number)

    print "Delete numbers", delete_numbers
    # (The original created this table twice; once is enough.)
    new_table = asciidata.create(catalog.ncols, catalog.nrows)
    for i in range(catalog.nrows):
        if catalog['NUMBER'][i] not in delete_numbers:
            for k in range(catalog.ncols):
                new_table[k][i] = catalog[k][i]

    # Get rid of empty rows
    row_number = 0
    while True:
        try:
            if new_table[0][row_number] is None:
                new_table.delete(row_number)
            else:
                row_number += 1
        except:
            break

    # Write out to another catalog
    new_table.writeto("manual_filter.cat")
    new_catalog = open("manual_filter.cat")
    old_catalog = open(orig_name)
    final_catalog = open("final_catalog.cat", "w")
    for line in old_catalog.readlines():
        if line[0] == "#":
            final_catalog.write(line)
        else:
            break
    for line in new_catalog:
        final_catalog.write(line)
    old_catalog.close()
    new_catalog.close()
    final_catalog.close()

    final = open("final_catalog.cat", "r")
    rewrite = open(orig_name, "w")
    for line in final.readlines():
        rewrite.write(line)
    final.close()
    rewrite.close()

    # Optional clean
    if clean:
        subprocess.call(["rm", "final_catalog.cat"])
        subprocess.call(["rm", "manual_filter.cat"])

def retrieveFrom_RKriging(inputPath, genTable, namegal, label,
                          sizePixelMap, limits):
    # Retrieving galaxy parameters' dictionary
    # Creating the Kriging maps with Python
    # reading input file
    fileKriging = asciidata.open(inputPath + 'gridKrig_' + label + '.txt')
    xK, yK, zK, errzK = [], [], [], []
    maxZmap = 0.
    minZmap = 0.
    for jj in range(len(fileKriging[0])):
        xK.append(fileKriging[0][jj])
        yK.append(fileKriging[1][jj])
        if fileKriging[2][jj] != 'NA':
            zK.append(float(fileKriging[2][jj]))
            errzK.append(float(fileKriging[3][jj]))
            if float(fileKriging[2][jj]) > maxZmap:
                maxZmap = float(fileKriging[2][jj])
            if float(fileKriging[2][jj]) < minZmap:
                minZmap = float(fileKriging[2][jj])
        else:
            zK.append(numpy.nan)
            errzK.append(numpy.nan)
    #
    # reshaping
    xK = numpy.array(xK).reshape(sizePixelMap, sizePixelMap)
    yK = numpy.array(yK).reshape(sizePixelMap, sizePixelMap)
    zK = numpy.array(zK).reshape(sizePixelMap, sizePixelMap)
    errzK = numpy.array(errzK).reshape(sizePixelMap, sizePixelMap)
    #
    minZpoints, maxZpoints = numpy.min(genTable[:, 2]), numpy.max(genTable[:, 2])
    rangeZmap = [numpy.max([numpy.min([minZpoints, minZmap]), limits[0]]),
                 numpy.min([numpy.max([maxZpoints, maxZmap]), limits[1]])]
    return [xK, yK, zK, errzK], [minZpoints, maxZpoints], rangeZmap

def __init__(self): self.file = tablesDir + "ucla_ott2003.dat" tab = asciidata.open(self.file) self.ourName = [tab[0][d].strip() for d in range(tab.nrows)] self.id = [tab[1][d].strip() for d in range(tab.nrows)] self.name = [tab[2][d].strip() for d in range(tab.nrows)] self.r = tab[3].tonumpy() self.x = tab[4].tonumpy() self.y = tab[5].tonumpy() self.xerr = tab[6].tonumpy() self.yerr = tab[7].tonumpy() self.mag = tab[8].tonumpy() self.magerr = tab[9].tonumpy() self.mHK = tab[10].tonumpy() self.mCO = tab[11].tonumpy() self.vx = tab[12].tonumpy() self.vy = tab[13].tonumpy() self.vz = tab[14].tonumpy() self.vxerr = tab[15].tonumpy() self.vyerr = tab[16].tonumpy() self.vzerr = tab[17].tonumpy() self.type = tab[18].tonumpy() self.fixNames()
def __init__(self): self.file = tablesDir + "ucla_genzel2000.dat" tab = asciidata.open(self.file) self.ourName = [tab[0][d].strip() for d in range(tab.nrows)] self.name = [tab[1][d].strip() for d in range(tab.nrows)] self.r = tab[2].tonumpy() self.x = tab[3].tonumpy() self.y = tab[4].tonumpy() self.vx1 = tab[5].tonumpy() self.vx1err = tab[6].tonumpy() self.vy1 = tab[7].tonumpy() self.vy1err = tab[8].tonumpy() self.vx2 = tab[9].tonumpy() self.vx2err = tab[10].tonumpy() self.vy2 = tab[11].tonumpy() self.vy2err = tab[12].tonumpy() self.vx = tab[13].tonumpy() self.vxerr = tab[14].tonumpy() self.vy = tab[15].tonumpy() self.vyerr = tab[16].tonumpy() self.vz = tab[17].tonumpy() self.vzerr = tab[18].tonumpy() self.fixNames()
def __init__(self, massfile):
    self.file = massfile
    table = asciidata.open(massfile)

    # Date and time are in UT
    self.year = table[0].tonumpy()
    self.month = table[1].tonumpy()
    self.day = table[2].tonumpy()
    self.hour = table[3].tonumpy()
    self.minute = table[4].tonumpy()
    self.second = table[5].tonumpy()
    self.free_seeing = table[6].tonumpy()

    if '2010' in massfile:
        # Values don't exist
        self.isoplanatic_angle = np.zeros(len(self.hour))
        self.tau0 = np.zeros(len(self.hour))

        # Convert from HST to UT
        self.hour += 10
        idx = np.where(self.hour > 24)[0]
        self.day[idx] += 1
        self.hour[idx] -= 24
    else:
        self.isoplanatic_angle = table[18].tonumpy()
        self.tau0 = table[22].tonumpy()  # in milli-sec

    self.timeInHours = self.hour + (self.minute / 60.0) + (self.second / 3600.0)

def __init__(self, dimmfile):
    self.file = dimmfile
    table = asciidata.open(dimmfile)

    # Date and time are in UT
    self.year = table[0].tonumpy()
    self.month = table[1].tonumpy()
    self.day = table[2].tonumpy()
    self.hour = table[3].tonumpy()
    self.minute = table[4].tonumpy()
    self.second = table[5].tonumpy()

    if '2010' in dimmfile:
        self.seeing = table[6].tonumpy()

        # No airmass in new file format
        self.airmass = np.zeros(len(self.hour))

        # Convert from HST to UT
        self.hour += 10
        idx = np.where(self.hour > 24)[0]
        self.day[idx] += 1
        self.hour[idx] -= 24
    else:
        self.airmass = table[8].tonumpy()
        self.seeing = table[9].tonumpy()

    self.r0 = 0.98 * 500e-7 * 206265.0 / self.seeing  # in cm

    self.timeInHours = self.hour + (self.minute / 60.0) + (self.second / 3600.0)

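# The r0 line above is the Fried-parameter relation r0 = 0.98 * lambda / theta,
# with lambda = 500 nm = 500e-7 cm and the seeing theta converted from arcsec
# to radians via the 206265 arcsec/rad factor. As a worked example, 1.0 arcsec
# seeing gives r0 ~ 0.98 * 500e-7 * 206265 / 1.0 ~ 10.1 cm.
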
def velVsAcc():
    """
    Plot v/v_circular vs. a/a_bound.
    """
    # Load up the accelPolar file
    fitFile = rootDir + poly + '.accelPolar'
    scale = 0.00995  # arcsec/pixel

    tab = asciidata.open(fitFile)

    name = tab[0]._data
    radius = tab[2].tonumarray() * scale
    velPhi = tab[5].tonumarray() * scale
    velRad = tab[6].tonumarray() * scale
    velPhiErr = tab[7].tonumarray() * scale
    velRadErr = tab[8].tonumarray() * scale
    acc = tab[10].tonumarray() * scale
    accErr = tab[12].tonumarray() * scale

    # Need to get the line-of-sight velocity from Paumard et al.

    vel = np.sqrt(velPhi**2 + velRad**2)
    velErr = np.sqrt((velPhi * velPhiErr)**2 + (velRad * velRadErr)**2) / vel

def makeHoldenRegionFile(self):
    if self.prefix.find('RDCSJ1317') > -1:
        infile = homedir + 'research/z08clusters/AncillaryData/Holden/r1317/c1317+29.skycat.v2'
        in1 = asciidata.open(infile)
        self.hdata = in1
        ncol = len(in1)
        nrow = len(in1[0])
        redshift = array(in1[11], 'f')
        zindex = where(redshift > -1)
        out1 = homedir + 'research/z08clusters/RegionsFiles/' + self.prefix + '_holdenRADec.reg'
        outfile = open(out1, 'w')
        outfile.write('global color=green width=2\n')
        outfile.write('fk5 \n')
        zi = zindex[0]
        for i in zi:
            print i, zindex, redshift[i], float(in1[11][i])
            if (redshift[i] > zmin) & (redshift[i] < zmax):
                ccolor = 'blue'
            else:
                ccolor = 'cyan'
            s = 'circle(%12.8f,%12.8f,5\") # color= %s text={%s}\n' % (
                float(in1[1][i]), float(in1[2][i]), ccolor, in1[11][i])
            outfile.write(s)
        outfile.close()
        in1.close()

def nightAnimation(epoch):
    cleanShifts = '/u/ghezgroup/data/gc/' + epoch
    cleanShifts += '/combo/mag' + epoch + '_kp.shifts'
    table = asciidata.open(cleanShifts)

    cleanFiles = table[0]._data
    xshifts = table[1].tonumpy()
    yshifts = table[2].tonumpy()

    cleanDir = '/u/ghezgroup/data/gc/' + epoch + '/clean/kp/'

    for cc in range(len(cleanFiles)):
        print '%4d out of %4d' % (cc, len(cleanFiles))
        cleanImg = cleanDir + cleanFiles[cc]
        img = pyfits.getdata(cleanImg)

        # Use float axes so the fractional shifts below don't truncate.
        xax = arange(0, img.shape[1], dtype=float)
        yax = arange(0, img.shape[0], dtype=float)
        xax -= 512.0 - xshifts[cc]
        yax -= 512.0 - yshifts[cc]

        pylab.clf()
        pylab.imshow(log10(img),
                     extent=[xax[0], xax[-1], yax[0], yax[-1]],
                     vmin=1.5, vmax=3.5)
        pylab.plot([0], [0], 'k+')
        pylab.axis([-300, 300, -300, 300])
        pylab.xlabel('RA Offset (pixels)')
        pylab.ylabel('Dec Offset (pixels)')
        pylab.title(cleanFiles[cc])
        pylab.savefig(cleanFiles[cc].replace('.fits', '_shift.png'))

def lp_sensitivity():
    """
    Read in a number of GC L' data sets and plot the SNR vs. mag with
    number of frames plotted.
    """
    rootDir = "/u/jlu/doc/proposals/keck/uc/10B/orion/"
    files = [rootDir + "mag04jul_lp_rms.lis",
             rootDir + "mag05jullgs_lp_rms.lis",
             rootDir + "mag06jullgs_lp_rms.lis"]
    legends = ["04jul", "05jullgs", "06jullgs"]

    py.clf()

    magStep = 1.0
    magBins = np.arange(6, 18, magStep)
    snrAvg = np.zeros(len(magBins))

    for ff in range(len(files)):
        tab = asciidata.open(files[ff])

        mag = tab[1].tonumpy()
        snr = tab[7].tonumpy()
        cnt = tab[9].tonumpy()

        for mm in range(len(magBins) - 1):
            magLo = magBins[mm] - magStep / 2.0
            magHi = magBins[mm] + magStep / 2.0
            idx = np.where((mag > magLo) & (mag <= magHi))[0]
            snrAvg[mm] = snr[idx].mean()

        py.semilogy(magBins, snrAvg)
        legends[ff] += ": N = %d" % cnt[0]

    py.legend(legends)
    py.show()

def plot_psf_stars(psfList,
                   fitsFile='/u/ghezgroup/data/gc/09maylgs1/combo/mag09maylgs1_kp.fits',
                   sgraX=576.875, sgraY=681.500):
    """
    Plot the PSF starlist over an image.
    """
    tab = asciidata.open(psfList)

    name = tab[0]._data
    mag = tab[1].tonumpy()
    x = tab[2].tonumpy()
    y = tab[3].tonumpy()
    vx = tab[4].tonumpy()
    vy = tab[5].tonumpy()
    t0 = tab[6].tonumpy()
    filt = tab[7]._data
    isPsf = (tab[8].tonumpy() == 1)

    # Check the PA of the image and rotate to North up if necessary
    hdr = pyfits.getheader(fitsFile)
    pa = float(hdr['ROTPOSN']) - 0.7
    if pa != 0:
        rotPos = rotate_pos(x, y, pa)
        x = rotPos[0]
        y = rotPos[1]

    # Remove PSF stars that should be rejected for the K' filter.
    kpReject = np.zeros(len(x), dtype=np.bool)
    for ff in range(len(filt)):
        filters = filt[ff].split(',')
        if 'KP' in filters:
            kpReject[ff] = True
    isPsf[kpReject == True] = False

    sgra = np.array([sgraX, sgraY])

    img = pyfits.getdata(fitsFile)
    xaxis = (np.arange(img.shape[1]) - sgra[0]) * 0.00995 * -1.0
    yaxis = (np.arange(img.shape[0]) - sgra[1]) * 0.00995

    # pdb.set_trace()
    py.close(2)
    py.figure(2, figsize=(12, 12))
    py.clf()
    py.imshow(np.log10(img),
              extent=[xaxis[0], xaxis[-1], yaxis[0], yaxis[-1]],
              vmin=1, vmax=math.log10(40000),
              cmap=py.cm.Greys,
              origin='lower')  # 'lowerleft' is not a valid matplotlib origin
    py.plot(x[isPsf == False], y[isPsf == False], 'bx', mew=2)
    py.plot(x[isPsf == True], y[isPsf == True], 'rx', mew=2)

    idxPsf = np.where(isPsf == True)[0]
    for pp in range(len(idxPsf)):
        py.text(x[idxPsf[pp]], y[idxPsf[pp]], name[idxPsf[pp]], color='red')

    py.axis([xaxis[0], xaxis[-1], yaxis[0], yaxis[-1]])
    py.title(psfList)
    py.xlabel('R.A. Offset from Sgr A* (arcsec)')
    py.ylabel('Dec. Offset from Sgr A* (arcsec)')
    py.savefig(psfList + '.png')

def readProfileTxt(filepath):
    # Reads SB profile from txt output
    # (The original referenced an undefined `realProfilePath`; the
    #  `filepath` argument is what was clearly intended.)
    try:
        inputData = numpy.array(asciidata.open(filepath))
        R_as, mag_R, emag_R = inputData[1, :], inputData[2, :], inputData[3, :]
    except:
        inputData = asciidata.open(filepath)
        R_as, mag_R, emag_R = [], [], []
        for ii in numpy.arange(len(inputData[0])):
            R_as.append(inputData[1][ii])
            mag_R.append(inputData[2][ii])
            emag_R.append(inputData[3][ii])
        #
        R_as = numpy.array(R_as); mag_R = numpy.array(mag_R); emag_R = numpy.array(emag_R)
    #
    return R_as, mag_R, emag_R

def model_klf():
    # Read in Geneva tracks
    genevaFile = '/u/jlu/work/models/geneva/iso/020/c/'
    genevaFile += 'iso_c020_0675.UBVRIJHKLM'
    model = asciidata.open(genevaFile)

    modMass = model[1].tonumpy()
    modV = model[6].tonumpy()
    modVK = model[11].tonumpy()
    modHK = model[15].tonumpy()
    modJLp = model[19].tonumpy()
    modJK = model[17].tonumpy()

    # genevaFile2 = '/u/jlu/work/models/geneva/iso/020/c/'
    # genevaFile2 += 'iso_c020_068.UBVRIJHKLM'
    # model = asciidata.open(genevaFile)
    # modMass = model[1].tonumpy()
    # modV = model[6].tonumpy()
    # modVK = model[11].tonumpy()
    # modHK = model[15].tonumpy()
    # modJLp = model[19].tonumpy()
    # modJK = model[17].tonumpy()

    # Reddening
    aV = 27.0
    RV = 2.9

    # # cardelli() returns A_L
    # aJ = aV * extinction.cardelli(1.248, RV)
    # aH = aV * extinction.cardelli(1.6330, RV)
    # aKp = aV * extinction.cardelli(2.1245, RV)
    # aK = aV * extinction.cardelli(2.196, RV)
    # aKs = aV * extinction.cardelli(2.146, RV)

    aKs = 2.7
    aJ = extinction.nishiyama09(1.248, aKs)
    aH = extinction.nishiyama09(1.6330, aKs)
    aKp = extinction.nishiyama09(2.1245, aKs)
    aK = extinction.nishiyama09(2.196, aKs)
    aKs = extinction.nishiyama09(2.146, aKs)

    modK = modV - modVK
    modH = modK + modHK
    modJ = modK + modJK
    modLp = modJ - modJLp
    modKs = modK + 0.002 + 0.026 * (modJK)
    modKp = modK + 0.22 * (modHK)

    dist = 8400.0
    distMod = -5.0 + 5.0 * math.log10(dist)

    modK_extinct = modK + aK + distMod
    modKp_extinct = modKp + aKp + distMod
    modKs_extinct = modKs + aKs + distMod

    return modKp_extinct

def degrade_spectra_Jband(Teff, logg, met):
    # Each model has its own directory
    root = '/Users/duisiya/astro/ABPic'
    dirname = str(Teff) + '_' + 'logg' + str(logg) + 'met' + str(met)

    # read synthetic spectrum
    spec_synth = asciidata.open(os.path.join(dirname, 'J_band_mod.dat'))
    wave_synth = spec_synth[0].tonumpy()
    flux_synth = spec_synth[1].tonumpy()

    # read observed spectrum
    spec_obs = asciidata.open(os.path.join(dirname, 'J_band_obs.dat'))
    wave_obs = spec_obs[0].tonumpy()
    flux_obs = spec_obs[1].tonumpy()

    # BINNING SPECTRUM
    #
    # The synthetic spectrum needs to be binned to have fewer points in the
    # wavelength dimension (~1700 instead of >100000).
    dwave = (max(wave_obs) - min(wave_obs)) / len(wave_obs)  # "fwhm" of the observed spectrum
    dwave_sigma = dwave / (2 * np.sqrt(2 * np.log(2)))

    # Before interpolating the spectrum to a new wavelength grid, smooth it with a Gaussian
    flux_conv = ndimage.filters.gaussian_filter(flux_synth, dwave_sigma)

    # Interpolate the smoothed synthetic spectrum on the observed wavelength grid
    func_interp = interpolate.splrep(wave_synth, flux_conv)  # searching for interpolation function
    flux_synth_smooth = interpolate.splev(wave_obs, func_interp)  # interpolating on a new grid

    # DEGRADING SPECTRAL RESOLUTION
    #
    R = 2000.  # resolution of observed spectra in J band
    # FWHM of a line profile at a given wavelength, on the observed grid so
    # it pairs one-to-one with the Gaussian centers below (the original used
    # wave_synth here, which zip() silently truncated).
    FWHM = wave_obs / R
    G_sigma = FWHM / (2 * np.sqrt(2 * np.log(2)))

    # Convolution of the synthetic spectrum with a Gaussian function that has a variable sigma
    gauss_matr = np.vstack([np.hstack([gauss(x, mu, sig) for x in wave_obs])
                            for mu, sig in zip(wave_obs, G_sigma)])
    flux_synth_conv = np.dot(flux_synth_smooth * dwave, gauss_matr)

    # writing the degraded spectrum to file
    flux_smooth_file = open(os.path.join(dirname, 'low_res_synth_spectrum_Jband.dat'), 'w')
    for n, m in zip(wave_obs, flux_synth_conv):
        flux_smooth_file.write(str(n) + ' ' + str(m) + ' ' + '\n')
    flux_smooth_file.close()

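# The convolution above calls a gauss(x, mu, sig) helper that is not defined
# in this file. A minimal sketch, assuming it is a unit-area Gaussian line
# profile (so that multiplying by dwave approximates the convolution
# integral); this definition is an assumption, not the original helper:
def gauss(x, mu, sig):
    """Unit-area Gaussian evaluated at x, centered on mu with width sig."""
    return np.exp(-0.5 * ((x - mu) / sig)**2) / (sig * np.sqrt(2.0 * np.pi))
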
def match_to_tt(image_star_table, tt_star_data_file, dist=200.):
    centroid_table = asciidata.open(tt_star_data_file)
    star_table = asciidata.open(image_star_table)
    out_table = asciidata.create(6, star_table.nrows)
    tt_centroids = []
    for i in range(star_table.nrows):
        x0 = star_table[0][i]
        y0 = star_table[1][i]
        r = star_table[2][i]
        for j in range(centroid_table.nrows):
            x = centroid_table[0][j]
            y = centroid_table[1][j]
            if abs(x0 - x) < dist and abs(y0 - y) < dist:
                tt_centroids.append((x, y, r))
                break
            if j == centroid_table.nrows - 1:
                print "no star found"
    return tt_centroids

def alter_catalog_for_classification(self, out_name, flat_x_division,
                                     flat_y_division, slope, intercept):
    catalog = asciidata.open(self.merged_catalog)
    for i in range(catalog.nrows):
        if is_below_boundary(catalog['MAG_AUTO'][i] + 25, catalog['MU_MAX'][i],
                             flat_x_division, flat_y_division,
                             slope, intercept) \
                and catalog['MAG_AUTO'][i] + 25.0 < 25.0:
            catalog['IS_STAR'][i] = 1
        else:
            catalog['IS_STAR'][i] = 0
    catalog['IS_STAR'].set_colcomment("Revised Star-Galaxy Classifier")
    catalog.writeto(out_name)

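# is_below_boundary() is not defined in this file. A plausible sketch given
# how it is called above: a star/galaxy boundary in the (MAG_AUTO, MU_MAX)
# plane that is flat up to flat_x_division and linear beyond. Treat this as
# an assumption about the intended behavior, not the original helper:
def is_below_boundary(mag, mu_max, flat_x_division, flat_y_division,
                      slope, intercept):
    """Return True if (mag, mu_max) lies below the star/galaxy boundary."""
    if mag < flat_x_division:
        return mu_max < flat_y_division        # flat part of the boundary
    return mu_max < slope * mag + intercept    # sloped part of the boundary
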
def match_to_tt(image_star_table, tt_star_data_file, dist=200.):
    centroid_table = asciidata.open(tt_star_data_file)
    star_table = asciidata.open(image_star_table)
    out_table = asciidata.create(6, star_table.nrows)
    tt_centroids = []
    for i in range(star_table.nrows):
        # This variant rescales the image-table coordinates into the
        # centroid-table pixel frame before matching.
        x0 = star_table[0][i] * (7500. / 4210.)
        y0 = star_table[1][i] * (7500. / 4242.)
        r = star_table[2][i]
        for j in range(centroid_table.nrows):
            x = centroid_table[0][j]
            y = centroid_table[1][j]
            if abs(x0 - x) < dist and abs(y0 - y) < dist:
                tt_centroids.append((x, y, r))
                break
            if j == centroid_table.nrows - 1:
                print "no star found at location", x0, y0
    return tt_centroids

def radialProfileLog(namegal, inputFile, label='Z',
                     # binsize=50,  # Bin numerosity
                     binsize=0.01,  # Bin size in dex
                     datapoints=[]):  # If given, the radial profiles are limited by the actual datapoints
    # reading input file
    fileKriging = asciidata.open(inputFile)
    xK, yK, zK, errzK = [], [], [], []
    for jj in range(len(fileKriging[0])):
        if fileKriging[2][jj] != 'NA':
            xK.append(fileKriging[0][jj])
            yK.append(fileKriging[1][jj])
            zK.append(float(fileKriging[2][jj]))
            errzK.append(float(fileKriging[3][jj]))
    #
    xK, yK = numpy.array(xK), numpy.array(yK)
    zK, errzK = numpy.array(zK), numpy.array(errzK)
    ellDist = findDell(xK, yK, PA0[namegal], b_a[namegal])
    ellDist_Sorted = ellDist[permutation_indices(ellDist)]
    ellDist_Sorted_log = numpy.log10(ellDist[permutation_indices(ellDist)] / Reff[namegal])
    zK_Sorted = zK[permutation_indices(ellDist)]
    errzK_Sorted = errzK[permutation_indices(ellDist)]
    #
    # Limit elements within datapoints
    #
    if datapoints != []:
        RA_dp, Dec_dp = numpy.array(datapoints)[:, 0], numpy.array(datapoints)[:, 1]
        ellDist_dp_log = numpy.log10(
            findDell(RA_dp, Dec_dp, PA0[namegal], b_a[namegal]) / Reff[namegal])
        minR, maxR = numpy.min(ellDist_dp_log), numpy.max(ellDist_dp_log)
    else:
        minR, maxR = numpy.min(ellDist_Sorted_log), numpy.max(ellDist_Sorted_log)
    #
    binR, binZ, bineZ = [], [], []
    for ii in numpy.arange(minR, maxR, binsize):
        tmpR, tmpZ, tmperrZ = [], [], []
        for kk in numpy.arange(len(ellDist_Sorted_log)):
            if ii <= ellDist_Sorted_log[kk] < ii + binsize:
                tmpR.append(ellDist_Sorted_log[kk])
                tmpZ.append(zK_Sorted[kk])
                tmperrZ.append(errzK_Sorted[kk])
        if len(tmpR) > 0:
            binR.append(numpy.average(tmpR))
            binZ.append(numpy.average(tmpZ, weights=1. / (numpy.array(tmperrZ)**2.)))
            bineZ.append(numpy.std(tmpZ))
    #
    return binR, binZ, bineZ

def load_osiris_spectral_comp(extinctCorrect=True):
    workDir = '/u/jlu/work/gc/imf/gcows/'

    # Get the completeness corrections for each field.
    if extinctCorrect == True:
        completenessFile = workDir + 'spec_completeness_extinct_correct.txt'
    else:
        completenessFile = workDir + 'spec_completeness.txt'
    completeness = asciidata.open(completenessFile)

    # Get the field names which are in the header
    fields = completeness.header.hdata[1].split()
    fields = fields[1:]

    # Get the completeness in dictionary by field name
    compKp = completeness[0].tonumpy()
    comp = {}

    # Fix the compKp because the values given are actually
    # the left side of the bin.
    kpBinSize = compKp[1] - compKp[0]
    compKp += kpBinSize / 2.0

    for ff in range(len(fields)):
        field = fields[ff]
        compTmp = completeness[ff + 1].tonumpy()

        # We need to interpolate over empty stuff, but only where
        # completeness is <= 1.
        # Get the "good" values.
        tmpKp = compKp[compTmp.mask == False]
        tmpComp = compTmp[compTmp.mask == False].data

        # Now find the last bin with completeness = 1 and include
        # only this bin plus all fainter magnitude bins.
        idx = np.where(tmpComp == 1)[0]
        tmpKp = tmpKp[idx[-1]:]
        tmpComp = tmpComp[idx[-1]:]

        c_interp = interpolate.splrep(tmpKp, tmpComp, s=0)
        compInField = interpolate.splev(compKp, c_interp)

        # Flatten to 1 at the bright end
        idx = np.where(compInField >= 1)[0]
        if len(idx) > 0:
            compInField[0:idx[-1] + 1] = 1.0

        # Flatten to 0 at the faint end
        idx = np.where(compInField <= 0)[0]
        if len(idx) > 0:
            compInField[idx[0]:] = 0.0

        comp[field] = compInField

    return compKp, comp

def read_coords():
    """
    Read in the list of coordinates for the WISPS fields.
    """
    cooFile = '/u/jlu/work/wisps/WISPS_hmsdms.coords'
    cooTable = asciidata.open(cooFile, delimiter=',')

    # Pull RA and Dec out of the table and return them. The original called
    # cooTable.tonumpy() twice on the whole table; this assumes RA and Dec
    # are the first two columns.
    ra = cooTable[0].tonumpy()
    dec = cooTable[1].tonumpy()

    return ra, dec

def snr_hist(catalog):
    cat = asciidata.open(catalog)
    snrs = []
    for i in range(cat.nrows):
        if cat['MAG_AUTO'][i] + 21.1 <= 22.5:
            snrs.append(cat['SNR'][i])
    for i in range(10):
        print i * 10, "percentile is", np.percentile(snrs, i * 10)
    plt.hist(snrs, bins=50, range=(0, 50))
    plt.show()

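# Example (the catalog path is hypothetical). The +21.1 term is the same
# magnitude zero point applied to MAG_AUTO in assoc_catalogs_opt() above, so
# the cut keeps objects brighter than a calibrated magnitude of 22.5:
#
#   snr_hist('visit1_with_snr.cat')
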
def update_stars(astromTable):
    """
    Update the database with the positions, photometry, and velocities
    from our final results.
    """
    foo = asciidata.open(astromTable)

    name = foo[0].tonumpy()
    x = foo[1].tonumpy()
    xerr = foo[3].tonumpy()
    y = foo[4].tonumpy()
    yerr = foo[6].tonumpy()
    h = foo[7].tonumpy()
    herr = foo[9].tonumpy()
    kp = foo[10].tonumpy()
    kperr = foo[12].tonumpy()
    lp = foo[13].tonumpy()
    lperr = foo[15].tonumpy()
    x0 = foo[16].tonumpy()
    x0err = foo[18].tonumpy()
    y0 = foo[19].tonumpy()
    y0err = foo[21].tonumpy()
    vx = foo[22].tonumpy()
    vxerr = foo[24].tonumpy()
    vy = foo[25].tonumpy()
    vyerr = foo[27].tonumpy()
    t0 = foo[28].tonumpy()
    velField = foo[29].tonumpy()

    # Create a connection to the database
    connection = sqlite.connect(dbfile)

    # Create a cursor object
    cur = connection.cursor()

    for ss in range(len(name)):
        sql = 'update stars '
        sql += 'set x=?, xerr=?, y=?, yerr=?, vx=?, vxerr=?, vy=?, vyerr=?, '
        sql += 'h=?, herr=?, kp=?, kperr=?, lp=?, lperr=?, '
        sql += 't0=?, velField=? where name=?'

        if x0[ss] == 0:
            x0[ss] = x[ss]
        if y0[ss] == 0:
            y0[ss] = y[ss]

        cur.execute(sql, (x0[ss], x0err[ss], y0[ss], y0err[ss],
                          vx[ss], vxerr[ss], vy[ss], vyerr[ss],
                          h[ss], herr[ss], kp[ss], kperr[ss],
                          lp[ss], lperr[ss],
                          t0[ss], velField[ss], name[ss]))

    connection.commit()

def plotPhotoCalib(image, cooStar,
                   photoCalib='/u/ghezgroup/data/gc/source_list/photo_calib.dat'):
    """
    Plot the specified image and overlay the photo_calib.dat sources on top.
    Coordinates are converted from pixels to arcsec using the coo star and
    assuming that the angle of the image is 0.
    """
    # Load up the photometric calibrators table.
    _tab = asciidata.open(photoCalib)

    name = _tab[0].tonumpy()
    x = _tab[1].tonumpy()
    y = _tab[2].tonumpy()

    # Load up the image
    imageRoot = image.replace('.fits', '')
    im = pyfits.getdata(imageRoot + '.fits')

    # Coo star pixel coordinates
    _coo = open(imageRoot + '.coo', 'r')
    tmp = _coo.readline().split()
    cooPixel = [float(tmp[0]), float(tmp[1])]

    imgsize = (im.shape)[0]
    xpix = np.arange(0, im.shape[0], dtype=float)
    ypix = np.arange(0, im.shape[1], dtype=float)

    cooIdx = np.where(name == cooStar)[0]
    if len(cooIdx) == 0:
        print 'Failed to find the coo star %s in %s' % (cooStar, photoCalib)
        return  # bail out; the offsets below need the coo star position

    cooArcsec = [x[cooIdx[0]], y[cooIdx[0]]]

    scale = 0.00994
    xim = ((xpix - cooPixel[0]) * scale * -1.0) + cooArcsec[0]
    yim = ((ypix - cooPixel[1]) * scale) + cooArcsec[1]

    py.figure(1)
    py.clf()
    py.grid(True)
    py.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)
    py.imshow(np.log10(im), extent=[xim[0], xim[-1], yim[0], yim[-1]],
              aspect='equal', vmin=1.9, vmax=6.0, cmap=py.cm.gray)
    py.xlabel('X Offset from Sgr A* (arcsec)')
    py.ylabel('Y Offset from Sgr A* (arcsec)')
    py.title(imageRoot)
    thePlot = py.gca()

    idx = (np.where((x > xim.min()) & (x < xim.max()) &
                    (y > yim.min()) & (y < yim.max())))[0]

    py.plot(x[idx], y[idx], 'r+', color='orange')
    for ii in idx:
        py.text(x[ii], y[ii], name[ii], color='orange', fontsize=12)

def mu_mag(catalog):
    mu = []
    mag = []
    cat = asciidata.open(catalog)
    for i in range(cat.nrows):
        mu.append(cat['MU_MAX'][i])
        mag.append(cat['MAG_AUTO'][i])
    mu_array = np.asarray(mu)
    mag_array = np.asarray(mag)
    plt.scatter(mag_array, mu_array)
    plt.show()

def massLuminosity(distance=8.0, AKp=3.0,
                   isoFileName='iso_c020_0680.UBVRIJHKLM'):
    """
    Plot the mass luminosity relationship for the GC young stars. Make two
    figures, one for absolute magnitudes (Kp) and one for apparent magnitudes
    at the specified distance and AKp extinction.
    """
    # ==========
    # Convert to masses
    # Assume solar metallicity, A_K=3, distance = 8 kpc, age = 6 Myr
    # ==========
    genevaFile = '/u/jlu/work/models/geneva/iso/020/c/'
    genevaFile += isoFileName
    model = asciidata.open(genevaFile)
    modMass = model[1].tonumpy()
    modV = model[6].tonumpy()
    modVK = model[11].tonumpy()
    modHK = model[15].tonumpy()

    modK = modV - modVK
    modH = modK + modHK

    # Convert from K to Kp (Wainscoat and Cowie 1992)
    modKp = modK + 0.22 * (modH - modK)

    # Use the requested distance (kpc -> pc); the original hard-coded
    # 8000 pc regardless of the distance argument.
    dist = distance * 1000.0
    distMod = -5.0 + 5.0 * math.log10(dist)
    modKpGC = modKp + distMod + AKp

    outputDir = '/u/jlu/work/gc/imf/klf/2010_04_02/plots/'
    outputSuffix = '_%.1f_%.1f.png' % (distance, AKp)

    py.clf()
    py.plot(modMass, modKp, 'b.')
    py.xlabel('Mass (Msun)')
    py.ylabel('Absolute Kp (magnitude)')
    rng = py.axis()
    py.ylim(rng[3], rng[2])
    py.savefig(outputDir + 'mass_vs_absKp' + outputSuffix)
    py.show()

    py.clf()
    py.plot(modMass, modKpGC, 'b.')
    py.xlabel('Mass (Msun)')
    py.ylabel('Apparent Kp (magnitude)')
    rng = py.axis()
    py.ylim(rng[3], rng[2])
    py.title('Distance = %.1f kpc, A_Kp = %.1f' % (distance, AKp))
    py.savefig(outputDir + 'mass_vs_appKp' + outputSuffix)
    py.show()

def radialProfile(namegal, inputFile, label='Z',
                  # binsize=50,  # Bin numerosity
                  binsize=1,  # Bin size in arcsec
                  datapoints=[]):  # If given, the radial profiles are limited by the actual datapoints
    # reading input file
    fileKriging = asciidata.open(inputFile)
    xK, yK, zK, errzK = [], [], [], []
    for jj in range(len(fileKriging[0])):
        if fileKriging[2][jj] != 'NA':
            xK.append(fileKriging[0][jj])
            yK.append(fileKriging[1][jj])
            zK.append(float(fileKriging[2][jj]))
            errzK.append(float(fileKriging[3][jj]))
    #
    xK, yK = numpy.array(xK), numpy.array(yK)
    zK, errzK = numpy.array(zK), numpy.array(errzK)
    # xA = -(-xK*numpy.cos((90-PA0[namegal])*numpy.pi/180.) - yK*numpy.sin((90-PA0[namegal])*numpy.pi/180.))
    # yA = -xK*numpy.sin((90-PA0[namegal])*numpy.pi/180.) + yK*numpy.cos((90-PA0[namegal])*numpy.pi/180.)
    # ellDist = numpy.sqrt((b_a[namegal]*(xA**2.))+((yA**2.)/b_a[namegal]))
    ellDist = findDell(xK, yK, PA0[namegal], b_a[namegal])
    ellDist_Sorted = ellDist[permutation_indices(ellDist)]
    zK_Sorted = zK[permutation_indices(ellDist)]
    errzK_Sorted = errzK[permutation_indices(ellDist)]
    #
    # Limit elements within datapoints
    #
    if datapoints != []:
        RA_dp, Dec_dp = numpy.array(datapoints)[:, 0], numpy.array(datapoints)[:, 1]
        ellDist_dp = findDell(RA_dp, Dec_dp, PA0[namegal], b_a[namegal])
        minR, maxR = numpy.min(ellDist_dp), numpy.max(ellDist_dp)
    else:
        minR, maxR = numpy.min(ellDist_Sorted), numpy.max(ellDist_Sorted)
    #
    binR, binZ = [], []
    for ii in numpy.arange(minR, maxR, binsize):
        tmpR, tmpZ, tmperrZ = [], [], []
        for kk in numpy.arange(len(ellDist_Sorted)):
            if ii <= ellDist_Sorted[kk] < ii + binsize:
                tmpR.append(ellDist_Sorted[kk])
                tmpZ.append(zK_Sorted[kk])
                tmperrZ.append(errzK_Sorted[kk])
        if len(tmpR) > 0:
            binR.append(numpy.average(tmpR))
            binZ.append(numpy.average(tmpZ, weights=1. / (numpy.array(tmperrZ)**2.)))
    #
    return binR, binZ

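# Example call (the galaxy name and input file are illustrative; the PA0 and
# b_a dictionaries, findDell() and permutation_indices() must already be
# defined at module level). Returns the error-weighted profile in bins of
# elliptical radius:
#
#   binR, binZ = radialProfile('NGC1023', 'gridKrig_Z.txt', binsize=2)
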
def update_header_coords(fileList):
    """
    Updates coordinates in the header for XREF, YREF and
    XSTREHL, and YSTREHL.

    fileList : list of files to update
    """
    _files = asciidata.open(fileList)
    files = _files[0].tonumpy()
    files = [files[ff].split('.')[0] for ff in range(len(files))]

    for ff in range(len(files)):
        # Open .coo file and read 16C's coordinates
        coo = asciidata.open(files[ff] + '.coo')
        xref = coo[0].tonumpy()
        yref = coo[1].tonumpy()

        # Open .coord file and read strehl source's coordinates
        coord = asciidata.open(files[ff] + '.coord')
        xstr = coord[0].tonumpy()
        ystr = coord[1].tonumpy()

        # Open image and write reference star x,y to fits header
        fits = pyfits.open(files[ff] + '.fits')
        fits[0].header.update('XREF', "%.3f" % xref, 'Cross Corr Reference Src x')
        fits[0].header.update('YREF', "%.3f" % yref, 'Cross Corr Reference Src y')
        fits[0].header.update('XSTREHL', "%.3f" % xstr, 'Strehl Reference Src x')
        fits[0].header.update('YSTREHL', "%.3f" % ystr, 'Strehl Reference Src y')

        # Output fits file
        _out = 'new_hdr/' + files[ff] + '.fits'
        fits[0].writeto(_out, output_verify='silentfix')

def ttsRadius():
    file = '/u/jlu/work/m31/tts_close.txt'
    tab = asciidata.open(file)

    name = tab[1].tonumarray().tolist()
    ra_hr = tab[7].tonumarray()
    ra_min = tab[8].tonumarray()
    ra_sec = tab[9].tonumarray()
    dec_deg = tab[10].tonumarray()
    dec_min = tab[11].tonumarray()
    dec_sec = tab[12].tonumarray()

    m31_ra_hr = 0.0
    m31_ra_min = 42.0
    m31_ra_sec = 44.23
    m31_dec_deg = 41.0
    m31_dec_min = 16.0
    m31_dec_sec = 8.8

    # Convert into floats
    ra_tmp = ra_hr + (ra_min / 60.0) + (ra_sec / 3600.0)
    dec_tmp = dec_deg + (dec_min / 60.0) + (dec_sec / 3600.0)
    m31_ra_tmp = m31_ra_hr + (m31_ra_min / 60.0) + (m31_ra_sec / 3600.0)
    m31_dec_tmp = m31_dec_deg + (m31_dec_min / 60.0) + (m31_dec_sec / 3600.0)

    ra_diff = (ra_tmp - m31_ra_tmp) * math.cos(math.radians(m31_dec_tmp))
    dec_diff = dec_tmp - m31_dec_tmp

    # Convert to degrees
    ra_diff *= (360.0 / 24.0)

    diff = sqrt(pow(ra_diff, 2) + pow(dec_diff, 2))

    # Convert into arcsec
    ra_diff *= 3600.0
    dec_diff *= 3600.0
    diff *= 3600.0

    idx = diff.argsort()

    for i in idx:
        print('%10s %2d %2d %4.1f %2d %2d %4.1f %4d %4d %4d' %
              (name[i], ra_hr[i], ra_min[i], ra_sec[i],
               dec_deg[i], dec_min[i], dec_sec[i],
               diff[i], ra_diff[i], dec_diff[i]))

        if (diff[i] < 65.0):
            os.system('grep %s tts_clust.txt' % name[i])
            os.system('grep %s tts_stars.txt' % name[i])

def test_parsing_conversion_bsc(self):
    """Parsing and comparing to Vizier calculated values the entire
    5th Bright Star Catalogue"""

    bsc = asciidata.open(os.path.abspath(os.path.join(os.path.dirname(__file__), 'bsc.dat')),
                         comment_char='#', delimiter='\t')

    expected_ra = []
    expected_ra_str = []
    expected_dec = []
    expected_dec_str = []
    ra = []
    dec = []

    for i in range(bsc.nrows):
        expected_ra.append(bsc[0][i])
        expected_dec.append(bsc[1][i])
        expected_ra_str.append(bsc[2][i].strip())
        expected_dec_str.append(bsc[3][i].strip())
        ra.append(Coord.fromHMS(bsc[2][i]))
        dec.append(Coord.fromDMS(bsc[3][i]))

    for i in range(bsc.nrows):
        # use e=0.0001 'cause it's the maximum we can get with Vizier data
        # (4 decimal places only)

        # test conversion from HMS DMS to decimal
        assert TestCoord.equal(ra[i].D, expected_ra[i], e=1e-4), \
            "ra: %.6f != coord ra: %.6f (%.6f)" % (
                expected_ra[i], ra[i].D, expected_ra[i] - ra[i].D)
        assert TestCoord.equal(dec[i].D, expected_dec[i], e=1e-4), \
            "dec: %.6f != coord dec: %.6f (%.6f)" % (
                expected_dec[i], dec[i].D, expected_dec[i] - dec[i].D)

        # test strfcoord implementation
        assert expected_ra_str[i] == ra[i].strfcoord("%(h)02d %(m)02d %(s)04.1f"), \
            "ra: %s != coord ra: %s" % (
                expected_ra_str[i], ra[i].strfcoord("%(h)02d %(m)02d %(s)04.1f"))
        assert expected_dec_str[i] == dec[i].strfcoord("%(d)02d %(m)02d %(s)02.0f"), \
            "dec: %s != coord dec: %s" % (
                expected_dec_str[i], dec[i].strfcoord("%(d)02d %(m)02d %(s)02.0f"))

def Disk_Rad(self, Data, **kwargs):
    Ld = asciidata.open(kwargs.get('file', '/Users/bhargavvaidya/test_linediskrpcor_55.dat'))
    r2d = np.asarray(Ld[0]).reshape(516, 1028)
    z2d = np.asarray(Ld[1]).reshape(516, 1028)
    Srl = np.asarray(Ld[2]).reshape(516, 1028)
    Svl = np.asarray(Ld[3]).reshape(516, 1028)

    Mstar = kwargs.get('Mstar', 30.0)
    urho = kwargs.get('urho', 5.0e-14)
    ul = kwargs.get('ul', 0.1)
    Gammae = kwargs.get('Gammae', 0.2369)
    Zeta = kwargs.get('Zeta', 0.4644)
    Lambda = kwargs.get('Lambda', 0.4969)
    Qo = kwargs.get('Qo', 1400.0)
    Alpha = kwargs.get('Alpha', 0.55)

    print '-----------------------------------------------'
    print 'xfl   : ', kwargs.get('xfl', 5.0)
    print 'Alpha : ', Alpha
    print 'Gammae: ', Gammae
    print 'Zeta  : ', Zeta
    print 'Lambda: ', Lambda
    print 'Qo    : ', Qo
    print 'ul    : ', ul
    print 'urho  : ', urho
    print 'Mstar : ', Mstar
    print '-----------------------------------------------'

    sigmae = 0.4
    clight = 3.0e10
    G = 6.67e-8
    Msun = 2.0e33
    AU = 1.5e13
    uvel = np.sqrt((G * Mstar * Msun) / (ul * AU))
    Dless = uvel / (urho * ul * AU * sigmae * clight)
    prefactor = (3.0 / 2.0) * (1.0 / np.pi) * Gammae * Zeta * Lambda
    Kpara = (Dless**Alpha) * ((Qo**(1.0 - Alpha)) / (1.0 - Alpha))

    Tool = pp.Tools()
    grv2 = Tool.Grad(Data.v2, Data.x1, Data.x2, Data.dx1, Data.dx2)
    DvzDz = np.abs(grv2[:, :, 1])
    dvdl = DvzDz
    Disk_Mt = Kpara * ((1.0 / Data.rho) * dvdl)**Alpha
    Disk_Rad_r = Disk_Mt * prefactor * Srl[2:514, 2:1026]
    Disk_Rad_z = Disk_Mt * prefactor * Svl[2:514, 2:1026]

    DiskRad_force_dict = {'d_dvdl': dvdl, 'd_Mt': Disk_Mt,
                          'd_Fr_r': Disk_Rad_r, 'd_Fr_z': Disk_Rad_z}
    return DiskRad_force_dict

def Analysis(filepath=None, info=None):
    Values = np.asarray(asc.open(filepath + 'analysis.out'))
    [a, b] = Values.shape
    MyArr = []
    finfo = open(filepath + 'analysis.info', 'r')
    for line in finfo.readlines():
        if line.find('column') >= 0:
            MyArr.append(line.split())
    finfo.close()
    print len(MyArr)
    ana_dict = dict([('col' + str(i + 1), [Values[i], ' '.join(MyArr[i][3:])])
                     for i in range(a - 1)])
    return ana_dict

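# Example sketch (the run directory is hypothetical): each 'colN' key maps to
# [column values, description parsed from analysis.info]:
#
#   results = Analysis(filepath='./run1/')
#   values, description = results['col1']
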
def label_catalogs(focus_text_file, catalogs):
    f = open(focus_text_file)
    n = 0
    for line in f.readlines():
        split = line.split()
        filename = split[0]
        focus = np.float32(split[1])
        catalog = asciidata.open(catalogs[n])
        for i in range(catalog.nrows):
            catalog['FILENAME'][i] = filename
            catalog['FOCUS'][i] = focus
        catalog['FILENAME'].set_colcomment('Original name of image file for object')
        catalog['FOCUS'].set_colcomment('Focus position in um')
        catalog.writeto((catalogs[n])[:len(catalogs[n]) - 4] + ".focus.cat")
        n += 1
    f.close()
