Example #1
import msgpack
from SciServer import CasJobs

def metadata_worker():
    global consumer  # message-queue consumer created elsewhere in the module
    while True:
        token = consumer.poll(0)
        # file_name = input("Input file name \n")
        if token is None:
            print("all done")
            break
        else:
            token = msgpack.unpackb(token, raw=False)  # raw=False decodes strings to str, so the comparison below works
            if token[0] == 'metadata':
                CasJobs.executeQuery("INSERT INTO test_table VALUES ('Successful')",
                                     context='MyDB',
                                     format='json')
Example #2
def retrieve_sdss_photometry(ra: float, dec: float):
    """
    Retrieve SDSS photometry for a given field, in a 0.2 x 0.2 degree box centred on the passed
    coordinates. (Note: the width of the box is in RA degrees, not corrected for spherical distortion.)
    :param ra: Right Ascension of the centre of the desired field, in degrees.
    :param dec: Declination of the centre of the desired field, in degrees.
    :return: Retrieved photometry table, as a pandas dataframe, if successful; if not, None.
    """
    try:
        from SciServer import Authentication, CasJobs
    except ImportError:
        print("It seems that SciScript/SciServer is not installed, or not accessible to this environment. "
              "\nIf you wish to automatically download SDSS data, please install "
              "\nSciScript (https://github.com/sciserver/SciScript-Python); "
              "\notherwise, retrieve the data manually from "
              "\nhttp://skyserver.sdss.org/dr16/en/tools/search/sql.aspx")
        return None

    print(f"Querying SDSS DR16 archive for field centring on RA={ra}, DEC={dec}")
    user = keys['sciserver_user']
    password = keys["sciserver_pwd"]
    Authentication.login(UserName=user, Password=password)
    # Construct an SQL query to send to SciServer
    query = "SELECT objid,ra,dec"
    for f in sdss_filters:
        query += f",psfMag_{f},psfMagErr_{f},fiberMag_{f},fiberMagErr_{f},fiber2Mag_{f},fiber2MagErr_{f},petroMag_{f},petroMagErr_{f} "
    query += "FROM PhotoObj "
    query += f"WHERE ra BETWEEN {ra - 0.1} AND {ra + 0.1} "
    query += f"AND dec BETWEEN {dec - 0.1} AND {dec + 0.1} "
    print(f"Retrieving photometry from SDSS DR16 via SciServer for field at {ra}, {dec}...")
    df = CasJobs.executeQuery(sql=query, context='DR16')
    if len(df.index) == 0:
        df = None
    return df
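A hypothetical call, assuming the module-level keys credentials and sdss_filters list are configured (both are assumptions about the surrounding module, not part of the snippet):

photometry = retrieve_sdss_photometry(ra=150.1, dec=2.2)
if photometry is not None:
    print(photometry[['objid', 'ra', 'dec']].head())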
Example #3
def testCasJobsQuery(token):
    sql = """select top 10 galaxyId,snapnum,stellarmass from MRIIscPlanck1"""

    queryResponse = CasJobs.executeQuery(sql, context="Henriques2015a", token=token)
    gals = pandas.read_csv(queryResponse, index_col=None)
    print(gals)
    return gals
Example #4
    def test_CasJobs_submitJob(self):
        jobId = CasJobs.submitJob(sql=CasJobs_TestQuery + " into MyDB." +
                                  CasJobs_TestTableName1,
                                  context=CasJobs_TestDatabase)
        jobDescription = CasJobs.waitForJob(jobId=jobId, verbose=True)
        df = CasJobs.executeQuery(sql="DROP TABLE " + CasJobs_TestTableName1,
                                  context="MyDB",
                                  format="csv")
        self.assertNotEqual(jobId, "")
Example #5
    def test_CasJobs_uploadPandasDataFrameToTable_uploadCSVDataToTable(self):
        try:

            df = pandas.read_csv(StringIO(CasJobs_TestTableCSV),
                                 index_col=None)

            result = CasJobs.uploadPandasDataFrameToTable(
                dataFrame=df, tableName=CasJobs_TestTableName2, context="MyDB")
            table = CasJobs.executeQuery(sql="select * from " +
                                         CasJobs_TestTableName2,
                                         context="MyDB",
                                         format="pandas")
            result2 = CasJobs.executeQuery(sql="DROP TABLE " +
                                           CasJobs_TestTableName2,
                                           context="MyDB",
                                           format="csv")
            self.assertEqual(result, True)
            self.assertCountEqual(table, df)  # assertItemsEqual is Python 2 only

            result = CasJobs.uploadCSVDataToTable(
                csvData=CasJobs_TestTableCSV,
                tableName=CasJobs_TestTableName2,
                context="MyDB")
            df2 = CasJobs.executeQuery(sql="select * from " +
                                       CasJobs_TestTableName2,
                                       context="MyDB",
                                       format="pandas")
            result2 = CasJobs.executeQuery(sql="DROP TABLE " +
                                           CasJobs_TestTableName2,
                                           context="MyDB",
                                           format="csv")
            self.assertEqual(result, True)
            self.assertCountEqual(df, df2)  # assertItemsEqual is Python 2 only

        finally:
            try:
                csv = CasJobs.executeQuery(sql="DROP TABLE " +
                                           CasJobs_TestTableName2,
                                           context="MyDB",
                                           format="csv")
            except Exception:
                pass
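Note that assertItemsEqual / assertCountEqual treats the DataFrames as iterables and therefore only compares their column labels. A stricter content check is sketched below, assuming the round-tripped table preserves row order (an assumption, since the query issues no ORDER BY):

import pandas.testing

# Compare actual cell contents, not just column labels.
pandas.testing.assert_frame_equal(table.reset_index(drop=True),
                                  df.reset_index(drop=True),
                                  check_dtype=False)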
Example #6
def testCasJobsQuery(token):
    sql = """select top 10 galaxyId,snapnum,stellarmass from MRIIscPlanck1"""

    gals = CasJobs.executeQuery(sql, context="Henriques2015a", format="pandas", token=token)
    print("PANDAS")
    print(gals)
    gals = CasJobs.executeQuery(sql, context="Henriques2015a", format="csv", token=token)
    print("CSV")
    print(gals)
    gals = CasJobs.executeQuery(sql, context="Henriques2015a", format="json", token=token)
    print("JSON")
    print(gals)
    gals = CasJobs.executeQuery(sql, context="Henriques2015a", format="readable", token=token)
    print("READABLE")
    gals = pandas.read_csv(gals)  # "readable" returns a file-like stream, so parse it
    print(gals)
    # an invalid format string is passed deliberately to exercise error handling
    gals = CasJobs.executeQuery(sql, context="Henriques2015a", format="blabal", token=token)
    print("ERROR")
    print(gals)
    return gals
Example #7
print(casJobsId)  # casJobsId is assumed to be defined in an earlier notebook cell


# In[ ]:

#get info about tables inside MyDB database context:

tables = CasJobs.getTables(context="MyDB")
print(tables)


# In[ ]:

#execute a quick SQL query:

df = CasJobs.executeQuery(sql=CasJobs_TestQuery, context=CasJobs_TestDatabase, format="pandas")
print(df)


# In[ ]:

#submit a job, which inserts the query results into a table in the MyDB database context. 
#Wait until the job is done and get its status.

jobId = CasJobs.submitJob(sql=CasJobs_TestQuery + " into MyDB." + CasJobs_TestTableName1, context="MyDB")
jobDescription = CasJobs.waitForJob(jobId=jobId, verbose=False)
print(jobId)
print(jobDescription)
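# A hedged follow-up check (assumption: the returned job description is a dict
# whose integer 'Status' field uses 5 for a successfully finished CasJobs job):
if jobDescription.get("Status") == 5:
    print("Job finished successfully")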


# In[ ]:
Example #8
    def test_CasJobs_executeQuery(self):
        df = CasJobs.executeQuery(sql=CasJobs_TestQuery,
                                  context=CasJobs_TestDatabase,
                                  format="pandas")
        self.assertEqual(CasJobs_TestTableCSV, df.to_csv(index=False))
Example #9
import os
from sys import argv

password = "******"  # masked credential; username is assumed to be defined earlier

token = Authentication.login(username, password)

user = Authentication.getKeystoneUserWithToken(token)

#csv = open("./metadata_table.csv", 'r')
#csv_str = csv.read()
#csv.close()

#success = CasJobs.uploadCSVDataToTable(csv_str, "osc_metadata")

#print(success)

query1 = "create table osc_metadata (FileName varchar(255), FileSize varchar(255))"
CasJobs.executeQuery(sql=query1, format='json')

# get file name and size
f = argv[1]
f_name = os.path.basename(f)
f_size = os.path.getsize(f)

print(f_name)

print(f_size)

# upload data to table; the doubled quotes ('') embed literal apostrophes
# between fixed-position slices of the file name
query2 = "insert into osc_metadata (FileName, FileSize) values (" + "'" + f_name[
    0:2] + "''" + f_name[2:3] + "''" + f_name[3:12] + "''" + f_name[
        12] + "''" + f_name[13:] + "'" + ", " + "'" + str(f_size) + "'" + ")"
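If the intent is simply to store the whole file name as a single value, a simpler sketch (with a hypothetical sql_quote helper, against the same osc_metadata schema) escapes embedded quotes instead of slicing:

def sql_quote(value):
    # T-SQL escaping: double any embedded single quote
    return "'" + str(value).replace("'", "''") + "'"

query2 = ("insert into osc_metadata (FileName, FileSize) values ("
          + sql_quote(f_name) + ", " + sql_quote(f_size) + ")")
CasJobs.executeQuery(sql=query2, format='json')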
Example #10
import warnings
from SciServer import CasJobs

# Don't show Python warnings
warnings.filterwarnings('ignore')

print("SciServer library imported")
print('Supporting libraries imported')

print('Settings applied')

# This query finds all objects from galSpecInfo
# with extra data pulled from PhotoObjAll.
query = """
SELECT g.specObjID, p.type, p.u, p.g, p.r, p.i, p.z, g.spectrotype
FROM galSpecInfo AS g
    JOIN PhotoObjAll as p ON p.specObjID = g.specObjID
ORDER BY g.specObjID
"""

# Then, query the database. The answer is a table that is
# being returned as a dataframe.
q = CasJobs.executeQuery(query, "dr14")

# Drop any row that has NaN; dropna() returns a copy, so reassign it.
q = q.dropna()
# Check that all types in columns are as expected.
q.info()

# Write to file.
q.to_csv("result.csv")

print("SQL query finished.")
Example #11
    def do(self,
           user='******',
           password='******',
           search=1,
           path_to_model='YSE_App\\data_ingest\\RF_model.sav'):
        """
		Predicts photometric redshifts from RA and DEC points in SDSS

		An outline of the algorithm is:

		first pull from SDSS u,g,r,i,z magnitudes from SDSS; 
			should be able to handle a list/array of RA and DEC

		place u,g,r,i,z into a vector, append the derived information into the data array

		predict the information from the model

		return the predictions in the same order to the user

		inputs:
			Ra: list or array of len N, right ascensions of target galaxies in decimal degrees
			Dec: list or array of len N, declination of target galaxies in decimal degrees
			search: float, arcmin tolerance to search for the object in SDSS Catalogue
			path_to_model: str, filepath to saved model for prediction
		
		Returns:
			predictions: array of len N, photometric redshift of input galaxy

		"""
        nowdate = datetime.datetime.utcnow() - datetime.timedelta(1)
        from django.db.models import Q  #HAS To Remain Here, I dunno why
        print('Entered the photo_z cron')
        #save time b/c the other cron jobs print a time for completion

        transients = (Transient.objects.filter(Q(host__photo_z__isnull=True)))

        #print('Number of test transients:', len(transients))
        RA = []  #Needs to be list b/c don't know how many hosts are None
        DEC = []
        outer_mask = []  # integer index mask to fill, because some hosts don't have RA and DEC assigned

        for i, transient_obj in enumerate(transients):
            if transient_obj.host != None:
                RA.append(transient_obj.host.ra)
                DEC.append(transient_obj.host.dec)
                outer_mask.append(i)  #provides integer index mask

        outer_mask = np.array(outer_mask)  #make that an array

        N_outer = len(transients)  #gives size of returned array

        Ra = np.array(RA)
        Dec = np.array(DEC)

        N = len(Ra)  #gives size of query array
        Q = N // 1000  #decompose the length of transients needing classification

        if N % 1000 != 0:
            Q = Q + 1  #catch remainder and start a new batch
        total_job = []  #store all pandas job dataframes
        for j in range(Q):  #iterate over batches
            if j == (Q - 1):
                Ra_batch = Ra[j * 1000:((j + 1) * 1000 +
                                        N % 1000)]  #grab batch remainder
                Dec_batch = Dec[j * 1000:((j + 1) * 1000 + N % 1000)]
            else:
                Ra_batch = Ra[j * 1000:(j + 1) *
                              1000]  # otherwise grab a batch of 1000
                Dec_batch = Dec[j * 1000:(j + 1) * 1000]

            hold = []  # holds the value tuples to splice into the SQL query
            for val in range(len(Ra_batch)):
                # index the batch slices; using the full Ra/Dec arrays here
                # breaks for every batch after the first
                string = '({},{},{}),|'.format(str(val), str(Ra_batch[val]),
                                               str(Dec_batch[val]))
                hold.append(string)

            #Now construct the full query
            sql = "CREATE TABLE #UPLOAD(|id INT PRIMARY KEY,|up_ra FLOAT,|up_dec FLOAT|)|INSERT INTO #UPLOAD|   VALUES|"
            for data in hold:
                sql = sql + data
            #there is a comma that needs to be deleted from the last entry for syntax to work
            sql = sql[0:(len(sql) - 2)] + '|'
            #append the rest to it
            sql = sql + "SELECT|p.u,p.g,p.r,p.i,p.z|FROM #UPLOAD as U|OUTER APPLY dbo.fGetNearestObjEq((U.up_ra),(U.up_dec),{}) as N|LEFT JOIN Galaxy AS p ON N.objid=p.objid".format(
                str(search))
            # replace every | with a newline (may need revisiting on a Unix system)
            sql = sql.replace('|', '\n')
            #login, change to some other credentials later
            Authentication.login('awe2', 'StandardPassword')
            job = CasJobs.executeQuery(
                sql, 'DR15',
                'pandas')  # this line sends the query and retrieves the result
            print('Query {} of {} complete'.format(j + 1, Q))
            job['u-g'] = job['u'].values - job['g'].values
            job['g-r'] = job['g'].values - job['r'].values
            job['r-i'] = job['r'].values - job['i'].values
            job['i-z'] = job['i'].values - job['z'].values
            job['u_over_z'] = job['u'].values / job['z'].values
            total_job.append(job)
        #print('left the query loop')
        query_result = pd.concat(total_job)
        #now feed to a RF model for prediction
        X = query_result.values
        #load the model, will need to change the path later
        model = pickle.load(open(path_to_model, 'rb'))
        #Need to deal with NANs now since many objects are outside the SDSS footprint, later models will learn to deal with this
        #ideas: need to retain a mask of where the nans are in the row
        mask = np.invert(query_result.isna().any(axis=1).values)  # True = inside the SDSS footprint
        #also will want this mask in indices so we can insert the predicted data correctly
        indices = []
        for i, val in enumerate(mask):
            if val == True:
                indices.append(i)
        predictions = model.predict((X[mask, :]))
        #make nan array with size of what user asked for
        return_me = np.ones(N) * np.nan
        #now replace nan with the predictions in order
        return_me[indices] = predictions
        #something is wrong here!, line works inside a try statement but not outside. raises no error for some reason...?
        return_me_outer = np.ones(N_outer) * np.nan
        return_me_outer[outer_mask] = return_me
        #print('debug: made it here')
        print('time taken:', datetime.datetime.utcnow() - nowdate)
        print('uploading now')
        # pair with the full-length array: return_me only covers transients that
        # have a host, so zipping it against transients would misalign the values
        for t, pz in zip(transients, return_me_outer):
            if t.host is None or pz != pz:  # skip hostless transients and NaN predictions
                continue
            host = t.host
            host.photo_z = pz
            host.save()
        print('time taken with upload:', datetime.datetime.utcnow() - nowdate)
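For reference, the query assembled by the loop above renders (after the '|' to newline substitution) like the following for a hypothetical two-point batch with the default 1-arcmin search radius; the coordinates are invented:

example_sql = """CREATE TABLE #UPLOAD(
id INT PRIMARY KEY,
up_ra FLOAT,
up_dec FLOAT
)
INSERT INTO #UPLOAD
   VALUES
(0,150.1,2.2),
(1,151.3,2.5)
SELECT
p.u,p.g,p.r,p.i,p.z
FROM #UPLOAD as U
OUTER APPLY dbo.fGetNearestObjEq((U.up_ra),(U.up_dec),1) as N
LEFT JOIN Galaxy AS p ON N.objid=p.objid"""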
Example #12
	def do(self,user='******',password='******',search=1,path_to_model='YSE_App/data_ingest/YSE_DNN_photoZ_model_315.hdf5'):
		"""
		Predicts photometric redshifts from RA and DEC points in SDSS

		An outline of the algorithm is:

		first pull from SDSS u,g,r,i,z magnitudes from SDSS; 
			should be able to handle a list/array of RA and DEC

		place u,g,r,i,z into a vector, append the derived information into the data array

		predict the information from the model

		return the predictions in the same order to the user

		inputs:
			Ra: list or array of len N, right ascensions of target galaxies in decimal degrees
			Dec: list or array of len N, declination of target galaxies in decimal degrees
			search: float, arcmin tolerance to search for the object in SDSS Catalogue
			path_to_model: str, filepath to saved model for prediction
		
		Returns:
			predictions: array of len N, photometric redshift of input galaxy

		"""

		try:
			nowdate = datetime.datetime.utcnow() - datetime.timedelta(1)
			from django.db.models import Q #HAS To Remain Here, I dunno why
			print('Entered the photo_z cron')		
			#save time b/c the other cron jobs print a time for completion

			transients = (Transient.objects.filter(Q(host__photo_z__isnull=True) & Q(host__isnull=False)))

			#print('Number of test transients:', len(transients))
			RA=[] #Needs to be list b/c don't know how many hosts are None
			DEC=[]
			outer_mask = [] #create an integer index mask that we will place values into because some hosts dont have a RA and DEC assigned 
			transients_withhost = []
			
			for i,transient_obj in enumerate(transients):
				if transient_obj.host != None:
					RA.append(transient_obj.host.ra)
					DEC.append(transient_obj.host.dec)
					outer_mask.append(i) #provides integer index mask

			outer_mask = np.array(outer_mask) #make that an array

			N_outer = len(transients) #gives size of returned array

			Ra = np.array(RA)
			Dec = np.array(DEC)

			N = len(Ra)#gives size of query array
			Q = N//1000#decompose the length of transients needing classification

			if N%1000 != 0: 
				Q=Q+1 #catch remainder and start a new batch
			total_job = [] #store all pandas job dataframes
			for j in range(Q): #iterate over batches
				if j == (Q-1):
					Ra_batch = Ra[j*1000:((j+1)*1000 + N%1000)] #grab batch remainder
					Dec_batch = Dec[j*1000:((j+1)*1000 + N%1000)]
				else:
				Ra_batch = Ra[j*1000:(j+1)*1000] # otherwise grab a batch of 1000
					Dec_batch = Dec[j*1000:(j+1)*1000]

				hold=[] #a list for holding the strings that I want to place into an sql query
				for val in range(len(Ra_batch)):
					# index the batch slices; using the full Ra/Dec arrays breaks after the first batch
					string = '({},{},{}),|'.format(str(val),str(Ra_batch[val]),str(Dec_batch[val]))
					hold.append(string)

				#Now construct the full query
				sql = "CREATE TABLE #UPLOAD(|id INT PRIMARY KEY,|up_ra FLOAT,|up_dec FLOAT|)|INSERT INTO #UPLOAD|	VALUES|"
				for data in hold:
					sql = sql + data
				#there is a comma that needs to be deleted from the last entry for syntax to work
				sql = sql[0:(len(sql)-2)] + '|'
				#append the rest to it
				sql = sql + "SELECT|p.u,p.g,p.r,p.i,p.z,p.extinction_u,p.extinction_g,p.extinction_r,p.extinction_i,p.extinction_z,p.petroRad_u,p.petroRad_g,p.petroRad_r,p.petroRad_i,p.petroRad_z,p.petroR50_r,p.petroR90_r,zi.e_bv_sfd|FROM #UPLOAD as U|OUTER APPLY dbo.fGetNearestObjEq((U.up_ra),(U.up_dec),{}) as N|LEFT JOIN PhotoObjAll AS p ON N.objid=p.objID|JOIN SpecObjAll za on (p.objID = za.bestObjID)|JOIN galSpecInfo zi ON (zi.SpecObjID = za.specObjID)".format(str(search))
				# replace every | with a newline (may need revisiting on a Unix system)
				sql = sql.replace('|','\n')
				#login, change to some other credentials later
				Authentication.login('awe2','StandardPassword')
				job = CasJobs.executeQuery(sql,'DR12','pandas') #this line sends and retrieves the result
				print('Query {} of {} complete'.format(j+1,Q))
				job['dered_u'] = job['u'].values - job['extinction_u'].values
				job['dered_g'] = job['g'].values - job['extinction_g'].values
				job['dered_r'] = job['r'].values - job['extinction_r'].values
				job['dered_i'] = job['i'].values - job['extinction_i'].values
				job['dered_z'] = job['z'].values - job['extinction_z'].values
				
				job['u-g']= job['dered_u'].values - job['dered_g'].values
				job['g-r']= job['dered_g'].values - job['dered_r'].values
				job['r-i']= job['dered_r'].values - job['dered_i'].values
				job['i-z']= job['dered_i'].values - job['dered_z'].values
				job['u_over_z']= job['dered_u'].values / job['dered_z'].values
				
				job['C'] = job['petroR90_r'].values/job['petroR50_r'].values
				total_job.append(job)

			print('left the query loop')
			query_result = pd.concat(total_job)
			#now feed to a RF model for prediction
			X = query_result[['dered_u','dered_g','dered_r','dered_i','dered_z','u-g','g-r','r-i','i-z','u_over_z','petroRad_u','petroRad_g','petroRad_r','petroRad_i','petroRad_z','petroR50_r','petroR90_r','C','e_bv_sfd']].values
			print(X.shape)
			#define and load in the model
			def create_model(learning_rate):
				model = keras.Sequential([])
				model.add(keras.layers.Dense(input_shape=(19,),units=19,activation=keras.activations.linear)) #tried relu
				#model.add(keras.layers.Dropout(rate=0.1))
				model.add(keras.layers.Dense(units=19,activation=tf.nn.relu)) 
				#model.add(keras.layers.Dropout(rate=0.1))
				model.add(keras.layers.Dense(units=19,activation=tf.nn.relu))
				#model.add(keras.layers.Dropout(rate=0.1))
				model.add(keras.layers.Dense(units=19,activation=tf.nn.relu)) #tf.nn.relu
				#model.add(keras.layers.Dropout(rate=0.1))
				model.add(keras.layers.Dense(units=315,activation=keras.activations.softmax))
				
				#RMS = keras.optimizers.RMSprop(learning_rate=learning_rate)
				adam = keras.optimizers.Adam(lr=learning_rate)
				model.compile(optimizer=adam, loss='categorical_crossentropy') 
				return(model)

			keras.backend.clear_session()
			model = create_model(learning_rate = 1e-3)#couldve been anything for this, just gonna predict
			model.load_weights(path_to_model)
			
			#Need to deal with NANs now since many objects are outside the SDSS footprint, later models will learn to deal with this
			#ideas: need to retain a mask of where the nans are in the row
			mask = np.invert(query_result.isna().any(axis=1).values) # True = inside the SDSS footprint
			#also will want this mask in indices so we can insert the predicted data correctly
			indices=[]
			for i,val in enumerate(mask):
				if val == True:
					indices.append(i)
			
			#predict on data that is not NAN
			predictions = model.predict(X[mask,:], verbose=2)			
			
			#make nan array with size of what user asked for
			return_me = np.ones(N)*np.nan
			#now replace nan with the predictions in order
			return_me[indices] = predictions
			return_me_outer = np.ones(N_outer) * np.nan
			return_me_outer[outer_mask] = return_me
			print('time taken:', datetime.datetime.utcnow() - nowdate)
			print('uploading now')
			tz,mpz = [],[]
			# pair with the full-length array; return_me only covers transients with hosts
			for t,pz in zip(transients,return_me_outer):
				if t.host is None or pz != pz: continue  # skip hostless transients and NaNs
				host = t.host
				#import pdb; pdb.set_trace()
				host.photo_z = pz
				host.save()
				tz += [host.redshift]
				mpz += [pz]
			plt.plot(tz,mpz,'.')
			plt.savefig('test.png')

			print('time taken with upload:', datetime.datetime.utcnow() - nowdate)
		except Exception as e:
			exc_type, exc_obj, exc_tb = sys.exc_info()
			print("""Photo-z cron failed with error %s at line number %s"""%(e,exc_tb.tb_lineno))
Example #13
import numpy as np
import pandas
import matplotlib.pyplot as plt
import skimage.io
import SciServer.CasJobs as CasJobs
import SciServer.SciDrive

query = """
select top 16 g.objId,g.ra,g.dec,g.petror90_r, dbo.fGetUrlFitsCFrame(F.FIELDiD,'r') as fieldURL
from galaxy g
  ,  field f
where f.run=g.run
  and f.camcol=g.camcol
  and f.field=g.field
  and f.rerun=g.rerun
"""
# query CasJobs table. Using DR12 as context
queryResponse = CasJobs.executeQuery(query, "DR12")
# parse results into pandas.DataFrame for further in memory processing
gals = pandas.read_csv(queryResponse, index_col=None)
gals['objId'] = gals['objId'].astype(np.int64)

width = 200
height = 200
pixelsize = 0.396
plt.figure(figsize=(15, 15))
subPlotNum = 1

for index, gal in gals.iterrows():
    scale = 2 * gal['petror90_r'] / pixelsize / width
    url = "http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?ra=" + str(gal['ra'])
    url += "&dec=" + str(gal['dec']) + "&scale=" + str(scale) + "&width=" + str(width)
    url += "&height=" + str(height)
Example #14
    FROM fGetNearbyObjEq(" + ra + ", " + dec + ", " + radius + ") n, Star p \
    LEFT OUTER JOIN specObj s ON s.bestObjID = p.objID \
    LEFT OUTER JOIN photoz pz ON pz.objid = p.objid \
    WHERE n.objID = p.objID AND(s.class IS NULL or s.class='STAR') "

# Query for galaxy with modelMag and cmodelMag and extinctions
q_gal = "SELECT p.ra, p.dec, p.modelMag_u, p.modelMagErr_u, p.modelMag_g, p.modelMagErr_g, p.modelMag_r, p.modelMagErr_r, p.modelMag_i, p.modelMagErr_i, p.modelMag_z, p.modelMagErr_z, \
    p.petroMag_u, p.petroMagErr_u, p.petroMag_g, p.petroMagErr_g, p.petroMag_r, p.petroMagErr_r, p.petroMag_i, p.petroMagErr_i, p.petroMag_z, p.petroMagErr_z, \
    p.cmodelMag_u, p.cmodelMagErr_u, p.cmodelMag_g, p.cmodelMagErr_g, p.cmodelMag_r, p.cmodelMagErr_r, p.cmodelMag_i, p.cmodelMagErr_i, p.cmodelMag_z, p.cmodelMagErr_z, \
    extinction_u, extinction_g, extinction_r, extinction_i, extinction_z, clean, s.z, s.zerr, pz.z as photoz, pz.zerr as photozerr \
    FROM fGetNearbyObjEq(" + ra + ", " + dec + ", " + radius + ") n, Galaxy p \
    LEFT OUTER JOIN specObj s ON s.bestObjID = p.objID \
    LEFT OUTER JOIN photoz pz ON pz.objid = p.objid \
    WHERE n.objID = p.objID AND(s.class IS NULL or s.class='GALAXY') "

star_data = CasJobs.executeQuery(q_star, context=context, format='pandas')
gal_data = CasJobs.executeQuery(q_gal, context=context, format='pandas')

star_tab = Table(star_data.values,
                 names=star_data.columns,
                 meta={
                     'clustername': clustername,
                     'ra': ra,
                     'dec': dec,
                     'radius': radius
                 },
                 dtype=[float for x in range(len(star_data.columns))])
gal_tab = Table(gal_data.values,
                names=gal_data.columns,
                meta={
                    'clustername': clustername,
                    'ra': ra,
                    'dec': dec,
                    'radius': radius
                },
                dtype=[float for x in range(len(gal_data.columns))])
Example #15
import numpy as np
import pandas
import matplotlib.pyplot as plt
import skimage.io
import SciServer.CasJobs as CasJobs
import SciServer.SciDrive


query = """
select top 16 g.objId,g.ra,g.dec,g.petror90_r, dbo.fGetUrlFitsCFrame(F.FIELDiD,'r') as fieldURL
from galaxy g
  ,  field f
where f.run=g.run
  and f.camcol=g.camcol
  and f.field=g.field
  and f.rerun=g.rerun
"""
# query CasJobs table. Using DR12 as context
queryResponse = CasJobs.executeQuery(query, "DR12")
# parse results into pandas.DataFrame for further in memory processing
gals = pandas.read_csv(queryResponse, index_col=None)
gals['objId'] = gals['objId'].astype(np.int64)

width = 200
height = 200
pixelsize = 0.396
plt.figure(figsize=(15, 15))
subPlotNum = 1

for index, gal in gals.iterrows():
	scale = 2 * gal['petror90_r'] / pixelsize / width
	url = "http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx?ra=" + str(gal['ra'])
	url += "&dec=" + str(gal['dec']) + "&scale=""" + str(scale) + "&width=" + str(width)
	url += "&height=" + str(height)
Example #16
              {}
        ORDER BY class,subclass, u,g,r,i,z""".format('', '')

sql_spec = '''
select  class, subclass, u-g as ug, g-r as gr, r-i as ri, i-z as iz, u-r as ur, g-i as gi, r-z as rz, u-i as ui, g-z as gz, u-z as uz,  (r-15)/5 as r,objid 
--into MYDB.spectrain
from (SELECT p.psfMag_u AS u, p.psfMag_g AS g, p.psfMag_r AS r, p.psfMag_i AS i, p.psfMag_z AS z,
s.class as class, s.subclass as subclass,s.BestObjID as objid
        FROM SpecObj as s left join PhotoObj as P  on s.BestObjID= P.objID
        WHERE s.BestObjID in (select objid from MYDB.specDR13) 
) x 
order by 1'''

sql_drop = """IF OBJECT_ID('spectrain') IS NOT NULL
        DROP TABLE spectrain"""
CasJobs.executeQuery(sql=sql_drop, context='MYDB', format="pandas")

sql_get = """
    select * from MYDB.NIPSspecphoto order by objid
    {} 
    OFFSET {} ROWS FETCH NEXT 1407000 ROWS ONLY;""".format('', 1000000 * ii)
# ii+=1

df = CasJobs.executeQuery(sql='SELECT * FROM N15',
                          context='MYDB',
                          format="pandas")
jobid = CasJobs.submitJob(sql=sql_spec, context='DR13')
CasJobs.waitForJob(jobid)

df = CasJobs.executeQuery(sql=sql_spec, context='DR13', format="pandas")
df = CasJobs.executeQuery(sql='SELECT * FROM MYDB.spectrain',
                          context='MYDB',
                          format="pandas")
Example #17
    def delete_table(self, tablename):
        sql = "DROP TABLE {}".format(tablename)
        CasJobs.executeQuery(sql=sql, context="MyDB", format="pandas")