import sys
import xml.dom.minidom


def runSQL(item):
    """Fetch the resource tree for one (platformName, resourceId) pair
    from the Hyperic HQ API and pretty-print it to an XML file."""
    g = Get()  # HTTP helper defined elsewhere in this module
    host = "host"  # placeholder; set to the real HQ server name
    platformName, resourceId = item
    url = "http://%s:7080/hqu/hqapi1/resource/get.hqu?id=%s&children=true&verbose=true" % (host, resourceId)
    filename = platformName + ".resource.xml"
    print " Now processing .. "
    print filename
    print url
    print resourceId
    data = g.get(url)
    with open("data/" + filename, 'w') as f:  # ensure the file is closed
        f.write(xml.dom.minidom.parseString(data).toprettyxml())
    print filename + " done .. "
class GetAlertDefinitions(object):
    """Read (platformName, resourceId) pairs from stdin and dump each
    platform's alert definitions from the Hyperic HQ API to an XML file."""
    def __init__(self, host):
        super(GetAlertDefinitions, self).__init__()
        assert host
        self.host = host
        self.g = Get()  # HTTP helper defined elsewhere in this module

    def getTuple(self):
        """Yield one (platformName, resourceId) pair per line of stdin."""
        for line in sys.stdin:
            platformName, resourceId = line.strip().split()
            yield (platformName, resourceId)

    def runSQL(self):
        """Fetch and save the alert definitions for every pair on stdin."""
        for platformName, resourceId in self.getTuple():
            url = "http://%s:7080/hqu/hqapi1/alertdefinition/listDefinitions.hqu?resourceId=%s&children=true&verbose=true" % (self.host, resourceId)
            filename = platformName + ".xml"

            print " Now processing .. "
            print filename
            print url
            print resourceId

            data = self.g.get(url)
            with open("data/" + filename, 'w') as f:  # ensure the file is closed
                f.write(xml.dom.minidom.parseString(data).toprettyxml())
            print filename + " done .. "
            print "==========================="
def runSQL(item):
    """Fetch the alert definitions for one (platformName, resourceId)
    pair from the Hyperic HQ API and pretty-print them to an XML file."""
    g = Get()  # HTTP helper defined elsewhere in this module
    host = "host"  # placeholder; set to the real HQ server name
    platformName, resourceId = item
    url = "http://%s:7080/hqu/hqapi1/alertdefinition/listDefinitions.hqu?resourceId=%s&children=true&verbose=true" % (
        host,
        resourceId,
    )
    filename = platformName + ".xml"
    print " Now processing .. "
    print filename
    print url
    print resourceId
    data = g.get(url)
    with open("data/" + filename, "w") as f:  # ensure the file is closed
        f.write(xml.dom.minidom.parseString(data).toprettyxml().encode("utf-8"))
    print filename + " done .. "
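Because each runSQL variant takes a single (platformName, resourceId) tuple, the calls parallelize naturally with a worker pool; a sketch under that assumption, with hypothetical input pairs.

from multiprocessing import Pool

pairs = [("web01", "10001"), ("web02", "10002")]  # hypothetical input pairs
pool = Pool(4)           # four concurrent API requests
pool.map(runSQL, pairs)  # each worker fetches and writes one platform
pool.close()
pool.join()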
Example #5
def RunLC(file='',
          objectid='',
          lctype='',
          display=False,
          readfile=False,
          debug=False,
          dofake=True,
          dbmode='fits',
          gapwindow=0.1,
          maxgap=0.125,
          fakefreq=.25,
          mode='davenport',
          iterations=10):
    '''
    Main wrapper to obtain and process a light curve
    '''
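    # Pipeline sketch: (1) load the light curve, (2) flatten per quarter and
    # across gaps, (3) find flare candidates with MultiFind, (4) optionally
    # inject fake flares to measure recovery completeness, (5) write per-flare
    # stats and append a one-line summary to the shared flare list.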
    # get the data
    if debug is True:
        print(str(datetime.datetime.now()) + ' GetLC started')
        print(file, objectid)

    if dbmode in ('txt', 'ktwo', 'everest', 'vdb', 'csv', 'kplr', 'k2sc',
                  'random', 'test'):
        outfile, objectid, lc = Get(dbmode, file=file, objectid=objectid)
    # UNUSED, UNTESTED, DELETE?
    # elif dbmode == 'mysql':
    #     outfile, objectid, lc  = GetLCdb(objectid, type='', readfile=False,
    #               savefile=False, exten = '.lc.gz',
    #               onecadence=False)

    #-----------------------------------------------

    if debug is True:
        print('outfile = ' + outfile)

    ### Basic flattening
    # flatten quarters with a polynomial
    flux_qtr = detrend.QtrFlat(lc.time.values, lc.flux_raw.values,
                               lc.qtr.iloc[0])

    # then flatten between gaps
    lc['flux'] = detrend.GapFlat(lc.time.values, flux_qtr, maxgap=maxgap)

    # find continuous observing periods
    _, dlr = detrend.FindGaps(lc.time.values, maxgap=maxgap)
    if debug is True:
        print("dl, dr: {}".format(dlr))

    # uQtr = np.unique(qtr)
    if debug is True:
        print(str(datetime.datetime.now()) + ' MultiFind started')
    istart, istop, lc['flux_model'] = MultiFind(lc,
                                                dlr,
                                                gapwindow=gapwindow,
                                                debug=debug,
                                                mode=mode)

    df1 = pd.DataFrame({
        'istart': istart,
        'istop': istop,
        'ed68': np.full_like(istart, -99),
        'ed90': np.full_like(istart, -99)
    })
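    # ed68/ed90 start at the sentinel -99 ("completeness not measured") and
    # are only overwritten by FakeCompleteness() when dofake is True.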
    # run artificial flare test in this gap

    if dofake is True:
        dffake = pd.DataFrame()
        for k in range(iterations):
            fakeres = FakeFlares(df1,
                                 lc,
                                 dlr,
                                 mode,
                                 savefile=True,
                                 gapwindow=gapwindow,
                                 outfile='{}fake.json'.format(file),
                                 display=display,
                                 fakefreq=fakefreq,
                                 debug=debug)
            dffake = dffake.append(fakeres, ignore_index=True)

        dffake.to_csv('{}_all_fakes.csv'.format(outfile))

        df1['ed68'], df1['ed90'] = FakeCompleteness(dffake,
                                                    fakefreq,
                                                    iterations,
                                                    display=display,
                                                    file=objectid)

    if display is True:
        print(str(len(istart)) + ' flare candidates found.')

        fig, ax = plt.subplots(figsize=(8, 4))

        ax = help.Plot(lc, ax, istart=istart, istop=istop, onlybit=10.)
        # save before show(); show() can leave an empty canvas on some backends
        plt.savefig(file + '_lightcurve.png',
                    dpi=300,
                    bbox_inches='tight',
                    pad_inches=0.5)
        plt.show()
        plt.close()
    # '''
    # #-- IF YOU WANT TO PLAY WITH THE WAVELET STUFF MORE, WORK HERE
    # test_model = detrend.WaveletSmooth(time, flux)
    # test_cand = DetectCandidate(time, flux, error, test_model)
    #
    # print(len(cand))
    # print(len(test_cand))
    #
    # if display is True:
    #     plt.figure()
    #     plt.plot(time, flux, 'k')
    #     plt.plot(time, test_model, 'green')
    #
    #     plt.scatter(time[test_cand], flux[test_cand], color='orange', marker='p',s=60, alpha=0.8)
    #     plt.show()
    # '''

    # set this to silence bad fit warnings from polyfit
    warnings.simplefilter('ignore', np.RankWarning)

    metadata = {
        'ObjectID': objectid,
        'File': file,
        'Date-Run': str(datetime.datetime.now()),
        'Appaloosa-Version': __version__,
        'N_epoch in LC': str(len(lc.time)),
        'Total exp time of LC': str(np.sum(lc.exptime)),
    }

    if debug is True:
        print(str(datetime.datetime.now()) + ' Getting output header')

    header = FlareStats(lc, ReturnHeader=True)
    header = header + ['ED68i', 'ED90i']
    dfout = pd.DataFrame()

    if debug is True:
        print(str(datetime.datetime.now()) + ' Getting FlareStats')

    # loop over EACH FLARE, compute stats

    for i in range(0, len(istart)):
        stats_i = FlareStats(lc, istart=istart[i], istop=istop[i])

        stats_i = np.append(stats_i, [df1.ed68.iloc[i], df1.ed90.iloc[i]])
        stats_i = [[item] for item in stats_i]
        dfout = dfout.append(pd.DataFrame(dict(zip(header, stats_i))),
                             ignore_index=True)
    if not dfout.empty:
        dfout.to_csv(outfile + '_flare_stats.csv')
        with open(outfile + '_flare_stats_meta.json', 'w') as f:
            j = json.dumps(metadata)
            f.write(j)

    # Append this LC's flare count to the shared flare list
    flist = 'flarelist.csv'  # one list accumulated across all light curves
    header = [
        'Object ID', 'Date of Run', 'Number of Flares', 'Filename',
        'Total Exposure Time of LC in Days', 'BJD-2454833 days'
    ]

    # append to the existing flare list if present, else start a new one
    if glob.glob(flist):
        dfout = pd.read_csv(flist)
    else:
        dfout = pd.DataFrame()

    line = [
        objectid,
        datetime.datetime.now(),
        len(istart),
        file,
        np.sum(lc.exptime),
        lc.time[0],
    ]
    line = [[item] for item in line]
    dfout = dfout.append(pd.DataFrame(dict(zip(header, line))),
                         ignore_index=True)
    dfout.to_csv(flist, index=False)  # avoid stacking index columns on reruns
    return
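A minimal call sketch for the wrapper above; the file name and object ID are hypothetical, and dbmode must be one of the modes Get() handles (note the default 'fits' is not in that list).

RunLC(file='kplr010935310-2009259160929_llc.fits',  # hypothetical input file
      objectid='010935310',                         # hypothetical KIC ID
      dbmode='kplr',
      dofake=True,      # inject fake flares to estimate completeness
      debug=True)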