def __init__(self, field_name='M67field'):
    '''Constructor'''
    self.field_name = field_name
    self.wifsip = DataSource(database=config.dbname, user=config.dbuser, host=config.dbhost)
    kpno = ephem.Observer()
    # 31.958036, -111.600578
    kpno.lon, kpno.lat = '-111.600578', '31.958036'
    kpno.horizon = '-0:34'
    kpno.elevation = 2096
    tzi = pytz.timezone('MST')
    # fmt = '%Y-%m-%d %H:%M:%S %Z%z'
    obsdate = datetime.datetime(2015, 2, 10, 4, 0, 0, tzinfo=tzi)
    kpno.date = obsdate + datetime.timedelta(7 * ephem.hour)
    d = kpno.date.datetime()
    self.input_epoch = 2000
    self.current_epoch = d.year + d.timetuple().tm_yday / 365.
    # LST at mid-exposure in decimal hours
    lst = kpno.sidereal_time()
    print('local sidereal time: ', lst)
    self.siderial_time = ast.hms2hh(str(lst))
    self.exposure_length = 40.0 / 60.0  # in hours
    self.wavelength = 5125  # in Angstroem
    self.cable = 'BLUE'
    self.weighting = 'STRONG'
    self.guidewave = 6000
    self.center = self._get_center()
    c = Cluster('NGC 2682')
    self.ebv = c['ebv']
    self.dm = ast.distance_modulus(c['d'])
    target = {}
    self.table = []
    target['id'] = 6000
    target['name'] = 'M67center'
    target['ra'] = self.center[0]
    target['dec'] = self.center[1]
    target['class'] = 'C'
    self.table.append(target)
    self.targeted = []
    # number of sky fibers
    self.skies = 6
    # number of field orientation probes
    self.fops = 6
    self.tweakcenter = False
    self.tweakangle = False
    print('center', self.center)
    print('E(B-V)', self.ebv)
    print('DM', self.dm)
def __init__(self, day=today('%Y_%m_%d'),
             settings=__import__('settings', level=0),
             mapping=__import__('mapping', level=0),
             output=None):
    threading.Thread.__init__(self)
    self.day = day
    self.settings = settings
    self.mapping = mapping
    # default arguments are evaluated once at definition time, so a
    # Queue.Queue() default would be shared by all readers; create a
    # fresh queue per instance instead
    self.output = output if output is not None else Queue.Queue()
    self.datasource = DataSource(settings)
    self.config = Config('reader-%s.cfg' % day)
    self.deadline = str2timestamp(day_offset(day_str=self.day, offset=7, format='%Y_%m_%d'),
                                  format='%Y_%m_%d')
    self.stopped = False
    self.current_index = int(self.config.get('current_index', 0))
    self.now = self.config.get('now')
    logging.info("Reader-%s init done!" % self.day)
class TwoMass(object):
    '''classdocs'''

    def __init__(self):
        '''Constructor'''
        self.data = []
        from datasource import DataSource
        self.wifsip = DataSource(database='wifsip', user='******', host='pina.aip.de')

    def fromfile(self, filename=None):
        import pyfits
        if filename is None:
            filename = '/work2/jwe/m48/data/2MASS.fits'
        hdulist = pyfits.open(filename)
        self.data = hdulist[1].data
        hdulist.close()

    def todatabase(self):
        for d in self.data:
            print(d['starid'], d['ra_cone'], d['dec_cone'])
            self.wifsip.execute("""UPDATE m48stars
                SET ra = %f, dec = %f
                WHERE starid = '%s';""" % (d['ra_cone'], d['dec_cone'], d['starid']),
                commit=False)
        self.wifsip.commit()
def lightcurve_fromdb(self, starid):
    """
    extract a single lightcurve from the database
    and return epoch (hjd), magnitude and error
    """
    from datasource import DataSource
    import numpy as np
    import log

    wifsip = DataSource(database='wifsip', user='******', host='pina.aip.de')
    query = """SELECT frames.hjd, phot.mag_auto, phot.magerr_auto, phot.flags
               FROM frames, matched, phot
               WHERE id LIKE '%s'
               AND filter LIKE 'rp'
               AND frames.objid = matched.objid
               AND (phot.objid, phot.star) = (matched.objid, matched.star)
               ORDER BY hjd;""" % (starid)
    # AND hjd>2455473.5 AND hjd<2455477 AND frames.good
    log.log('/work1/jwe/NGC1647/analysis.log', 'fetching star %s' % starid)
    # AND frames.good
    # AND hjd>2455470 AND hjd<2455510
    data = wifsip.query(query)
    wifsip.close()
    hjd = np.array([d[0] for d in data])
    mag = np.array([d[1] for d in data])
    err = np.array([d[2] for d in data])
    return (hjd, mag, err)
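# --- Added example (not from the source): a hedged usage sketch for
# lightcurve_fromdb() above. 'Analysis' and the star id are hypothetical
# placeholders for whatever class and identifier actually apply.
import matplotlib.pyplot as plt

analysis = Analysis()  # hypothetical owner of lightcurve_fromdb()
hjd, mag, err = analysis.lightcurve_fromdb('20050-0000-0001')  # placeholder starid
plt.errorbar(hjd - hjd[0], mag, yerr=err, fmt='.k')
plt.gca().invert_yaxis()  # magnitude scale: brighter is up
plt.xlabel('HJD - HJD$_0$ [days]')
plt.ylabel('r mag')
plt.show()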
def __init__(self):
    self.stock_cache = {}
    self.datasource = DataSource()
    # Used to pass data within the class: the data can be huge,
    # and we want to avoid the overhead of passing it around.
    self.stockdata = None
def __init__(self, param):
    '''
    Constructor: initialize the database connection and query it
    according to the parameter. A string is treated as the star's
    name, a tuple as its coordinates.
    '''
    from datasource import DataSource
    self.corot = DataSource(database='corot', user='******', host='pina.aip.de')
    if isinstance(param, str):
        try:
            values = self._byname(param)[0]
        except IndexError:
            values = [None] * 10
    if isinstance(param, tuple):
        try:
            values = self._bycoord(param)[0]
        except IndexError:
            values = [None] * 10
    keys = ['twomass', 'raj2000', 'dej2000', 'jmag', 'e_jmag', 'hmag',
            'e_hmag', 'kmag', 'e_kmag', 'coord']
    for key, value in zip(keys, values):
        if key == 'coord' and value is not None:
            # we want to return a tuple
            vals = value.strip('()').split(',')
            self[key] = (float(vals[0]), float(vals[1]))
        else:
            self[key] = value
def plot_cpd(self):
    '''create plot for Heraeus Talk'''
    from datasource import DataSource

    wifsip = DataSource(database='wifsip', user='******', host='oldpina.aip.de')
    query = """SELECT bv, period FROM m48stars WHERE good;"""
    data = wifsip.query(query)
    bv = np.array([d[0] for d in data])
    period = np.array([d[1] for d in data])

    import gyroage
    from functions import logspace

    bv360 = logspace(0.5, 2.0, num=100)
    # P = gyroage.gyroperiod(bv360, 360.0, version=2007)
    P, pc = gyroage.gyroperiod(bv360, 360.0, version=2003)
    plt.plot(bv360, pc, color='b', linestyle='--')
    plt.plot(bv360, P, color='r')
    plt.scatter(bv - self.ebv, period, s=1, edgecolor='none', c='k')
    plt.xlabel('(B - V)$_0$')
    plt.ylabel('period [days]')
    plt.ylim(0.0, 20.0)
    plt.xlim(0.0, 2.0)
    plt.grid()
    plt.savefig('/home/jwe/Documents/Talks/Heraeus2015/m48cpd.eps')
    plt.savefig('/home/jwe/Documents/Talks/Heraeus2015/m48cpd.pdf')
    plt.close()
class Yadav(object):
    '''classdocs'''

    def __init__(self):
        '''Constructor'''
        self.data = []
        from datasource import DataSource
        self.wifsip = DataSource(database=config.dbname, user=config.dbuser, host=config.dbhost)

    def fromfile(self, filename=None):
        hdulist = pyfits.open(filename)
        self.data = hdulist[1].data
        self.keys = []
        for col in hdulist[1].columns:
            self.keys.append(col.name)
        hdulist.close()

    def todatabase(self):
        for d in self.data:
            print(d['seq'])
            record = {}
            for key in self.keys:
                record[key] = d[key]
                if np.isnan(record[key]):
                    record[key] = 'NULL'
            query = """INSERT INTO m67 (seq, ra, dec, vmag, bmag, icmag, pmra, pmdec, pmb, hrv)
                       VALUES (%(Seq)d, %(RAJ2000)f, %(DEJ2000)f, %(Vmag)s, %(Bmag)s, %(Icmag)s,
                               %(pmRA)f, %(pmDE)f, %(Pmb)f, %(HRV)s);""" % record
            self.wifsip.execute(query, commit=False)
        self.wifsip.commit()
def make_cmd(show=False):
    import numpy as np
    import matplotlib.pyplot as plt
    from datasource import DataSource

    wifsip = DataSource(database='wifsip', user='******', host='pina.aip.de')
    query = "SELECT vmag, bmag FROM ngc6940 WHERE vmag>0.0 AND bmag>0.0;"
    data = wifsip.query(query)
    vmag = np.array([d[0] for d in data]) * 1.0042005546775856 + 0.24536565071778343
    bmag = np.array([d[1] for d in data]) * 1.0017849466111253 + 1.3145083952754286
    bv = bmag - vmag
    plt.scatter(bv - 0.214, vmag, edgecolor='none', alpha=0.5, s=2, c='k')
    plt.axvline(0.653, linestyle='--', color='y')
    plt.title('NGC6940 Color Magnitude Diagram E(B-V)=0.214')
    plt.ylim(21.0, 10.0)
    plt.xlim(0.0, 1.6)
    plt.xlabel('(B - V)$_0$')
    plt.ylabel('V [mag]')
    plt.grid()
    plt.minorticks_on()
    if show:
        plt.show()
    else:
        plt.savefig(config.resultpath + 'ngc6940cmd.eps')
        plt.savefig(config.resultpath + 'ngc6940cmd.pdf')
    plt.close()
def make_cmd(self):
    import pylab as plt
    import numpy as np
    from datasource import DataSource

    wifsip = DataSource(database='wifsip', user='******', host='pina.aip.de')
    query = "SELECT vmag, bv FROM ngc2281stars WHERE vmag>0.0 AND bv>0.0;"
    data = wifsip.query(query)
    wifsip.close()
    vmag = np.array([d[0] for d in data])
    bv = np.array([d[1] for d in data])
    plt.scatter(bv - self.ebv, vmag, edgecolor='none', alpha=0.75)
    k = (21.0 - 10.5) / (1.8 - 0.0)
    d = 10.5
    x = np.linspace(0.0, 2.5, 10)
    y = k * x + d
    plt.plot(x, y, linestyle='dashed', color='k')
    plt.ylim(21.0, 10.0)
    plt.xlim(0.0, 2.2)
    plt.title('NGC 2281')
    plt.xlabel('(B - V)$_0$')
    plt.ylabel('V [mag]')
    plt.grid()
    plt.savefig('/work1/jwe/Dropbox/NGC2281/plots/ngc2281cmd.pdf')
    # plt.show()
    plt.close()
def __init__(self):
    self.__on = True
    self.__actions = [
        self.__add_person,
        self.__get_all_persons,
        self.__add_animal,
        self.__get_all_animals,
        self.__quit
    ]
    self.__datasource = DataSource()
def _fromdatabase(self):
    """import the table from a database"""
    from datasource import DataSource
    from astropy.coordinates import SkyCoord  # @UnresolvedImport
    from astropy import units as u

    self.wifsip = DataSource(database=config.dbname, user=config.dbuser, host=config.dbhost)
    self.stars = []
    columns = self.wifsip.columns('ngc6633')
    params = {'columns': ', '.join(columns), 'key': columns[0]}
    query = "SELECT %(columns)s FROM ngc6633 ORDER BY %(key)s;" % params
    self._stars = self.wifsip.query(query)
    data_types = self.wifsip.data_types('ngc6633')
    arraydata = []
    c = columns.index('coord')
    for star in self._stars:
        starlist = list(star)
        coord = star[c]
        ra, dec = coord.strip('()').split(',')
        newcoord = SkyCoord(ra=float(ra) * u.deg, dec=float(dec) * u.deg)
        starlist[c] = newcoord
        arraydata.append(tuple(starlist))
    # zip() returns an iterator in Python 3; numpy needs a list of (name, type) pairs
    dtype = list(zip(columns, data_types))
    self.stars = np.array(arraydata, dtype=dtype)
def lightcurve_fromdb(self):
    """
    extract a single lightcurve from the database
    and return epoch (hjd), magnitude and error
    """
    from datasource import DataSource
    import numpy as np

    wifsip = DataSource(database='wifsip', user='******', host='pina.aip.de')
    query = """SELECT frames.hjd, phot.mag_isocor, phot.magerr_isocor
               FROM frames, matched, phot
               WHERE id LIKE '%s'
               AND filter LIKE 'rp'
               AND frames.objid = matched.objid
               AND (phot.objid, phot.star) = (matched.objid, matched.star)
               ORDER BY hjd;""" % (self.id)
    # AND hjd>2455473.5 AND hjd<2455477 AND frames.good
    # AND frames.good
    # AND hjd>2455470 AND hjd<2455510
    data = wifsip.query(query)
    wifsip.close()
    self.hjd = np.array([d[0] for d in data])
    self.mag = np.array([d[1] for d in data])
    self.err = np.array([d[2] for d in data])
    self.hjd -= self.hjd[0]
def __init__(self):
    '''Constructor'''
    from datasource import DataSource

    table = DataSource(database='stella', user='******', host='pera.aip.de', dictcursor=True)
    columns = ['starid', 'ra', 'dec', '"b-y"', 'm1', 'c1', 'beta']
    query = "SELECT %s FROM referencestars ORDER BY starid;" % ', '.join(columns)
    data = table.query(query)
    columns[3] = 'b-y'
    # one explicit dtype per column; zip() must be materialized for numpy
    dtypes = ['|S11', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4']
    arraydata = []
    for star in data:
        arraydata.append(tuple(star))
    self.stars = np.array(arraydata, dtype=list(zip(columns, dtypes)))
def test_initialize_column_rename(self):
    df = pd.read_csv('sample.csv', parse_dates=['timestamp'])
    df.columns = ['TS', 'TRADINGSYMBOL', 'OPEN', 'HIGH',
                  'LOW', 'CLOSE', 'VOLUME', 'PREVCLOSE']
    self.ds = DataSource(data=df, timestamp='TS', symbol='TRADINGSYMBOL')
    self.assertEqual(self.ds.data.columns[0], 'timestamp')
    self.assertEqual(self.ds.data.columns[1], 'symbol')
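# --- Added note (an assumption, not from the source): a minimal 'sample.csv'
# this test could run against. Its eight columns must line up with the rename
# above so that DataSource can map 'TS' -> 'timestamp' and
# 'TRADINGSYMBOL' -> 'symbol':
#
#   timestamp,symbol,open,high,low,close,volume,prevclose
#   2018-01-01,AAA,10.0,11.0,9.5,10.5,1000,10.1
#   2018-01-01,BBB,20.0,21.0,19.5,20.5,2000,20.1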
def __init__(self, objname=None, filtercol='V', dbname=None):
    '''Constructor'''
    self.wifsip = DataSource(database='wifsip', host='pina', user='******')
    print(objname)
    if objname is None:  # 'M48 BVI'
        raise ValueError('objname not set')
    else:
        self.objname = objname
    if filtercol in ('B', 'V', 'R', 'I', 'u', 'v', 'b', 'y', 'hbn', 'hbw'):
        self.filtercol = filtercol
    else:
        raise ValueError('unknown filter color')
    self.frames = []
    if dbname is None:  # 'M48stars'
        raise ValueError('tablename not set')
    else:
        self.dbname = dbname
    print('filter %s' % self.filtercol)
class StackedPhot(object):
    '''classdocs'''

    def __init__(self):
        '''Constructor'''
        self.data = []
        from datasource import DataSource
        self.wifsip = DataSource(database=config.dbname, user=config.dbuser, host=config.dbhost)

    def fromfile(self, filename=None):
        import pyfits
        hdulist = pyfits.open(filename)
        self.data = hdulist[1].data
        self.keys = []
        for col in hdulist[1].columns:
            self.keys.append(col.name)
        hdulist.close()

    def todatabase(self):
        for d in self.data:
            print(d['starid'])
            record = {}
            for key in self.keys:
                record[key] = d[key]
            query = """UPDATE ngc2236
                       SET vmag = %(VMAG)f, bmag = %(BMAG)f, bv = %(BMAG)f-%(VMAG)f,
                           nv = %(V_FLAGS)d, nb = %(B_FLAGS)d,
                           vmag_err = %(VMAG_ERR)f, bmag_err = %(BMAG_ERR)f,
                           member = TRUE
                       WHERE starid = '%(starid)s';""" % record
            self.wifsip.execute(query, commit=False)
        self.wifsip.commit()
def __init__(self, field, datapath, lightcurvepath, plotpath,
             filtercol='V', flaglimit=4, starslimit=10000, dettemp=-110.0):
    '''Constructor'''
    from datasource import DataSource

    self.wifsip = DataSource(database='stella', host='pera', user='******')
    self.coords = {}
    self.objids = []
    self.field = field
    self.filtercol = filtercol
    self.flaglimit = flaglimit
    self.starslimit = starslimit
    self.dettemp = dettemp
    self.filename = datapath + self.field
    self.lightcurvepath = lightcurvepath
    self.plotpath = plotpath
    print(self.field)
    print('filter: %s' % self.filtercol)
    print('maxstars: %d' % self.starslimit)
    print('dettemp: <%.1f' % self.dettemp)
def getphot(self, ra, dec, filtercol):
    from datasource import DataSource

    wifsip = DataSource(database=config.dbname, user=config.dbuser,
                        host=config.dbhost, dictcursor=True)
    params = {'filtercol': filtercol, 'ra': ra, 'dec': dec}
    # 'IC 4756 v2 %% uvby'
    query = """SELECT mag_auto, zeropnt, expt, flux_auto
               FROM phot, frames
               WHERE object like 'M 67%%'
               AND phot.objid = frames.objid
               AND filter='%(filtercol)s'
               AND circle(phot.coord,0.0)<@circle(point(%(ra).11f,%(dec).11f),0.6/3600.0)
               AND flags=0""" % params
    data = wifsip.query(query)
    zeropnt = np.array([d['zeropnt'] for d in data])
    mag_isocor = np.array([d['mag_auto'] for d in data])
    expt = np.array([d['expt'] for d in data])
    flux_auto = np.array([d['flux_auto'] for d in data])
    if len(mag_isocor) == 0:
        return np.nan, np.nan, np.nan
    # mags = mag_isocor - zeropnt
    mags = -2.5 * np.log10(flux_auto / expt)
    valid = abs(mags - np.mean(mags)) < 2.0 * np.std(mags)
    mags = np.compress(valid, mags)
    mag = np.mean(mags)
    return mag, np.std(mags), len(mags)
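# --- Added toy check (not from the source) of the two-sigma clipping step in
# getphot() above: values more than two standard deviations from the mean are
# dropped before the magnitude is averaged.
import numpy as np

mags = np.array([12.0] * 9 + [15.0])  # nine good values plus one outlier
valid = abs(mags - np.mean(mags)) < 2.0 * np.std(mags)
clipped = np.compress(valid, mags)  # the 15.0 outlier is removed
print(np.mean(clipped), len(clipped))  # -> 12.0 9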
def __init__(self, refframe):
    self.ref = {}
    self.refframe = refframe
    self.wifsip = DataSource(database='wifsip', user='******', host='pina.aip.de')
    self.offset = {}
def make_cmd(self):
    import pylab as plt
    import numpy as np
    from datasource import DataSource

    wifsip = DataSource(database='wifsip', user='******', host='pina.aip.de')
    query = "SELECT vmag, bv FROM ngc1647stars;"
    data = wifsip.query(query)
    wifsip.close()
    vmag = np.array([d[0] for d in data])
    bv = np.array([d[1] for d in data])
    plt.scatter(bv, vmag, edgecolor='none', alpha=0.75)
    k = 8 / (2.0 - 0.32)
    d = 20 - 2.0 * k
    x = np.linspace(0.0, 2.5, 10)
    y = k * x + d
    plt.plot(x, y, linestyle='dashed', color='k')
    plt.ylim(21.0, 8.0)
    plt.xlim(0.0, 2.2)
    plt.xlabel('B - V')
    plt.ylabel('V [mag]')
    plt.grid()
    plt.savefig('/work1/jwe/Dropbox/NGC1647/plots/ngc1647cmd.pdf')
    # plt.show()
    plt.close()
class CoRoTId(object):
    '''classdocs'''

    def __init__(self):
        '''Constructor'''
        self.data = []
        from datasource import DataSource
        self.wifsip = DataSource(database='wifsip', user='******', host='pina.aip.de')

    def fromfile(self, filename=None):
        import pyfits
        if filename is None:
            filename = '/work2/jwe/NGC2236/NGC2236corotid.fits'
        hdulist = pyfits.open(filename)
        self.data = hdulist[1].data
        hdulist.close()

    def todatabase(self):
        for d in self.data:
            print(d['starid'], d['corotid_2'])
            self.wifsip.execute("""UPDATE ngc2236
                SET corotid = %d
                WHERE starid = '%s';""" % (d['corotid_2'], d['starid']),
                commit=False)
        self.wifsip.commit()
def __init__(self, redisClientManager, config, act):
    DataSource.__init__(self, config, act)
    self.redisClientManager = redisClientManager
    self.downloadedDir = ""
    self.key = ""
    if os.path.exists(self.dir + "/downloaded.txt"):
        with open(self.dir + "/downloaded.txt", 'r') as content_file:
            self.downloadedDir = content_file.read()
def __init__(self):
    '''Constructor'''
    from datasource import DataSource
    self.table = DataSource(database='stella', user='******', host='pera.aip.de')
    self.data = []
def __init__(self):
    '''Constructor'''
    from datasource import DataSource
    self.landolt = DataSource(database='stella', host='pera', user='******')
def buildtable(self):
    """builds the table of stars"""
    import numpy as np

    epochs = len(self.objids)
    stars = len(self.stars)
    if fileexists('/work2/jwe/NGC2281/' + self.filter + 'array.npy'):
        m = np.load('/work2/jwe/NGC2281/' + self.filter + 'array.npy')
    else:
        from datasource import DataSource
        from framecal import FrameCal

        fc = FrameCal(self.filter)
        m = np.zeros([epochs, stars])
        # objid is specific to a filter so we only need to query the objid
        wifsip = DataSource(host='pina', database='wifsip', user='******')
        for objid in self.objids:
            k = self.objids.index(objid)
            print(k, epochs, objid, end=' ')
            query = """SELECT matched.id, phot.mag_auto, phot.magerr_auto
                       FROM phot, matched
                       WHERE phot.objid like '%s'
                       AND (matched.objid, matched.star) = (phot.objid, phot.star)
                       AND phot.flags = 0;""" % objid
            result = wifsip.query(query)
            starids = [s[0] for s in result]
            mags = [s[1] for s in result]
            err = [s[2] for s in result]
            slope, intercept, _, _, _ = fc.calframe(objid)
            print(len(mags))
            for starid in starids:
                i = self.stars.index(starid)
                m[k, i] = mags[starids.index(starid)] * slope + intercept
        np.save('/work2/jwe/NGC2281/' + self.filter + 'array.npy', m)
        wifsip.close()
    i = np.where(m == 0.0)
    m[i] = np.nan
    # scipy.stats.nanmean/nanmedian/nanstd were removed from modern SciPy;
    # numpy's nan-aware reductions do the same job
    # calculate the observed average for the stars
    avg = np.nanmean(m, axis=0)
    for k in range(epochs):
        print(k, epochs, self.objids[k])
        # calculate the mean of offsets
        off = np.nanmedian(m[k, :] - avg)
        # correct epoch for mean of offsets
        m[k, :] += off
    # calculate new corrected means
    avg = np.nanmean(m, axis=0)
    std = np.nanstd(m, axis=0)
    for i in range(len(self.stars)):
        print(self.stars[i], avg[i], std[i])
def __init__(self, field_name='M48field'):
    '''Constructor'''
    import ephem
    import datetime
    import pytz
    import astronomy as ast
    from datasource import DataSource

    self.field_name = field_name
    self.wifsip = DataSource(database='wifsip', user='******', host='pina.aip.de')
    kpno = ephem.Observer()
    # 31.958036, -111.600578
    kpno.lon, kpno.lat = '-111.600578', '31.958036'
    kpno.horizon = '-0:34'
    kpno.elevation = 2096
    tzi = pytz.timezone('MST')
    # fmt = '%Y-%m-%d %H:%M:%S %Z%z'
    obsdate = datetime.datetime(2015, 2, 9, 23, 0, 0, tzinfo=tzi)
    kpno.date = obsdate + datetime.timedelta(7 * ephem.hour)
    d = kpno.date.datetime()
    self.input_epoch = 2000
    self.current_epoch = d.year + d.timetuple().tm_yday / 365.
    # LST at mid-exposure in decimal hours
    lst = kpno.sidereal_time()
    print('local sidereal time: ', lst)
    self.siderial_time = ast.hms2hh(str(lst))
    self.exposure_length = 40.0 / 60.0  # in hours
    self.wavelength = 6000  # in Angstroem
    self.cable = 'BLUE'
    self.weighting = 'STRONG'
    self.guidewave = 6000
    self.center = self._get_center()
    self.ebv = 0.031  # from Webda
    self.dm = 9.53  # from Webda
    target = {}
    self.table = []
    target['id'] = 6000
    target['name'] = 'M48center'
    target['ra'] = self.center[0]
    target['dec'] = self.center[1]
    target['class'] = 'C'
    self.table.append(target)
    self.targeted = []
    # number of sky fibers
    self.skies = 6
    # number of field orientation probes
    self.fops = 6
    self.tweakcenter = False
    self.tweakangle = False
def __init__(self):
    '''Constructor'''
    from datasource import DataSource
    self.wifsip = DataSource(database='wifsip', user='******', host='pina.aip.de')
def __init__(self):
    '''Constructor'''
    self.data = []
    from datasource import DataSource
    self.wifsip = DataSource(database=config.dbname, user=config.dbuser, host=config.dbhost)
def sqlQueryGetTeam(team, year):
    '''
    Grabs a new cursor list for one team and returns it to the calling
    function. Both team and year are strings.
    '''
    # Make a new database and cursor list
    database = DataSource()
    cursorList = database.getPlayersByTeam(team, year)
    return cursorList
def sqlQueryGetTeam(team, year):
    '''
    Grabs a new cursor with a query regarding one team and one year,
    both entered as strings, and returns the cursorList returned by
    the query.
    '''
    # Make a new database and cursor list
    database = DataSource()
    cursorList = database.getPlayersByTeam(team, year)
    return cursorList
def sqlYearQuery(year):
    '''
    Grabs all data for one year, returning a list containing all of the
    information. Year is a string and refers to an NBA season.
    '''
    # Make a new database and cursor list
    database = DataSource()
    cursorList = database.getAllYear(year)
    return cursorList
def sqlPlayerYearQuery(player, year):
    '''
    Grabs the resulting cursorList from a query regarding one player
    and one year. Player and year are both strings.
    '''
    # Make a new database and cursor list
    database = DataSource()
    cursorList = database.getPlayerByNameAndYear(player, year)
    return cursorList
def sqlPlayerQuery(player):
    '''
    Grabs all data for one player, returning a list containing all
    information. Player is a string and refers to the name of the player.
    '''
    # Make a new database and cursor list
    database = DataSource()
    cursorList = database.getPlayerByName(player)
    return cursorList
def __init__(self, util=None, standalone=False):
    """ Initializer

    :param util: utility object
    :param standalone: True - standalone version, False - part of Peppy player
    """
    ScreensaverMeter.__init__(self)
    if util:
        self.util = util
    else:
        self.util = MeterUtil()

    use_vu_meter = getattr(self.util, USE_VU_METER, None)

    self.name = "peppymeter"
    base_path = "."
    if __package__:
        pkg_parts = __package__.split(".")
        if len(pkg_parts) > 0:
            base_path = os.path.join(os.getcwd(), "screensaver", self.name)

    parser = ConfigFileParser(base_path)
    self.util.meter_config = parser.meter_config
    self.outputs = {}

    if standalone:
        if self.util.meter_config[USE_LOGGING]:
            logging.basicConfig(level=logging.NOTSET)
        else:
            logging.disable(logging.CRITICAL)

    # no VU Meter support for Windows
    if "win" in sys.platform and self.util.meter_config[DATA_SOURCE][TYPE] == SOURCE_PIPE:
        self.util.meter_config[DATA_SOURCE][TYPE] = SOURCE_NOISE

    self.data_source = DataSource(self.util)
    if self.util.meter_config[DATA_SOURCE][TYPE] == SOURCE_PIPE or use_vu_meter == True:
        self.data_source.start_data_source()

    if self.util.meter_config[OUTPUT_DISPLAY]:
        self.meter = self.output_display(self.data_source)
    if self.util.meter_config[OUTPUT_SERIAL]:
        self.outputs[OUTPUT_SERIAL] = SerialInterface(self.util.meter_config, self.data_source)
    if self.util.meter_config[OUTPUT_I2C]:
        self.outputs[OUTPUT_I2C] = I2CInterface(self.util.meter_config, self.data_source)
    if self.util.meter_config[OUTPUT_PWM]:
        self.outputs[OUTPUT_PWM] = PWMInterface(self.util.meter_config, self.data_source)

    self.start_interface_outputs()
def sqlQuerySortBy(stat, year, order):
    '''
    Grabs a new cursor with a query that sorts one year's players by a
    given stat. stat, year and order are all strings obtained from drop
    down menus: stat refers to the column the user wants to sort on, and
    order can be either ascending or descending.
    '''
    # Make a new database and cursor list
    database = DataSource()
    cursorList = database.sortBy(stat, year, order)
    return cursorList
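# --- Added usage sketch (not from the source) for the query helpers above;
# the team, player, season, and stat values are illustrative only.
teamRows = sqlQueryGetTeam('GSW', '2016')
playerRows = sqlPlayerYearQuery('Stephen Curry', '2016')
topScorers = sqlQuerySortBy('points', '2016', 'DESC')
for row in topScorers[:5]:
    print(row)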
def __init__(self):
    # Figure out if SE Linux is on and in enforcing mode
    self.is_selinux_enforcing = False

    # Just return if the SE Linux Status Tool is not installed
    if not DataSource.has_sestatus():
        return

    # Figure out if we can execute heap and execute memory
    can_selinux_exec_heap = DataSource.sestatus_allow_execheap()
    can_selinux_exec_memory = DataSource.sestatus_allow_execmem()

    self.is_selinux_enforcing = (not can_selinux_exec_heap or not can_selinux_exec_memory)
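# --- Added usage sketch (an assumption, not from the source: the __init__
# above is taken to belong to a class named, hypothetically, SELinux):
selinux = SELinux()
if selinux.is_selinux_enforcing:
    print('SELinux blocks exec-heap and/or exec-mem')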
def __init__(self, ctx, database):
    """
    @param ctx      context object passed through to DataSource
    @param database [DataBase] database object
    """
    DataSource.__init__(self, ctx, database)
    self.table = []
    self.states = {}
    self.entries = 0
    self.rowcount = 0
    # self.columns and self.args are expected to be set by DataSource.__init__
    assert isinstance(self.columns, list)
    assert isinstance(self.args, dict)
def lookup(cls, edition_or_identifier, data_source, operation=None, collection=None):
    from datasource import DataSource
    from edition import Edition
    from identifier import Identifier

    _db = Session.object_session(edition_or_identifier)
    if isinstance(edition_or_identifier, Identifier):
        identifier = edition_or_identifier
    elif isinstance(edition_or_identifier, Edition):
        identifier = edition_or_identifier.primary_identifier
    else:
        raise ValueError(
            "Cannot look up a coverage record for %r." % edition_or_identifier)

    if isinstance(data_source, basestring):
        data_source = DataSource.lookup(_db, data_source)

    return get_one(
        _db, CoverageRecord,
        identifier=identifier,
        data_source=data_source,
        operation=operation,
        collection=collection,
        on_multiple='interchangeable',
    )
def start(self, callback):
    self._logger.info('start: %s %d.', 'Start task sign', self._task['sign'])
    self._finish_callback = callback
    date_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    self._update_status(TASK_STATUS['PREPARING'], begin_time=date_time)
    self._data_source = DataSource()
    self._data_source.get_source(self._task['type'], self._source_callback)
def data_source(self):
    """Find the data source associated with this Collection.

    Bibliographic metadata obtained through the collection protocol
    is recorded as coming from this data source. A LicensePool
    inserted into this collection will be associated with this data
    source, unless its bibliographic metadata indicates some other
    data source.

    For most Collections, the integration protocol sets the data
    source. For collections that use the OPDS import protocol, the
    data source is a Collection-specific setting.
    """
    data_source = None
    name = ExternalIntegration.DATA_SOURCE_FOR_LICENSE_PROTOCOL.get(
        self.protocol
    )
    if not name:
        name = self.external_integration.setting(
            Collection.DATA_SOURCE_NAME_SETTING
        ).value
    _db = Session.object_session(self)
    if name:
        data_source = DataSource.lookup(_db, name, autocreate=True)
    return data_source
def all_from_data_sources(cls, _db, data_sources):
    """All custom lists from the given data sources."""
    if not isinstance(data_sources, list):
        data_sources = [data_sources]
    ids = []
    for ds in data_sources:
        if isinstance(ds, basestring):
            ds = DataSource.lookup(_db, ds)
        ids.append(ds.id)
    return _db.query(CustomList).filter(CustomList.data_source_id.in_(ids))
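# --- Added usage sketch (not from the source): data sources may be passed as
# DataSource objects or as name strings, and a single value is accepted as
# well; 'session' and the source names here are illustrative placeholders.
lists = CustomList.all_from_data_sources(session, ['Gutenberg', 'Overdrive']).all()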
def run():
    # modify the connection string to point to the MLS/SQL Server instance
    # where you restored the database
    connectionstring = 'Driver=SQL Server;Server=MLMACHINE\\SQLSERVER17;Database=velibdb;Trusted_Connection=True;'
    ds = DataSource(connectionstring)
    df = ds.loaddata()

    pipeline = Pipeline(steps=[('outliers', OutliersHandler()),
                               ('label', LabelDefiner()),
                               ('dt', DateTimeFeaturesExtractor()),
                               ('ts', TSFeaturesExtractor()),
                               ('st', StatisticalFeaturesExtractor()),
                               ('exclusion', FeaturesExcluder()),
                               ('scaler', FeaturesScaler())])

    # Execute pipeline
    df = pipeline.fit_transform(df)

    # split dataset: a one-day test set for each station
    test_size = 24 * 4
    train = df.groupby('stationid').head(df.shape[0] - test_size)
    test = df.groupby('stationid').tail(test_size)

    # fit classifier
    clf = RxClassifier(computecontext=ds.getcomputecontext())
    coeffs = clf.fit(train)

    # print coefficients and exclude the stationid factor
    print(coeffs.tail(14))

    # run prediction on the hold-out set and evaluate
    y_pred = clf.predict(test.drop(['label'], axis=1, inplace=False))
    y_truth = test['label'].as_matrix()
    print(classification_report(y_truth, y_pred))
def _get_hz_string_from_beagle_bone():
    scale, hz_brand = 1, '0.0'

    if not DataSource.has_cpufreq_info():
        return scale, hz_brand

    returncode, output = DataSource.cpufreq_info()
    if returncode != 0:
        return scale, hz_brand

    hz_brand = output.split('current CPU frequency is')[1].split('.')[0].lower()

    if hz_brand.endswith('mhz'):
        scale = 6
    elif hz_brand.endswith('ghz'):
        scale = 9
    hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
    hz_brand = to_hz_string(hz_brand)

    return scale, hz_brand
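# --- Added worked example (illustrative, not from the source) of the suffix
# handling above: given cpufreq-info output containing
# 'current CPU frequency is 1.00 GHz.', hz_brand becomes '1 ghz' after the
# split, the 'ghz' suffix sets scale = 9, and the rstrip() calls strip the
# unit so only the numeric part is handed to to_hz_string().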
def parse_urn(cls, _db, identifier_string, must_support_license_pools=False):
    type, identifier_string = cls.type_and_identifier_for_urn(identifier_string)
    if must_support_license_pools:
        try:
            ls = DataSource.license_source_for(_db, type)
        except NoResultFound:
            raise Identifier.UnresolvableIdentifierException()
        except MultipleResultsFound:
            # This is fine.
            pass
    return cls.for_foreign_id(_db, type, identifier_string)
def __init__(self, util=None, standalone=False):
    """ Initializer

    :param util: utility object
    :param standalone: True - standalone version, False - part of Peppy player
    """
    ScreensaverMeter.__init__(self)
    if util:
        self.util = util
    else:
        self.util = MeterUtil()

    use_vu_meter = getattr(self.util, USE_VU_METER, None)

    base_path = "."
    if __package__:
        pkg_parts = __package__.split(".")
        if len(pkg_parts) > 0:
            base_path = os.path.join(os.getcwd(), "screensaver", "peppymeter")

    parser = ConfigFileParser(base_path)
    self.util.meter_config = parser.meter_config
    self.outputs = {}

    if standalone:
        if self.util.meter_config[USE_LOGGING]:
            logging.basicConfig(level=logging.NOTSET)
        else:
            logging.disable(logging.CRITICAL)

    # no VU Meter support for Windows
    if "win" in sys.platform or use_vu_meter == False:
        if self.util.meter_config[DATA_SOURCE][TYPE] == SOURCE_PIPE:
            self.util.meter_config[DATA_SOURCE][TYPE] = SOURCE_NOISE

    self.data_source = DataSource(self.util.meter_config)
    if self.util.meter_config[DATA_SOURCE][TYPE] == SOURCE_PIPE or use_vu_meter == True:
        self.data_source.start_data_source()

    if self.util.meter_config[OUTPUT_DISPLAY]:
        self.meter = self.output_display(self.data_source)
    if self.util.meter_config[OUTPUT_SERIAL]:
        self.outputs[OUTPUT_SERIAL] = SerialInterface(self.util.meter_config, self.data_source)
    if self.util.meter_config[OUTPUT_I2C]:
        self.outputs[OUTPUT_I2C] = I2CInterface(self.util.meter_config, self.data_source)
    if self.util.meter_config[OUTPUT_PWM]:
        self.outputs[OUTPUT_PWM] = PWMInterface(self.util.meter_config, self.data_source)

    self.start_interface_outputs()
def lookup(self, _db, data_source, type, patron, refresher_method,
           allow_persistent_token=False, allow_empty_token=False):
    from datasource import DataSource
    if isinstance(data_source, basestring):
        data_source = DataSource.lookup(_db, data_source)
    credential, is_new = get_one_or_create(
        _db, Credential, data_source=data_source, type=type, patron=patron)
    if (is_new
            or (not credential.expires and not allow_persistent_token)
            or (not credential.credential and not allow_empty_token)
            or (credential.expires and credential.expires <= datetime.datetime.utcnow())):
        if refresher_method:
            refresher_method(credential)
    return credential
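# --- Added sketch (not from the source) of the refresher_method contract the
# lookup above implies: a callable that receives the Credential and fills in
# .credential and .expires. The token value and lifetime are placeholders.
def refresher_method(credential):
    credential.credential = 'opaque-token-from-remote-service'
    credential.expires = datetime.datetime.utcnow() + datetime.timedelta(hours=1)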
def initialize_data(cls, session, set_site_configuration=True):
    # Create initial content.
    from datasource import DataSource
    from classification import Genre
    from licensing import DeliveryMechanism

    list(DataSource.well_known_sources(session))

    # Load all existing Genre objects.
    Genre.populate_cache(session)

    # Create any genres not in the database.
    for g in classifier.genres.values():
        # TODO: On the very first startup this is rather expensive
        # because the cache is invalidated every time a Genre is
        # created, then populated the next time a Genre is looked
        # up. This wouldn't be a big problem, but this also happens
        # on setup for the unit tests.
        Genre.lookup(session, g, autocreate=True)

    # Make sure that the mechanisms fulfillable by the default
    # client are marked as such.
    for content_type, drm_scheme in DeliveryMechanism.default_client_can_fulfill_lookup:
        mechanism, is_new = DeliveryMechanism.lookup(
            session, content_type, drm_scheme
        )
        mechanism.default_client_can_fulfill = True

    # If there is currently no 'site configuration change'
    # Timestamp in the database, create one.
    timestamp, is_new = get_one_or_create(
        session, Timestamp, collection=None,
        service=Configuration.SITE_CONFIGURATION_CHANGED,
        create_method_kwargs=dict(finish=datetime.datetime.utcnow())
    )
    if is_new:
        site_configuration_has_changed(session)
    session.commit()

    # Return a potentially-new Session object in case
    # it was updated by cls.update_timestamps_table
    return session
def unresolved_catalog(self, _db, data_source_name, operation):
    """Returns a query with all identifiers in a Collection's catalog
    that have unsuccessfully attempted resolution. This method is used
    on the metadata wrangler.

    :return: a sqlalchemy.Query
    """
    coverage_source = DataSource.lookup(_db, data_source_name)
    is_not_resolved = and_(
        CoverageRecord.operation == operation,
        CoverageRecord.data_source_id == coverage_source.id,
        CoverageRecord.status != CoverageRecord.SUCCESS,
    )

    query = _db.query(Identifier)\
        .outerjoin(Identifier.licensed_through)\
        .outerjoin(Identifier.coverage_records)\
        .outerjoin(LicensePool.work).outerjoin(Identifier.collections)\
        .filter(
            Collection.id == self.id, is_not_resolved, Work.id == None
        ).order_by(Identifier.id)

    return query
def __init__(self, name = "mapcount", path = "/proc/kpagecount" ): DataSource.__init__(self, name, path, 'Q')
def __init__(self, type, path):
    DataSource.__init__(self, type, path, 'Q')
@author: Joerg Weingrill <*****@*****.**>
'''
from datasource import DataSource


def getfile(sourcefile, targetdirectory):
    from subprocess import call
    call(['scp', '[email protected]:' + sourcefile, targetdirectory])


def convertfile(filename):
    from subprocess import call
    call(['./ses-writetxt.py', '--textfile', filename])


if __name__ == '__main__':
    query = "SELECT filename FROM obs WHERE object LIKE 'HN Peg' ORDER BY dateobs;"
    wifsip = DataSource(database='stella', user='******', host='pera.aip.de')
    result = wifsip.query(query)
    for r in result:
        filename = r[0]
        print(filename, end=' ')
        # science20151106B-0038_botzfxsEcd.fits
        path = filename.lstrip('science')[:8]
        sourcefile = '/stella/home/stella/spectra/' + path + '/' + filename + '_botzfxsEcd.fits'
        targetdirectory = '/work2/jwe/Projects/HNPeg/data'
        print(sourcefile, targetdirectory)
        # getfile(sourcefile, targetdirectory)
        convertfile('/work2/jwe/Projects/HNPeg/data/' + filename + '_botzfxsEcd.fits')
        # /stella/home/stella/spectra/20151106
    print(len(result))
def get_cpu_info_from_kstat():
    '''
    Returns the CPU info gathered from isainfo and kstat.
    Will return None if isainfo or kstat are not found.
    '''
    try:
        # Just return None if there is no isainfo or kstat
        if not DataSource.has_isainfo() or not DataSource.has_kstat():
            return None

        # If isainfo fails return None
        returncode, flag_output = DataSource.isainfo_vb()
        if flag_output is None or returncode != 0:
            return None

        # If kstat fails return None
        returncode, kstat = DataSource.kstat_m_cpu_info()
        if kstat is None or returncode != 0:
            return None

        # Various fields
        vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
        processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
        cache_size = 0
        stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
        model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
        family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

        # Flags
        flags = flag_output.strip().split('\n')[-1].strip().lower().split()
        flags.sort()

        # Convert from GHz/MHz string to Hz
        scale = 6
        hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
        hz_advertised = to_hz_string(hz_advertised)

        # Convert from GHz/MHz string to Hz
        hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
        hz_actual = to_hz_string(hz_actual)

        # Get the CPU arch and bits
        arch, bits = parse_arch(DataSource.raw_arch_string)

        return {
            'vendor_id': vendor_id,
            'hardware': '',
            'brand': processor_brand,
            'hz_advertised': to_friendly_hz(hz_advertised, scale),
            'hz_actual': to_friendly_hz(hz_actual, 0),
            'hz_advertised_raw': to_raw_hz(hz_advertised, scale),
            'hz_actual_raw': to_raw_hz(hz_actual, 0),
            'arch': arch,
            'bits': bits,
            'count': DataSource.cpu_count,
            'raw_arch_string': DataSource.raw_arch_string,
            'l2_cache_size': cache_size,
            'l2_cache_line_size': 0,
            'l2_cache_associativity': 0,
            'stepping': stepping,
            'model': model,
            'family': family,
            'processor_type': 0,
            'extended_model': 0,
            'extended_family': 0,
            'flags': flags
        }
    except:
        return None
def get_cpu_info_from_registry():
    '''
    FIXME: Is missing many of the newer CPU flags like sse3
    Returns the CPU info gathered from the Windows Registry.
    Will return None if not on Windows.
    '''
    try:
        # Just return None if not on Windows
        if not DataSource.is_windows:
            return None

        # Get the CPU name
        processor_brand = DataSource.winreg_processor_brand()

        # Get the CPU vendor id
        vendor_id = DataSource.winreg_vendor_id()

        # Get the CPU arch and bits
        raw_arch_string = DataSource.winreg_raw_arch_string()
        arch, bits = parse_arch(raw_arch_string)

        # Get the actual CPU Hz
        hz_actual = DataSource.winreg_hz_actual()
        hz_actual = to_hz_string(hz_actual)

        # Get the advertised CPU Hz
        scale, hz_advertised = _get_hz_string_from_brand(processor_brand)

        # Get the CPU features
        feature_bits = DataSource.winreg_feature_bits()

        def is_set(bit):
            mask = 0x80000000 >> bit
            retval = mask & feature_bits > 0
            return retval

        # http://en.wikipedia.org/wiki/CPUID
        # http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
        # http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
        flags = {
            'fpu': is_set(0),       # Floating Point Unit
            'vme': is_set(1),       # V86 Mode Extensions
            'de': is_set(2),        # Debug Extensions - I/O breakpoints supported
            'pse': is_set(3),       # Page Size Extensions (4 MB pages supported)
            'tsc': is_set(4),       # Time Stamp Counter and RDTSC instruction are available
            'msr': is_set(5),       # Model Specific Registers
            'pae': is_set(6),       # Physical Address Extensions (36 bit address, 2MB pages)
            'mce': is_set(7),       # Machine Check Exception supported
            'cx8': is_set(8),       # Compare Exchange Eight Byte instruction available
            'apic': is_set(9),      # Local APIC present (multiprocessor operation support)
            'sepamd': is_set(10),   # Fast system calls (AMD only)
            'sep': is_set(11),      # Fast system calls
            'mtrr': is_set(12),     # Memory Type Range Registers
            'pge': is_set(13),      # Page Global Enable
            'mca': is_set(14),      # Machine Check Architecture
            'cmov': is_set(15),     # Conditional MOVe instructions
            'pat': is_set(16),      # Page Attribute Table
            'pse36': is_set(17),    # 36 bit Page Size Extensions
            'serial': is_set(18),   # Processor Serial Number
            'clflush': is_set(19),  # Cache Flush
            # 'reserved1': is_set(20),  # reserved
            'dts': is_set(21),      # Debug Trace Store
            'acpi': is_set(22),     # ACPI support
            'mmx': is_set(23),      # MultiMedia Extensions
            'fxsr': is_set(24),     # FXSAVE and FXRSTOR instructions
            'sse': is_set(25),      # SSE instructions
            'sse2': is_set(26),     # SSE2 (WNI) instructions
            'ss': is_set(27),       # self snoop
            # 'reserved2': is_set(28),  # reserved
            'tm': is_set(29),       # Automatic clock control
            'ia64': is_set(30),     # IA64 instructions
            '3dnow': is_set(31)     # 3DNow! instructions available
        }

        # Get a list of only the flags that are true
        flags = [k for k, v in flags.items() if v]
        flags.sort()

        return {
            'vendor_id': vendor_id,
            'hardware': '',
            'brand': processor_brand,
            'hz_advertised': to_friendly_hz(hz_advertised, scale),
            'hz_actual': to_friendly_hz(hz_actual, 6),
            'hz_advertised_raw': to_raw_hz(hz_advertised, scale),
            'hz_actual_raw': to_raw_hz(hz_actual, 6),
            'arch': arch,
            'bits': bits,
            'count': DataSource.cpu_count,
            'raw_arch_string': raw_arch_string,
            'l2_cache_size': 0,
            'l2_cache_line_size': 0,
            'l2_cache_associativity': 0,
            'stepping': 0,
            'model': 0,
            'family': 0,
            'processor_type': 0,
            'extended_model': 0,
            'extended_family': 0,
            'flags': flags
        }
    except:
        return None
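# --- Added worked example (not from the source) of the is_set() mask above:
# bit 23 (mmx) maps to 0x80000000 >> 23 == 0x100, so a feature_bits value of
# 0x00000100 makes is_set(23) true and 'mmx' is reported.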
def get_cpu_info_from_sysinfo():
    '''
    Returns the CPU info gathered from sysinfo.
    Will return None if sysinfo is not found.
    '''
    try:
        # Just return None if there is no sysinfo
        if not DataSource.has_sysinfo():
            return None

        # If sysinfo fails return None
        returncode, output = DataSource.sysinfo_cpu()
        if output is None or returncode != 0:
            return None

        # Various fields
        vendor_id = ''  # _get_field(output, None, None, 'CPU #0: ')
        processor_brand = output.split('CPU #0: "')[1].split('"\n')[0]
        cache_size = ''  # _get_field(output, None, None, 'machdep.cpu.cache.size')
        stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
        model = int(output.split(', model ')[1].split(',')[0].strip())
        family = int(output.split(', family ')[1].split(',')[0].strip())

        # Flags
        flags = []
        for line in output.split('\n'):
            if line.startswith('\t\t'):
                for flag in line.strip().lower().split():
                    flags.append(flag)
        flags.sort()

        # Convert from GHz/MHz string to Hz
        scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
        hz_actual = hz_advertised

        # Get the CPU arch and bits
        arch, bits = parse_arch(DataSource.raw_arch_string)

        return {
            'vendor_id': vendor_id,
            'hardware': '',
            'brand': processor_brand,
            'hz_advertised': to_friendly_hz(hz_advertised, scale),
            'hz_actual': to_friendly_hz(hz_actual, scale),
            'hz_advertised_raw': to_raw_hz(hz_advertised, scale),
            'hz_actual_raw': to_raw_hz(hz_actual, scale),
            'arch': arch,
            'bits': bits,
            'count': DataSource.cpu_count,
            'raw_arch_string': DataSource.raw_arch_string,
            'l2_cache_size': cache_size,
            'l2_cache_line_size': 0,
            'l2_cache_associativity': 0,
            'stepping': stepping,
            'model': model,
            'family': family,
            'processor_type': 0,
            'extended_model': 0,
            'extended_family': 0,
            'flags': flags
        }
    except:
        return None
def get_cpu_info_from_sysctl():
    '''
    Returns the CPU info gathered from sysctl.
    Will return None if sysctl is not found.
    '''
    try:
        # Just return None if there is no sysctl
        if not DataSource.has_sysctl():
            return None

        # If sysctl fails return None
        returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
        if output is None or returncode != 0:
            return None

        # Various fields
        vendor_id = _get_field(output, None, None, 'machdep.cpu.vendor')
        processor_brand = _get_field(output, None, None, 'machdep.cpu.brand_string')
        cache_size = _get_field(output, None, None, 'machdep.cpu.cache.size')
        stepping = _get_field(output, int, 0, 'machdep.cpu.stepping')
        model = _get_field(output, int, 0, 'machdep.cpu.model')
        family = _get_field(output, int, 0, 'machdep.cpu.family')

        # Flags
        flags = _get_field(output, None, None, 'machdep.cpu.features').lower().split()
        flags.sort()

        # Convert from GHz/MHz string to Hz
        scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
        hz_actual = _get_field(output, None, None, 'hw.cpufrequency')
        hz_actual = to_hz_string(hz_actual)

        # Get the CPU arch and bits
        arch, bits = parse_arch(DataSource.raw_arch_string)

        return {
            'vendor_id': vendor_id,
            'hardware': '',
            'brand': processor_brand,
            'hz_advertised': to_friendly_hz(hz_advertised, scale),
            'hz_actual': to_friendly_hz(hz_actual, 0),
            'hz_advertised_raw': to_raw_hz(hz_advertised, scale),
            'hz_actual_raw': to_raw_hz(hz_actual, 0),
            'arch': arch,
            'bits': bits,
            'count': DataSource.cpu_count,
            'raw_arch_string': DataSource.raw_arch_string,
            'l2_cache_size': cache_size,
            'l2_cache_line_size': 0,
            'l2_cache_associativity': 0,
            'stepping': stepping,
            'model': model,
            'family': family,
            'processor_type': 0,
            'extended_model': 0,
            'extended_family': 0,
            'flags': flags
        }
    except:
        return None
def get_cpu_info_from_dmesg():
    '''
    Returns the CPU info gathered from dmesg.
    Will return None if dmesg is not found or does not have the desired info.
    '''
    try:
        # Just return None if there is no dmesg
        if not DataSource.has_dmesg():
            return None

        # If dmesg fails return None
        returncode, output = DataSource.dmesg_a()
        if output is None or returncode != 0:
            return None

        # Processor Brand
        long_brand = output.split('CPU: ')[1].split('\n')[0]
        processor_brand = long_brand.rsplit('(', 1)[0]
        processor_brand = processor_brand.strip()

        # Hz
        scale = 0
        hz_actual = long_brand.rsplit('(', 1)[1].split(' ')[0].lower()
        if hz_actual.endswith('mhz'):
            scale = 6
        elif hz_actual.endswith('ghz'):
            scale = 9
        hz_actual = hz_actual.split('-')[0]
        hz_actual = to_hz_string(hz_actual)

        # Various fields; dmesg separates the 'name = value' pairs with
        # two spaces, which keeps the ' = ' split below intact
        fields = output.split('CPU: ')[1].split('\n')[1].strip().split('  ')
        vendor_id = None
        stepping = None
        model = None
        family = None
        for field in fields:
            name, value = field.split(' = ')
            name = name.lower()
            if name == 'origin':
                vendor_id = value.strip('"')
            elif name == 'stepping':
                stepping = int(value)
            elif name == 'model':
                model = int(value, 16)
            elif name == 'family':
                family = int(value, 16)

        # Flags
        flag_lines = []
        for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
            if category in output:
                flag_lines.append(output.split(category)[1].split('\n')[0])

        flags = []
        for line in flag_lines:
            line = line.split('<')[1].split('>')[0].lower()
            for flag in line.split(','):
                flags.append(flag)
        flags.sort()

        # Convert from GHz/MHz string to Hz
        scale, hz_advertised = _get_hz_string_from_brand(processor_brand)

        # Get the CPU arch and bits
        arch, bits = parse_arch(DataSource.raw_arch_string)

        return {
            'vendor_id': vendor_id,
            'hardware': '',
            'brand': processor_brand,
            'hz_advertised': to_friendly_hz(hz_advertised, scale),
            'hz_actual': to_friendly_hz(hz_actual, 6),
            'hz_advertised_raw': to_raw_hz(hz_advertised, scale),
            'hz_actual_raw': to_raw_hz(hz_actual, 6),
            'arch': arch,
            'bits': bits,
            'count': DataSource.cpu_count,
            'raw_arch_string': DataSource.raw_arch_string,
            'l2_cache_size': 0,
            'l2_cache_line_size': 0,
            'l2_cache_associativity': 0,
            'stepping': stepping,
            'model': model,
            'family': family,
            'processor_type': 0,
            'extended_model': 0,
            'extended_family': 0,
            'flags': flags
        }
    except:
        return None
def get_cpu_info_from_proc_cpuinfo():
    '''
    Returns the CPU info gathered from /proc/cpuinfo.
    Will return None if /proc/cpuinfo is not found.
    '''
    try:
        # Just return None if there is no cpuinfo
        if not DataSource.has_proc_cpuinfo():
            return None

        returncode, output = DataSource.cat_proc_cpuinfo()
        if returncode != 0:
            return None

        # Various fields
        vendor_id = _get_field(output, None, '', 'vendor_id', 'vendor id', 'vendor')
        processor_brand = _get_field(output, None, None, 'model name', 'cpu')
        cache_size = _get_field(output, None, '', 'cache size')
        stepping = _get_field(output, int, 0, 'stepping')
        model = _get_field(output, int, 0, 'model')
        family = _get_field(output, int, 0, 'cpu family')
        hardware = _get_field(output, None, '', 'Hardware')

        # Flags
        flags = _get_field(output, None, None, 'flags', 'Features').split()
        flags.sort()

        # Convert from MHz string to Hz
        hz_actual = _get_field(output, None, '', 'cpu MHz', 'cpu speed', 'clock')
        hz_actual = hz_actual.lower().rstrip('mhz').strip()
        hz_actual = to_hz_string(hz_actual)

        # Convert from GHz/MHz string to Hz
        scale, hz_advertised = _get_hz_string_from_brand(processor_brand)

        # Try getting the Hz for a BeagleBone
        if hz_advertised == '0.0':
            scale, hz_advertised = _get_hz_string_from_beagle_bone()
            hz_actual = hz_advertised

        # Get the CPU arch and bits
        arch, bits = parse_arch(DataSource.raw_arch_string)

        return {
            'vendor_id': vendor_id,
            'hardware': hardware,
            'brand': processor_brand,
            'hz_advertised': to_friendly_hz(hz_advertised, scale),
            'hz_actual': to_friendly_hz(hz_actual, 6),
            'hz_advertised_raw': to_raw_hz(hz_advertised, scale),
            'hz_actual_raw': to_raw_hz(hz_actual, 6),
            'arch': arch,
            'bits': bits,
            'count': DataSource.cpu_count,
            'raw_arch_string': DataSource.raw_arch_string,
            'l2_cache_size': cache_size,
            'l2_cache_line_size': 0,
            'l2_cache_associativity': 0,
            'stepping': stepping,
            'model': model,
            'family': family,
            'processor_type': 0,
            'extended_model': 0,
            'extended_family': 0,
            'flags': flags
        }
    except:
        return None
def main(argv):
    target_url = ''
    COMMAND = ''
    ALL = False
    AUTH = False
    DATASOURCE = False
    TERM = False
    COURSE = False
    USER = False
    MEMBERSHIP = False
    CLEANUP = False
    datasource_PK1 = None

    usageStr = "\nrestdemo.py -t|--target <target root URL> -c|--command <command>\n"
    usageStr += "e.g restdemo.py -t www.myschool.edu -c create_course\n"
    usageStr += "command: <command>_<object> where <command> is one of the following:\n"
    usageStr += "\tcreate, read, read_all, update, delete\n"
    usageStr += "and <object> is one of the following:\n"
    usageStr += "\tdatasource, term, course, user, membership\n"
    usageStr += "-t is required; No -c args will run demo in predetermined order.\n"
    usageStr += "'-c authorize' demonstrates the authorization process and does not create objects.\n"
    usageStr += "-c commands require a valid datasource PK1 - \n"
    usageStr += "\ta datasource get will be run in these cases, defaulting to create\n"
    usageStr += "\tif the demo datasource does not exist."

    if len(sys.argv) > 1:  # there are command line arguments
        try:
            opts, args = getopt.getopt(argv, "hdt:c:", ["target=", "command="])
        except getopt.GetoptError:
            print(usageStr)
            sys.exit(2)
        for opt, arg in opts:
            if opt == '-h':
                print(usageStr)
                sys.exit()
            elif opt == '-d':
                print("Deleting at end of run.")
                CLEANUP = True
            elif opt in ("-t", "--target"):
                target_url = arg.lstrip()
            elif opt in ("-c", "--command"):
                COMMAND = arg
            else:
                COMMAND = "Run All"
        print('[main] Target is:', target_url)
        print('[main] Command is:', COMMAND)
    else:
        print(usageStr)
        sys.exit(2)

    # Set up some booleans for processing flags and order of processing
    if "course" in COMMAND:
        print("[main] Run course command")
        COURSE = True
    elif "user" in COMMAND:
        print("[main] Run user command")
        USER = True
    elif "membership" in COMMAND:
        print("[main] Run membership command")
        MEMBERSHIP = True
    elif "term" in COMMAND:
        print("[main] Run term command")
        TERM = True
    elif "datasource" in COMMAND:
        print("[main] Run datasource command")
        DATASOURCE = True
    elif "authorize" in COMMAND:
        print("[main] Run authorization command")
        AUTH = True
    else:
        print("[main] Empty Command: Run All\n")
        ALL = True

    print('\n[main] Acquiring auth token...\n')
    authorized_session = AuthToken(target_url)
    authorized_session.setToken()
    print('\n[main] Returned token: ' + authorized_session.getToken() + '\n')

    if not AUTH:
        # run commands in required order if running ALL
        if DATASOURCE or ALL:
            # process datasource command
            print("\n[main] Run datasource command: " + ('ALL' if ALL else COMMAND) + '...')
            datasource_session = DataSource(target_url, authorized_session.getToken())
            if 'datasource' in COMMAND:
                datasource_session.execute(COMMAND, authorized_session.getToken())
            else:
                # datasource_session.getDataSources(authorized_session.getToken())
                datasource_session.createDataSource(authorized_session.getToken())
                datasource_PK1 = datasource_session.datasource_PK1
                print("[main] datasource_PK1: " + datasource_PK1)
                datasource_session.getDataSource(authorized_session.getToken())
                datasource_session.updateDataSource(authorized_session.getToken())

        if TERM or ALL:
            term_session = Term(target_url, authorized_session.getToken())
            # process term command
            print("\n[main] Run term command: " + ('ALL' if ALL else COMMAND) + '...')
            if 'term' in COMMAND:
                if ('delete' in COMMAND) or ('read' in COMMAND):
                    print("[main] Deleting or getting does not require a datasource.")
                else:
                    print("[main] datasource_PK1: not known... searching...")
                    datasource_session = DataSource(target_url, authorized_session.getToken())
                    datasource_session.getDataSource(authorized_session.getToken())
                    datasource_PK1 = datasource_session.datasource_PK1
                    print("[main] datasource_PK1: " + datasource_PK1)
                    if datasource_PK1 is None:
                        print("[main] data source not found, creating for demo...")
                        datasource_session.createDataSource(authorized_session.getToken())
                        datasource_PK1 = datasource_session.datasource_PK1
                term_session.execute(COMMAND, datasource_PK1, authorized_session.getToken())
            else:
                # term_session.getTerms(authorized_session.getToken())
                term_session.createTerm(datasource_PK1, authorized_session.getToken())
                term_session.getTerm(authorized_session.getToken())
                term_session.updateTerm(datasource_PK1, authorized_session.getToken())

        if COURSE or ALL:
            course_session = Course(target_url, authorized_session.getToken())
            # process course command
            print("\n[main] Run course command: " + ('ALL' if ALL else COMMAND) + '...')
            if 'course' in COMMAND:
                if ('delete' in COMMAND) or ('read' in COMMAND):
                    print("[main] Deleting or getting does not require a datasource.")
                else:
                    print("[main] datasource_PK1: not known... searching...")
                    datasource_session = DataSource(target_url, authorized_session.getToken())
                    datasource_session.getDataSource(authorized_session.getToken())
                    datasource_PK1 = datasource_session.datasource_PK1
                    print("[main] datasource_PK1: " + datasource_PK1)
                    if datasource_PK1 is None:
                        print("[main] data source not found, creating for demo...")
                        datasource_session.createDataSource(authorized_session.getToken())
                        datasource_PK1 = datasource_session.datasource_PK1
                course_session.execute(COMMAND, datasource_PK1, authorized_session.getToken())
            else:
                # course_session.getCourses(authorized_session.getToken())
                course_session.createCourse(datasource_PK1, authorized_session.getToken())
                course_session.getCourse(authorized_session.getToken())
                course_session.updateCourse(datasource_PK1, authorized_session.getToken())

        if USER or ALL:
            user_session = User(target_url, authorized_session.getToken())
            # process user command
            print("\n[main] Run user command: " + ('ALL' if ALL else COMMAND) + '...')
            if 'user' in COMMAND:
                if ('delete' in COMMAND) or ('read' in COMMAND):
                    print("[main] Deleting or getting does not require a datasource.")
                else:
                    print("[main] datasource_PK1: not known... searching...")
                    datasource_session = DataSource(target_url, authorized_session.getToken())
                    datasource_session.getDataSource(authorized_session.getToken())
                    datasource_PK1 = datasource_session.datasource_PK1
                    print("[main] datasource_PK1: " + datasource_PK1)
                    if datasource_PK1 is None:
                        print("[main] data source not found, creating for demo...")
                        datasource_session.createDataSource(authorized_session.getToken())
                        datasource_PK1 = datasource_session.datasource_PK1
                user_session.execute(COMMAND, datasource_PK1, authorized_session.getToken())
            else:
                # user_session.getUsers(authorized_session.getToken())
                user_session.createUser(datasource_PK1, authorized_session.getToken())
                user_session.getUser(authorized_session.getToken())
                user_session.updateUser(datasource_PK1, authorized_session.getToken())

        if MEMBERSHIP or ALL:
            membership_session = Membership(target_url, authorized_session.getToken())
            # process membership command
            print("\n[main] Run membership command: " + ('ALL' if ALL else COMMAND) + '...')
            if 'membership' in COMMAND:
                if ('delete' in COMMAND) or ('read' in COMMAND):
                    print("[main] Deleting or getting does not require a datasource.")
                else:
                    print("[main] datasource_PK1: not known... searching...")
                    datasource_session = DataSource(target_url, authorized_session.getToken())
                    datasource_session.getDataSource(authorized_session.getToken())
                    datasource_PK1 = datasource_session.datasource_PK1
                    print("[main] datasource_PK1: " + datasource_PK1)
                    if datasource_PK1 is None:
                        print("[main] data source not found, creating for demo...")
                        datasource_session.createDataSource(authorized_session.getToken())
                        datasource_PK1 = datasource_session.datasource_PK1
                membership_session.execute(COMMAND, datasource_PK1, authorized_session.getToken())
            else:
                # membership_session.getMemberships(authorized_session.getToken())
                membership_session.createMembership(datasource_PK1, authorized_session.getToken())
                membership_session.getMembership(authorized_session.getToken())
                membership_session.updateMembership(datasource_PK1, authorized_session.getToken())
                membership_session.readUserMemberships(authorized_session.getToken())

        # clean up if not using individual commands
        if ALL:
            print('\n[main] Completing Demo and deleting created objects...')
            print("[main] Deleting Membership")
            membership_session.deleteMembership(authorized_session.getToken())
            print("[main] Deleting User")
            user_session.deleteUser(authorized_session.getToken())
            print("[main] Deleting Course")
            course_session.deleteCourse(authorized_session.getToken())
            print("[main] Deleting Term")
            term_session.deleteTerm(authorized_session.getToken())
            print("[main] Deleting DataSource")
            datasource_session.deleteDataSource(authorized_session.getToken())
        else:
            print("Remember to delete created demo objects!")

    print("[main] Processing Complete")