def __init__(self, mmemory, typeaccess=0):
    """Bind a kernel-memory reader and build its symbol resolver.

    :param mmemory: Memory instance used for all raw reads.
    :param typeaccess: access mode forwarded to the reader (semantics
        defined by Memory -- TODO confirm).
    :raises TypeError: if mmemory is not a Memory instance.
    """
    # Accept only a real Memory object; anything else is rejected up front.
    if isinstance(mmemory, Memory):
        self.mmemory = mmemory
    else:
        raise TypeError("ERREUR")
    self.symbols = Symbols(self.mmemory)
    self.typeaccess = typeaccess
def trigger_dog_respin(self, mode="MG"):
    """Return True when a reel filled entirely with Dog symbols triggers
    the dog respin feature.

    :param mode: "MG" (main game) checks the first reel only; "FG"
        (free game) checks the last reel or the first reel.
    :raises ValueError: for any other mode (the original silently
        returned None here, which propagated as a falsy non-bool).
    """
    dog = Symbols.index("Dog")

    def full_dog_column(col):
        # A reel triggers only when every visible position shows Dog.
        return len(set(col)) == 1 and col[0] == dog

    # TODO: can Wild be counted as a Dog symbol in this case?
    if mode == "MG":
        return full_dog_column(self.window[0])
    elif mode == "FG":
        return full_dog_column(self.window[-1]) or full_dog_column(self.window[0])
    raise ValueError("Unknown game mode: %r" % (mode,))
def setUp(self):
    """Point Symbols at a throwaway Mongo collection and wipe it."""
    self.symbols = Symbols()
    client = pymongo.MongoClient()
    test_db = client.test_marketdata
    # Swap the real collection for the test one so tests never touch
    # production data.
    self.symbols._symbols = test_db.test_symbols
    self.symbols.clean()
def __init__(self, symbols=None, tracing=False):
    """Initialize the interpreter environment.

    :param symbols: optional pre-built Symbols table; a fresh one is
        created when omitted.
    :param tracing: enable execution tracing when True.
    """
    self.symbols = symbols if symbols is not None else Symbols()
    # Expose every public callable (no leading underscore) from the two
    # support modules.  Dict comprehensions replace the old
    # dict([(k, v) for ...]) construction.
    self.subrs = {name: getattr(subrs, name)
                  for name in dir(subrs) if not name.startswith("_")}
    self.builtins = {name: getattr(builtins, name)
                     for name in dir(builtins) if not name.startswith("_")}
    self.udfs = {}  # user-defined functions, filled in later
    self.tracing = tracing
def check_payment(symbols_on_line, odds):
    """Measure the leading run of matching symbols on a pay line.

    A Wild symbol extends the run unless the line leads with Scatter.
    Returns a (count, payment) tuple; the payout is read from the
    module-level Odds table.

    NOTE(review): the `odds` parameter is accepted but never used -- the
    global `Odds` is consulted instead; confirm this is intentional.
    """
    lead = symbols_on_line[0]
    run_length = 1
    for candidate in symbols_on_line[1:]:
        if candidate == lead:
            run_length += 1
        elif candidate == Symbols.index("Wild") and lead != Symbols.index("Scatter"):
            run_length += 1
        else:
            break
    return run_length, Odds[lead][5 - run_length]
def __init__(self, mmemory, typeaccess=0):
    """Store the kernel-memory reader and create its symbol resolver.

    :param mmemory: Memory instance used to read kernel memory.
    :param typeaccess: access mode forwarded to the memory reader
        (semantics defined by Memory -- TODO confirm).
    :raises TypeError: if mmemory is not a Memory instance.
    """
    if not isinstance(mmemory, Memory):
        raise TypeError("ERREUR")
    self.mmemory = mmemory
    self.symbols = Symbols(self.mmemory)
    self.typeaccess = typeaccess
def __init__(self, name, fields, exchanges=None):
    '''Constructor.

    :param name: data-source name, also used as the on-disk directory name.
    :param fields: ordered list of CSV column names.
    :param exchanges: optional list of exchange objects.  Defaults to an
        empty list created per instance -- the original `exchanges=[]`
        default was a single shared mutable object.
    '''
    self.name = name
    self.exchanges = exchanges if exchanges is not None else []
    self.rawpath = join(Config.DATA_BASEDIR, "raw/US", name)
    self.pklpath = join(Config.DATA_BASEDIR, "processed/US", name)
    self.date_from = datetime.datetime(1990, 1, 1)
    self.date_to = datetime.datetime.now()
    self.symbols = Symbols()
    self.csv_delimiter = ","
    self.csv_skiprows = 1
    self.csv_cols = range(0, 7)  # date, o, h, l, c, v, adj_close
    self.fields = fields
    # Map each field name to its positional index in `fields`.
    self.field_name_to_idx = {f: i for i, f in enumerate(fields)}
    self.raw_datefile = join(Config.MYDIR, "NYSE_dates.txt")
    self.datefile = join(Config.DATA_BASEDIR, "NYSE_dates.pkl")
    self.dates = None
    self.dtype = self._setup_dtype()
    self.num_workers = 4
    # Create working directories and the pickled NYSE date index on
    # first use.
    if not os.path.exists(self.rawpath):
        os.makedirs(self.rawpath)
    if not os.path.exists(self.pklpath):
        os.makedirs(self.pklpath)
    if not os.path.exists(self.datefile):
        self._setup_dataframe()
def iter_tokens(tokens):
    """Yield a parser token class for each (id, text) pair in `tokens`.

    Each token is materialized as a fresh subclass of the matching
    symbol_table entry, with `id` and `value` set as class attributes
    (a new class per token keeps the attributes independent).  A final
    '(ENDMARKER)' entry is yielded once the input is exhausted.

    :raises SyntaxError: when the text matches no symbol_table entry and
        the id is not one of the generic categories.
    """
    for i, j in tokens:
        try:
            # Fixed operator/keyword: look up the literal text directly.
            s = symbol_table[j]

            class Token(s):
                pass

            t = Token
            t.id = i
            if not s.value:
                t.value = j
        except KeyError:
            # Not a fixed operator -- fall back on the token category.
            if i in ['(NAME)', '(LITERAL)', '(SYMBOL)']:
                s = symbol_table[i]

                class Token(s):
                    pass

                t = Token
                if i == '(SYMBOL)':
                    # Symbols are interned through the Symbols factory.
                    t.value = Symbols(j)
                else:
                    t.value = j
                t.id = i
            else:
                raise SyntaxError('Undefined operator: {0}'.format(j))
        yield Token
    yield symbol_table['(ENDMARKER)']
def trigger_free_game(self):
    """True when the middle reels show enough Scatter symbols.

    Only columns 1, 2 and 3 of the window are counted.
    """
    scatter = Symbols.index("Scatter")
    total = sum(self.window[col].count(scatter) for col in (1, 2, 3))
    return total >= Num_Scatter_To_Trigger_Free
def get_fixed_symbols(self, fixed_symbols, window):
    """Locate every occurrence of the named symbols in the window.

    :param fixed_symbols: iterable of symbol names to look for.
    :param window: list of columns, each a list of symbol codes.
    :returns: list of (column, row, symbol_code) tuples.
    """
    wanted = [Symbols.index(name) for name in fixed_symbols]
    return [(col, row, code)
            for col, reel in enumerate(window)
            for row, code in enumerate(reel)
            if code in wanted]
def get_current_data(self, symbols):
    """Fetch current quotes for `symbols` from the Yahoo Finance CSV API.

    Symbols are first translated to their Yahoo spellings, then a single
    request is made for the fixed field set `ol1hgvd1`.  Returns the
    parsed records (a map object of YahooScraper.parse_record results).
    """
    yahoo_symbols = map(
        lambda x: Symbols.get_mapped_symbol(x, Symbols.YahooSymbolMapping),
        symbols)
    url_template = 'http://finance.yahoo.com/d/quotes.csv?s={}&f=ol1hgvd1'
    url = url_template.format(','.join(yahoo_symbols))
    print url  # Python 2 print statement; debug output of the request URL
    content = HttpHelper.http_get(url)
    # The response ends with a trailing newline; drop the empty last record.
    records = content.split('\n')[:-1]
    return map(YahooScraper.parse_record, records)
def spin(self):
    """Spin the reels: choose stop positions and fill the visible window."""
    # If the MainGameA strip is chosen, the server picks one symbol by
    # weight and substitutes it for every Mystery symbol this round only.
    mystery_sub = random_pick(list(range(len(Weight_For_Mystery))),
                              Weight_For_Mystery)
    stops = [random.choice(range(len(strip))) for strip in self.reel]
    mystery = Symbols.index("Mystery")
    for col, visible in enumerate(self.window):
        start = stops[col]
        picked = self.extended_reel[col][start:start + len(visible)]
        # Replace any Mystery symbol with the one picked for this round.
        self.window[col] = [mystery_sub if s == mystery else s for s in picked]
class UpdateMarketDataIntegrationTest(unittest.TestCase):
    """End-to-end check that update_marketdata stores fetched prices."""

    def setUp(self):
        self.symbols = Symbols()
        client = pymongo.MongoClient()
        # Replace the real collection with a disposable test one so the
        # test never touches production data.
        self.symbols._symbols = client.test_marketdata.test_symbols
        self.symbols.clean()
        self.symbols.add(['AAPL'])

    def test_update_marketdata(self):
        from_date = datetime(2012, 9, 20)
        to_date = datetime(2012, 9, 21)
        update_marketdata(from_date, to_date, self.symbols)
        rows = self.symbols.select_historical_prices('AAPL', from_date, to_date)
        # Two trading days expected, in date order, with the known high.
        self.assertEqual(2, len(rows))
        self.assertEqual(from_date, rows[0]['date'])
        self.assertEqual(to_date, rows[1]['date'])
        self.assertEqual(705.07, rows[1]['high'])
def update_marketdata(from_date=None, to_date=None, sym=None):
    '''Fetch the latest market data and update it in the db.

    :param from_date: earliest date to fetch; defaults to 10 years back.
    :param to_date: latest date to fetch; defaults to two days in the
        future (slack for timezone differences).
    :param sym: Symbols store to update; a fresh one is created when
        omitted.  (The original default `sym=Symbols()` was evaluated
        once at import time and shared across every call.)
    '''
    if sym is None:
        sym = Symbols()
    # Resolve the date defaults once, not per symbol.
    if from_date is None:
        from_date = datetime.now() - timedelta(days=10 * 365)
    if to_date is None:
        to_date = datetime.now() + timedelta(days=2)
    for s in sym.symbols():
        # Resume from the day after the last stored date when possible.
        date = sym.last_date(s)
        fdate = date + timedelta(days=1) if date is not None else from_date
        (res, data) = yahoo.fetch_market_data(s, fdate, to_date)
        if res:
            sym.insert_historical_prices(
                s, [(x[0], x[1], x[2], x[3], x[4], x[5], x[6]) for x in data])
        else:
            # Several possible causes: no new data, wrong symbol, or
            # another fetch failure.
            print('Failed updating symbol %s' % s)
class GVTasks:
    """Enumerate running tasks through several independent channels and
    cross-check the results to spot processes hidden by a rootkit.

    Python 2 code: print statements, dict.has_key and os.popen2 are used.
    """

    # Offsets inside the kernel task_struct, discovered heuristically by
    # getTasksMemory() from a raw memory dump.
    offset_list = 0
    offset_list_next = 0
    offset_name = 0
    offset_pid = 0
    offset_uid = 0
    # Task lists gathered through the different channels:
    # raw memory, /bin/ps, /proc walk, /proc brute force, kill(pid, 0).
    tasks_mem = Tasks()
    tasks_ps = Tasks()
    tasks_proc = Tasks()
    tasks_procforce = Tasks()
    tasks_kill = Tasks()
    tasks_check = Tasks()  # tasks present in one source but missing in another

    def __init__(self, mmemory, typeaccess=0):
        """Bind the kernel-memory reader and build its symbol resolver.

        :raises TypeError: if mmemory is not a Memory instance.
        """
        if not isinstance(mmemory, Memory):
            raise TypeError("ERREUR")
        self.mmemory = mmemory
        self.symbols = Symbols(self.mmemory)
        self.typeaccess = typeaccess

    def getOffsetPid(self, data):
        """ Find the offset of pid in the task_struct struct """
        i = 0
        find1 = find2 = -1
        offset = 0
        # Scan 32-bit words; after seeing a word equal to 1, look for two
        # consecutive equal words (heuristic for the pid field).
        while ((i < len(data)) and (offset == 0)):
            if (find1 == 1):
                find2 = find1
            find1 = unpack("<L", data[i:i + 4])[0]
            if (find1 == find2):
                offset = i
            i = i + 4
        offset = offset + self.offset_list
        return offset

    def getOffsetUid(self, data):
        """ Find the offset of uid in the task_struct struct """
        # Look for a 28-byte run of NUL bytes near the credential block;
        # prefer the last occurrence when it sits early enough in `data`.
        offset = data.find(
            "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        )
        offset2 = data.rfind(
            "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        )
        if (offset2 < 400):
            offset = offset2 - 4
        offset = offset + self.offset_pid
        return offset

    def getOffsetListNext(self, data):
        """ Find the offset of next task in the list_head struct """
        i = 0
        first_addr = second_addr = find1 = find2 = 0
        offset = -1
        # Two consecutive identical kernel addresses (> 0xc0000000) that
        # repeat 16 bytes apart mark the list_head pair.
        while ((i < len(data)) and (offset == -1)):
            second_addr = first_addr
            first_addr = unpack("<L", data[i:i + 4])[0]
            if (first_addr == second_addr and first_addr > 0xc0000000
                    and i > 0x20):
                find2 = find1
                find1 = i
                if ((find1 - find2) == 16):
                    offset = find2 + 20
            i = i + 4
        return offset

    def getOffsetList(self, data):
        """ Find the offset of tasks in the task_struct struct """
        i = 0
        l = []
        count = 0
        prec = ""
        find = -1
        offset = 0
        # Render each 32-bit word as hex, then look for the second pair of
        # consecutive duplicates -- heuristic for the tasks list pointers.
        for i in range(0, len(data), 4):
            var = "%x" % unpack("<L", data[i:i + 4])[0]
            l.append(var)
        for j in l:
            if (prec == j):
                count = count + 1
                if (find != 0 and count == 2):
                    offset = l.index(j)
                    offset = offset - 4
                    find = 0
            if (j != "0"):
                prec = j
        return offset

    def getTasksMemory(self):
        """ Get tasks list by /dev/kmem or /dev/mem """
        current_addr = list_addr = 0
        name = ""
        self.mmemory.open("r", self.typeaccess)
        # Start from init_task and discover the struct offsets on the fly.
        init_task = self.symbols.find_symbol("init_task")
        data = self.mmemory.read(init_task, 1000)
        self.offset_name = data.find("swapper")
        data = self.mmemory.read(init_task + 40, 100)
        self.offset_list = self.getOffsetList(data)
        self.offset_list = self.offset_list * 4 + 40
        addr = unpack("<L",
                      self.mmemory.read(init_task + self.offset_list, 4,
                                        0))[0]
        data = self.mmemory.read(addr, 256, 0)
        self.offset_list_next = self.getOffsetListNext(data)
        current_addr = unpack(
            "<L", self.mmemory.read(addr + self.offset_list_next, 4, 0))[0]
        data = self.mmemory.read(current_addr + self.offset_list, 100, 0)
        self.offset_pid = self.getOffsetPid(data)
        data = self.mmemory.read(current_addr + self.offset_pid, 500, 0)
        self.offset_uid = self.getOffsetUid(data)
        # Walk the circular task list until we come back around to swapper.
        while (name != "swapper"):
            name = self.mmemory.read(current_addr + self.offset_name, 16, 0)
            name = name[:string.find(name, '\0')]
            pid = unpack(
                "<L", self.mmemory.read(current_addr + self.offset_pid, 4,
                                        0))[0]
            uid = unpack(
                "<L", self.mmemory.read(current_addr + self.offset_uid, 4,
                                        0))[0]
            gid = unpack(
                "<L",
                self.mmemory.read(current_addr + self.offset_uid + 16, 4,
                                  0))[0]
            self.tasks_mem.map_tasks[int(pid)] = [
                int(uid), int(gid), name, current_addr, 0
            ]
            self.tasks_mem.list_tasks.append(int(pid))
            list_addr = unpack(
                "<L", self.mmemory.read(current_addr + self.offset_list, 4,
                                        0))[0]
            current_addr = unpack(
                "<L", self.mmemory.read(list_addr + self.offset_list_next, 4,
                                        0))[0]
        # Drop the swapper entry that terminated the walk.
        self.tasks_mem.map_tasks.pop(0)
        self.tasks_mem.list_tasks.pop()
        self.mmemory.close()

    def getTasksPs(self):
        """ Get tasks list by /bin/ps """
        i, o = os.popen2('/bin/ps -eo user,pid,ruid,rgid,state,comm')
        j = o.readline()  # discard the header line
        j = o.readline()
        while (j != ""):
            l = j.split()
            self.tasks_ps.map_tasks[int(
                l[1])] = [int(l[2]), int(l[3]), l[5], 0, 0]
            self.tasks_ps.list_tasks.append(int(l[1]))
            j = o.readline()
        o.close()
        i.close()

    def openProcTaskStatus(self, file):
        """Parse pid, uid, gid and name out of a /proc/<pid>/status file.

        Returns [] when the file cannot be opened (task vanished).
        NOTE(review): assumes fixed line positions in status -- Name on
        line 0, Pid on 4, Uid on 7, Gid on 8; kernel-version dependent.
        """
        fd = 0
        try:
            fd = open(file, "r")
        except IOError:
            return []
        l = fd.readlines()
        name = l[0].split()
        pid = l[4].split()
        uid = l[7].split()
        gid = l[8].split()
        fd.close()
        return [pid[1], uid[1], gid[1], name[1]]

    def getTasksProc(self):
        """ Get tasks list by /proc """
        for rep in os.walk("/proc"):
            # Numeric directory names under /proc are process ids.
            if (rep[0][6:].isdigit()):
                l = self.openProcTaskStatus(rep[0] + "/status")
                self.tasks_proc.map_tasks[int(
                    l[0])] = [int(l[1]), int(l[2]), l[3], 0, 0]
                self.tasks_proc.list_tasks.append(int(l[0]))
                # Record the process's threads too (flag [4] == 1).
                if (os.access(rep[0] + "/task", os.F_OK) == True):
                    for srep in os.listdir(rep[0] + "/task"):
                        if (srep != l[0]):
                            ll = self.openProcTaskStatus(rep[0] + "/task/" +
                                                         srep + "/status")
                            self.tasks_proc.map_tasks[int(ll[0])] = [
                                int(ll[1]), int(ll[2]), ll[3], 0, 1
                            ]
                            self.tasks_proc.list_tasks.append(int(ll[0]))

    def getTasksProcForce(self):
        """ Get tasks list by bruteforcing /proc """
        # Probe every candidate pid directly instead of trusting readdir,
        # which a rootkit may filter.
        for i in range(1, 65535, 1):
            if (os.access("/proc/" + str(i) + "/status", os.F_OK) == True):
                l = self.openProcTaskStatus("/proc/" + str(i) + "/status")
                if (l != []):
                    self.tasks_procforce.map_tasks[int(
                        l[0])] = [int(l[1]), int(l[2]), l[3], 0, 0]
                    self.tasks_procforce.list_tasks.append(int(l[0]))
                    if (os.access("/proc/" + str(i) + "/task",
                                  os.F_OK) == True):
                        for srep in os.listdir("/proc/" + str(i) + "/task"):
                            if (srep != l[0]):
                                ll = self.openProcTaskStatus("/proc/" +
                                                             str(i) +
                                                             "/task/" + srep +
                                                             "/status")
                                self.tasks_procforce.map_tasks[int(ll[0])] = [
                                    int(ll[1]), int(ll[2]), ll[3], 0, 1
                                ]
                                self.tasks_procforce.list_tasks.append(
                                    int(ll[0]))

    def getTasksKill(self):
        """ Get tasks list by kill """
        # Signal 0 performs only the existence/permission check.
        for i in range(1, 65535):
            try:
                os.kill(i, 0)
                self.tasks_kill.map_tasks[i] = [0, 0, None, 0, 0]
                self.tasks_kill.list_tasks.append(i)
            except OSError:
                None  # pid not present (or not signalable); skip it

    def _simpleViewTasks(self, tasks):
        """Print one line per task: pid, uid, gid, name and address."""
        print "PID\t UID\t GID\t NAME\t\t\t ADDR"
        for i in tasks.list_tasks:
            print "%d\t %d\t %d\t %-16s\t @ 0x%x" % (
                i, tasks.map_tasks[i][0], tasks.map_tasks[i][1],
                tasks.map_tasks[i][2], tasks.map_tasks[i][3])

    def viewTasks(self):
        """Dump the task list obtained from raw memory."""
        self.getTasksMemory()
        self._simpleViewTasks(self.tasks_mem)

    def _checkTasks(self, ref, cmp, check):
        """Record in `check` every non-thread task present in `ref` but
        missing from `cmp` (skipping tasks already flagged)."""
        for i in ref.map_tasks:
            if (cmp.map_tasks.has_key(i) == False
                    and ref.map_tasks[i][4] == 0):
                if (self.tasks_check.map_tasks.has_key(i) == False):
                    check.map_tasks[i] = ref.map_tasks.get(i)
                    check.list_tasks.append(i)

    def checkViewTasks(self):
        """Collect tasks through every channel, diff them pairwise and
        report any task hidden from one of the sources."""
        self.getTasksPs()
        self.getTasksProc()
        self.getTasksProcForce()
        self.getTasksMemory()
        self.getTasksKill()
        self._checkTasks(self.tasks_proc, self.tasks_ps, self.tasks_check)
        self._checkTasks(self.tasks_procforce, self.tasks_proc,
                         self.tasks_check)
        self._checkTasks(self.tasks_mem, self.tasks_procforce,
                         self.tasks_check)
        self._checkTasks(self.tasks_kill, self.tasks_proc, self.tasks_check)
        if (self.tasks_check.list_tasks != []):
            print "LISTS OF TASKS HIDE"
            self._simpleViewTasks(self.tasks_check)
        else:
            print "NO TASKS HIDE"
            print "YOUR SYSTEM SEEMS BE SAFE !"
class DataSource(object):
    ''' Spec for a Stock data source.

    Abstract base: subclasses supply the download URL and the CSV field
    converters.  Python 2 / legacy pandas code (urllib2, DataFrame.save).
    '''
    __metaclass__ = abc.ABCMeta

    def __init__(self, name, fields, exchanges=[]):
        ''' Constructor.

        NOTE(review): the mutable default `exchanges=[]` is shared across
        all instances that omit the argument -- safe only if never mutated.
        '''
        self.name = name
        self.exchanges = exchanges
        self.rawpath = join(Config.DATA_BASEDIR, "raw/US", name)
        self.pklpath = join(Config.DATA_BASEDIR, "processed/US", name)
        self.date_from = datetime.datetime(1990, 1, 1)
        self.date_to = datetime.datetime.now()
        self.symbols = Symbols()
        self.csv_delimiter = ","
        self.csv_skiprows = 1
        self.csv_cols = range(0, 7)  # date, o, h, l, c, v, adj_close
        self.fields = fields
        # Maps each field name to its positional index in `fields`.
        self.field_name_to_idx = {f[1]: f[0] for f in enumerate(fields)}
        self.raw_datefile = join(Config.MYDIR, "NYSE_dates.txt")
        self.datefile = join(Config.DATA_BASEDIR, "NYSE_dates.pkl")
        self.dates = None
        self.dtype = self._setup_dtype()
        self.num_workers = 4
        # Create the working directories and the pickled NYSE date index
        # on first use.
        if not os.path.exists(self.rawpath):
            os.makedirs(self.rawpath)
        if not os.path.exists(self.pklpath):
            os.makedirs(self.pklpath)
        if not os.path.exists(self.datefile):
            self._setup_dataframe()

    def __repr__(self):
        res = "name: " + self.name
        res += "\n rawpath: " + self.rawpath
        res += "\n pklpath: " + self.pklpath
        res += "\n exchanges: " + ", ".join([x.name for x in self.exchanges])
        return res

    def _setup_dtype(self):
        """ Data types need to be specified for the columns of CSV files.

        * Date object is stored in object format.
        * All other fields are parsed as float.
        """
        names = []
        formats = []
        for f in self.fields:
            names.append(f)
            if f == DataItem.DATE:
                formats.append("O")  # date in object format
            else:
                formats.append(float)
        return {'names': names, 'formats': formats}

    def _csvfile(self, sym):
        """ CSV file path for a given symbol """
        return join(self.rawpath, sym + ".csv")

    def _pklfile(self, sym):
        """ Pickle file path for a given symbol """
        return join(self.pklpath, sym + ".pkl")

    @property
    def symbol_list(self):
        """ Returns a list of symbols for all exchanges (concatenated) """
        return reduce(lambda x, y: x + y.symbol_list(), self.exchanges, [])

    @abc.abstractmethod
    def _hist_url(self, date_from, date_to, symbol):
        """ Returns a URL for getting the historical data from the web """
        return

    @abc.abstractmethod
    def _csv_field_converters(self):
        """ Converters for the CSV columns.

        Numpy can do the simple conversions, but converters need to be
        specified for complex fields, e.g. parsing a date string to a
        datetime object.
        """
        return

    def _setup_dataframe(self, datefile=None):
        """ Setup an empty dataframe using NYSE dates as index.

        This will be used as the base frame for the stock data.
        """
        if datefile is None:
            datefile = self.raw_datefile
        dates = np.loadtxt(datefile,
                           dtype='O',
                           converters={
                               0: lambda x: dt.strptime(x, "%m/%d/%Y")
                           })
        dateIdx = pandas.tseries.index.DatetimeIndex(dates)
        empty_df = pandas.DataFrame(index=dateIdx)
        empty_df.save(self.datefile)

    def _load_dataframe(self):
        """ Load the previously saved dataframe """
        return read_pickle(self.datefile)

    def download(self, symbol):
        """Download data for a given symbol.

        This will store the data in a pickle file.
        """
        url = self._hist_url(self.date_from, self.date_to, symbol)
        url_get = urllib2.urlopen(url)
        csvfile = open(self._csvfile(symbol), "wb")
        csvfile.write(url_get.read())
        csvfile.close()
        self._pickle(symbol)
        return symbol

    def _load_csv(self, symbol):
        """ Load a CSV file into a pandas Dataframe """
        data_file = self._csvfile(symbol)
        data = np.loadtxt(data_file,
                          dtype=self.dtype,
                          delimiter=self.csv_delimiter,
                          skiprows=self.csv_skiprows,
                          converters=self._csv_field_converters(),
                          usecols=self.csv_cols)
        data = data[::-1]  # reverse the order (file is newest-first)
        fields = data.dtype.names[1:]  # all except the date
        df = pandas.DataFrame({f: data[f] for f in fields},
                              index=data[DataItem.DATE])
        return df

    def _pickle(self, symbol):
        """ Load CSV data and save the object to a file.

        load_csv is expensive, as it needs to convert data to appropriate
        datatypes.  pickle saves the processed data so it can be reloaded
        efficiently.
        """
        df = self._load_csv(symbol)
        df.save(self._pklfile(symbol))
        return symbol

    def load(self, symbol):
        """ Load a symbol dataframe from a pre-saved file.

        This loads the pre-processed data that was previously stored
        using _pickle().
        """
        return read_pickle(self._pklfile(symbol))

    def loadData(self, symbols, field="adj_close", from_=None, to_=None):
        """ Load data for symbols for given date range.

        Returns a DataFrame.  Columns are indexed by symbols e.g:

        from_date = datetime.datetime.strptime("2012/1/1", "%Y/%M/%d")
        to_date = datetime.datetime.now()
        syms=["AAPL", "GOOG", "COG"]
        ds.loadData(syms, "close", from_date, to_date)

        # DataFrame
        <class 'pandas.core.frame.DataFrame'>
        DatetimeIndex: 248 entries, 2012-01-03 00:00:00 to 2012-12-24 00:00:00
        Data columns:
        AAPL    245  non-null values
        GOOG    245  non-null values
        COG     245  non-null values
        dtypes: float64(3)

        # df[:3]
                     AAPL    GOOG    COG
        2012-01-03  411.23  665.41  76.80
        2012-01-04  413.44  668.28  82.32
        2012-01-05  418.03  659.01  82.71
        """
        if from_ is None:
            from_ = self.date_from
        if to_ is None:
            to_ = self.date_to
        df = self._load_dataframe()[from_:to_]
        for s in symbols:
            df[s] = self.load(s)[field]
        return df

    def create_symbol_list(self):
        """Refresh each exchange's symbol list and register it."""
        for exch in self.exchanges:
            exch.get_symbol_list()
            self.symbols.add(exch)
def __init__(self, configfile, isTraining=False):
    """Load training/decoding configuration from an INI file.

    Reads the [Parameters], [Train], [MFCC Featurizer] and [Test]
    sections; builds the symbol table with or without the symbol file
    depending on `isTraining`.

    :param configfile: path to the INI configuration file.
    :param isTraining: True when configuring a training run.
    :raises ValueError: when neither a test input nor a train input is
        configured.
    """
    self.isTraining = isTraining
    self.configfile = configfile
    logger.info('Reading configuration from: ' + configfile)
    self.cfg = ConfigParser(interpolation=ExtendedInterpolation())
    self.cfg.read(configfile)
    parameters = self.cfg['Parameters']
    self.samplerate = int(parameters['samplerate'])
    self.numcep = int(parameters['numcep'])
    self.numcontext = int(
        parameters['numcontext']) if 'numcontext' in parameters else 0
    self.rand_shift = int(
        parameters['rand_shift']) if 'rand_shift' in parameters else 0
    # numcep cepstra per frame with numcontext frames of context each side.
    self.feature_size = (2 * self.numcontext + 1) * self.numcep
    self.batch_size = int(parameters['batch_size'])
    self.epochs = int(parameters['epochs'])
    self.learningrate = float(parameters['learningrate'])
    self.model_dir = parameters['model_dir']
    self.start_step = int(parameters['start_step'])
    self.report_step = int(parameters['report_step'])
    self.num_gpus = int(parameters['num_gpus'])
    self.label_context = int(parameters['label_context'])
    # Scale the batch across GPUs (num_gpus == 0 means CPU only).
    self.batch_size = self.batch_size * \
        (self.num_gpus if self.num_gpus > 0 else 1)
    self.punc_regex = parameters['punc_regex']
    self.network = parameters['network']
    self.train_input = None
    self.test_input = None
    self.mfcc_input = None
    self.mfcc_output = None
    self.sym_file = None
    if 'sym_file' in parameters:
        self.sym_file = parameters['sym_file']
    if isTraining:
        self.symbols = Symbols(self.label_context, self.sym_file)
    else:
        self.symbols = Symbols(self.label_context)
    parameters = self.cfg['Train']
    if 'input' in parameters:
        self.train_input = parameters['input']
    parameters = self.cfg['MFCC Featurizer']
    if 'input' in parameters:
        self.mfcc_input = parameters['input']
    if 'output' in parameters:
        self.mfcc_output = parameters['output']
    self.start_marker = self.end_marker = None
    if 'start_marker' in parameters:
        self.start_marker = parameters['start_marker']
    if 'end_marker' in parameters:
        self.end_marker = parameters['end_marker']
    parameters = self.cfg['Test']
    if 'input' in parameters:
        self.test_input = parameters['input']
    elif not self.train_input:
        raise ValueError("Missing 'test_input' in configuration file: " +
                         configfile)
class Config(object):
    """Training/decoding configuration loaded from an INI file.

    Wraps a ConfigParser and exposes the parsed values as attributes;
    also builds the symbol table and instantiates the network class.
    """

    def __init__(self, configfile, isTraining=False):
        """Read and parse the configuration file.

        :param configfile: path to the INI configuration file.
        :param isTraining: True when configuring a training run (the
            symbol table is then loaded from sym_file as well).
        :raises ValueError: when neither a test input nor a train input
            is configured.
        """
        self.isTraining = isTraining
        self.configfile = configfile
        logger.info('Reading configuration from: ' + configfile)
        self.cfg = ConfigParser(interpolation=ExtendedInterpolation())
        self.cfg.read(configfile)
        parameters = self.cfg['Parameters']
        self.samplerate = int(parameters['samplerate'])
        self.numcep = int(parameters['numcep'])
        self.numcontext = int(
            parameters['numcontext']) if 'numcontext' in parameters else 0
        self.rand_shift = int(
            parameters['rand_shift']) if 'rand_shift' in parameters else 0
        # numcep cepstra per frame, numcontext frames of context each side.
        self.feature_size = (2 * self.numcontext + 1) * self.numcep
        self.batch_size = int(parameters['batch_size'])
        self.epochs = int(parameters['epochs'])
        self.learningrate = float(parameters['learningrate'])
        self.model_dir = parameters['model_dir']
        self.start_step = int(parameters['start_step'])
        self.report_step = int(parameters['report_step'])
        self.num_gpus = int(parameters['num_gpus'])
        self.label_context = int(parameters['label_context'])
        # Scale the batch across GPUs (num_gpus == 0 means CPU only).
        self.batch_size = self.batch_size * \
            (self.num_gpus if self.num_gpus > 0 else 1)
        self.punc_regex = parameters['punc_regex']
        self.network = parameters['network']
        self.train_input = None
        self.test_input = None
        self.mfcc_input = None
        self.mfcc_output = None
        self.sym_file = None
        if 'sym_file' in parameters:
            self.sym_file = parameters['sym_file']
        if isTraining:
            self.symbols = Symbols(self.label_context, self.sym_file)
        else:
            self.symbols = Symbols(self.label_context)
        parameters = self.cfg['Train']
        if 'input' in parameters:
            self.train_input = parameters['input']
        parameters = self.cfg['MFCC Featurizer']
        if 'input' in parameters:
            self.mfcc_input = parameters['input']
        if 'output' in parameters:
            self.mfcc_output = parameters['output']
        self.start_marker = self.end_marker = None
        if 'start_marker' in parameters:
            self.start_marker = parameters['start_marker']
        if 'end_marker' in parameters:
            self.end_marker = parameters['end_marker']
        parameters = self.cfg['Test']
        if 'input' in parameters:
            self.test_input = parameters['input']
        elif not self.train_input:
            raise ValueError("Missing 'test_input' in configuration file: " +
                             configfile)

    def load_network(self, fortraining=False):
        """Import the module named in `network` and instantiate its class."""
        package = self.network.split('.')
        classname = package[-1]
        module = importlib.import_module('.'.join(package[:-1]))
        return getattr(module, classname)(self, fortraining=fortraining)

    def print_config(self):
        """Log every effective configuration value."""
        config_str = '\n'
        config_str += ('samplerate=%d\n' % self.samplerate)
        config_str += ('numcep=%d\n' % self.numcep)
        config_str += ('numcontext=%d\n' % self.numcontext)
        config_str += ('rand_shift=%d\n' % self.rand_shift)
        config_str += ('batch_size=%d\n' % self.batch_size)
        config_str += ('epochs=%d\n' % self.epochs)
        config_str += ('learningrate=%f\n' % self.learningrate)
        config_str += ('model_dir=%s\n' % self.model_dir)
        config_str += ('start_step=%d\n' % self.start_step)
        config_str += ('report_step=%d\n' % self.report_step)
        config_str += ('num_gpus=%d\n' % self.num_gpus)
        config_str += ('label_context=%d\n' % self.label_context)
        config_str += ('punc_regex=%s\n' % self.punc_regex)
        config_str += ('network=%s\n' % self.network)
        config_str += ('sym_file=%s\n' % self.sym_file)
        config_str += ('train_input=%s\n' % self.train_input)
        config_str += ('test_input=%s\n' % self.test_input)
        config_str += ('mfcc_input=%s\n' % self.mfcc_input)
        config_str += ('mfcc_output=%s\n' % self.mfcc_output)
        config_str += ('start_marker=%s\n' % self.start_marker)
        config_str += ('end_marker=%s\n' % self.end_marker)
        logger.info(config_str)

    def write_symbols(self):
        """Persist the symbol table to the configured sym_file."""
        self.symbols.write(self.sym_file)

    def write(self, filename):
        """Write the (possibly modified) configuration back to disk."""
        logger.info('Writing configuration to: ' + filename)
        with open(filename, 'w') as configfile:
            self.cfg.write(configfile)
def get_marketdata(symbol, from_date, to_date):
    '''Retrieve stored historical prices for one symbol in a date range.'''
    store = Symbols()
    return store.select_historical_prices(symbol, from_date, to_date)
pygame.display.flip() elif symbol.type == "O": pygame.draw.rect(background, RED, [50 + offx, 50 + offy, 67, 67]) screen.blit(background, (0, 0)) pygame.display.flip() screen = pygame.display.set_mode((300, 300)) # creates the screen background = pygame.Surface((300, 300)) # creates the background image background = background.convert() board = pygame.Surface((300, 300)) pygame.draw.rect(background, WHITE, [50, 50, 201, 201]) draw_lines() screen.blit(background, (0, 0)) # blits background onto screen pygame.display.flip() # updates display symbols = Symbols() symbol = Symbol() while not end: for event in pygame.event.get(): # deteremines which key is pressed if event.type == pygame.QUIT: # if you click the quit :( end = True # breaks the while loop if event.type == pygame.MOUSEBUTTONDOWN: mouse_pos = pygame.mouse.get_pos() mx = mouse_pos[0] my = mouse_pos[1] if 50 <= mx <= 251 and 50 <= my <= 251: grid_pos = (get_pos(mx), get_pos(my)) if occupied_check(symbols, grid_pos): symbol.add(grid_pos)
def install():
    """Register the interpreter's built-in symbols.

    Installs the boolean literals, all natives backed by a Python
    implementation, and the operators the evaluator handles specially
    (their Native has no Python callable).
    """
    Symbols.set("#t", True)
    Symbols.set("#f", False)
    # Natives with a Python implementation: (name, callable, arity).
    for name, fn, arity in (
            ("apply", Native.apply, 2),
            ("car", Native.car, 1),
            ("cdr", Native.cdr, 1),
            ("cons", Native.cons, 2),
            ("define", Native.define, 2),
            ("display", Native.display, 1),
            ("equal?", Native.equal, 2),
            ("if", Native._if, 3),
            ("lambda", Native._lambda, 2),
            ("length", Native.length, 1),
            ("list", Native.list, -1),
            ("list?", Native.islist, 1),
            ("load", Native.load, 1),
            ("null?", Native.isnull, 1),
            ("quote", Native.quote, -1),
    ):
        Symbols.set(name, Native(name, fn, arity))
    # Variadic arithmetic operators (evaluator-implemented).
    for op in ("+", "-", "*", "/"):
        Symbols.set(op, Native(op, None, -1))
    # Binary comparison operators (evaluator-implemented).
    for op in (">", "<", ">=", "<=", "="):
        Symbols.set(op, Native(op, None, 2))
class GVTasks:
    """Enumerate running tasks through several independent channels and
    cross-check the results to spot processes hidden by a rootkit.

    Python 2 code: print statements, dict.has_key and os.popen2 are used.
    """

    # Offsets inside the kernel task_struct, discovered heuristically by
    # getTasksMemory() from a raw memory dump.
    offset_list = 0
    offset_list_next = 0
    offset_name = 0
    offset_pid = 0
    offset_uid = 0
    # Task lists gathered through the different channels:
    # raw memory, /bin/ps, /proc walk, /proc brute force, kill(pid, 0).
    tasks_mem = Tasks()
    tasks_ps = Tasks()
    tasks_proc = Tasks()
    tasks_procforce = Tasks()
    tasks_kill = Tasks()
    tasks_check = Tasks()  # tasks present in one source but missing in another

    def __init__(self, mmemory, typeaccess=0):
        """Bind the kernel-memory reader and build its symbol resolver.

        :raises TypeError: if mmemory is not a Memory instance.
        """
        if not isinstance(mmemory, Memory):
            raise TypeError("ERREUR")
        self.mmemory = mmemory
        self.symbols = Symbols(self.mmemory)
        self.typeaccess = typeaccess

    def getOffsetPid(self, data):
        """ Find the offset of pid in the task_struct struct """
        i = 0
        find1 = find2 = -1
        offset = 0
        # Scan 32-bit words; after seeing a word equal to 1, look for two
        # consecutive equal words (heuristic for the pid field).
        while ((i < len(data)) and (offset == 0)):
            if (find1 == 1):
                find2 = find1
            find1 = unpack("<L", data[i:i + 4])[0]
            if (find1 == find2):
                offset = i
            i = i + 4
        offset = offset + self.offset_list
        return offset

    def getOffsetUid(self, data):
        """ Find the offset of uid in the task_struct struct """
        # Look for a 28-byte run of NUL bytes near the credential block;
        # prefer the last occurrence when it sits early enough in `data`.
        offset = data.find(
            "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        )
        offset2 = data.rfind(
            "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        )
        if (offset2 < 400):
            offset = offset2 - 4
        offset = offset + self.offset_pid
        return offset

    def getOffsetListNext(self, data):
        """ Find the offset of next task in the list_head struct """
        i = 0
        first_addr = second_addr = find1 = find2 = 0
        offset = -1
        # Two consecutive identical kernel addresses (> 0xc0000000) that
        # repeat 16 bytes apart mark the list_head pair.
        while ((i < len(data)) and (offset == -1)):
            second_addr = first_addr
            first_addr = unpack("<L", data[i:i + 4])[0]
            if (first_addr == second_addr and first_addr > 0xc0000000
                    and i > 0x20):
                find2 = find1
                find1 = i
                if ((find1 - find2) == 16):
                    offset = find2 + 20
            i = i + 4
        return offset

    def getOffsetList(self, data):
        """ Find the offset of tasks in the task_struct struct """
        i = 0
        l = []
        count = 0
        prec = ""
        find = -1
        offset = 0
        # Render each 32-bit word as hex, then look for the second pair of
        # consecutive duplicates -- heuristic for the tasks list pointers.
        for i in range(0, len(data), 4):
            var = "%x" % unpack("<L", data[i:i + 4])[0]
            l.append(var)
        for j in l:
            if (prec == j):
                count = count + 1
                if (find != 0 and count == 2):
                    offset = l.index(j)
                    offset = offset - 4
                    find = 0
            if (j != "0"):
                prec = j
        return offset

    def getTasksMemory(self):
        """ Get tasks list by /dev/kmem or /dev/mem """
        current_addr = list_addr = 0
        name = ""
        self.mmemory.open("r", self.typeaccess)
        # Start from init_task and discover the struct offsets on the fly.
        init_task = self.symbols.find_symbol("init_task")
        data = self.mmemory.read(init_task, 1000)
        self.offset_name = data.find("swapper")
        data = self.mmemory.read(init_task + 40, 100)
        self.offset_list = self.getOffsetList(data)
        self.offset_list = self.offset_list * 4 + 40
        addr = unpack("<L",
                      self.mmemory.read(init_task + self.offset_list, 4,
                                        0))[0]
        data = self.mmemory.read(addr, 256, 0)
        self.offset_list_next = self.getOffsetListNext(data)
        current_addr = unpack(
            "<L", self.mmemory.read(addr + self.offset_list_next, 4, 0))[0]
        data = self.mmemory.read(current_addr + self.offset_list, 100, 0)
        self.offset_pid = self.getOffsetPid(data)
        data = self.mmemory.read(current_addr + self.offset_pid, 500, 0)
        self.offset_uid = self.getOffsetUid(data)
        # Walk the circular task list until we come back around to swapper.
        while (name != "swapper"):
            name = self.mmemory.read(current_addr + self.offset_name, 16, 0)
            name = name[:string.find(name, '\0')]
            pid = unpack(
                "<L", self.mmemory.read(current_addr + self.offset_pid, 4,
                                        0))[0]
            uid = unpack(
                "<L", self.mmemory.read(current_addr + self.offset_uid, 4,
                                        0))[0]
            gid = unpack(
                "<L",
                self.mmemory.read(current_addr + self.offset_uid + 16, 4,
                                  0))[0]
            self.tasks_mem.map_tasks[int(pid)] = [
                int(uid), int(gid), name, current_addr, 0
            ]
            self.tasks_mem.list_tasks.append(int(pid))
            list_addr = unpack(
                "<L", self.mmemory.read(current_addr + self.offset_list, 4,
                                        0))[0]
            current_addr = unpack(
                "<L", self.mmemory.read(list_addr + self.offset_list_next, 4,
                                        0))[0]
        # Drop the swapper entry that terminated the walk.
        self.tasks_mem.map_tasks.pop(0)
        self.tasks_mem.list_tasks.pop()
        self.mmemory.close()

    def getTasksPs(self):
        """ Get tasks list by /bin/ps """
        i, o = os.popen2('/bin/ps -eo user,pid,ruid,rgid,state,comm')
        j = o.readline()  # discard the header line
        j = o.readline()
        while (j != ""):
            l = j.split()
            self.tasks_ps.map_tasks[int(
                l[1])] = [int(l[2]), int(l[3]), l[5], 0, 0]
            self.tasks_ps.list_tasks.append(int(l[1]))
            j = o.readline()
        o.close()
        i.close()

    def openProcTaskStatus(self, file):
        """Parse pid, uid, gid and name out of a /proc/<pid>/status file.

        Returns [] when the file cannot be opened (task vanished).
        NOTE(review): assumes fixed line positions in status -- Name on
        line 0, Pid on 4, Uid on 7, Gid on 8; kernel-version dependent.
        """
        fd = 0
        try:
            fd = open(file, "r")
        except IOError:
            return []
        l = fd.readlines()
        name = l[0].split()
        pid = l[4].split()
        uid = l[7].split()
        gid = l[8].split()
        fd.close()
        return [pid[1], uid[1], gid[1], name[1]]

    def getTasksProc(self):
        """ Get tasks list by /proc """
        for rep in os.walk("/proc"):
            # Numeric directory names under /proc are process ids.
            if (rep[0][6:].isdigit()):
                l = self.openProcTaskStatus(rep[0] + "/status")
                self.tasks_proc.map_tasks[int(
                    l[0])] = [int(l[1]), int(l[2]), l[3], 0, 0]
                self.tasks_proc.list_tasks.append(int(l[0]))
                # Record the process's threads too (flag [4] == 1).
                if (os.access(rep[0] + "/task", os.F_OK) == True):
                    for srep in os.listdir(rep[0] + "/task"):
                        if (srep != l[0]):
                            ll = self.openProcTaskStatus(rep[0] + "/task/" +
                                                         srep + "/status")
                            self.tasks_proc.map_tasks[int(ll[0])] = [
                                int(ll[1]), int(ll[2]), ll[3], 0, 1
                            ]
                            self.tasks_proc.list_tasks.append(int(ll[0]))

    def getTasksProcForce(self):
        """ Get tasks list by bruteforcing /proc """
        # Probe every candidate pid directly instead of trusting readdir,
        # which a rootkit may filter.
        for i in range(1, 65535, 1):
            if (os.access("/proc/" + str(i) + "/status", os.F_OK) == True):
                l = self.openProcTaskStatus("/proc/" + str(i) + "/status")
                if (l != []):
                    self.tasks_procforce.map_tasks[int(
                        l[0])] = [int(l[1]), int(l[2]), l[3], 0, 0]
                    self.tasks_procforce.list_tasks.append(int(l[0]))
                    if (os.access("/proc/" + str(i) + "/task",
                                  os.F_OK) == True):
                        for srep in os.listdir("/proc/" + str(i) + "/task"):
                            if (srep != l[0]):
                                ll = self.openProcTaskStatus("/proc/" +
                                                             str(i) +
                                                             "/task/" + srep +
                                                             "/status")
                                self.tasks_procforce.map_tasks[int(ll[0])] = [
                                    int(ll[1]), int(ll[2]), ll[3], 0, 1
                                ]
                                self.tasks_procforce.list_tasks.append(
                                    int(ll[0]))

    def getTasksKill(self):
        """ Get tasks list by kill """
        # Signal 0 performs only the existence/permission check.
        for i in range(1, 65535):
            try:
                os.kill(i, 0)
                self.tasks_kill.map_tasks[i] = [0, 0, None, 0, 0]
                self.tasks_kill.list_tasks.append(i)
            except OSError:
                None  # pid not present (or not signalable); skip it

    def _simpleViewTasks(self, tasks):
        """Print one line per task: pid, uid, gid, name and address."""
        print "PID\t UID\t GID\t NAME\t\t\t ADDR"
        for i in tasks.list_tasks:
            print "%d\t %d\t %d\t %-16s\t @ 0x%x" % (
                i, tasks.map_tasks[i][0], tasks.map_tasks[i][1],
                tasks.map_tasks[i][2], tasks.map_tasks[i][3])

    def viewTasks(self):
        """Dump the task list obtained from raw memory."""
        self.getTasksMemory()
        self._simpleViewTasks(self.tasks_mem)

    def _checkTasks(self, ref, cmp, check):
        """Record in `check` every non-thread task present in `ref` but
        missing from `cmp` (skipping tasks already flagged)."""
        for i in ref.map_tasks:
            if (cmp.map_tasks.has_key(i) == False
                    and ref.map_tasks[i][4] == 0):
                if (self.tasks_check.map_tasks.has_key(i) == False):
                    check.map_tasks[i] = ref.map_tasks.get(i)
                    check.list_tasks.append(i)

    def checkViewTasks(self):
        """Collect tasks through every channel, diff them pairwise and
        report any task hidden from one of the sources."""
        self.getTasksPs()
        self.getTasksProc()
        self.getTasksProcForce()
        self.getTasksMemory()
        self.getTasksKill()
        self._checkTasks(self.tasks_proc, self.tasks_ps, self.tasks_check)
        self._checkTasks(self.tasks_procforce, self.tasks_proc,
                         self.tasks_check)
        self._checkTasks(self.tasks_mem, self.tasks_procforce,
                         self.tasks_check)
        self._checkTasks(self.tasks_kill, self.tasks_proc, self.tasks_check)
        if (self.tasks_check.list_tasks != []):
            print "LISTS OF TASKS HIDE"
            self._simpleViewTasks(self.tasks_check)
        else:
            print "NO TASKS HIDE"
            print "YOUR SYSTEM SEEMS BE SAFE !"
class TestMarketDataDb(unittest.TestCase):
    """Integration tests for historical-price storage on Symbols.

    setUp swaps the real collection for a throwaway
    ``test_marketdata.test_symbols`` collection on a local MongoDB, so
    these tests require a running mongod but never touch real data.
    """

    def setUp(self):
        self.symbols = Symbols()
        conn = pymongo.MongoClient()
        db = conn.test_marketdata
        symbols = db.test_symbols
        self.symbols._symbols = symbols  # replaces real db with test one
        self.symbols.clean()
        self.symbols.add(['AAPL'])

    def test_single_hist_price(self):
        """A single inserted bar round-trips with all OHLCV fields intact."""
        dt = datetime(2013, 7, 13)
        self.symbols.insert_historical_prices(
            'AAPL', [(dt, 100.0, 101.0, 99.0, 100.0, 100, 100.0)])
        res = self.symbols.select_historical_prices('AAPL', dt, dt)
        self.assertEqual(1, len(res))
        self.assertEqual(dt, res[0]['date'])
        self.assertEqual(100.0, res[0]['open'])
        self.assertEqual(101.0, res[0]['high'])
        self.assertEqual(99.0, res[0]['low'])
        self.assertEqual(100.0, res[0]['close'])
        self.assertEqual(100, res[0]['volume'])
        self.assertEqual(100.0, res[0]['adj_close'])

    def test_hist_price_for_undefined_date(self):
        """Selecting a date with no data returns an empty list."""
        dt = datetime(2013, 7, 13)
        res = self.symbols.select_historical_prices('AAPL', dt, dt)
        self.assertEqual([], res)

    def test_three_hist_price(self):
        """Bars inserted out of order come back sorted by date."""
        d1 = datetime(2013, 7, 13)
        d2 = datetime(2013, 7, 14)
        d3 = datetime(2013, 7, 15)
        self.symbols.insert_historical_prices(
            'AAPL', [(d1, 100.0, 101.0, 99.0, 100.0, 100, 100.0)])
        self.symbols.insert_historical_prices(
            'AAPL', [(d3, 99.0, 100.0, 98.0, 99.0, 99, 99.0)])
        self.symbols.insert_historical_prices(
            'AAPL', [(d2, 101.0, 102.0, 100.0, 101.0, 101, 101.0)])
        res = self.symbols.select_historical_prices('AAPL', d1, d3)
        self.assertEqual(3, len(res))
        self.assertEqual(d1, res[0]['date'])
        self.assertEqual(100.0, res[0]['open'])
        self.assertEqual(d2, res[1]['date'])
        self.assertEqual(101.0, res[1]['open'])
        self.assertEqual(d3, res[2]['date'])
        self.assertEqual(99.0, res[2]['open'])

    def test_date_filtering(self):
        """Selection range is inclusive and excludes bars outside it."""
        d1 = datetime(2013, 7, 13)
        d2 = datetime(2013, 7, 14)
        d3 = datetime(2013, 7, 15)
        d4 = datetime(2013, 7, 16)
        self.symbols.insert_historical_prices(
            'AAPL', [(d1, 100.0, 101.0, 99.0, 100.0, 100, 100.0)])
        self.symbols.insert_historical_prices(
            'AAPL', [(d2, 101.0, 102.0, 100.0, 101.0, 101, 101.0)])
        self.symbols.insert_historical_prices(
            'AAPL', [(d3, 99.0, 100.0, 98.0, 99.0, 99, 99.0)])
        self.symbols.insert_historical_prices(
            'AAPL', [(d4, 98.0, 99.0, 97.0, 98.0, 98, 98.0)])
        res = self.symbols.select_historical_prices('AAPL', d2, d3)
        self.assertEqual(2, len(res))
        self.assertEqual(d2, res[0]['date'])
        self.assertEqual(101.0, res[0]['open'])
        self.assertEqual(d3, res[1]['date'])
        self.assertEqual(99.0, res[1]['open'])

    def test_reinsert_historical_prices(self):
        """Re-inserting an existing bar does not create a duplicate."""
        d1 = datetime(2013, 7, 13)
        d2 = datetime(2013, 7, 14)
        d3 = datetime(2013, 7, 15)
        self.symbols.insert_historical_prices(
            'AAPL', [(d1, 100.0, 101.0, 99.0, 100.0, 100, 100.0),
                     (d2, 101.0, 102.0, 100.0, 101.0, 101, 101.0),
                     (d3, 99.0, 100.0, 98.0, 99.0, 99, 99.0)])
        res = self.symbols.select_historical_prices('AAPL', d1, d3)
        self.assertEqual(3, len(res))
        self.symbols.insert_historical_prices(
            'AAPL', [(d2, 101.0, 102.0, 100.0, 101.0, 101, 101.0)])
        res = self.symbols.select_historical_prices('AAPL', d1, d3)
        self.assertEqual(3, len(res))
        self.assertEqual(d1, res[0]['date'])
        self.assertEqual(100.0, res[0]['open'])
        self.assertEqual(d2, res[1]['date'])
        self.assertEqual(101.0, res[1]['open'])
        self.assertEqual(d3, res[2]['date'])
        self.assertEqual(99.0, res[2]['open'])

    def test_last_date(self):
        """last_date returns the maximum stored date regardless of insert order."""
        d = [
            datetime(2013, 7, 16),
            datetime(2013, 7, 14),
            datetime(2013, 7, 13),
            datetime(2013, 7, 17),
            datetime(2013, 7, 12),
            datetime(2013, 7, 15)
        ]
        for x in d:
            self.symbols.insert_historical_prices(
                'AAPL', [(x, 1.0, 1.0, 1.0, 1.0, 1, 1.0)])
        res = self.symbols.last_date('AAPL')
        # assertEquals is a deprecated alias; use assertEqual
        self.assertEqual(datetime(2013, 7, 17), res)

    def test_last_date_when_no_data(self):
        """last_date is None when the symbol has no stored bars."""
        res = self.symbols.last_date('AAPL')
        self.assertIsNone(res)
def mirror_full_stack_symbols(self): # TODO: Only mirror the first and last columns? if len(set(self.window[0]))==1 and self.window[0][0]==Symbols.index("Dog"): self.window[-1] = self.window[0] elif len(set(self.window[-1]))==1 and self.window[-1][0]==Symbols.index("Dog"): self.window[0] = self.window[-1]
class TestSymbols(unittest.TestCase): def setUp(self): self.symbols = Symbols() conn = pymongo.MongoClient() db = conn.test_marketdata symbols = db.test_symbols self.symbols._symbols = symbols # replaces real db with test one self.symbols.clean() def test_clean(self): self.symbols.add(['AAPL']) self.symbols.clean() act = self.symbols.symbols() self.assertEquals(0, len(act)) def test_add(self): exp = ['AAPL'] self.symbols.add(exp) self.assertListEqual(exp, self.symbols.symbols()) def test_remove(self): exp = ['AAPL'] added = ['MSFT'] self.symbols.add(exp + added) self.symbols.remove(added) self.assertListEqual(exp, self.symbols.symbols())
class Protocall:
    """Tree-walking interpreter for protocall_pb2 statement/expression protos.

    Dispatches on which oneof-style field is set (HasField) for each
    statement: assignment, array assignment, call, conditional, return,
    while loop, and user-defined-function definition. Callable names are
    resolved against three tables: `subrs` (special forms that receive the
    interpreter itself), `builtins` (plain Python functions), and `udfs`
    (blocks registered at runtime via `define`).
    """

    def __init__(self, symbols=None, tracing=False):
        # symbols: optional shared symbol table; a fresh Symbols() otherwise.
        # tracing: when True, execute() pauses on each statement for input.
        if symbols is not None:
            self.symbols = symbols
        else:
            self.symbols = Symbols()
        # Harvest every public name from the subrs/builtins modules.
        self.subrs = dict([(name, getattr(subrs, name)) for name in dir(subrs) if not name.startswith("_")])
        self.builtins = dict([(name, getattr(builtins, name)) for name in dir(builtins) if not name.startswith("_")])
        self.udfs = {}
        self.tracing = tracing

    def enable_tracing(self):
        self.tracing = True

    def disable_tracing(self):
        self.tracing = False

    def execute(self, block):
        """Execute each statement in a Block proto; return the last result.

        NOTE(review): `result` is only assigned inside the dispatch chain,
        so a block whose only statement raises before assignment would make
        the final `return result` fail with NameError — presumably never
        hit in practice.
        """
        for statement in block.statement:
            print "statement:", statement
            if self.tracing:
                # Interactive single-step: show statement + locals, wait for ENTER.
                print "hit ENTER for statement:"
                print text_format.MessageToString(statement, as_one_line=True)
                print "with local variables:",
                print self.symbols.locals()
                line = sys.stdin.readline().strip()
            try:
                if statement.HasField("assignment"):
                    result = self.assignment(statement)
                elif statement.HasField("array_assignment"):
                    result = self.array_assignment(statement)
                elif statement.HasField("call"):
                    result = self.invoke(statement.call)
                elif statement.HasField("conditional"):
                    # if / elif* / else — the for-else runs the else scope
                    # only when no elif matched.
                    e_result = self.evaluate(statement.conditional.if_scope.expression)
                    if is_true(e_result):
                        result = self.execute(statement.conditional.if_scope.scope.block)
                    else:
                        for expression_scope in statement.conditional.elif_scope:
                            e_result = self.evaluate(expression_scope.expression)
                            if is_true(e_result):
                                result = self.execute(expression_scope.scope.block)
                                break
                        else:
                            if len(statement.conditional.else_scope.block.statement):
                                result = self.execute(statement.conditional.else_scope.block)
                elif statement.HasField("return_"):
                    # Normalize the returned value into an Expression proto.
                    e_result = self.evaluate(statement.return_.expression)
                    if isinstance(e_result, protocall_pb2.Expression):
                        result = e_result
                    elif isinstance(e_result, protocall_pb2.Atom):
                        result = protocall_pb2.Expression()
                        result.atom.CopyFrom(e_result)
                    elif isinstance(e_result, protocall_pb2.Array):
                        # NOTE(review): copies an Array into result.atom, unlike
                        # the Atom branch above — looks suspicious; confirm the
                        # proto allows this CopyFrom.
                        result = protocall_pb2.Expression()
                        result.atom.CopyFrom(e_result)
                    elif isinstance(e_result, int):
                        result = protocall_pb2.Expression()
                        result.atom.literal.integer.value = e_result
                    elif isinstance(e_result, str):
                        result = protocall_pb2.Expression()
                        result.atom.literal.string.value = e_result
                    else:
                        print e_result.__class__
                        raise RuntimeError
                    ## Should call return here
                elif statement.HasField("while_"):
                    # Re-evaluate the condition before each iteration.
                    while True:
                        e_result = self.evaluate(statement.while_.expression_scope.expression)
                        if is_true(e_result):
                            self.execute(statement.while_.expression_scope.scope.block)
                        else:
                            break
                    result = None
                elif statement.HasField("define"):
                    ## Only support definition of top-level fields
                    identifier = statement.define.field.component[0].name
                    self.udfs[identifier] = statement.define.scope.block
                    result = None
                else:
                    raise RuntimeError(str(statement))
            except Exception as e:
                # NOTE(review): leftover debug trap — drops into pdb on any
                # failure and swallows the exception; should re-raise instead.
                print "Execution failed at line:"
                print text_format.MessageToString(statement, as_one_line=True)
                import pdb; pdb.set_trace()
                print "foo"
        return result

    def handle_atom(self, atom):
        """Resolve an Atom proto to a value: literal, nested expression,
        local field lookup, or array element reference."""
        if atom.HasField("literal"):
            result = atom
        elif atom.HasField("expression"):
            result = self.evaluate(atom.expression)
        elif atom.HasField("field"):
            result = self.symbols.lookup_local(atom.field)
        elif atom.HasField("array_ref"):
            array = self.symbols.lookup_local(atom.array_ref.field)
            result = self.evaluate(array.element[atom.array_ref.index.value])
        else:
            raise RuntimeError
        return result

    def evaluate(self, expression):
        """Evaluate an Expression proto to an Atom (or call result)."""
        result = None
        assert isinstance(expression, protocall_pb2.Expression), type(expression)
        if expression.HasField("atom"):
            if expression.atom.literal.HasField("array"):
                # Evaluate each array element in place.
                for element in expression.atom.literal.array.element:
                    e = protocall_pb2.Expression()
                    e.atom.CopyFrom(self.evaluate(element))
                    element.CopyFrom(e)
                result = expression.atom
            else:
                result = self.handle_atom(expression.atom)
        elif expression.HasField("call"):
            result = self.invoke(expression.call)
        elif expression.HasField("arithmetic_operator"):
            print "evaluate arithmetic operator"
            print "left before=", expression.arithmetic_operator.left
            print "right before=", expression.arithmetic_operator.right
            left = self.evaluate(expression.arithmetic_operator.left)
            right = self.evaluate(expression.arithmetic_operator.right)
            print "left=", left
            print "right=", right
            r = arithmetic_operators[expression.arithmetic_operator.operator](left, right)
            result = protocall_pb2.Atom()
            result.literal.CopyFrom(r)
        elif expression.HasField("comparison_operator"):
            left = self.evaluate(expression.comparison_operator.left)
            right = self.evaluate(expression.comparison_operator.right)
            r = comparison_operators[expression.comparison_operator.operator](left, right)
            result = protocall_pb2.Atom()
            result.literal.CopyFrom(r)
        else:
            raise RuntimeError
        # NOTE(review): leftover debug statements — set_trace fires on every
        # evaluation; remove once debugging is done.
        import pdb; pdb.set_trace()
        print "expression:", expression
        return result

    def invoke(self, call):
        """Invoke a callable named by `call.field`: subr, builtin, or UDF.

        Builtins/UDFs get a fresh symbol frame with evaluated arguments
        bound by name; subrs receive the interpreter and raw arguments.
        """
        # For now, only support fields with a single component
        assert (len(call.field.component) == 1)
        name = call.field.component[0].name
        if name in self.subrs:
            # Subrs receive unevaluated arguments plus the symbol table.
            function = self.subrs[name]
            result = function(self, call.argument, self.symbols)
        elif name in self.builtins or name in self.udfs:
            if name in self.builtins:
                function = self.builtins[name]
            elif name in self.udfs:
                function = self.udfs[name]
            else:
                raise RuntimeError
            # Evaluate arguments eagerly, then bind them in a new frame.
            args = [ (arg.identifier.name, self.evaluate(arg.expression)) for arg in call.argument ]
            self.symbols.push_frame()
            for arg in args:
                f = protocall_pb2.Field()
                f.component.add().name = arg[0]
                self.symbols.add_local_symbol(f, arg[1])
            if type(function) == types.FunctionType:
                result = function(args, self.symbols)
            elif isinstance(function, protocall_pb2.Block):
                result = self.execute(function)
            self.symbols.pop_frame()
        else:
            raise KeyError, name
        return result

    def assignment(self, a):
        """Execute an assignment statement; store and return the value."""
        print "assignment:", a
        e = self.evaluate(a.assignment.expression)
        # Unwrap Expression -> Atom before extracting the literal value.
        if isinstance(e, protocall_pb2.Expression):
            e = e.atom
        if isinstance(e, protocall_pb2.Atom):
            if e.HasField("literal"):
                if e.literal.HasField("integer"):
                    v = e.literal.integer.value
                elif e.literal.HasField("string"):
                    v = e.literal.string.value
                elif e.literal.HasField("array"):
                    v = e.literal.array
                elif e.literal.HasField("proto"):
                    # proto literals are stored as the whole Atom
                    v = e
                else:
                    raise RuntimeError
            else:
                raise RuntimeError
        else:
            raise RuntimeError
        self.symbols.add_local_symbol(a.assignment.field, v)
        return v

    def array_assignment(self, a):
        """Execute `arr[i] = expr`: update the stored array element."""
        e = self.evaluate(a.array_assignment.expression)
        n = self.symbols.lookup(a.array_assignment.array_ref.field)
        # NOTE(review): `x` is computed but unused.
        x = a.array_assignment.array_ref.index.value
        n.element[a.array_assignment.array_ref.index.value].atom.CopyFrom(e)
        self.symbols.add_local_symbol(a.array_assignment.array_ref.field, e)
        return e
def main(argv):
    """Convert an LTTng kvm_x86_hypercall trace into Chrome trace-event JSON.

    argv[0] (optional) is the trace directory path. Flags:
      -h        print usage (raised as TypeError(HELP))
      -o FILE   output JSON path (default ./traces/traces.json)
      -s, -p    accepted for compatibility but currently unused

    Writes the trace-event JSON to the output path and a per-function
    statistics CSV under ./rscript/.
    """
    output = None
    start_recording = False
    path = "~/lttng-traces/kernelspace-tracing-20161207-135859"
    try:
        path = argv[0]
    except IndexError:  # no positional argument given: keep the default
        if not path:
            raise TypeError(HELP)
    try:
        # BUG FIX: '-o' was handled in the loop below but absent from the
        # optstring ("hs:p:"), so passing -o always raised GetoptError and
        # `output` could never be set from the command line.
        opts, args = getopt.getopt(argv[1:], "ho:s:p:")
    except getopt.GetoptError:
        raise TypeError(HELP)
    for opt, arg in opts:
        if opt == '-h':
            raise TypeError(HELP)
        elif opt == '-o':
            output = arg
    if not output:
        output = "./traces/traces.json"

    # Create TraceCollection and add trace:
    program_symbols = Symbols("./program-symbols.txt")
    kernel_symbols = Symbols("./symbols.txt")
    traces = babeltrace.reader.TraceCollection()
    trace_handle = traces.add_traces_recursive(path, "ctf")
    if trace_handle is None:
        raise IOError("Error adding trace")

    trace_events = []
    # Collect CSV rows in a list and join once at the end — repeated string
    # += in the event loop was quadratic.
    statistics_rows = ["function_name,guest_duration,host_duration,overhead,depth\n"]
    print("--- Converting traces ---")
    for event in traces.events:
        if event.name != "kvm_x86_hypercall":
            continue
        fields = dict()
        for k, v in event.items():
            field_type = event._field(k).type
            fields[k] = format_value(field_type, v)
        nr = fields['nr']
        is_sched_switch = (nr == SCHED_SWITCH_HYPERCALL_NR)
        if is_sched_switch:
            # sched_switch hypercalls are currently ignored; an instant-event
            # emission used to live here (removed dead commented-out code and
            # a stray no-op `nr` statement).
            pass
        else:
            is_kernelspace = (nr == KERNELSPACE_HYPERCALL_NR)
            is_userspace = (nr == USERSPACE_HYPERCALL_NR)
            function_address = fields['a0']
            # Fall back to the raw address when no symbol table matches.
            function_name = function_address
            if is_kernelspace:
                function_name = kernel_symbols.get_symbol_name(function_address)
            if is_userspace:
                function_name = program_symbols.get_symbol_name(function_address)
            # Only record between entry and exit of the traced program's main.
            if function_name == "main":
                start_recording = True
            if not start_recording:
                continue
            is_entry = fields['a1'] == 0
            if is_entry:
                # add entry event until we find the exit event
                if function_address not in function_entry_map:
                    function_entry_map[function_address] = []
                depth = fields["a3"]
                if is_kernelspace:
                    call = {'start': event.timestamp, 'end': event.timestamp + 10, 'name': function_name}
                    if has_conflicts(call, depth):
                        continue
                function_entry_map[function_address].append({
                    'pid': 0,
                    'tid': nr,
                    'name': function_address,
                    'ph': 'X',
                    'dur': 0,
                    'ts': event.timestamp,
                    'args': {
                        'cpu': fields['a2'],
                        'depth': depth
                    },
                })
            # Handle exit: pop the matching entry and fill in durations.
            elif function_address in function_entry_map:
                if len(function_entry_map[function_address]) == 0:
                    print("not found", function_address, function_name)
                    continue
                event_json = function_entry_map[function_address].pop()
                guest_duration = fields['a2']
                depth = fields['a3']
                host_duration = event.timestamp - event_json['ts']
                # float() guards against integer truncation under Python 2.
                overhead = round((1 - (float(guest_duration) / host_duration)) * 100, 2)
                event_json['name'] = function_name
                event_json['ts'] = ns_to_us(event_json['ts'])
                event_json['dur'] = ns_to_us(host_duration)
                event_json['tdur'] = ns_to_us(guest_duration)
                event_json['args']['depth'] = depth
                event_json['args']['overhead'] = overhead
                event_json['args']['guest_duration_ns'] = guest_duration
                event_json['args']['host_duration_ns'] = host_duration
                trace_events.append(event_json)
                # BUG FIX: the format string had only four placeholders for
                # five arguments, so `depth` was silently dropped even though
                # the CSV header declares a depth column.
                statistics_rows.append('"{}",{},{},{},{}\n'.format(
                    function_name, guest_duration, host_duration, overhead, depth))
                if function_name == "main":
                    start_recording = False
    print("--- Done ---")

    # Chrome trace-viewer metadata: thread/process names and sort order.
    add_metadata(trace_events, "thread_name", 0, KERNELSPACE_HYPERCALL_NR, {'name': "Kernelspace"})
    add_metadata(trace_events, "thread_name", 0, USERSPACE_HYPERCALL_NR, {'name': "Userspace"})
    add_metadata(trace_events, "process_name", 0, USERSPACE_HYPERCALL_NR, {'name': "VM0"})
    add_metadata(trace_events, "process_labels", 0, USERSPACE_HYPERCALL_NR, {'labels': "Ubuntu 16.04"})
    add_metadata(trace_events, "thread_sort_index", 0, KERNELSPACE_HYPERCALL_NR, {'sort_index': -5})
    add_metadata(trace_events, "thread_sort_index", 0, USERSPACE_HYPERCALL_NR, {'sort_index': -10})
    content = json.dumps({
        "traceEvents": trace_events,
        "displayTimeUnit": "ns"
    })
    with open(output, "w") as f:
        f.write(content)
    # detect_conflicts(level_calls)
    statistics_output = "./rscript/statistics_" + os.path.splitext(os.path.basename(output))[0] + ".csv"
    with open(statistics_output, "w") as stats_file:
        stats_file.write("".join(statistics_rows))