def findfavoritesbeforeht():
    """Scan live first-half games and send a Telegram alert when a
    pre-match favorite is behind, or not ahead late in the first half.

    Reads the module-level ``allgamesdf`` DataFrame; sends messages to a
    fixed Telegram chat as a side effect. Returns None.

    FIX: replaced two bare ``except:`` clauses with ``except Exception``
    and extracted the duplicated summary/stats building into a helper.
    """
    livefhgames = allgamesdf[(allgamesdf['progress'] == 'live')]
    print('\n{} live FH games found'.format(len(livefhgames)))

    def _summary_and_stats(row):
        # One-line match summary plus a stats digest. Best effort: the
        # stats file may not exist yet for a freshly started match.
        summary = 'Reached {}\' in {} - {} at {}-{}'.format(
            row['timer'], row['team_home'], row['team_away'],
            row['hgoals'], row['ggoals']) + '\n' + \
            'Check stats at https://777score.com/{}\n'.format(row['777url'])
        try:
            stats = summarytext(Stats(row['match_id']).flatjsonfile)
        except Exception:  # was a bare except
            stats = 'No stats file found'
        return summary, stats

    # Pass 1: a favorite is currently losing.
    msg_title = '# Favorites fell behind #\n'
    for index, row in livefhgames.iterrows():
        summary, stats = _summary_and_stats(row)
        if (row['fav_home'] == 'x' and row['hgoals'] < row['ggoals']) or (
                row['fav_away'] == 'x' and row['hgoals'] > row['ggoals']):
            msg_favbefht = msg_title + summary + '\nodds: {} - {}\n'.format(
                row['pmodd1'], row['pmodd2']) + stats
            Telegram().send_message(chat_id=-1001403993640,
                                    msg=(row['match_id'], "FHG",
                                         msg_favbefht))

    # Pass 2: a favorite is not ahead between minute 31 and 45.
    msg_title = '# Favorites not ahead before HT #\n'
    for index, row in livefhgames.iterrows():
        summary, stats = _summary_and_stats(row)
        if (30 < int(row['timer']) < 46) and (
                (row['fav_home'] == 'x' and row['hgoals'] <= row['ggoals'])
                or (row['fav_away'] == 'x'
                    and row['hgoals'] >= row['ggoals'])):
            msg_favnotaheadbfht = msg_title + summary + \
                '\nodds: {} - {}\n'.format(row['pmodd1'],
                                           row['pmodd2']) + stats
            Telegram().send_message(chat_id=-1001403993640,
                                    msg=(row['match_id'], "FHG",
                                         msg_favnotaheadbfht))
def processFolder(datasetPath, binaryRootPath):
    """Call your executable for all sequences in all categories."""
    # Folder names that hold frames/groundtruth rather than result videos.
    auxiliary = ("FramesRGB", "FramesT", "groundtruthRgb", "groundtruthT")
    stats = Stats(datasetPath)  # STATS
    for category in getDirectories(datasetPath):
        categoryPath = os.path.join(datasetPath, category)
        if not isValidVideoFolder(categoryPath):
            continue
        stats.addCategories(category)  # STATS
        for video in getDirectories(categoryPath):
            # NOTE(review): scores are computed against the category
            # folder itself, not the video subfolder — looks intentional
            # for this dataset layout, but confirm.
            videoPath = categoryPath
            if video in auxiliary:
                continue
            binaryPath = os.path.join(binaryRootPath, category, video)
            if not isValidResultsFolder(binaryPath):
                continue
            # Score both background-subtraction methods for this video.
            for method in ("MoG", "SubSENSE"):
                confusionMatrix = compareWithGroungtruth(
                    videoPath, os.path.join(binaryPath, method))
                stats.update(category, video + '/' + method, confusionMatrix)
        stats.writeCategoryResult(category)
    stats.writeOverallResults()
def findunderdogsearly():
    """Alert via Telegram when an underdog is outplaying the favorite in
    a drawn live first-half game, judged by dangerous-attack stats.

    Reads the module-level ``allgamesdf`` DataFrame; returns None.

    FIX: replaced the bare ``except:`` with ``except Exception`` and
    removed the dead ``stats = ''`` / unused ``stats_text`` fallback
    (nothing read those values after a failure).
    """
    livefhgames = allgamesdf[(allgamesdf['progress'] == 'live')]
    print('\n{} live FH games found'.format(len(livefhgames)))
    msg_title = '# Underdogs slightly better #\n'
    for index, row in livefhgames.iterrows():
        summary = 'Reached {}\' in {} - {} at {}-{}'.format(
            row['timer'], row['team_home'], row['team_away'],
            row['hgoals'], row['ggoals']) + '\n' + \
            'Check stats at https://777score.com/{}\n'.format(row['777url'])
        try:
            s = Stats(row['match_id'])
            stats = s.flatjsonfile
            stats_text = str(s)
            # Only drawn games where one side is the pre-match favorite.
            if (row['fav_home'] == 'x' and row['hgoals'] == row['ggoals']
                    ) or (row['fav_away'] == 'x'
                          and row['hgoals'] == row['ggoals']):
                # Underdog dominating dangerous attacks with real volume.
                if (stats['dangerous attacks reldiff'] > 140
                        and row['fav_away'] == 'x'
                        and int(stats['dangerous attacks home']) > 20) or (
                            stats['dangerous attacks reldiff'] < 80
                            and row['fav_home'] == 'x'
                            and int(stats['dangerous attacks away']) > 20):
                    msg_underdog = msg_title + summary + \
                        '\nodds: {} - {}\n'.format(row['pmodd1'],
                                                   row['pmodd2']) + stats_text
                    Telegram().send_message(chat_id=-1001403993640,
                                            msg=(row['match_id'], "FHG",
                                                 msg_underdog))
        except Exception:
            # Stats file missing or malformed: skip this match.
            pass
def state_save(self):
    # State 03 of the agent's ping state machine: either start a new
    # measurement cycle (optionally opening a fresh raw-trace file) or
    # transition to idle once the configured cycle limit is reached.
    # Returns the name of the next state: "ping" or "idle".
    log = logging.getLogger("agent.ping_manager.save")
    # MAX_CYCLE == -1 means "run forever"; otherwise honour the limit.
    if self.MAX_CYCLE == -1 or self.cycle.countCycle < self.MAX_CYCLE:
        self.startCycle = time.time()
        self.cycle.countCycle += 1
        # CREATE NEW RAW FILE
        r_file = None
        if self.SAVE_RAW_TRACES == True:
            # One raw-trace file per cycle: <hostname>_<start-epoch>.rw
            filename = str(self.hostname) + "_" + str(int(
                self.startCycle)) + ".rw"
            r_file = open(os.path.join(self.RAW_DIR, filename), 'w')
            r_file.write(
                "#ID\tTARGET_IP\tSEQ_N\tCYCLE\tRTT\tTIME_RECEIVED\tTIME_SENT\n"
            )
        # CREATE STATS REFERENCES
        self.shared.stats = Stats(self.shared.destList, self.cycle.nDests,
                                  self.startCycle, self.cycle.countCycle,
                                  self.shared.parameters)
        #<-------------------------------------------------------------------------> SAVE STATS
        # Hand the stats object and raw file to the receiver; the receiver
        # owns r_file from here on (it is deliberately not closed here).
        self.shared.receiver.receive.set_stats(self.shared.stats, r_file)
        log.info("State 03 finished")
        return "ping"  # start new ping_cycle
    else:
        log.info("State 03 finished")
        return "idle"  # stop pinging
def k_means(self, n_clusters, n_data, dataset=None):
    """Cluster recipes with KMeans; for each cluster print the ten
    top-ranked ingredients and show the first four member recipes.

    Returns the fitted KMeans model.
    """
    if dataset is None:
        _, _, dataset = self.get_dataset(n_data)
    # Learning: fit the clustering model.
    kmeans = KMeans(n_clusters=n_clusters).fit(dataset)
    # Printing: report every cluster in turn.
    for label in range(n_clusters):
        member_indices = [
            i for i, assigned in enumerate(kmeans.labels_)
            if assigned == label
        ]
        member_recipes = [self.jr.recipes[i] for i in member_indices]
        ranked = Stats(self.jr).get_ingr_rank(member_recipes)
        print("\n\nCluster", label, ":")
        # Top-10 ingredient summary, skipping the 'unique' bucket.
        ranking_string = ""
        for pos in range(10):
            entry = ranked[pos]
            if entry[0] != 'unique':
                ranking_string += entry[1][0] + " (" + str(
                    entry[1][1]) + "), "
        print(ranking_string + '\n')
        # Display a handful of sample recipes from this cluster.
        for recipe_index in member_indices[:4]:
            self.jr.id = recipe_index
            self.jr.read_recipe()
    return kmeans
def __init__(self, name):
    """Create a freshly rolled character with default gear and stats."""
    self.name = name
    # Inventory and equipment slots are tied back to this character.
    self.backpack = Backpack(self)
    self.equipments = Equipments(self)
    self.avatar = WarriorAvatar()
    self.stats = Stats(self)
    # Skills unlock later: no tree and no points at creation time.
    self.skill_tree = None
    self.skill_point = 0
class Reddit:
    """Scans r/MemeEconomy for fresh submissions and counts '!invest'
    replies under the MemeInvestor_bot comment thread.

    NOTE(review): all class-level attributes (the praw client, the data
    store and the stats helper) are built once at import time from
    environment variables and are shared by every instance.
    """
    reddit = praw.Reddit(client_id=os.environ['CLIENT_ID'],
                         client_secret=os.environ['CLIENT_SECRET'],
                         password=os.environ['REDDIT_PASSWORD'],
                         user_agent=os.environ['USER_AGENT'],
                         username=os.environ['REDDIT_USERNAME'])
    data = MemeData()
    # When True, memes between 3 and 4 minutes old are recorded.
    collect_data = True
    stats = Stats(reddit)

    def get_investments(self, comment):
        # Count '!invest' replies under `comment`, recursing into reply
        # chains started by MemeInvestor_bot.
        cnt = 0
        comment.replies.replace_more(limit=100)
        for reply in comment.replies:
            if '!invest' in reply.body:
                cnt += 1
            # NOTE(review): the inner loop rebinds 'reply', shadowing the
            # outer loop variable — works, but fragile.
            for reply in reply.replies:
                if reply.author.name == 'MemeInvestor_bot':
                    cnt += self.get_investments(reply)
        return cnt

    def scan(self):
        # Collect memes posted within the last ~4 minutes together with
        # their investment counts; returns a list of meme dicts.
        retour = []
        for submission in self.reddit.subreddit('memeeconomy').new(limit=15):
            # Age of the submission in minutes.
            time_delta = (
                int(datetime.datetime.timestamp(datetime.datetime.today())) -
                submission.created_utc) / 60
            posted_at = datetime.datetime.fromtimestamp(
                submission.created_utc).strftime('%H:%M:%S')
            if time_delta > 4:
                # .new() is newest-first, so everything after is older.
                break
            investments = 0
            submission.comments.replace_more(limit=None)
            for comment in submission.comments:
                # The bot's comment is the root of the investment thread.
                if comment.author.name == 'MemeInvestor_bot':
                    invest_comment = comment
                    investments = self.get_investments(invest_comment)
                    break
            ratio = investments / time_delta
            meme = {
                'id': str(submission.id),
                'title': submission.title,
                'updoots': submission.ups,
                'investements': investments,
                'time': posted_at,
                'time_stamp': str(submission.created_utc),
                'ratio': str(ratio),
                'flair': str(submission.author_flair_text),
                'upvotes': None
            }
            if self.collect_data and 3 <= time_delta < 4:
                #self.stats.post_stats(meme)
                self.data.add(meme)
            retour.append(meme)
        return retour
def __init__(self, ID_num, num_docs, doc_front_rate, doc_back_rate,
             patient_rate, department_size, waiting_size, admit_rate,
             labs_enabled=True, lab_rate=20, CT_enabled=True, num_CTs=1,
             CT_rate=15, verbose=True):
    """Set up one emergency-department simulation instance."""
    self.ID_num = ID_num
    self.time = 0
    self.verbose = verbose  ## use debug / status messages
    # Work queues.
    self.erack = queue.PriorityQueue()
    self.rads_queue = queue.PriorityQueue()
    self.WR = queue.PriorityQueue(waiting_size)
    # Configuration parameters.
    self.num_CTs = num_CTs
    self.CT_rate = CT_rate
    self.lab_rate = lab_rate
    self.num_docs = num_docs
    self.labs_enabled = labs_enabled
    self.CT_enabled = CT_enabled
    self.patient_rate = patient_rate  ## new patients per hour
    self.department_size = department_size
    self.waiting_size = waiting_size
    self.admit_rate = admit_rate  ## average minutes to admit a patient
    self.doc_front_rate = doc_front_rate
    self.doc_back_rate = doc_back_rate
    # Staff and equipment resources.
    self.DoctorList = [
        Doctor(self, 1, self.doc_front_rate, self.doc_back_rate, 8)
        for _ in range(self.num_docs)
    ]
    self.CTList = []
    self.DispoList = []
    self.AdmitList = []
    if self.CT_enabled:
        self.CTList = [CT(self, self.CT_rate) for _ in range(self.num_CTs)]
    if self.labs_enabled:
        self.Laboratory = Laboratory(self, self.lab_rate)
    self.stats = Stats(num_docs, patient_rate, department_size, waiting_size)
def findfhg(sport=sport, date=today):
    """Find live first-half-goal (FHG) candidate games for a date/sport
    and alert via Telegram on late goalless games.

    FIX: bare ``except:`` narrowed to ``except Exception``; removed the
    unused local ``fhg_watch``.
    """
    print('Finding FHG for {} at {}'.format(date, sport))
    allgamesdf = pd.read_csv(
        path +
        'datafiles/{}/{}/allgames_{}_{}.csv'.format(date, sport, date, sport))
    livegames = allgamesdf[(allgamesdf['islive'] == 'x')]
    # First half only (timer below 46 minutes).
    livefhgames = livegames[(livegames['timer'].astype(int) < 46)]
    print('\n{} live FH games found'.format(len(livefhgames)))
    # Late first half: minute 31-45.
    latefhgames = livefhgames[(livefhgames['timer'].astype(int) > 30)]
    if not latefhgames.empty:
        print('###################################')
        print('# Late FHG #')
        print('###################################')
        msg_list_title = '# FHG from List Alert #\n'
        msg_late00_title = '# FH still 0-0 Alert #\n'
        for index, row in latefhgames.iterrows():
            summary = 'Reached {}\' in {} - {} at {}-{}'.format(
                row['timer'], row['team_home'], row['team_away'],
                row['hgoals'], row['ggoals']) + '\n' + \
                'Check stats at https://777score.com/{}\n'.format(
                    row['777url'])
            try:
                s = Stats(row['match_id']).flatjsonfile
                stats = summarytext(s)
            except Exception:  # stats file may not exist yet
                stats = 'No stats file found'
            print('{}\' {}-{} {} - {} '.format(row['timer'], row['hgoals'],
                                               row['ggoals'],
                                               row['team_home'],
                                               row['team_away']))
            if row['fhg'] == 'x' and row['hgoals'] == '0' and row[
                    'ggoals'] == '0':
                msg_list = msg_list_title + summary + stats
                #TwitterMsg().senddm(userids=userids_list, msg=(row['match_id'], "FHG_list", msg_list))
                Telegram().send_message(chat_id=telegram_chat_id,
                                        msg=(row['match_id'], "FHG_list",
                                             msg_list))
            elif row['hgoals'] == '0' and row['ggoals'] == '0':
                # Message is built but both send channels are disabled.
                msg_late00 = msg_late00_title + summary + stats
                #TwitterMsg().senddm(userids=userids, msg=(row['match_id'], "FHG", msg_late00))
                #Telegram().send_message(chat_id=telegram_chat_id, msg=(row['match_id'], "FHG", msg_late00))
    else:
        print("no FHG candidates \n")
def index(season=2015):
    """Render the championship-belt page for one season.

    FIX: the unavailable-season branch used a Python-2 ``print``
    statement while the rest of the file uses ``print()``; also removed
    the unused local ``data``.
    """
    season = int(season)
    champ = season - 1
    # render current season
    if not (champ in availableSeasons):
        # season not available: bounce back to the default page
        print('no data for ' + str(season))
        return redirect(url_for('index'))
    parser = HReferenceParser('app/static/data/' + str(season) + '.csv')
    games = parser.getGames()
    schedule = Schedule(games)
    gameLog = GameLog()
    stats = Stats()
    beltHolder = availableSeasons[champ]
    defendingChamp = beltHolder
    beltGame = None
    # Walk the season game by game, tracking who holds the belt.
    for g in schedule.games:
        beltGame = stats.analyzeGame(g, beltHolder)
        if beltGame:
            gameLog.addGame(beltGame)
            beltHolder = beltGame.getBeltHolderAfterGame()
    upcomingChampGame = schedule.getUpcomingChampionshipGame(beltHolder)
    upcomingChampGameIfHomeTeamWins = None
    upcomingChampGameIfAwayTeamWins = None
    if upcomingChampGame:
        # Pre-compute both possible follow-up championship games.
        upcomingChampGameIfHomeTeamWins = schedule.getUpcomingChampionshipGame(
            upcomingChampGame.getHomeTeam(), upcomingChampGame.getAwayTeam())
        upcomingChampGameIfAwayTeamWins = schedule.getUpcomingChampionshipGame(
            upcomingChampGame.getAwayTeam(), upcomingChampGame.getHomeTeam())
    return render_template(
        'index.html',
        games=gameLog.getGames(),
        availableSeasons=availableSeasons,
        defendingChamp=defendingChamp,
        beltHolder=beltHolder,
        isOngoingSeason=season,
        stats=stats,
        gameLog=gameLog,
        upcomingChampGame=upcomingChampGame,
        upcomingChampGameIfHomeTeamWins=upcomingChampGameIfHomeTeamWins,
        upcomingChampGameIfAwayTeamWins=upcomingChampGameIfAwayTeamWins,
        sortedStats=stats.getSortedStats(),
        currentSeason=season,
    )
def simulation():
    """Run ``self.nop`` simulated lottery purchases against one draw and
    stream the running results to the console; exits the program on a
    jackpot (ticket equals the draw).

    NOTE(review): uses ``self`` without taking it as a parameter — this
    only works if the function is nested inside a method; confirm.

    FIX: ``while i is not 0`` compared an int by identity, which is
    implementation-defined (and a SyntaxWarning on modern CPython);
    replaced with ``!=``.
    """
    i = self.nop
    self.text[7] = self.word
    gms = ''
    gtd = self.draw()
    get = Stats(self.ticket(), gtd, self.matcher, self.winnings,
                self.length, self.differ)
    while i != 0:
        gtt = self.ticket()
        get.ticket = gtt
        gtp = get.the_purchase()
        gtc = get.the_cost(self.cost)
        gtr = get.the_return()
        gto = get.the_profit()
        gte = get.the_percentage()
        gtn = get.the_number()
        gtd = get.the_draw()
        gtt = get.the_ticket()
        if self.length[1] > 0:
            gms = get.the_special()
        gmb = get.the_match()
        data = [self.name, gtp, gtc, gtr, gto, gte, gtn, gms, gmb, gtd, gtt]
        to = '\n%s%s\n\n%s%s\n%s%s\n\n%s%s\n%s%s\n%s%s\n\n%s%s\n%s%s\n\n%s%s\n\n%s%s\n%s%s\n\n'
        console.update(
            console.window, 0,
            to % (self.text[0], data[0], self.text[1], data[1],
                  self.text[2], data[2], self.text[3], data[3],
                  self.text[4], data[4], self.text[5], data[5],
                  self.text[6], data[6], self.text[7], data[7],
                  self.text[8], data[8], self.text[9], data[9],
                  self.text[10], data[10]))
        if gtd == gtt:
            # Jackpot: stop the whole program.
            exit()
        i -= 1
def printTotalStats(h, out):
    """Print the averaged totals for one simulation setting and append a
    single whitespace-separated result line to *out*, then reset the
    collector for the next run."""
    stats = Stats.Stats()
    print("Total stats for simulation with h = {0}:".format(h))
    avg_requests = stats.clientRequestsTotalAvg()
    print(" * Average client requests: {0:.1f}".format(avg_requests))
    # Collector reports microseconds; show milliseconds.
    avg_wait_ms = stats.clientTimeWaitedTotalAvg() / 1000
    print(" * Average client time waited: {0:.1f} ms".format(avg_wait_ms))
    avg_reconstructed = stats.reconstructedFilesAvg()
    print(" * Average reconstructed files: {0:.0f}".format(avg_reconstructed))
    avg_lost = stats.lostSwitchMessagesAvg()
    print(" * Average lost switch messages: {0:.0f}".format(avg_lost))
    out.write("{0} {1} {2} {3} {4}\n".format(h, avg_requests, avg_wait_ms,
                                             avg_reconstructed, avg_lost))
    stats.reset()
    sys.stdout.flush()
def __init__(self):
    """Set up the bot world: mutator, rendering, map and HUD captions."""
    # self.field = []
    self.bots = []
    self.mutator = Mutator()
    self.graphics = BotGraphics()
    self.generateMap()
    # HUD caption slots (even indices hold the labels).
    for slot, caption in ((0, "ITERATION"), (2, "MOVE"), (4, "LAST BEST")):
        self.graphics.setStat(slot, caption)
    self.stat = Stats()
def create_group(stocks=None, weights=None, group_name="Custom",
                 start="1989-12-31", end="2049-12-31"):
    """Build a weighted portfolio from the module-level ``stk`` return
    data and wrap it in a Stats object.

    FIX: the defaults were mutable lists (shared across calls); a None
    sentinel preserves the same effective defaults safely.

    :param stocks: tickers to include (default ["UAL", "JBLU"])
    :param weights: starting weight per ticker (default [50, 50])
    :returns: Stats over the per-stock and portfolio daily returns
    """
    if stocks is None:
        stocks = ["UAL", "JBLU"]
    if weights is None:
        weights = [50, 50]
    g = stk.r[stocks]
    g = g[start:end]
    # calculate group return: compound each leg from its starting weight,
    # then sum the legs into the portfolio value.
    cum_g = (1 + g).cumprod() * weights
    # Seed the day before the window with the starting weights.
    cum_g.loc[g.index.min() - timedelta(days=1)] = weights
    cum_g.insert(g.shape[1], "Portfolio", cum_g.sum(axis=1))
    cum_g.sort_index(inplace=True)
    port_r = cum_g["Portfolio"].pct_change()
    g.insert(g.shape[1], "Portfolio", port_r)
    return Stats(g, "daily", group_name)
def processFolder(datasetPath, binaryRootPath):
    """Call your executable for all sequences in all categories."""
    stats = Stats(datasetPath)  # STATS
    for category in getDirectories(datasetPath):
        stats.addCategories(category)  # STATS
        categoryPath = os.path.join(datasetPath, category)
        for video in getDirectories(categoryPath):
            videoPath = os.path.join(categoryPath, video)
            if not isValidVideoFolder(videoPath):
                continue
            # Compare the binary results against the groundtruth frames.
            binaryPath = os.path.join(binaryRootPath, category, video)
            cm = compareWithGroungtruth(videoPath, binaryPath)
            stats.update(category, video, cm)
        stats.writeCategoryResult(category)
    stats.writeOverallResults()
def runSimulations(machineCount, frameCount):
    """Run G.simCount simulations for every extra-request count h and
    return the average total memory used.

    FIX: ``machineCount / 2`` is true division on Python 3, making
    ``G.requestCount`` a float and breaking ``range()`` below; ``//``
    keeps the original integer semantics. The output file is now a
    context manager so it is closed even if a simulation raises.
    """
    G.machineCount = machineCount
    G.requestCount = machineCount // 2  # integer division (feeds range())
    G.frameCount = frameCount
    outputName = "output-mc{0}-fc{1}.txt".format(G.machineCount,
                                                 G.frameCount)
    with open(outputName, "w") as output:
        for h in range(G.machineCount - G.requestCount):
            G.extraRequestCount = h
            print("### Simulating with h = {0}".format(h))
            for i in range(G.simCount):
                runSimulation(i, h)
            printTotalStats(h, output)
            # Flush per setting so partial results survive a crash.
            output.flush()
    stats = Stats.Stats()
    return stats.usedMemoryTotalAvg()
def __init__(self, root: object, size=50):
    """Create a simulation with the given field size.

    :root: tkinter.Tk graphics object
    """
    self.size = size
    self.step = 0
    self._sapiens = []  # all sapiens in the simulation
    self._field = Field(size)
    self._view = SimulatorView(root, size)
    # Display colour for each health state.
    self._colours = {
        State.SUSCEPTIBLE: 'slate blue',
        State.INFECTED: 'red',
        State.RECOVERED: 'spring green',
        State.DEAD: 'black',
    }
    self._stats = Stats()
    self.reset()
def post(self):
    """Register a new user account.

    Rejects duplicate email or username; otherwise creates the user,
    seeds an empty stats document, and returns the serialized user.
    """
    parser = RequestParser(bundle_errors=True)
    # Required fields, registered in the original order.
    for field in ("password", "username", "email"):
        parser.add_argument(field, required=True)
    args = parser.parse_args()
    # Uniqueness checks: email first, then username.
    if len(User.objects(email=args["email"])) > 0:
        return {"error": "That email is taken"}
    if len(User.objects(username=args["username"])) > 0:
        return {"error": "That username is taken"}
    new_user = User(email=args["email"],
                    username=args["username"],
                    password=User.set_password(args["password"]))
    new_user.save()
    user = User.return_helper(new_user)
    # Every account starts with an empty stats document.
    stat = Stats(userId=user['id'])
    stat.save()
    user["stats"] = Stats.return_helper(stat)
    return {"user": user}
def processFolder(datasetPath, binaryRootPath):
    """Call your executable for all sequences in all categories.

    Also writes a per-video F-score report to <datasetPath>/fscore.txt.

    FIX: the report path was built with a hard-coded '\\\\' separator
    (Windows-only); os.path.join is portable. The file is now a context
    manager so it is closed even if a comparison raises.
    """
    stats = Stats(datasetPath)  # STATS
    with open(os.path.join(datasetPath, 'fscore.txt'), 'w') as f:
        for category in getDirectories(datasetPath):
            stats.addCategories(category)  # STATS
            categoryPath = os.path.join(datasetPath, category)
            for video in getDirectories(categoryPath):
                videoPath = os.path.join(categoryPath, video)
                binaryPath = os.path.join(binaryRootPath, category, video)
                if isValidVideoFolder(videoPath):
                    confusionMatrix = compareWithGroungtruth(
                        videoPath, binaryPath)
                    stats.update(category, video, confusionMatrix)
                    # F1 = 2TP / (2TP + FP + FN); alpha guards against a
                    # zero denominator. Assumes confusionMatrix starts
                    # (TP, FP, FN, ...) — matches the formula; confirm.
                    alpha = 0.000001
                    fscore = (2.0 * confusionMatrix[0]) / (
                        ((2.0 * confusionMatrix[0]) + confusionMatrix[1] +
                         confusionMatrix[2]) + alpha)
                    f.write(video + ' : ' + str(fscore) + '\n')
                else:
                    print('Invalid folder : ' + videoPath)
            stats.writeCategoryResult(category)
        stats.writeOverallResults()
def __init__(self, bs_params, slice_params, client_params):
    """Initialise the RL environment: clients, base stations, the shared
    stat collector, and the gym action/observation spaces."""
    self.n_clients = 100
    self.clients = self.clients_init(self.n_clients, client_params)
    self.base_stations = self.base_stations_init(bs_params, slice_params)
    # Simulation area bounds.
    self.x_range = (0, 1000)
    self.y_range = (0, 1000)
    self.stats = Stats(self.base_stations, None,
                       (self.x_range, self.y_range))
    # Every client reports into the shared collector.
    for client in self.clients:
        client.stat_collector = self.stats
    # Seven discrete actions: a no-op plus six reallocation triples.
    self.action_list = [(0, 0, 0), (0.05, -0.025, -0.025),
                        (-0.05, +0.025, +0.025), (-0.025, +0.05, -0.025),
                        (+0.025, -0.05, +0.025), (-0.025, -0.025, +0.05),
                        (+0.025, +0.025, -0.05)]
    self.action_space = spaces.Discrete(7)
    self.state = None
    # Observations are 9-dimensional, bounded in [-1, 1].
    bound = np.ones(shape=(9, ))
    self.observation_space = spaces.Box(-bound, bound, dtype=np.float32)
    self.steps_beyond_done = None
    self.user_threshold = 0.7
    self.seed()
def initUI(self):
    """Build the window: score box on top, then home and away panels."""
    self.scoreBox = QGroupBox()
    self.homeTeam = QGroupBox()
    self.awayTeam = QGroupBox()
    self.stats = Stats()
    self.scoreLayout = QGridLayout()
    self.homeLayout = QGridLayout()
    self.awayLayout = QGridLayout()
    self.createGridLayout()
    # Attach each grid layout to its group box.
    for box, layout in ((self.scoreBox, self.scoreLayout),
                        (self.homeTeam, self.homeLayout),
                        (self.awayTeam, self.awayLayout)):
        box.setLayout(layout)
    # Stack the three panels vertically.
    windowLayout = QVBoxLayout()
    for panel in (self.scoreBox, self.homeTeam, self.awayTeam):
        windowLayout.addWidget(panel)
    self.setLayout(windowLayout)
    self.show()
class Analyzer(object):
    """Singleton driving the whole analysis: loads files, resolves
    modules, and records bindings, references and problems.

    FIXES: ``addPaths`` called a bare ``addPath(s)`` (NameError);
    ``addPythonPath`` iterated an undefined ``segments``;
    ``isInLoadPath`` called a bare ``getLoadPath()``; ``__str__``
    concatenated str with int (TypeError). The dead commented-out
    ``init_vars`` string block was removed.
    """
    # global static state shared by the singleton instance
    allBindings = ArrayList()
    references = LinkedHashMap()
    semanticErrors = HashMap()
    parseErrors = HashMap()
    cwd = None
    nCalled = 0
    multilineFunType = False
    path = ArrayList()
    uncalled = HashSet()
    callStack = HashSet()
    importStack = HashSet()
    astCache = AstCache()
    cacheDir = str()
    failedToParse = HashSet()
    stats = Stats()
    builtins = None  # Builtins()
    logger = logging.getLogger(__name__)
    loadingProgress = None
    projectDir = str()

    # singleton pattern
    _instance = None

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(Analyzer, cls).__new__(cls, *args, **kwargs)
        return cls._instance

    def __init__(self):
        self.moduleTable = Scope(None, Scope.ScopeType.GLOBAL)
        self.loadedFiles = ArrayList()
        self.globaltable = Scope(None, Scope.ScopeType.GLOBAL)
        import time
        millis = int(round(time.time() * 1000))
        self.stats.putInt("startTime", millis)
        self.logger = logging.getLogger(__name__)
        # Expose the singleton as Analyzer.self for static-style access.
        if not hasattr(Analyzer, 'self'):
            setattr(Analyzer, 'self', self)
        self.builtins = Builtins()
        self.builtins.init()
        #self.addPythonPath()
        self.createCacheDir()
        self.getAstCache()

    # main entry to the analyzer
    def analyze(self, path):
        self.projectDir = _.unifyPath(path)
        self.loadFileRecursive(self.projectDir)

    def setCWD(self, cd):
        if cd is not None:
            self.cwd = cd
        #if cd is not None:
        #    self.cwd = _.unifyPath(cd)

    def addPaths(self, p):
        for s in p:
            # FIX: was a bare addPath(s) (NameError).
            self.addPath(s)

    def addPath(self, p):
        self.path.add(_.unifyPath(p))

    def setPath(self, path):
        self.path = ArrayList(len(path))
        self.addPaths(path)

    def addPythonPath(self):
        path = System.getenv("PYTHONPATH")
        if path is not None:
            # FIX: 'segments' was undefined; split PYTHONPATH on the
            # platform path separator.
            segments = path.split(os.pathsep)
            for p in segments:
                self.addPath(p)

    def getLoadPath(self):
        loadPath = ArrayList()
        if self.cwd is not None:
            loadPath.append(self.cwd)
        if self.projectDir is not None and os.path.isdir(self.projectDir):
            loadPath.append(self.projectDir)
        loadPath += self.path
        return loadPath

    def inStack(self, f):
        return f in self.callStack

    def pushStack(self, f):
        self.callStack.add(f)

    def popStack(self, f):
        self.callStack.remove(f)

    def inImportStack(self, f):
        return f in self.importStack

    def pushImportStack(self, f):
        self.importStack.add(f)

    def popImportStack(self, f):
        self.importStack.remove(f)

    def getAllBindings(self):
        return self.allBindings

    def getCachedModule(self, file_):
        # Look up a previously resolved module type, unwrapping unions.
        t = self.moduleTable.lookupType(_.moduleQname(file_))
        if t is None:
            return None
        elif t.isUnionType():
            for tt in t.asUnionType().getTypes():
                if tt.isModuleType():
                    return tt
            return None
        elif t.isModuleType():
            return t
        else:
            return None

    def getDiagnosticsForFile(self, file_):
        errs = self.semanticErrors.get(file_)
        if errs is not None:
            return errs
        return ArrayList()

    #@overloaded
    def putRef(self, node, bs):
        # Record that `node` references each binding in `bs`.
        if not hasattr(bs, '__len__'):
            bs = [bs]
        if not (isinstance(node, (Url, ))):
            ref = Ref(node)
            bindings = self.references.get(ref)
            if bindings is None:
                bindings = ArrayList()
                self.references[ref] = bindings
            for b in bs:
                if not b in bindings:
                    bindings.append(b)
                b.addRef(ref)

    def getReferences(self):
        """ generated source for method getReferences """
        return self.references

    def putProblem(self, *args):
        # Dispatch on arity: (loc, msg) or (file, begin, end, msg).
        if len(args) == 2:
            return self.putProblem0(*args)
        else:
            return self.putProblem1(*args)

    #@overloaded
    def putProblem0(self, loc, msg):
        """ generated source for method putProblem """
        file_ = loc.getFile()
        if file_ is not None:
            self.addFileErr(file_, loc.start, loc.end, msg)

    # for situations without a Node
    #@putProblem.register(object, str, int, int, str)
    def putProblem1(self, file_, begin, end, msg):
        """ generated source for method putProblem_0 """
        if file_ is not None:
            self.addFileErr(file_, begin, end, msg)

    def addFileErr(self, file_, begin, end, msg):
        """ generated source for method addFileErr """
        d = Diagnostic(file_, Diagnostic.Category.ERROR, begin, end, msg)
        self.getFileErrs(file_, self.semanticErrors).append(d)

    def getParseErrs(self, file_):
        return self.getFileErrs(file_, self.parseErrors)

    def getFileErrs(self, file_, _map):
        msgs = _map.get(file_)
        if msgs is None:
            msgs = ArrayList()
            _map[file_] = msgs
        return msgs

    def loadFile(self, path):
        _.msg("loading: " + path)
        path = _.unifyPath(path)
        if not os.path.isfile(path):
            self.finer("\nfile not not found or cannot be read: " + path)
            return None
        module_ = self.getCachedModule(path)
        if module_ is not None:
            self.finer("\nusing cached module " + path + " [succeeded]")
            return module_
        # detect circular import
        if Analyzer.self.inImportStack(path):
            return None
        # set new CWD and save the old one on stack
        oldcwd = self.cwd
        self.setCWD(os.path.join(*path.split(os.sep)[:-1]))
        Analyzer.self.pushImportStack(path)
        mod = self.parseAndResolve(path)
        # restore old CWD
        self.setCWD(oldcwd)
        return mod

    def isInLoadPath(self, dir):
        # FIX: was a bare getLoadPath() (NameError).
        for s in self.getLoadPath():
            if File(s) == dir:
                return True
        return False

    def parseAndResolve(self, file_):
        self.finer("Analyzing: " + file_)
        self.loadingProgress.tick()
        try:
            ast = self.getAstForFile(file_)
            if ast is None:
                self.failedToParse.add(file_)
                return None
            else:
                self.finer("resolving: " + file_)
                mod = ast.resolve(self.moduleTable)
                assert isinstance(mod, ModuleType)
                self.finer("[success]")
                self.loadedFiles.append(file_)
                return mod
        except MemoryError as e:
            # Drop the AST cache and collect to recover some memory.
            if self.astCache is not None:
                self.astCache.clear()
            import gc
            gc.collect()
            return None

    def createCacheDir(self):
        """ generated source for method createCacheDir """
        self.cacheDir = _.makePathString(_.getSystemTempDir(), "pysonar2",
                                         "ast_cache")
        f = self.cacheDir
        _.msg("AST cache is at: " + self.cacheDir)
        if not os.path.exists(f):
            os.makedirs(f)
        if not os.path.exists(f):
            _.die("Failed to create tmp directory: " + self.cacheDir +
                  ".Please check permissions")

    def getAstCache(self):
        """ generated source for method getAstCache """
        if self.astCache is None:
            self.astCache = AstCache.get()
        return self.astCache.INSTANCE

    # Returns the syntax tree for {@code file}.
    def getAstForFile(self, file_):
        return self.getAstCache().getAST(file_)

    def getBuiltinModule(self, qname):
        return self.builtins.get(qname)

    def makeQname(self, names):
        # Join name segments into a dotted qualified name.
        if _.isEmpty(names):
            return ""
        ret = ""
        i = 0
        while i < len(names) - 1:
            ret += names[i].id + "."
            i += 1
        ret += names[len(names) - 1].id
        return ret

    # Find the path that contains modname. Used to find the starting
    # point of locating a qname.
    # @param headName first module name segment
    def locateModule(self, headName):
        loadPath = self.getLoadPath()
        for p in loadPath:
            startDir = os.sep.join([p, headName])
            initFile = _.joinPath(startDir, "__init__.py")
            if os.path.exists(initFile):
                return p
            startFile = startDir + ".py"
            if os.path.exists(startFile):
                return p
        return None

    def loadModule(self, name, scope):
        if _.isEmpty(name):
            return None
        from Binding import Binding
        qname = self.makeQname(name)
        mt = self.getBuiltinModule(qname)
        if mt is not None:
            scope.insert(
                name[0].id,
                Url(Builtins.LIBRARY_URL + mt.getTable().getPath() + ".html"),
                mt, Binding.Kind.SCOPE)
            return mt
        # If there's more than one segment load the packages first
        prev = None
        startPath = self.locateModule(name[0].id)
        if startPath is None:
            return None
        path = startPath
        for i, n in enumerate(name):
            path = os.sep.join([path, name[i].id])
            initFile = _.joinPath(path, "__init__.py")
            if os.path.isfile(initFile):
                mod = self.loadFile(initFile)
                if mod is None:
                    return None
                if prev is not None:
                    prev.getTable().insert(name[i].id, name[i], mod,
                                           Binding.Kind.VARIABLE)
                else:
                    scope.insert(name[i].id, name[i], mod,
                                 Binding.Kind.VARIABLE)
                prev = mod
            elif i == len(name) - 1:
                startFile = path + ".py"
                if os.path.isfile(startFile):
                    mod = self.loadFile(startFile)
                    if mod is None:
                        return None
                    if prev is not None:
                        prev.getTable().insert(name[i].id, name[i], mod,
                                               Binding.Kind.VARIABLE)
                    else:
                        scope.insert(name[i].id, name[i], mod,
                                     Binding.Kind.VARIABLE)
                    prev = mod
                else:
                    return None
        return prev

    # Load all Python source files recursively if the given fullname is
    # a directory; otherwise just load a file. Looks at file extension
    # to determine whether to load a given file.
    def loadFileRecursive(self, fullname):
        count = self.countFileRecursive(fullname)
        if self.loadingProgress is None:
            self.loadingProgress = FancyProgress(count, 50)
        if os.path.isdir(fullname):
            for root, dirs, files in os.walk(fullname):
                for f in files:
                    self.loadFileRecursive(root + os.sep + f)
                for d in dirs:
                    self.loadFileRecursive(root + os.sep + d)
        else:
            if fullname.endswith(".py"):
                self.loadFile(fullname)

    # count number of .py files
    def countFileRecursive(self, fullname):
        sum = 0
        if os.path.isdir(fullname):
            for root, dirs, files in os.walk(fullname):
                for f in files:
                    sum += self.countFileRecursive(root + os.sep + f)
                for d in dirs:
                    sum += self.countFileRecursive(root + os.sep + d)
        else:
            if fullname.endswith(".py"):
                sum += 1
        return sum

    def finish(self):
        """ generated source for method finish """
        # progress.end();
        _.msg("\nFinished loading files. " + str(self.nCalled) +
              " functions were called.")
        _.msg("Analyzing uncalled functions")
        self.applyUncalled()
        # mark unused variables
        for b in self.allBindings:
            if not b.getType().isClassType() and not b.getType().isFuncType(
            ) and not b.getType().isModuleType() and _.isEmpty(b.getRefs()):
                Analyzer.self.putProblem(
                    b.getNode(), "Unused variable: " + b.__class__.__name__)
        for ent in self.references.items():
            self.convertCallToNew(ent[0], ent[1])
        _.msg(self.getAnalysisSummary())

    def close(self):
        """ generated source for method close """
        self.astCache.close()

    def convertCallToNew(self, ref, bindings):
        """ generated source for method convertCallToNew """
        if ref.isRef():
            return
        if len(bindings) == 0:
            return
        nb = bindings[0]
        t = nb.getType()
        if t.isUnionType():
            t = t.asUnionType().firstUseful()
            if t is None:
                return
        if not t.isUnknownType() and not t.isFuncType():
            ref.markAsNew()

    def addUncalled(self, cl):
        """ generated source for method addUncalled """
        if not cl.func.called:
            self.uncalled.add(cl)

    def removeUncalled(self, f):
        if f in self.uncalled:
            self.uncalled.remove(f)

    def applyUncalled(self):
        """ generated source for method applyUncalled """
        progress = FancyProgress(len(self.uncalled), 50)
        while not _.isEmpty(self.uncalled):
            uncalledDup = list(self.uncalled)
            for cl in uncalledDup:
                progress.tick()
                Call.apply(cl, None, None, None, None, None)

    def getAnalysisSummary(self):
        sb = []
        sb.append("\n" + _.banner("analysis summary"))
        duration = _.formatTime(_.millis() - self.stats.getInt("startTime"))
        sb.append("\n- total time: " + duration)
        sb.append("\n- modules loaded: " + str(len(self.loadedFiles)))
        sb.append("\n- semantic problems: " + str(len(self.semanticErrors)))
        sb.append("\n- failed to parse: " + str(len(self.failedToParse)))
        # calculate number of defs, refs, xrefs
        nDef = 0
        nXRef = 0
        for b in self.getAllBindings():
            nDef += 1
            nXRef += len(b.getRefs())
        sb.append("\n- number of definitions: " + str(nDef))
        sb.append("\n- number of cross references: " + str(nXRef))
        sb.append("\n- number of references: " +
                  str(len(self.getReferences())))
        resolved = self.stats.getInt("resolved")
        unresolved = self.stats.getInt("unresolved")
        sb.append("\n- resolved names: " + str(resolved))
        sb.append("\n- unresolved names: " + str(unresolved))
        sb.append("\n- name resolve rate: " +
                  _.percent(resolved, resolved + unresolved))
        sb.append("\n" + _.getGCStats())
        return ''.join(sb)

    def getLoadedFiles(self):
        files = ArrayList()
        for file_ in self.loadedFiles:
            if file_.endswith(".py"):
                files.append(file_)
        return files

    def registerBinding(self, b):
        self.allBindings.append(b)

    def log(self, level, msg):
        _.msg(msg)

    def severe(self, msg):
        self.log(Level.SEVERE, msg)

    def warn(self, msg):
        self.log(Level.WARNING, msg)

    def info(self, msg):
        self.log(Level.INFO, msg)

    def fine(self, msg):
        self.log(Level.FINE, msg)

    def finer(self, msg):
        self.log('*a log level*', msg)

    def __str__(self):
        # FIX: the original concatenated str with int, raising TypeError.
        return ("<Analyzer:locs=" + str(len(self.references)) + ":probs=" +
                str(len(self.semanticErrors)) + ":files=" +
                str(len(self.loadedFiles)) + ">")
## Main Script env = gym.make('everglades-v0') players = {} names = {} # Inputs for the dqn agent are: # state size, actions, player #, seed players[0] = agent0_class() names[0] = agent0_class.__name__ players[1] = agent1_class(env.num_actions_per_turn, 1) names[1] = agent1_class.__name__ # init stat class stats = Stats() # load model # uncomment if you're starting from the begining players[0].load_model() for game in range(numberOfGames): # get inital state current_state = env.reset( players=players, config_dir = config_dir, map_file = map_file, unit_file = unit_file, output_dir = output_dir, pnames = names,
def diffStats(name1, vals1, name2, vals2):
    """Build a labelled Stats accumulator for the element-wise
    difference ``vals2 - vals1`` (label reads "name2 - name1")."""
    from Stats import Stats
    delta = vals2 - vals1
    tag = name2 + ' - ' + name1
    return Stats().label(tag).addm(delta)
# Reshape the benchmark frame: keep the selected columns, normalize the
# column/index names, then pivot each return series out to a
# date x manager table. (bmk, stk, roe and bmk_cols come from earlier
# in the file — not visible in this chunk.)
bmk = bmk[bmk_cols]
bmk.columns = ["manager", "ewfl", "cwfl", "sp500"]
bmk.index.names = ["date"]
ewfl = pd.pivot_table(bmk, values="ewfl", index="date", columns="manager")
cwfl = pd.pivot_table(bmk, values="cwfl", index="date", columns="manager")
sp500 = pd.pivot_table(bmk, values="sp500", index="date", columns="manager")

# Stock prices: pivot to date x stock and convert to simple returns.
stk_cols = ["Security", "Close"]
stk = stk[stk_cols]
stk.columns = ["stock", "price"]
stk.index.names = ["date"]
stk = pd.pivot_table(stk, values="price", index="date", columns="stock")
stk = stk.pct_change()

# *************************** TRADITIONAL GROUPS ******************************
# Wrap each pivoted table in a Stats object (daily frequency) and collect
# them into a Series so fillgaps can be applied uniformly below.
roe = Stats(roe, "daily", "ROE")
ewfl = Stats(ewfl, "daily", "EWFL")
cwfl = Stats(cwfl, "daily", "CWFL")
sp500 = Stats(sp500, "daily", "SP500")
stk = Stats(stk, "daily", "STOCKS")
TGP = pd.Series([roe, ewfl, cwfl, sp500, stk])


def fillgaps(data):
    # For each column of data.r, zero-fill NaNs only between the first
    # and last valid observation (leading/trailing NaNs stay NaN), then
    # rebuild data.r and data.r0 from the trimmed columns.
    # NOTE(review): mutates the Stats object in place via .apply side
    # effects — presumably data.r0 is the "raw" copy; confirm in Stats.
    s = []
    data.r.apply(lambda col: s.append(col.loc[col.first_valid_index():
                                              col.last_valid_index()].fillna(0)))
    data.r = pd.DataFrame(s).transpose()
    data.r0 = pd.DataFrame(s).transpose()


TGP.apply(lambda x: fillgaps(x))
import pandas as pd
import numpy as np
import db_scripts as dbs
from Stats import Stats
from bokeh.io import curdoc
from bokeh.plotting import figure, output_file, show, ColumnDataSource
from bokeh.layouts import row, column, gridplot, widgetbox
from bokeh.models import HoverTool
from bokeh.models.widgets import Panel, Tabs, Slider

# Empty Stats container, used only as a handle for its gbm() simulator.
test = Stats(pd.DataFrame({}), "daily", "TEST")

# Simulation parameters: a single path with drift mu and volatility sigma.
trials = 1
mu = 0
sigma = 40
mcd = test.gbm(n_scenarios=trials, mu=mu, sigma=sigma)


def callback2(attr, old, new):
    # Re-simulate with the current slider values and push the fresh path
    # into the plot's data source. (attr/old/new are required by Bokeh's
    # on_change signature but unused; the locals mu/sigma deliberately
    # shadow the module-level initial values.)
    mu = slider2.value
    sigma = slider3.value
    new_data = test.gbm(n_scenarios=trials, mu=mu, sigma=sigma)
    source.data = {"x": new_data.index, "y": new_data.values}


# Both sliders share one callback: each change re-reads both values.
slider2 = Slider(title="mu", start=-20, end=20, step=1, value=mu)
slider3 = Slider(title="sigma", start=0, end=50, step=1, value=sigma)
slider2.on_change("value", callback2)
slider3.on_change("value", callback2)

source = ColumnDataSource({"x": mcd.index, "y": mcd.values})
# (this figure(...) call continues beyond the visible chunk)
plot2 = figure(title="GBM Stock Price Paths", plot_width=1000,
    # (body of a loop over q started before this chunk — the three lines
    # below assume `q` from that loop; indentation reconstructed, TODO
    # confirm against the full notebook)
    # Draw k_ distinct time indices and give each probability 1/k_,
    # forming one bootstrap Flexible-Probabilities profile.
    r = choice(arange(t_), size=k_, replace=False)
    prob_bs[q, r] = 1 / k_
    ens[0, q] = EffectiveScenarios(prob_bs[[q], :], typ)
# -

# ## HFP histogram and statistics

# +
q_ = prob_bs.shape[0]
# Sturges-like bin count driven by the number of scenarios.
option = namedtuple('option', 'n_bins')
option.n_bins = 10 * log(epsi.shape[1])
p, x = {}, {}
# One HFP histogram per bootstrap probability profile.
for q in range(q_):
    p[q], x[q] = HistogramFP(epsi, prob_bs[[q], :], option)

# Flexible-probabilities summary statistics of the invariants.
mu, sdev, VaR, CVaR, skewness, kurtosis = Stats(epsi, prob_bs)
# -

# ## Figure

date_tick = arange(99, t_ - 1, 680)
date_dt = array([date_mtop(i) for i in date])
myFmt = mdates.DateFormatter('%d-%b-%Y')

# ## q=0
# Plot the first two bootstrap profiles.
for q in range(2):
    figure()
    # FP profile
    # (the plotting code continues beyond the visible chunk)
    plt.subplot2grid((3, 3), (0, 0), colspan=2)
            # (closes a call whose head is before this chunk; Python 2
            # syntax throughout — indentation reconstructed, TODO confirm)
            )
            try:
                os.makedirs(self.STATS_DIR)
                log.info("Directory %s do not exists, created it." % self.STATS_DIR)
            except OSError, (errno, msg):
                # errno 17 == EEXIST: the directory already existing is
                # fine; anything else is fatal for the sender.
                if errno != 17:
                    log.error("%s" % msg)
                    self.shared.sender.event.set()  #killing:exception
            self.cycle.countCycle += 1
            # CREATE STATS REFERENCES
            self.shared.stats = Stats(self.shared.destList, self.cycle.nDests,
                                      self.startCycle, self.cycle.countCycle,
                                      self.shared.parameters)
            self.shared.receiver.receive.set_stats(self.shared.stats, r_file)
        except Exception, e:
            # matches a try: opened before this chunk
            log.error("%s " % e)
            self.shared.sender.event.set()  #killing:exception open socket
        log.info("State 01 finished")
        # Returning the next state's name drives the state machine.
        return "ping"

    #------------------------------------------------------------------------
    # ++ STATE 02 -> PING
    #------------------------------------------------------------------------
    # (method body continues beyond the visible chunk)
    def state_ping(self):
# Mixture weights from the two effective-scenario counts scaled by their
# diversity terms, normalized to sum to one. (ens1/ens2, d1/d2, p1/p2,
# epsi, typ and date come from earlier in the file.)
weights = r_[ens1 * d1, ens2 * d2]
weights = weights / npsum(weights)

# ## Optimal set of Flex. Probs as log-mixture
# Geometric (log-space) mixture of the two probability profiles,
# renormalized, plus its effective number of scenarios.
opt_p = exp(weights[0] * log(p1) + weights[1] * log(p2))
opt_p = opt_p / npsum(opt_p)
ens_optp = EffectiveScenarios(opt_p, typ)

# ## S&P returns histogram and statistics
# Sturges-like bin count driven by the number of scenarios.
option = namedtuple('option', 'n_bins')
option.n_bins = 10 * log(epsi.shape[1])
p_eps, x_eps = HistogramFP(epsi, opt_p, option)
# Flexible-probabilities summary statistics under the optimal profile.
m, Sdev, VaR, CVaR, Sk, K = Stats(epsi, opt_p)

# ## Generate the figure

# +
date_tick = arange(99, len(date), 380)
date_dt = array([date_mtop(i) for i in date])
myFmt = mdates.DateFormatter('%d-%b-%y')

figure(figsize=(16, 10))

# VIX
ax = plt.subplot2grid((2, 5), (0, 0), colspan=2)
ph0 = ax.plot(date_dt, p1[0], lw=0.5, color='gray')
xticks([])
yticks([])
# (plotting continues beyond the visible chunk)
ax2 = ax.twinx()
pred = torch.round(output.squeeze()) # Rounds the output to 0/1 test_pred_vector.append(pred.item()) correct_tensor = pred.eq(labels.float().view_as(pred)) correct = np.squeeze(correct_tensor.cpu().numpy()) num_correct += np.sum(correct) print("Test loss: {:.3f}".format(np.mean(test_losses))) test_acc = num_correct / len(test_loader.dataset) print("Test accuracy: {:.3f}%".format(test_acc * 100)) test_acc = num_correct / len(test_loader.dataset) print("Test accuracy: {:.3f}%".format(test_acc * 100)) test_labels_vector = np.array(test_labels_vector) test_pred_vector = np.array(test_pred_vector) stats = Stats(test_labels_vector, test_pred_vector) all_stats.append(stats) cf_matrix = stats.confusion_matrix() cf_matrices.append(cf_matrix) accuracy = stats.accuracy() accuracies.append(accuracy) recall = stats.recall() recalls.append(recall) precision = stats.precision() precisions.append(precision) f1 = stats.f_measure()