def get_run_id(self, category, user):
    """Retrieve a run's ID in the given category name by the given user.

    The category is fuzzy-matched to a known category/level, and the
    user name is fuzzy-matched against the leaderboard's player names.

    :param category: category (or level) name to look up.
    :param user: player name to fuzzy-match.
    :return: the matching run's ID string, or None when the
        speedrun.com API request fails.
    """
    try:
        cat_name = self.fuzzy_match_categorylevel(category)
        leaderboard = self.get_categorylevel_leaderboard(cat_name)
        runs = leaderboard["runs"]
        players = leaderboard["players"]["data"]
        names_by_run = {}
        # Pair each run with the player at the same leaderboard index.
        # (enumerate replaces the original runs.index(r) lookup, which
        # was O(n^2) and returned the wrong index for duplicate rows.)
        for idx, r in enumerate(runs):
            run_data = Run(r["run"])  # run
            try:
                user_name = players[idx]["names"]["international"]
            except KeyError:
                # Guest entries have no "international" name; skip them.
                continue
            names_by_run[run_data.get_run_id()] = user_name
        result = process.extractOne(user, names_by_run.values())
        log.info(
            f"Found run by and matched user {user} to {result[0]} with {result[1]}% confidence"
        )  # thanks speedrun.com
        # Reverse lookup: first run ID whose player name matched.
        return next(run_id for run_id, name in names_by_run.items()
                    if name == result[0])
    except srcomapi.exceptions.APIRequestException:
        return None
def config(self):
    """Prepare the LXC container: id-map our user and mount the libcgroup tree."""
    # Map our UID and GID to the same UID/GID in the container.
    idmap_cmd = ('printf "uid {} 1000\ngid {} 1000" | sudo lxc config set '
                 '{} raw.idmap -'.format(os.getuid(), os.getgid(), self.name))
    Run.run(idmap_cmd, shell_bool=True)

    # Add the libcgroup root directory (where we did the build) into the
    # container.  To appease gcov, mount the libcgroup source at the same
    # path as we built it.  This could someday be replaced by
    # GCOV_PREFIX_STRIP, but that was harder to set up initially.
    device_cmd = ['sudo'] if self.privileged else []
    device_cmd += [
        'lxc', 'config', 'device', 'add', self.name,
        'libcgsrc',  # arbitrary name of device
        'disk',
        'source={}'.format(self.libcg_dir),
        'path={}'.format(self.libcg_dir),
    ]
    return Run.run(device_cmd)
def delete(config, controller_list, cgname, recursive=False):
    """Invoke cgdelete to remove cgname (optionally recursively)."""
    if isinstance(controller_list, str):
        controller_list = [controller_list]

    # sudo is only needed when running directly on the host.
    cmd = [] if config.args.container else ['sudo']
    cmd.append(Cgroup.build_cmd_path('cgdelete'))
    if recursive:
        cmd.append('-r')

    if controller_list:
        target = '{}:{}'.format(','.join(controller_list), cgname)
    else:
        target = ':{}'.format(cgname)
    cmd += ['-g', target]

    if config.args.container:
        config.container.run(cmd)
    else:
        Run.run(cmd)
def import_data(self, store):
    """Replay the buffered SI-card run lines into the given store.

    Each buffered line holds a course code, a card number, and a flat
    list of alternating (station, time) punch entries starting at BASE.
    """
    for line in self.__runs:
        course_code = line[SIRunImporter.COURSE]
        cardnr = line[SIRunImporter.CARDNR]
        self._punches = []
        # Punches are stored as alternating station/time pairs.
        i = SIRunImporter.BASE
        while i < len(line):
            self.add_punch(int(line[i]), line[i + 1])
            i += 2
        run = Run(
            int(cardnr),
            course=course_code,
            punches=self._punches,
            card_start_time=self.__datetime(line[SIRunImporter.START]),
            card_finish_time=self.__datetime(line[SIRunImporter.FINISH]),
            check_time=self.__datetime(line[SIRunImporter.CHECK]),
            clear_time=self.__datetime(line[SIRunImporter.CLEAR]),
            readout_time=self.__datetime(line[SIRunImporter.READOUT]),
            store=store)
        run.complete = True
        store.add(run)
        if self._replay is True:
            # Replay mode: commit and pace the import to simulate a
            # live readout arriving every self._interval seconds.
            # NOTE(review): the original collapsed source is ambiguous
            # about whether commit/sleep belong inside this branch --
            # confirm against upstream history.
            print "Commiting Run %s for SI-Card %s" % (course_code, cardnr)
            store.commit()
            sleep(self._interval)
def __init__(self, cotrans = False):
    """Initialize the driver with a fresh Run configuration.

    :param cotrans: pass `True` for cotrans-style experiments.
    """
    self.run = Run()
    self.run.cotrans = cotrans
    self.__processor = None   # pair processor, created lazily
    self._targets = None      # parsed targets, populated via addTargets/loadTargets
    self._masks = None        # Mask objects, built on first processor access
    self._profiles = None     # computed reactivity profiles
    self.force_mask = None    # optional mask override for processing
def _read(self, socket_id, uuid, path, read_args):
    """Stream a read of the given run's path back over socket_id."""
    run = self._get_run(uuid)
    if run is None:
        Run.read_run_missing(self._bundle_service, self, socket_id)
        return
    # Reads may take a long time, so perform them off-thread.
    reader = threading.Thread(target=Run.read,
                              args=(run, socket_id, path, read_args))
    reader.start()
def _netcat(self, socket_id, uuid, port, message):
    """Send message to a port of the given run, replying over socket_id."""
    run = self._worker_state_manager._get_run(uuid)
    if run is None:
        Run.read_run_missing(self._bundle_service, self, socket_id)
        return
    # The exchange may take a long time, so perform it off-thread.
    worker = threading.Thread(target=Run.netcat,
                              args=(run, socket_id, port, message))
    worker.start()
def _run(self, bundle, resources):
    """Launch a Run for bundle and register it with the state manager on success."""
    bundle_path = (bundle['location'] if self.shared_file_system
                   else self._dependency_manager.get_run_path(bundle['uuid']))
    run = Run(self._bundle_service, self._docker, self._image_manager,
              self, bundle, bundle_path, resources)
    if run.run():
        self._worker_state_manager.add_run(bundle['uuid'], run)
def setup(config):
    """Create the parent cgroup owned by the invoking user and primary group."""
    current_user = Run.run('whoami', shell_bool=True)
    primary_group = Run.run('groups', shell_bool=True).split(' ')[0]
    CgroupCli.create(config, controller_list=CONTROLLER, cgname=PARENT_NAME,
                     user_name=current_user, group_name=primary_group)
def create(config, controller_list, cgname, user_name=None, group_name=None,
           dperm=None, fperm=None, tperm=None, tasks_user_name=None,
           tasks_group_name=None, cghelp=False):
    """Invoke cgcreate with the requested ownership and permission options."""
    if isinstance(controller_list, str):
        controller_list = [controller_list]

    # sudo is only required on the host, not inside a container.
    cmd = [] if config.args.container else ['sudo']
    cmd.append(Cgroup.build_cmd_path('cgcreate'))

    if user_name is not None and group_name is not None:
        cmd += ['-a', '{}:{}'.format(user_name, group_name)]
    if dperm is not None:
        cmd += ['-d', dperm]
    if fperm is not None:
        cmd += ['-f', fperm]
    if tperm is not None:
        cmd += ['-s', tperm]
    if tasks_user_name is not None and tasks_group_name is not None:
        cmd += ['-t', '{}:{}'.format(tasks_user_name, tasks_group_name)]
    if cghelp:
        cmd.append('-h')

    if controller_list:
        target = '{}:{}'.format(','.join(controller_list), cgname)
    else:
        target = ':{}'.format(cgname)
    cmd += ['-g', target]

    if config.args.container:
        config.container.run(cmd)
    else:
        Run.run(cmd)
def teardown(config):
    """Kill any processes remaining in the cgroup, then delete it."""
    pids = Cgroup.get_pids_in_cgroup(config, CGNAME, CONTROLLER)
    if pids:
        for pid in pids.splitlines():
            if config.args.container:
                config.container.run(['kill', '-9', pid])
            else:
                Run.run(['sudo', 'kill', '-9', pid])
    Cgroup.delete(config, CONTROLLER, CGNAME)
def create_run(self, match):
    """Begin tracking a new Run populated from a run_begin regex match."""
    self.run = Run()
    self.run.map_file = match.group(4)
    self.run.give_recommendation = match.group(5) == 'True'
    self.run.with_justification = match.group(6) == 'True'
    self.run.start_real_time = float(match.group(2))
def _run(self, bundle, resources):
    """Launch a Run for bundle and track it under its UUID if it starts."""
    bundle_path = (bundle['location'] if self.shared_file_system
                   else self._dependency_manager.get_run_path(bundle['uuid']))
    run = Run(self._bundle_service, self._docker, self._image_manager,
              self, bundle, bundle_path, resources)
    if run.run():
        # Registration must be serialized with other run-table access.
        with self._runs_lock:
            self._runs[bundle['uuid']] = run
def setup(config):
    """Write the test config file and create the test user and group."""
    with open(CONFIG_FILE_NAME, 'w') as cfg:
        cfg.write(CONFIG_FILE)

    if config.args.container:
        config.container.run(['useradd', '-p', 'Test019#1', USER])
        config.container.run(['groupadd', GROUP])
    else:
        Run.run(['sudo', 'useradd', '-p', 'Test019#1', USER])
        Run.run(['sudo', 'groupadd', GROUP])
def test_run_update_available(self):
    """
    Test ability to run PyUpdaterWxDemo and confirm that an update
    is available.
    """
    # Sanity-check the module version before launching the app.
    self.assertEqual(__version__, CURRENT_VERSION)
    from run import Run
    self.app = Run(argv=['RunTester', '--debug'],
                   clientConfig=self.clientConfig)
    # Under test the app is not frozen, so only availability is reported.
    self.assertEqual(self.app.statusBar.GetStatusText(),
                     "Update available but application is not frozen.")
    sys.stderr.write("We can only restart a frozen app!\n")
def __init__(self, debug=False):
    '''
    initializes the advanced class for awscm

    :param debug: enables debug information to be printed
    '''
    self.debug = debug
    self.run = Run(debug=debug)  # command runner, shares the debug flag
    self.result_dir = {}         # collected results (presumably keyed per command -- confirm)
    self.final_result = ''       # accumulated output string
def start_run(self, name, fitness, configuration):
    """Create and start a Run; restart it instead when recovering from a crash."""
    run = Run(name or 'Default Run', fitness, configuration, self)
    # Special case when we are restarting a previously crashed run.
    if self.restart:
        run.restart()
    else:
        run.run()
    return run
def test_run_no_updates_available(self):
    """
    Test ability to run PyUpdaterWxDemo and confirm that no updates
    are available.
    """
    from run import Run
    # '--version' should print the version and exit.
    self.assertRaises(SystemExit, Run, ['RunTester', '--version'])
    self.assertEqual(__version__, CURRENT_VERSION)
    self.app = Run(argv=['RunTester', '--debug'],
                   clientConfig=self.clientConfig)
    self.assertEqual(self.app.statusBar.GetStatusText(),
                     "No available updates were found.")
def get_result():
    """Flask handler: run line recognition on the 'input' query argument.

    :return: the recognizer's response, or a JSON error payload when the
        argument is missing or processing fails.
    """
    if not request.args or 'input' not in request.args:
        # Return an args error if no input is specified.
        return json.dumps({'result': 'args error'})
    try:
        # Renamed from `input` to avoid shadowing the builtin.
        payload = request.args['input']
        main = Run(type='line')
        res = main.online(payload)
        return res
    except Exception:
        # Narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        return json.dumps({'result': 'error'})
def open(self, path):
    """Open the job file at path and reset the UI for a new run."""
    self.state = 1
    self.run = Run()  # get a new Run()
    self.showMsg(path + " open")
    self.run.reset(path)
    # Decode the filesystem path as GBK for display (Python 2 API).
    self.text_path.setText(unicode(path, "gbk", "ignore"))
    self.updateTable()
    self.pcr_yorn = True
    # Sync the order window with the newly loaded job.
    self.window_order.init(self.run.job)
    self.window_order.ui.combobox_job.currentIndex = self.run.job
    self.tabwidget_run.setEnabled(True)
    return
def policy_value_fn(self, board):
    """Return (move, probability) pairs for all legal moves plus a board value."""
    run_game = Run()
    legal_positions = [move.uci() for move in board.generate_legal_moves()]
    # The network expects a contiguous (N, 18, 8, 8) state tensor.
    current_state = np.ascontiguousarray(
        run_game.current_state().reshape(-1, 18, 8, 8))
    act_probs, value = self.policy_value(current_state)
    act_probs = zip(legal_positions, act_probs[0])
    return act_probs, value
def __run_cgrules(config):
    """Launch the cgrulesengd daemon (host only; containers are unsupported)."""
    if config.args.container:
        raise ValueError('Running cgrules within a container is not '
                         'supported')
    cmd = [
        'sudo',
        Cgroup.build_daemon_path('cgrulesengd'),
        '-d',
        '-n',
    ]
    Run.run(cmd, shell_bool=True)
def join_children(self, config):
    """Join child threads and best-effort kill any tracked child PIDs."""
    for child in self.children:
        child.join(1)
    for pid in self.children_pids:
        try:
            if config.args.container:
                config.container.run(['kill', str(pid)])
            else:
                Run.run(['kill', str(pid)])
        except (RunError, ContainerError):
            # Belt-and-suspenders: ignore failures from the kill command.
            pass
def teardown(config):
    """Remove the config file, the test user/group, and the cgroup."""
    os.remove(CONFIG_FILE_NAME)
    try:
        if config.args.container:
            config.container.run(['userdel', USER])
            config.container.run(['groupdel', GROUP])
        else:
            Run.run(['sudo', 'userdel', USER])
            Run.run(['sudo', 'groupdel', GROUP])
    except (ContainerError, RunError, ValueError):
        # Best effort -- the user or group may not exist.
        pass
    Cgroup.delete(config, CONTROLLER, CGNAME)
def __infinite_loop(config, sleep_time=1):
    """Start a perl one-liner that loops forever, sleeping sleep_time per pass."""
    cmd = ['/usr/bin/perl', '-e',
           "'while(1){{sleep({})}};'".format(sleep_time)]
    try:
        if config.args.container:
            config.container.run(cmd, shell_bool=True)
        else:
            Run.run(cmd, shell_bool=True)
    except RunError:
        # The loop is expected to be killed externally; suppress the
        # resulting RunError.
        pass
def join_children(self, config):
    """Stop the cgrulesengd daemon, then join child threads."""
    # todo - make this smarter. this is ugly, but it works for now
    kill_cmd = ['sudo', 'killall', 'cgrulesengd']
    try:
        if config.args.container:
            config.container.run(kill_cmd, shell_bool=True)
        else:
            Run.run(kill_cmd, shell_bool=True)
    except (RunError, ContainerError):
        # Belt-and-suspenders: ignore failures from the kill command.
        pass
    for child in self.children:
        child.join(1)
def _parseRun(self, data):
    """Build a Run object from an ordered status-row sequence."""
    run = Run()
    # Fields arrive in a fixed order; unpack them in one shot.
    (run.id, run.submitTime, run.problemId, run.status,
     run.time, run.language, run.memory, run.userId) = data[:8]
    return run
def main():
    """Query the MongoDB 'runs' collection and plot BER vs. time for one run."""
    # Setup client to access database
    client = pymongo.MongoClient('mongodb://tjames-pc:27017/')
    # Select database
    db = client['test_db']
    # Select collection within database
    runs = db['runs']
    print("Number of documents: %d" % runs.estimated_document_count())
    # Run a find command to select a subset of documents
    query = {
        # 'daughter_card': 'DC0',
        # 'rx': 4
        'notes': {
            '$exists': True
        }
    }
    count = runs.count_documents(query)
    print("Number of documents in query: %d" % count)
    if count:
        cursor = runs.find(query)
        # Only the first matching document is plotted.
        run = Run.fromDict(cursor[0])
        df = run.getDataFrame()
        sns.scatterplot(x='time', y='BER', data=df)
        plt.yscale('log')
        plt.ylim(1e-8, 1)
        plt.show()
def lscgroup(config, cghelp=False, controller=None, path=None):
    """Run lscgroup, optionally restricted to controller:path pairs."""
    cmd = [Cgroup.build_cmd_path('lscgroup')]
    if cghelp:
        cmd.append('-h')
    if controller is not None and path is not None:
        if isinstance(controller, list):
            # Pair each controller with the path at the same index.
            for idx, ctrl in enumerate(controller):
                cmd += ['-g', '{}:{}'.format(ctrl, path[idx])]
        elif isinstance(controller, str):
            cmd += ['-g', '{}:{}'.format(controller, path)]
        else:
            raise ValueError('Unsupported controller value')
    if config.args.container:
        return config.container.run(cmd)
    return Run.run(cmd)
def cgexec(config, controller, cgname, cmdline, sticky=False, cghelp=False):
    """cgexec equivalent method"""
    # sudo is only needed when running directly on the host.
    cmd = [] if config.args.container else ['sudo']
    cmd.append(Cgroup.build_cmd_path('cgexec'))
    cmd += ['-g', '{}:{}'.format(controller, cgname)]
    if sticky:
        cmd.append('--sticky')
    # cmdline may be a single string or an argv-style list.
    if isinstance(cmdline, str):
        cmd.append(cmdline)
    elif isinstance(cmdline, list):
        cmd.extend(cmdline)
    if cghelp:
        cmd.append('-h')
    if config.args.container:
        return config.container.run(cmd, shell_bool=True)
    return Run.run(cmd, shell_bool=True)
def lssubsys(config, ls_all=False, cghelp=False, hierarchies=False,
             mount_points=False, all_mount_points=False):
    """Run lssubsys with the requested option flags."""
    cmd = [Cgroup.build_cmd_path('lssubsys')]
    # Map each boolean option to its command-line flag.
    for enabled, flag in ((ls_all, '-a'), (cghelp, '-h'),
                          (hierarchies, '-i'), (mount_points, '-m'),
                          (all_mount_points, '-M')):
        if enabled:
            cmd.append(flag)
    if config.args.container:
        return config.container.run(cmd)
    return Run.run(cmd)
def clear(config, empty=False, cghelp=False, load_file=None, load_dir=None):
    """Run cgclear with optional -e/-h/-l/-L arguments."""
    cmd = [] if config.args.container else ['sudo']
    cmd.append(Cgroup.build_cmd_path('cgclear'))
    if empty:
        cmd.append('-e')
    if cghelp:
        cmd.append('-h')
    if load_file is not None:
        cmd += ['-l', load_file]
    if load_dir is not None:
        cmd += ['-L', load_dir]
    if config.args.container:
        return config.container.run(cmd)
    return Run.run(cmd)
def __init__(self, *args, **kwargs): Tk.__init__(self, *args, **kwargs) # Force fullscreen self.geometry("{0}x{1}+0+0".format(self.winfo_screenwidth() - 3, self.winfo_screenheight() - 3)) self.columnconfigure(0, weight=5) self.columnconfigure(1, weight=5) self.columnconfigure(2, weight=3) self.rowconfigure(0, weight=2) self.rowconfigure(1, weight=2) self.rowconfigure(2, weight=2) # Set the title self.title("AI-Prog Module 1 - Kristian Ekle & Thor Håkon") # Reference to menu self.menubar = Menu(self) # Reference to run class self.run = Run(self) # The sizof the squares in the map self.sqsize = 15 # Defining the colors for the icons self.icon = { ".": "#CCFFFF", "#": "#4c4c4c", "S": "#FF6600", "G": "#33CC33", " ": "#CCFFFF", ".S": "#B8E6E6", ".G": "#99EB99" } # How long we should wait between each redraw self.delay = 20 # Radiobuttons self.v1 = None self.v2 = None self.v3 = None self.v4 = None # Config self.algoritm = 0 self.astar_stat = StringVar() self.bfs_stat = StringVar() self.dfs_stat = StringVar() # Populate the menues self.populate_menu()
def parseUserRuns(user, fromRunID = -1):
    """Scrape the TJU Online Judge status page for a user's runs.

    :param user: judge user name to query.
    :param fromRunID: when >= 0, request the status page starting at
        this run ID; otherwise fetch the first page.
    :return: list of Run objects parsed from the HTML table.
    """
    if fromRunID >= 0:
        url = "http://acm.tju.edu.cn/toj/status.php?user=%s&start=%d" % (user, fromRunID)
    else:
        url = "http://acm.tju.edu.cn/toj/status.php?user=%s" % user
    html = _readURL(url)
    lines = html.split('\n')
    runs = list()
    # Each run occupies 5 consecutive HTML lines; the table body starts
    # at line 18.  NOTE(review): the numeric offsets below are tied to
    # the site's exact markup -- fragile screen scraping, verify against
    # the live page before changing.
    for linePos in xrange(18, len(lines)-5, 5):
        token = lines[linePos]
        if token.find("</table") == 0:
            break
        run = Run()
        run.id = int(_getContent(token, 31))
        token = lines[linePos+1]
        run.submitTime = _getContent(token, 4)
        if token.find("Compilation Error") >= 0:
            run.status = "Compilation Error"
        else:
            run.status = _getContent(token, len(token)-17, 0)
        token = lines[linePos+2]
        run.problemId = int(_getContent(token, 27))
        run.language = _getContent(token, 44)
        token = lines[linePos+3]
        timeStr = _getContent(token, len(token)-13)
        # Convert "M:SS.CC"-style text to milliseconds (presumably
        # minutes:seconds.centiseconds -- confirm against the site).
        run.time = int(timeStr[0]) * 60000 + int(timeStr[2:4]) * 1000 + int(timeStr[5:7]) * 10
        token = lines[linePos+4]
        kb = _getContent(token, 4)
        # Strip the trailing unit character (e.g. 'K') before int().
        run.memory = int(kb[ : len(kb)-1])
        run.userId = _getContent(token, len(token)-15, 0)
        runs.append(run)
    return runs
class LogParser(object):
    """Parses VistaLights JSON event logs into Session/Run/Action objects.

    One compiled regex per event type; process_line() tries each in turn.
    NOTE(review): the log lines are JSON but are matched with regexes tied
    to exact key order and spacing -- confirm against the game build.
    """

    def __init__(self):
        self.session = None        # Session currently being assembled
        self.next_session_id = 1   # monotonically increasing session id
        # session_begin: groups = client_time (1), bg condition (2).
        self.session_begin_re = re.compile(
            '{"type":"session_begin", "data":{"game_id":"VistaLights", '
            '"player_id":"[0-9a-fA-F\-]+", "session_id":"[0-9a-fA-F\-]+", '
            '"build_id":"", "version":"2.0", "condition":"", '
            '"client_time":"([0-9\.]+)", '
            '"details":{"bg":"(CE|STEM|non-STEM)"}}}')
        self.session_end_re = re.compile(
            '{"type":"session_end", "data":{"session_id":"[0-9a-fA-F/-]+", ' +
            '"run_count":"[0-9]+", "client_time":"[0-9\.]+", "details":{}}}')
        # run_begin: groups = run_seqno (1), client_time (2),
        # current_time (3), map (4), give_recommendation (5),
        # with_justification (6).
        self.run_begin_re = re.compile(
            '{"type":"run_begin", "data":{"session_id":"[a-fA-F0-9\-]+", ' +
            '"run_id":"[a-fA-F0-9\-]+", "run_seqno":"([0-9]+)", ' +
            '"client_time":"([0-9\.]+)", "details":{"current_time":' +
            '"([0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+:[0-9]+(?: [AP]M)?)", ' +
            '"map":"(houston_game_[0-9])", ' +
            '"give_recommendation":"(True|False)", ' +
            '"with_justification":"(True|False)"}}}')
        # run_end: groups = client_time (1), current_time (2),
        # budget (3), welfare (4), dock_utilization (5).
        self.run_end_re = re.compile(
            '{"type":"run_end", "data":{"run_id":"[0-9a-fA-f\-]+", ' +
            '"action_count":"[0-9]+", "client_time":"([0-9\.]+)", ' +
            '"details":{"current_time":' +
            '"([0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+:[0-9]+(?: [AP]M)?)", ' +
            '"budget":"([0-9\.\-E]+)", "welfare":"([0-9\.\-E]+)", ' +
            '"dock_utilization":"([0-9\.\-E]+)"}}}')
        self.key_stroke_action_re = re.compile(
            '{"type":"action", "data":{"run_id":"[0-9a-fA-F\-]+", '
            '"action_seqno":"[0-9]+", "type":"[0-9]+", '
            '"client_time":"([0-9\.E\-]+)", '
            '"details":{"current_time":"([0-9]+/[0-9]+/[0-9]+ '
            '[0-9]+:[0-9]+:[0-9]+(?: [AP]M)?)", "keystroke":"([A-Za-z0-9]+)", '
            '"mouse_x":"([0-9\.E\-]+)", "mouse_y":"([0-9\.E\-]+)"}}}')
        self.recommendation_action_re = re.compile(
            '{"type":"action", "data":{"run_id":"[0-9a-fA-F\-]+", '
            '"action_seqno":"[0-9]+", "type":"[0-9]+", '
            '"client_time":"([0-9\.E\-]+)", '
            '"details":{"current_time":"([0-9]+/[0-9]+/[0-9]+ '
            '[0-9]+:[0-9]+:[0-9]+(?: [AP]M)?)", "isAccepted":"(True|False)", '
            '"ship":"([0-9]+)", "priority":"([0-9]+)"*')
        # phase: the money/welfare pair (groups 5/6) is optional.
        self.phase_action_re = re.compile(
            '{"type":"action", "data":{"run_id":"[0-9a-fA-f\-]+", '
            '"action_seqno":"[0-9]+", "type":"[0-9]+", '
            '"client_time":"([0-9\.E\-]+)", '
            '"details":{"current_time":'
            '"([0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+:[0-9]+(?: [AP]M)?)", '
            '"phase":"(Decision|Simulation)"'
            '(, "money":"([0-9\.E\-]+)", '
            '"welfare":"([0-9\.E\-]+)")?'
            '}}}')
        self.priority_action_re = re.compile(
            '{"type":"action", "data":{"run_id":"[0-9a-fA-F\-]+", '
            '"action_seqno":"[0-9]+", "type":"[0-9]+", '
            '"client_time":"([0-9E\-\.]+)", "details":{"current_time":'
            '"([0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+:[0-9]+(?: [AP]M)?)", '
            '"ship_id":"([0-9]+)", "new_priority":"([0-9]+)"}}}')
        self.cleaning_action_re = re.compile(
            '{"type":"action", "data":{"run_id":"[0-9a-fA-f\-]+", '
            '"action_seqno":"[0-9]+", "type":"[0-9]+", "client_time":'
            '"([0-9\.\-E]+)", "details":{"current_time":'
            '"([0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+:[0-9]+(?: [AP]M)?)", '
            '"solution":"([a-zA-Z]+)"}}}')

    def parse(self, file_name):
        """Parse raw_data/<file_name> and return the completed Session."""
        self.file_name = file_name
        file = open("raw_data/" + file_name)
        lines = file.readlines()
        for line in lines:
            self.process_line(line)
        session = self.session
        self.session = None
        return session

    def process_line(self, line):
        """Dispatch one log line to the first event handler that matches."""
        if self.try_session_begin(line):
            return
        if self.try_run_begin(line):
            return
        if self.try_key_stroke_action(line):
            return
        if self.try_recommendation_action(line):
            return
        if self.try_phase_action(line):
            return
        if self.try_cleaning_action(line):
            return
        if self.try_priority_action(line):
            return
        if self.try_run_end(line):
            return
        if self.try_session_end(line):
            return

    def try_session_begin(self, line):
        match = self.session_begin_re.match(line)
        if match != None:
            self.create_session(match)
            return True
        return False

    def create_session(self, match):
        """Start a new Session named after the log file (extension stripped)."""
        self.session = Session()
        self.session.name = self.file_name[0:-5]
        self.session.bg_tag = match.group(2)
        self.session.id = self.next_session_id
        self.next_session_id += 1
        print("Session created: " + self.file_name)

    def try_session_end(self, line):
        if self.session_end_re.match(line) != None:
            self.end_session()
            return True
        return False

    def end_session(self):
        print("Session ended")

    def try_run_begin(self, line):
        match = self.run_begin_re.match(line)
        if match != None:
            self.create_run(match)
            return True
        return False

    def create_run(self, match):
        """Begin tracking a new Run populated from a run_begin match."""
        self.run = Run()
        map_name = match.group(4)
        self.run.map_file = map_name
        self.run.give_recommendation = (match.group(5) == 'True')
        self.run.with_justification = (match.group(6) == 'True')
        self.run.start_real_time = float(match.group(2))

    def try_run_end(self, line):
        match = self.run_end_re.match(line)
        if match != None:
            self.end_run(match)
            return True
        return False

    def end_run(self, match):
        """Finalize the current run; tutorial runs are not added to the session."""
        self.run.money = float(match.group(3))
        self.run.welfare = float(match.group(4))
        self.run.dock_utilization = float(match.group(5))
        self.run.end_real_time = float(match.group(1))
        if self.run.map_file == 'houston_game_0':
            # houston_game_0 is the tutorial map.
            self.run.is_tutorial = True
        else:
            self.session.add_run(self.run)
        self.run = None

    def try_key_stroke_action(self, line):
        match = self.key_stroke_action_re.match(line)
        if match != None:
            self.create_key_stroke_action(match)
            return True
        return False

    def create_key_stroke_action(self, match):
        action = KeyStrokeAction()
        action.real_time = float(match.group(1))
        action.virtual_time = self.parse_time(match.group(2))
        action.key = match.group(3)
        action.mouse_x = float(match.group(4))
        action.mouse_y = float(match.group(5))
        self.run.add_action(action)
        self.run.num_key_action += 1

    def parse_time(self, time_string):
        """Parse an in-game timestamp; 12-hour form carries an AM/PM marker."""
        if 'M' in time_string:
            return time.strptime(time_string, "%m/%d/%Y %I:%M:%S %p")
        else:
            return time.strptime(time_string, "%m/%d/%Y %H:%M:%S")

    def try_recommendation_action(self, line):
        match = self.recommendation_action_re.match(line)
        if match != None:
            self.create_recommendation_action(match)
            return True
        return False

    def create_recommendation_action(self, match):
        action = RecommendationAction()
        action.real_time = float(match.group(1))
        action.virtual_time = self.parse_time(match.group(2))
        action.accepted = (match.group(3) == 'True')
        action.ship_id = int(match.group(4))
        action.priority = int(match.group(5))
        self.run.add_action(action)
        self.run.total_recommendation += 1
        if action.accepted:
            self.run.accepted_recommendation += 1

    def try_phase_action(self, line):
        match = self.phase_action_re.match(line)
        if match != None:
            self.create_phase_action(match)
            return True
        return False

    def create_phase_action(self, match):
        action = PhaseAction()
        action.real_time = float(match.group(1))
        action.virtual_time = self.parse_time(match.group(2))
        action.phase = match.group(3)
        # money/welfare are optional in the phase event.
        if match.group(5):
            action.money = float(match.group(5))
            action.welfare = float(match.group(6))
        self.run.add_action(action)

    def try_priority_action(self, line):
        match = self.priority_action_re.match(line)
        if match != None:
            self.create_priority_action(match)
            return True
        return False

    def create_priority_action(self, match):
        action = PriorityAction()
        action.real_time = float(match.group(1))
        action.virtual_time = self.parse_time(match.group(2))
        action.ship_id = int(match.group(3))
        action.priority = int(match.group(4))
        self.run.add_action(action)

    def try_cleaning_action(self, line):
        match = self.cleaning_action_re.match(line)
        if match != None:
            self.create_cleaning_action(match)
            return True
        return False

    def create_cleaning_action(self, match):
        action = CleaningAction()
        action.real_time = float(match.group(1))
        action.virtual_time = self.parse_time(match.group(2))
        action.solution = match.group(3)
        self.run.add_action(action)
class Spats(object):
    """The main SPATS driver.

    :param cotrans: pass `True` for cotrans-style experiments.
    """

    def __init__(self, cotrans = False):
        self.run = Run()
        self.run.cotrans = cotrans
        self.__processor = None   # pair processor, created lazily
        self._targets = None      # Targets container
        self._masks = None        # Mask objects, built on first use
        self._profiles = None     # computed reactivity profiles
        self.force_mask = None    # optional mask override

    @property
    def _processor(self):
        # Lazily instantiate the processor class selected by the run config.
        if not self.__processor:
            self._addMasks()
            self.__processor = self.run._get_processor_class()(self.run, self._targets, self._masks)
        return self.__processor

    @property
    def targets(self):
        return self._targets

    def _addMasks(self):
        if not self._masks:
            self._masks = [ Mask(m) for m in self.run.masks ]

    def reset_processor(self):
        """Drop the cached processor so the next access rebuilds it."""
        self.__processor = None

    def addTargets(self, *target_paths):
        """Used to add one or more target files for processing. Can be called
           multiple times to add more targets. Inputs are expected to be in
           FASTA format with one or more targets per path. Must be called
           before processing.

           :param args: one or more filesystem paths to target files.
        """
        targets = []
        for path in target_paths:
            for name, seq in fasta_parse(path):
                targets.append((name, seq, 1 + len(targets)))
        self._addTargets(targets)

    def addTarget(self, name, seq, rowid = -1):
        self._addTargets( [ (name, seq, rowid if rowid != -1 else 0 if self._targets is None else len(self._targets.targets)) ] )

    def loadTargets(self, pair_db):
        self._addTargets(pair_db.targets())

    def _addTargets(self, target_list):
        # RNA input is normalized to DNA alphabet (U -> T).
        targets = self._targets or Targets()
        for name, seq, rowid in target_list:
            targets.addTarget(name, seq.upper().replace('U', 'T'), rowid)
        if not targets.targets:
            raise Exception("didn't get any targets!")
        targets.minimum_match_length = self.run.minimum_target_match_length
        self._targets = targets

    def process_pair(self, pair):
        """Used to process a single :class:`.pair.Pair`. Typically only used
           for debugging or analysis of specific cases.

           :param pair: a :class:`.pair.Pair` to process.
        """
        if not self.run.pair_length:
            self.run.pair_length = len(pair.r1.original_seq)
        _set_debug(self.run)
        _debug("> processing " + pair.identifier + "\n --> " + pair.r1.original_seq + " , " + pair.r2.original_seq)
        _debug(" rc(R1): {}".format(pair.r1.reverse_complement))
        try:
            self._processor.process_pair(pair)
            if pair.failure:
                _debug(pair.failure)
            else:
                assert(pair.has_site)
                _debug(" ===> KEPT {}-{}".format(pair.site, pair.end))
        except:
            print("**** Error processing pair: {} / {}".format(pair.r1.original_seq, pair.r2.original_seq))
            raise

    def _memory_db_from_pairs(self, data_r1_path, data_r2_path):
        """Parse R1/R2 FASTQ files into an in-memory PairDB."""
        if not self.run.quiet:
            print("Parsing pair data...")
        start = time.time()
        db = PairDB()
        total_pairs = db.parse(data_r1_path, data_r2_path)
        report = "Parsed {} records in {:.1f}s".format(total_pairs, time.time() - start)
        # unclear if this helps, but potentially useful for further analysis
        # later, and doesn't cost *too* much; if it's holding things up, nuke it
        db.index()
        report += ", indexed in {:.1f}s".format(time.time() - start)
        if self.run.quiet:
            _debug(report)
        else:
            print(report)
        return db

    def process_pair_data(self, data_r1_path, data_r2_path, force_mask = None):
        """Used to read and process a pair of FASTQ data files.

           Note that this parses the pair data into an in-memory SQLite
           database, which on most modern systems will be fine except for the
           largest input sets. If you hit memory issues, create a disk-based
           SQLite DB via :class:`.db.PairDB` and then use
           :meth:`.process_pair_db`. Note that this may be called multiple
           times to process more than one set of data files before computing
           profiles.

           :param data_r1_path: path to R1 fragments
           :param data_r2_path: path to matching R2 fragments.
        """
        self.run._force_mask = force_mask
        self.run.apply_config_restrictions()
        self.force_mask = Mask(force_mask) if force_mask else None
        use_quality = self.run._parse_quality
        if not self.run.skip_database and not use_quality:
            self.process_pair_db(self._memory_db_from_pairs(data_r1_path, data_r2_path))
        else:
            # Stream directly from the FASTQ files instead of a database.
            with FastFastqParser(data_r1_path, data_r2_path, use_quality) as parser:
                if not self.run.pair_length:
                    self.run.pair_length = parser.pair_length()
                self._process_pair_iter(parser.iterator(batch_size = 131072))

    def process_pair_db(self, pair_db, batch_size = 65536):
        """Processes pair data provided by a :class:`.db.PairDB`.

           Note that this may be called multiple times to process more than
           one set of inputs before computing profiles.

           :param pair_db: a :class:`.db.PairDB` of pairs to process.
        """
        self.run.apply_config_restrictions()
        if not self.run.pair_length:
            self.run.pair_length = pair_db.pair_length()
        if not self._targets:
            self.loadTargets(pair_db)
        result_set_id = pair_db.add_result_set(self.run.result_set_name or "default", self.run.resume_processing) if self.run.writeback_results else None
        if self._processor.uses_tags:
            self._processor.setup_tags(pair_db)
        # Select the pair iterator according to the run mode.
        if self.run.resume_processing:
            db_iter = pair_db.unique_pairs_with_counts_and_no_results(result_set_id, batch_size = batch_size)
        elif self.run._redo_tag:
            db_iter = pair_db.unique_pairs_with_counts_and_tag(self.run.cmp_set_id, self.run._redo_tag, batch_size = batch_size)
        elif self.run._process_all_pairs:
            if not self.run.quiet:
                print("Using all_pairs...")
            db_iter = pair_db.all_pairs(batch_size = batch_size)
        else:
            db_iter = pair_db.unique_pairs_with_counts(batch_size = batch_size)
        self._process_pair_iter(db_iter, pair_db, result_set_id)

    #@profile
    def _process_pair_iter(self, pair_iter, pair_db = None, result_set_id = None):
        """Drive a SpatsWorker over the pair iterator and report counts."""
        _set_debug(self.run)
        start = time.time()
        # force the processor to load and do whatever indexing/etc is required
        self._processor
        worker = SpatsWorker(self.run, self._processor, pair_db,
                             result_set_id)
        if not self.run.quiet:
            print("Processing pairs{}...".format(" with mask={}".format(self.force_mask.chars) if self.force_mask else ""))
        worker.force_mask = self.force_mask
        worker.run(pair_iter)
        if not self.run.quiet:
            self._report_counts(time.time() - start)

    def _report_counts(self, delta = None):
        """Print a human-readable summary of counters, masks and targets."""
        counters = self.counters
        total = counters.total_pairs
        print("Successfully processed {} properly paired fragments:".format(counters.registered_pairs))
        warn_keys = [ "multiple_R1_match", ]
        skip_keypat = re.compile("(prefix_)|(mut_count_)|(indel_len)")
        skipped_some = False
        countinfo = counters.counts_dict()
        for key in sorted(countinfo.keys(), key = lambda k : countinfo[k], reverse = True):
            if skip_keypat.search(key):
                skipped_some = True
                continue
            print("  {}{} : {} ({:.1f}%)".format("*** " if key in warn_keys else "", key, countinfo[key], 100.0 * (float(countinfo[key])/float(total)) if total else 0))
        print("Masks:")
        for m in self._masks:
            kept, total = counters.mask_kept(m), counters.mask_total(m)
            print("  {}: kept {}/{} ({:.1f}%)".format(m.chars, kept, total, (100.0 * float(kept)) / float(total) if total else 0))
        if 1 < len(self._targets.targets):
            print("Targets:")
            tmap = { t.name : counters.target_total(t) for t in self._targets.targets }
            total = counters.registered_pairs
            for tgt in sorted(self._targets.targets, key = lambda t : tmap[t.name], reverse = True):
                if tmap[tgt.name] > 0:
                    print("  {}: {} ({:.1f}%)".format(tgt.name, tmap[tgt.name], (100.0 * float(tmap[tgt.name])) / float(total) if total else 0))
        if skipped_some:
            print("Some counters not printed above; use 'spats_tool dump ...' commands to obtain.")
        if delta:
            print("Total time: ({:.1f}s)".format(delta))

    @property
    def counters(self):
        """Returns the underlying :class:`.counters.Counters` object, which
           contains information about site and tag counts.
        """
        return self._processor.counters

    def compute_profiles(self):
        """Computes beta/theta/c reactivity values after pair data have been
           processed.

           :return: a :class:`.profiles.Profiles` object, which contains the
               reactivities for all targets.
        """
        self._profiles = Profiles(self._targets, self.run, self._processor.counters)
        self._profiles.compute()
        return self._profiles

    def write_reactivities(self, output_path):
        """Convenience function used to write the reactivities to an output
           file. Must be called after :meth:`.compute_profiles`.

           :param output_path: the path for writing the output.
        """
        self._profiles.write(output_path)

    def store(self, output_path):
        """Saves the state of the SPATS run for later processing.

           :param output_path: the path for writing the output. Recommended
               file extension is `.spats`
        """
        if os.path.exists(output_path):
            os.remove(output_path)
        pair_db = PairDB(output_path)
        pair_db.store_run(self.run)
        pair_db.add_targets(self.targets)
        pair_db.store_counters("spats", self.counters)

    def load(self, input_path):
        """Loads SPATS state from a file.

           :param input_path: the path of a previously saved SPATS session.
        """
        pair_db = PairDB(input_path)
        pair_db.load_run(self.run)
        self.loadTargets(pair_db)
        pair_db.load_counters("spats", self.counters)

    def validate_results(self, data_r1_path, data_r2_path, algorithm = "find_partial", verbose = False):
        """Used to validate the results of the current run against a different
           algorithm. Must be run after :meth:`.process_pair_data`, or after
           loading the data (:meth:`.load`) from a previously-run session.

           :param data_r1_path: path to R1 fragments
           :param data_r2_path: path to matching R2 fragments.
           :param algorithm: Generally the default is correct, but you can
               select a particular algorithm for data validation (see
               :attr:`.run.Run.algorithm`).
           :param verbose: set to `True` for detailed output of mismatched
               sites.
           :return: `True` if results validate, `False` otherwise.
        """
        original_algorithm = self.run.algorithm
        if original_algorithm == algorithm:
            raise Exception("Validation cannot be run using the same algorithm.")
        if not self.counters.registered_dict():
            raise Exception("Normal SPATS run required first in order to validate the results.")
        # Re-run the same inputs on a fresh instance with the other algorithm.
        other = Spats()
        other.run.load_from_config(self.run.config_dict())
        other.run.algorithm = algorithm
        other._targets = self._targets
        other.process_pair_data(data_r1_path, data_r2_path)
        match_count, total = self.compare_results(other, verbose = verbose)
        if match_count == total:
            print("Original results ({} algorithm) validated using {} algorithm, {} registered sites match.".format(original_algorithm, algorithm, match_count))
            return True
        else:
            print("Validation FAILURE: results ({} algorithm) only match {}/{} registered sites (when validated using {} algorithm).".format(original_algorithm, match_count, total, algorithm))
            return False

    def compare_results(self, other_spats, verbose = False):
        """Used to compare the results of the current run against another
           SPATS instance. Must be run after :meth:`.process_pair_data`, or
           after loading the data (:meth:`.load`) from a previously-run
           session.

           :param other_spats: :class:`.Spats` instance to compare.
           :param verbose: set to `True` for detailed output of mismatched
               sites.
           :return: `(match_count, total)` : `match_count` indicates the
               number of sites matched, `total` indicates total number of
               sites.
        """
        our_counts = self.counters.registered_dict()
        their_counts = other_spats.counters.registered_dict()
        match_count = 0
        total = 0
        # NOTE(review): iteritems() is Python 2 only -- confirm this module
        # still targets Python 2 before porting.
        for key, value in our_counts.iteritems():
            total += 1
            if their_counts.get(key, 0) == value:
                match_count += 1
            elif verbose:
                print("Mismatch {}: {} != {}".format(key, value, their_counts.get(key, 0)))
        return match_count, total
def run(self):
    """Run the genetic algorithm for `self.gens` generations.

    Each generation: evaluate every specimen's fitness, report the mean
    and the best individual, keep all positive-fitness specimens as
    elites, refill the population with (possibly mutated) random picks,
    and cross over adjacent non-elite pairs.
    """
    population = self.initial_population()
    for gen in range(self.gens):
        print("Generation " + str(gen))
        # Evaluate fitnesses of all in population:
        fitnesses = np.zeros(self.pop)
        best_fitness = 0.0
        best = population[0]
        for j in range(self.pop):
            specimen = population[j]
            r = Run()
            fit = r.fitness(specimen, "", save=False)
            fitnesses[j] = fit
            if fit > best_fitness:
                best_fitness = fit
                best = specimen
        print("Mean fitness = " + str(np.mean(fitnesses)))
        print("Best individual: ----------------------------------------------------------------")
        try:
            print(str(best))
        except Exception:
            print("FAILED TO PRINT FOR SOME REASON!")
        # BUG FIX: the original re-evaluated and saved `specimen` (which is
        # simply the LAST individual of the evaluation loop), not the best
        # one.  Evaluate and save `best`, as the surrounding prints intend.
        b_fit = Run().fitness(best, "Best_of_gen_" + str(gen), save=True)
        print("Fitness of best = " + str(b_fit))
        print("---------------------------------------------------------------------------------")
        # Elitism: carry over every specimen with positive fitness.
        children = []
        numAdded = 0
        elites = []
        for j in range(self.pop):
            if fitnesses[j] > 0:
                children.append(population[j])
                numAdded += 1
                elites.append(j)
        # Refill the population with random picks, mutating with
        # probability p_mutate.
        while numAdded < self.pop:
            extra = population[random.randint(0, self.pop - 1)]
            if random.random() < self.p_mutate:
                extra = self.mutate(extra)
            children.append(extra)
            numAdded += 1
        # Crossover adjacent pairs, skipping any pair containing an elite.
        # (Loop variable renamed: the original reused `i`, shadowing the
        # generation counter; `//` keeps integer division on Python 3.)
        for k in range(self.pop // 2):
            pos1 = 2 * k
            pos2 = 2 * k + 1
            if pos1 in elites or pos2 in elites:
                continue
            if random.random() < self.p_xover:
                new1, new2 = self.crossover(children[pos1], children[pos2])
                children[pos1] = new1
                children[pos2] = new2
        population = children
import matplotlib as mpl
mpl.use('Agg')  # headless backend; must be set before pyplot import
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
plt.rcParams.update({'figure.autolayout': True})
mpl.rcParams['axes.unicode_minus'] = False

# local
from run import Run
import plot_style
import field_helper as field

plot_style.white()
pal = sns.color_palette('deep')

# Load the simulation run named on the command line and its density fields.
run = Run(sys.argv[1])
run.read_ntot()

# BUG FIX / portability: create the output directory with os.makedirs instead
# of shelling out via os.system('mkdir -p ...'), which is unix-only and breaks
# on paths containing spaces or shell metacharacters.  (Guarded isdir check
# keeps this compatible with Pythons lacking makedirs(exist_ok=...).)
out_dir = run.run_dir + 'analysis/amplitude'
if not os.path.isdir(out_dir):
    os.makedirs(out_dir)

# Peak density amplitude per time step, for ions and electrons separately.
max_amp_i = np.empty(run.nt, dtype=float)
max_amp_e = np.empty(run.nt, dtype=float)
for it in range(run.nt):
    max_amp_i[it] = np.max(run.ntot_i[it, :, :])
    max_amp_e[it] = np.max(run.ntot_e[it, :, :])

np.savetxt(run.run_dir + 'analysis/amplitude/max_amp.csv',
           np.transpose((range(run.nt), max_amp_i, max_amp_e)),
           delimiter=',', fmt=['%d', '%.5f', '%.5f'],
           header='t_index,max_amp_i,max_amp_e')
            # Interpolate the potential onto the regular time grid t_reg,
            # one (ix, iy) grid point at a time.
            # NOTE(review): indentation of this chunk reconstructed -- the
            # enclosing def/loops lie above this excerpt; verify in context.
            f_phi = interp.interp1d(run.t, run.phi[:,ix,iy])
            phi_tmp[:, ix, iy] = f_phi(t_reg)

    # Heat flux is one-dimensional in time, so a single interpolant suffices.
    f_q_i = interp.interp1d(run.t, run.q_i)

    # Replace the run's fields in place with the time-interpolated copies.
    run.ntot_i = ntot_i_tmp[:]
    run.ntot_e = ntot_e_tmp[:]
    run.phi = phi_tmp[:]
    run.q_i = f_q_i(t_reg)
    run.t = t_reg[:]
    run.nt = len(run.t)


if __name__ == '__main__':
    # argv[1]: run directory/identifier passed to Run
    run = Run(sys.argv[1])
    # lab_frame is True unless argv[2] is the literal string 'False'
    if sys.argv[2] == 'False':
        lab_frame = False
    else:
        lab_frame = True
    version = int(sys.argv[3])
    run.read_ntot(lab_frame=lab_frame)
    run.read_phi(lab_frame=lab_frame)
    run.read_q()
    interp_fac = 1
    interpolate_time(run, interp_fac)

    # Heat flux to SI
    hflux_SI = run.q_i * (run.nref*run.tref*run.vth*run.rhoref**2/run.amin**2)
def play_level(self, level, str_level=None):
    """Build the three solver panels (Best-First, BFS, DFS) and run them.

    Creates a title label, a stats label and a Board per column, wires
    each Board to a fresh Run instance, loads the level (either from the
    string `str_level` or from the file `level`), and starts all three
    searches.  Behaviour matches the original: same widgets, same grid
    cells, same call order on the Run instances.
    """
    # One entry per column: (title label, stats variable, Board attribute)
    panels = [
        ("Best First - Algorithm", self.astar_stat, "astar_gui"),
        ("BFS - Algorithm", self.bfs_stat, "bfs_gui"),
        ("DFS - Algorithm", self.dfs_stat, "lifo_gui"),
    ]

    solvers = []
    for column, (title, stat_var, board_attr) in enumerate(panels):
        # Title label (row 0) and generated-nodes stats label (row 2)
        Label(master=self, text=title).grid(column=column, row=0,
                                            sticky=N+W, padx=20, pady=20)
        Label(master=self, textvariable=stat_var).grid(column=column, row=2,
                                                       sticky=N+W,
                                                       padx=20, pady=20)
        solver = Run(self)
        board = Board(parent=self, stats_gui=stat_var, run=solver,
                      column=column, row=1, sq_size=self.sqsize,
                      delay=self.delay, height=300, width=300)
        setattr(self, board_attr, board)
        solver.gui = board
        solvers.append(solver)

    astar_run, bfs_run, lifo_run = solvers

    # Algorithm ids follow column order: 0 = Astar, 1 = BFS, 2 = DFS
    for algorithm_id, solver in enumerate(solvers):
        solver.set_algorithm(algorithm_id)

    # Load the level into every solver (original order: BFS, Astar, DFS)
    if str_level:
        for solver in (bfs_run, astar_run, lifo_run):
            solver.initialize(solver.generate_board(str_level=str_level))
    else:
        for solver in (bfs_run, astar_run, lifo_run):
            solver.open_file(level)

    # Kick off the three searches
    for solver in solvers:
        solver.run()
image.save(fname) # time.sleep(3) print "Saved image to " + fname def clear(self): self._scene.clear() self.tview.setPlainText("") self.gview.update() if __name__ == "__main__": import json, sys, os from font import Font from run import Run app = QtGui.QApplication(sys.argv) # print app.desktop().logicalDpiY() tpath = os.path.join(os.path.dirname(sys.argv[0]), "../../tests") jf = file(os.path.join(tpath, "padauk3.json")) jinfo = json.load(jf) font = Font() font.loadFont(os.path.join(tpath, "fonts/Padauk/Padauk.ttf")) font.makebitmaps(40) rinfo = jinfo["passes"][0]["slots"] run = Run(font, False) run.addSlots(rinfo) view = RunView(run, font).gview print "Padauk RunView?" ### view.show() sys.exit(app.exec_())
class Gui(Tk):
    """Main application window: builds the menus and hosts the three
    solver boards (Best-First / BFS / DFS) side by side."""

    def __init__(self, *args, **kwargs):
        Tk.__init__(self, *args, **kwargs)

        # Force fullscreen
        self.geometry("{0}x{1}+0+0".format(self.winfo_screenwidth() - 3,
                                           self.winfo_screenheight() - 3))

        # Three columns (one per algorithm) over three rows
        # (title / board / stats).
        self.columnconfigure(0, weight=5)
        self.columnconfigure(1, weight=5)
        self.columnconfigure(2, weight=3)
        self.rowconfigure(0, weight=2)
        self.rowconfigure(1, weight=2)
        self.rowconfigure(2, weight=2)

        # Set the title
        self.title("AI-Prog Module 1 - Kristian Ekle & Thor Håkon")

        # Reference to menu
        self.menubar = Menu(self)

        # Reference to run class (used here to list available board files)
        self.run = Run(self)

        # The size of the squares in the map
        self.sqsize = 15

        # Defining the colors for the icons
        self.icon = {
            ".": "#CCFFFF",
            "#": "#4c4c4c",
            "S": "#FF6600",
            "G": "#33CC33",
            " ": "#CCFFFF",
            ".S": "#B8E6E6",
            ".G": "#99EB99"
        }

        # How long we should wait between each redraw (ms)
        self.delay = 20

        # Radiobutton variables (created in populate_menu)
        self.v1 = None
        self.v2 = None
        self.v3 = None
        self.v4 = None

        # Config
        # NOTE(review): "algoritm" typo kept -- external code may read it.
        self.algoritm = 0
        self.astar_stat = StringVar()
        self.bfs_stat = StringVar()
        self.dfs_stat = StringVar()

        # Populate the menues
        self.populate_menu()

    # Populates the menu
    def populate_menu(self):
        """Build the Boards, Delay and Quit menus on the menubar."""
        # Dummy variables backing the radiobutton groups
        self.v1 = StringVar()
        self.v2 = IntVar()
        self.v2.set(1)
        self.v3 = IntVar()
        self.v3.set(1)
        self.v4 = IntVar()
        self.v4.set(1)

        # Create a pulldown menu for chosing what level to play
        self.filemenu = Menu(self.menubar, tearoff=0)
        str_levels = self.run.list_files()
        for fname in str_levels:
            # label strips the 4-character file extension
            self.filemenu.add_radiobutton(label=fname[:-4], variable=self.v1,
                                          command=partial(self.play_level,
                                                          fname))
        self.filemenu.add_separator()
        self.filemenu.add_command(label="Add New",
                                  command=lambda: self.open_file())
        self.menubar.add_cascade(label="Boards", menu=self.filemenu)

        # Create a pulldown menu for chosing delays
        self.delaymenu = Menu(self.menubar, tearoff=0)
        for i in ['0', '20', '50', '100', '200', '500', '1000']:
            # BUG FIX: the original used `command=lambda: self.set_delay(i)`,
            # a late-binding closure, so EVERY entry applied the final loop
            # value ('1000').  partial binds the current value, matching the
            # style already used for the Boards menu above.
            self.delaymenu.add_radiobutton(label=i + " ms", variable=self.v4,
                                           value=i,
                                           command=partial(self.set_delay, i))
        self.menubar.add_cascade(label="Delay", menu=self.delaymenu)

        # Menu element for closing the application
        self.menubar.add_command(label="Quit!", command=self.quit)

        # Apply the config
        self.config(menu=self.menubar)

    ## -----------------------------------------
    ## Menu Handlers
    ## -----------------------------------------

    def play_level(self, level, str_level=None):
        """Create the three solver panels and run the chosen level.

        :param level: board file name (used when str_level is None)
        :param str_level: raw board text, used instead of the file if given
        """
        # Set new run instance

        # Astar
        # Label for Algorithm
        astar_label = Label(master=self,
                            text="Best First - Algorithm").grid(column=0,
                                                                row=0,
                                                                sticky=N+W,
                                                                padx=20,
                                                                pady=20)
        astar_generated = Label(master=self,
                                textvariable=self.astar_stat).grid(column=0,
                                                                   row=2,
                                                                   sticky=N+W,
                                                                   padx=20,
                                                                   pady=20)
        astar_run = Run(self)
        self.astar_gui = Board(parent=self, stats_gui=self.astar_stat,
                               run=astar_run, column=0, row=1,
                               sq_size=self.sqsize, delay=self.delay,
                               height=300, width=300)
        astar_run.gui = self.astar_gui

        # BFS
        # Label for Algorithm
        bfs_label = Label(master=self,
                          text="BFS - Algorithm").grid(column=1, row=0,
                                                       sticky=N+W,
                                                       padx=20, pady=20)
        bfs_generated = Label(master=self,
                              textvariable=self.bfs_stat).grid(column=1,
                                                               row=2,
                                                               sticky=N+W,
                                                               padx=20,
                                                               pady=20)
        bfs_run = Run(self)
        self.bfs_gui = Board(parent=self, stats_gui=self.bfs_stat,
                             run=bfs_run, column=1, row=1,
                             sq_size=self.sqsize, delay=self.delay,
                             height=300, width=300)
        bfs_run.gui = self.bfs_gui

        # DFS
        # Label for Algorithm
        astar_label = Label(master=self,
                            text="DFS - Algorithm").grid(column=2, row=0,
                                                         sticky=N+W,
                                                         padx=20, pady=20)
        dfs_generated = Label(master=self,
                              textvariable=self.dfs_stat).grid(column=2,
                                                               row=2,
                                                               sticky=N+W,
                                                               padx=20,
                                                               pady=20)
        lifo_run = Run(self)
        self.lifo_gui = Board(parent=self, stats_gui=self.dfs_stat,
                              run=lifo_run, column=2, row=1,
                              sq_size=self.sqsize, delay=self.delay,
                              height=300, width=300)
        lifo_run.gui = self.lifo_gui

        # Set the algorithm 0 = Astar, 1=BFS, 2=DFS
        astar_run.set_algorithm(0)
        bfs_run.set_algorithm(1)
        lifo_run.set_algorithm(2)

        # Set the correct level in Run class
        if str_level:
            bfs_run.initialize(bfs_run.generate_board(str_level=str_level))
            astar_run.initialize((astar_run.generate_board(str_level=str_level)))
            lifo_run.initialize(lifo_run.generate_board(str_level=str_level))
        else:
            bfs_run.open_file(level)
            astar_run.open_file(level)
            lifo_run.open_file(level)

        # Run the solvers
        astar_run.run()
        bfs_run.run()
        lifo_run.run()

    def choose_algorithm(self, algorithm):
        """Remember which algorithm the user selected."""
        self.algoritm = algorithm

    def set_delay(self, delay):
        """Set the redraw delay in milliseconds (menu passes a string)."""
        self.delay = int(delay)

    def choose_module(self, module):
        # Parenthesized so the statement is valid on Python 3 as well;
        # output is unchanged on Python 2 (single expression).
        print(module)

    def open_file(self):
        """Ask for a board file and play its contents as a raw string level."""
        filename = askopenfilename(parent=self)
        f = open(filename)
        level = f.read()
        # start with chosen file
        self.play_level(None, level)
# when reading from disk to memory taking the whole slice of each column is very fast species_indices = h5.root.species_information.cols.species_index[:] species_names = h5.root.species_information.cols.species_name[:] simulation._species_list = [ Species( species_indices[i], species_names[i], simulation, ) for i in range(simulation.number_of_species) ] # create run objects and compartment objects within runs for i in range(1, int(simulation.number_of_runs) + 1): try: node = h5.root._f_getChild('run%s' % i) run = Run(node._v_attrs, i, simulation) except tables.exceptions.NoSuchNodeError, error: # Couldn't find run i, so overwrite number_of_runs with i - 1 h5.close() h5 = tables.openFile(h5_file, 'r+') h5.root._v_attrs.number_of_runs = i - 1 simulation.number_of_runs = h5.root._v_attrs.number_of_runs break # just create compartments once on the first run #TODO maybe change this when dealing with volumes if i == 1: cols = node.compartment_information.cols # table columns accessor compartment_indices = cols.compartment_index[:] compartment_ids = cols.compartment_id[:] compartment_names = cols.compartment_name[:] compartment_x_positions = cols.compartment_x_position[:]
def reload_run(self, run_path):
    """Reconstruct a Run rooted at *run_path* and reload its stored results."""
    restored = Run(None, None, None, self)
    restored.results_folder_path = run_path
    restored.reload()