def __init__(self, in_dim, emb_dim, pb_limits=None, batch_sz=128, log_dir="/tmp/"):
    """Set up a learned novelty estimator built from two encoders: a fixed
    random ("frozen") network and a trainable ("learnt") one whose prediction
    error on a behavior descriptor serves as the novelty score (random-network
    -distillation style — TODO confirm against the estimator's __call__).

    Args:
        in_dim: dimensionality of the behavior descriptors.
        emb_dim: dimensionality of the embedding space.
        pb_limits: behavior-space boundaries; when given, the two networks are
            first driven apart over that domain so unseen descriptors start
            with non-zero novelty.
        batch_sz: minibatch size used when training the learnt encoder.
        log_dir: directory where logs are written.
    """
    # Fixed random target network; kept in eval mode and never trained.
    self.frozen = MiscUtils.SmallEncoder1d(in_dim, emb_dim, num_hidden=3, non_lin="leaky_relu", use_bn=False)
    self.frozen.eval()
    # Trainable predictor network (deeper than the frozen target).
    self.learnt = MiscUtils.SmallEncoder1d(in_dim, emb_dim, num_hidden=5, non_lin="leaky_relu", use_bn=False)
    self.optimizer = torch.optim.Adam(self.learnt.parameters(), lr=1e-2)
    self.archive = None  # set later by update()
    self.pop = None      # set later by update()
    self.batch_sz = batch_sz
    self.epoch = 0
    self.log_dir = log_dir
    if pb_limits is not None:
        # Pre-separate frozen/learnt outputs over the problem domain.
        MiscUtils.make_networks_divergent(self.frozen, self.learnt, pb_limits, iters=50)
def run_playbook(self):
    """Run the configured Ansible playbook and record the action in the log."""
    self.check_for_extension()
    playbook_cmd = self.__ansible__command__ + " " + self.pb
    os.system(playbook_cmd)
    log_entry = MiscUtils.get_current_user() + Constants.openedx_run_playbook + self.pb
    MiscUtils.write_to_log(Constants.mgmt_system_log, log_entry)
def do_action(self):
    """Execute the parsed admin command.

    Only ``change_root_password`` is handled: it sets the MySQL password of
    the user in ``self.args[3]`` to ``self.args[4]`` via the mysql CLI, then
    records the operation in the management log.
    """
    if self.command == 'change_root_password':
        user = self.args[3]
        new_password = self.args[4]
        # NOTE(review): building shell/SQL from raw arguments is injectable;
        # the SQL should ideally be issued through a client library with
        # parameter binding rather than the shell.
        # Bug fix: the closing double-quote of the -e argument was missing,
        # leaving the shell with an unterminated string.
        strQuery = ('mysql -u root -p -e "SET PASSWORD FOR ' + user +
                    "@'localhost' = PASSWORD('" + new_password + "');\"")
        os.system("sudo " + strQuery)
        # Bug fix: log the affected username — the previous code wrote the
        # plaintext password (args[4]) into the management log.
        MiscUtils.write_to_log(
            Constants.mgmt_system_log,
            MiscUtils.get_current_user() + " " + Constants.change_user_pass +
            " " + user)
def run_script(self):
    """Execute the configured shell script if it exists, logging the run."""
    if not self.__does_script_exist():
        print(Constants.script_not_found)
        return
    script_path = self.dir + self.script
    os.system(script_path)
    MiscUtils.write_to_log(
        Constants.mgmt_system_log,
        MiscUtils.get_current_user() + " " + Constants.execute_shell_script +
        " " + script_path)
def do_lms_asset_recompile():
    """Recompile the LMS static assets and restart the edxapp services."""
    print(Constants.header + Constants.openedx_compile_assets)
    os.system(
        "cd /edx/app/edxapp/edx-platform && paver update_assets lms --settings=aws"
    )
    # Restart the edxapp process group through supervisor.
    supervisor = Supervisor.Supervisor("restart")
    supervisor.cmd = supervisor.cmd + " edxapp:"
    supervisor.run()
    print(Constants.header + Constants.done)
    MiscUtils.write_to_log(
        Constants.mgmt_system_log,
        MiscUtils.get_current_user() + " " +
        Constants.openedx_compile_assets_lms_log)
def handle_alexa(event, content):
    """Handle the Alexa 'points period' intent.

    Resolves the session user, rejects anonymous sessions, and answers with
    the points period (7/30/365 days) selected through the time-period slot.
    """
    intent_name = event['request']['intent']['name']
    slots = event['request']['intent']['slots']
    session_attrs = event['session'].get('attributes', {})

    user_resolution = MiscUtils.set_session_user_id_and_type(event, True)
    if user_resolution is not None:
        return user_resolution

    if LexUtils.is_yes(session_attrs[CC.SESS_ATTR_ANONYMOUS]):
        message = "You must provide an email address or phone number to use this feature."
        return AlexaUtils.build_response(
            session_attrs,
            AlexaUtils.build_speechlet_response("Sorry", message, None, False))

    user = User.User(session_attrs[CC.SESS_ATTR_USER_ID],
                     session_attrs[CC.SESS_ATTR_USER_ID_TYPE])

    # Default to a monthly period unless the slot narrows it down.
    points_period_days = 30
    if AlexaUtils.is_slot_present(slots, CC.SLOT_POINTS_TIME_PERIOD):
        period_slot = slots[CC.SLOT_POINTS_TIME_PERIOD]
        if period_slot == CC.WEEKLY:
            points_period_days = 7
        elif period_slot == CC.YEARLY:
            points_period_days = 365

    # TODO get points from user opportunities + points per opportunity
    message = "The point period is {}.".format(points_period_days)
    return AlexaUtils.build_response(
        session_attrs,
        AlexaUtils.build_speechlet_response("Point Period", message, None,
                                            False))
def run_me():
    """Generator streaming scan progress as HTML chunks.

    Emits a JS/CSS preamble, forwards scanner output from the queue until a
    FIN/FINERR sentinel arrives, then persists the extracted store.
    """
    yield """
    <script type="text/javascript">
    function stb() { window.scrollBy(0,document.body.scrollHeight); }
    var success = true;
    </script>
    <style>body {color:#fff;font-family: monospace;}</style>
    <pre>"""
    args = (caseobj["case_appsmount"], )
    p = Thread(target=parser.begin_scan, args=args)
    p.start()
    while True:
        res = q.get()
        if res == "FIN":
            break
        elif res == "FINERR":
            # Bug fix: the closing tag was '<b>' instead of '</b>', leaving
            # all subsequent page content bold.
            yield "<b>Error occurred, canceling scan!</b>"
            yield "<script>success = false;</script>"
            self.db.remove_case(caseobj["case_id"])
            return
        yield res + "\n<script>stb();</script>"
    yield "<b>Finished, Saving Content please wait...</b>"
    yield "</pre>"
    store = parser.get_extractedstore()
    storepath = store.gen_storepath(caseobj["case_name"], caseobj["case_id"])
    self.db.add_case_storepath(caseobj["case_id"], storepath)
    store_file = store.save_store(storepath)
    # NOTE(review): 'case_id' comes from the enclosing scope while the rest
    # of this function uses caseobj["case_id"] — confirm they are the same.
    self.db.add_case_file(case_id, store_file, MiscUtils.genfilesig(store_file))
    self.add_store(store, case_id)
def handle_lex(event, content):
    """Handle the Lex 'points period' intent.

    Resolves the session user, rejects anonymous sessions, and closes the
    dialog with the points period (7/30/365 days) chosen via the time-period
    slot.
    """
    intent_name = event['currentIntent']['name']
    slots = event['currentIntent']['slots']
    raw_attrs = event['sessionAttributes']
    session_attrs = raw_attrs if raw_attrs is not None else {}

    user_resolution = MiscUtils.set_session_user_id_and_type(event, False)
    if user_resolution is not None:
        return user_resolution

    if LexUtils.is_yes(session_attrs[CC.SESS_ATTR_ANONYMOUS]):
        message = "You must provide an email address or phone number to use this feature."
        return LexUtils.close(CC.EMPTY_OBJ, True, message, CC.EMPTY_OBJ)

    user = User.User(session_attrs[CC.SESS_ATTR_USER_ID],
                     session_attrs[CC.SESS_ATTR_USER_ID_TYPE])

    # Default to a monthly period unless the slot narrows it down.
    points_period_days = 30
    if LexUtils.is_slot_present(slots, CC.SLOT_POINTS_TIME_PERIOD):
        period_slot = slots[CC.SLOT_POINTS_TIME_PERIOD]
        if period_slot == CC.WEEKLY:
            points_period_days = 7
        elif period_slot == CC.YEARLY:
            points_period_days = 365

    # TODO get points from user opportunities + points per opportunity
    message = "The point period is {}.".format(points_period_days)
    return LexUtils.close(CC.EMPTY_OBJ, True, message, CC.EMPTY_OBJ)
def sendParser(gui, message):
    """Parse a user-entered command/message and dispatch it.

    ``/connect`` is handled even when disconnected; anything else requires an
    open connection. Known commands are dispatched through the options table,
    everything else is sent to the current tab as a chat message.
    """
    options = initiateSwitch()
    tokens = MiscUtils.messageSplit(message)
    if tokens[0] == "/connect":
        connect(tokens, gui)
        return
    if gui.socketStatus != "ok":
        TabUtils.writeMessage(gui, "You are not connected to a server, use /connect IP")
        gui.message.delete(0, END)
        return
    if tokens[0] in options:
        options[tokens[0]](tokens, gui)
    else:
        sendToServer(gui, gui.currentTab + " " + message + '\n')
def del_user(self, username, email):
    """Delete an Open edX user via the manage.edxapp management command.

    The command only runs when the email address passes validation;
    otherwise an error message is printed.
    """
    manage_cmd = ("sudo /edx/bin/python.edxapp /edx/bin/manage.edxapp lms manage_user "
                  + username + " " + email + " --settings=aws")
    if not MiscUtils.isValidEmail(email):
        print(Constants.openedx_user_invalidemail)
        return
    os.system(manage_cmd)
def run_me():
    """Generator streaming scan progress as HTML chunks (duplicate variant).

    Emits a JS/CSS preamble, forwards scanner output from the queue until a
    FIN/FINERR sentinel arrives, then persists the extracted store.
    """
    yield """
    <script type="text/javascript">
    function stb() { window.scrollBy(0,document.body.scrollHeight); }
    var success = true;
    </script>
    <style>body {color:#fff;font-family: monospace;}</style>
    <pre>"""
    args = (caseobj["case_appsmount"],)
    p = Thread(target=parser.begin_scan, args=args)
    p.start()
    while True:
        res = q.get()
        if res == "FIN":
            break
        elif res == "FINERR":
            # Bug fix: the closing tag was '<b>' instead of '</b>'.
            yield "<b>Error occurred, canceling scan!</b>"
            yield "<script>success = false;</script>"
            self.db.remove_case(caseobj["case_id"])
            return
        yield res + "\n<script>stb();</script>"
    yield "<b>Finished, Saving Content please wait...</b>"
    yield "</pre>"
    store = parser.get_extractedstore()
    storepath = store.gen_storepath(caseobj["case_name"], caseobj["case_id"])
    self.db.add_case_storepath(caseobj["case_id"], storepath)
    store_file = store.save_store(storepath)
    # NOTE(review): 'case_id' comes from the enclosing scope while the rest
    # of this function uses caseobj["case_id"] — confirm they are the same.
    self.db.add_case_file(case_id, store_file, MiscUtils.genfilesig(store_file))
    self.add_store(store, case_id)
def get_accurate_tree_score(mask_matrix, parent_vector, dft, log_scores):
    """Accurately evaluate a mutation tree.

    Args:
        mask_matrix (np.ndarray): 'Count of probabilities' matrix for
            P(D_ij = x|E_ij = 0) and P(D_ij = x|E_ij = 1) from which the
            fast_matrix can be derived once the log scores are known.
        parent_vector (np.ndarray): Parent-vector representation of the tree.
        dft (np.ndarray): Depth-first traversal order of the tree nodes.
        log_scores (np.ndarray): Log scores matrix for the error probabilities.

    Returns:
        float: Accurate score of the mutation tree.
        np.ndarray: fast_matrix of per-cell/per-node attachment probabilities
            PI(i=1 -> n) P(D_ij|A(T)_i ro_j).
    """
    accumulated = np.copy(mask_matrix)
    # Depth-first order guarantees every parent is accumulated before its
    # children; the root (dft[0]) is already final, so it is skipped.
    for node in dft[1:]:
        accumulated[node, :, :] += accumulated[parent_vector[node], :, :]
    # Flatten the two log-score columns into one vector so a single 3d dot
    # product converts counts into log probabilities.
    log_scores_vector = np.hstack((log_scores[:, 0], log_scores[:, 1]))
    fast_matrix = MiscUtils.dot_3d(accumulated, log_scores_vector)
    return get_score_from_fast_matrix(fast_matrix), fast_matrix
def test_dot_3d(self):
    """dot_3d must match numpy's np.dot on a random integer tensor."""
    lhs = np.random.randint(0, 100, (15, 3000, 8), dtype=np.int32)
    rhs = np.log(np.random.random((8,)))
    actual = MiscUtils.dot_3d(lhs, rhs)
    expected = np.dot(lhs, rhs)
    self.assertTrue(check_max_variation_matrix(actual, expected, 1e-15))
def checkQueue(gui):
    """
    Parses the answer from the server and performs the corresponding command.

    Side effects:
        Can be one of the following: a new tab is created, the username is
        changed, or a message is printed in the current window. Re-arms
        itself on the Tk event loop every 50 ms unless a disconnect arrived.
    """
    # stopSign stays 1 unless the server reports a disconnect, in which
    # case the polling loop below is not re-armed.
    stopSign = 1
    respons = gui.thread.returnQueue()
    # Protocol messages start with a brace; anything else is chat text.
    if(respons[0][0] == "{"):
        options = initDictionary()
        # Strip the leading brace and the two trailing delimiter characters.
        temp = respons[1:len(respons)-2]
        if " " in temp:
            commandString = MiscUtils.messageSplit(temp)
            if commandString[0] == 'disconnected':
                stopSign = 0
                disconnected(gui)
            else:
                if commandString[0] in options:
                    options[commandString[0]](gui,commandString[1])
                else:
                    # Not a known command: payload is treated as a user list.
                    fillUserList(gui,commandString)
        else:
            # Single token: payload is treated as a room list.
            fillRoomList(gui,temp)
    else:
        printMessage(gui,respons)
    if stopSign == 1:
        gui.master.after(50,checkQueue,gui)
def delegate(self, gridPassPhrase='', verbose=False):
    """method to upload a valid proxy credential in a myproxy server.
    The proxy must to have voms attributes. Only the pilot owner will be
    allowed to retrieve the proxy, specified by -Z option. The DN of the
    user is used as username.
    """
    # NOTE(review): this function is Python 2 (print statements, the
    # 'commands' module) and cannot run under Python 3 as written.
    if self.servername == '':
        raise MyProxyError(2100)
    if self.vomsattributes == '':
        raise MyProxyError(2101)
    if self.pilotownerDN == '':
        raise MyProxyError(2103)
    cmd = 'myproxy-init'
    # credname: a UUID with dashes stripped
    credname = re.sub('-', '', MiscUtils.wrappedUuidGen())
    print "=== upload proxy for glexec"
    # command options
    cmd += ' -s %s' % self.servername  # myproxy server name
    cmd += " -x -Z '%s'" % self.pilotownerDN  # only user with this DN
                                              # is allowed to retrieve it
    cmd += ' --voms %s' % self.vomsattributes  # voms attributes
    cmd += ' -d'  # uses DN as username
    cmd += ' --credname %s' % credname
    # Prompt for the passphrase when the caller did not supply one.
    if gridPassPhrase == '':
        if sys.stdin.isatty():
            gridPassPhrase = getpass.getpass('Enter GRID pass phrase:')
        else:
            sys.stdout.write('Enter GRID pass phrase:')
            sys.stdout.flush()
            gridPassPhrase = sys.stdin.readline().rstrip()
            print
    # Escape '$' so the shell does not expand it inside double quotes.
    gridPassPhrase = gridPassPhrase.replace('$', '\$')
    # NOTE(review): piping the passphrase through 'echo' exposes it to the
    # shell command line and process listings — confirm whether stdin
    # delivery is feasible here.
    cmd = 'echo "%s" | %s -S' % (gridPassPhrase, cmd)
    cmd = self.srcFile + ' unset GT_PROXY_MODE; ' + cmd
    self.__command = cmd  # for testing purpose
    if verbose:
        cmdStr = cmd
        if gridPassPhrase != '':
            # Mask the passphrase before echoing the command.
            cmdStr = re.sub(gridPassPhrase, '*****', cmd)
        print cmdStr
    status, out = commands.getstatusoutput(cmd)
    if verbose:
        print out
    if status != 0:
        # An expiry warning alone is not treated as a failure.
        if out.find(
                'Warning: your certificate and proxy will expire') == -1:
            if not verbose:
                print out
            raise MyProxyError(2300)
    return credname
def test_get_mirrored_beta(self):
    """Mirroring must fold arbitrary real values back into [0, 1]."""
    inputs = np.array([0, 0.5, 1, -0.3, 1.3, -2.5, 2.5, -3.7, 3.7, -4.8, 4.8, -5.6, 5.6],
                      dtype=np.float64)
    mirrored = np.empty((inputs.size,), dtype=np.float64)
    for idx in range(inputs.size):
        mirrored[idx] = MiscUtils.get_mirrored_beta(inputs[idx])
    expected = np.array([0, 0.5, 1, 0.3, 0.7, 0.5, 0.5, 0.3, 0.3, 0.8, 0.8, 0.4, 0.4],
                        dtype=np.float64)
    self.assertTrue(check_max_variation_array(mirrored, expected, 1e-15))
def delegate(self,gridPassPhrase='',verbose=False):
    """method to upload a valid proxy credential in a myproxy server.
    The proxy must to have voms attributes. Only the pilot owner will be
    allowed to retrieve the proxy, specified by -Z option. The DN of the
    user is used as username.
    """
    # NOTE(review): duplicate of the delegate() above; also Python 2
    # (print statements, the 'commands' module).
    if self.servername == '' :
        raise MyProxyError(2100)
    if self.vomsattributes == '' :
        raise MyProxyError(2101)
    if self.pilotownerDN == '' :
        raise MyProxyError(2103)
    cmd = 'myproxy-init'
    # credname: a UUID with dashes stripped
    credname = re.sub('-','',MiscUtils.wrappedUuidGen())
    print "=== upload proxy for glexec"
    # command options
    cmd += ' -s %s' % self.servername  # myproxy server name
    cmd += " -x -Z '%s'" % self.pilotownerDN  # only user with this DN
                                              # is allowed to retrieve it
    cmd += ' --voms %s' % self.vomsattributes  # voms attributes
    cmd += ' -d'  # uses DN as username
    cmd += ' --credname %s' % credname
    # Prompt for the passphrase when the caller did not supply one.
    if gridPassPhrase == '':
        if sys.stdin.isatty():
            gridPassPhrase = getpass.getpass('Enter GRID pass phrase:')
        else:
            sys.stdout.write('Enter GRID pass phrase:')
            sys.stdout.flush()
            gridPassPhrase = sys.stdin.readline().rstrip()
            print
    # Escape '$' so the shell does not expand it inside double quotes.
    gridPassPhrase = gridPassPhrase.replace('$','\$')
    # NOTE(review): piping the passphrase through 'echo' exposes it to the
    # shell command line and process listings — confirm whether stdin
    # delivery is feasible here.
    cmd = 'echo "%s" | %s -S' % (gridPassPhrase,cmd)
    cmd = self.srcFile + ' unset GT_PROXY_MODE; ' + cmd
    self.__command = cmd  # for testing purpose
    if verbose:
        cmdStr = cmd
        if gridPassPhrase != '':
            # Mask the passphrase before echoing the command.
            cmdStr = re.sub(gridPassPhrase,'*****',cmd)
        print cmdStr
    status,out = commands.getstatusoutput( cmd )
    if verbose:
        print out
    if status != 0:
        # An expiry warning alone is not treated as a failure.
        if out.find('Warning: your certificate and proxy will expire') == -1:
            if not verbose:
                print out
            raise MyProxyError(2300)
    return credname
def join(argument,gui):
    """Join a chat room unless the user is already a member.

    Args (by position):
        argument: split command; argument[0] is the command name, argument[1]
            the room specification (possibly two space-separated tokens).
        gui: client GUI object holding the window list and input field.
    """
    success = 0
    # A space in the room specification means two arguments were given.
    if (" " in argument[1]):
        argumentString2 = MiscUtils.messageSplit(argument[1])
        # noDuplicate is truthy when the room is NOT already open.
        if MiscUtils.noDuplicate(gui.windowList,argumentString2[0]):
            success = 1
            arguments = 2
    else:
        if MiscUtils.noDuplicate(gui.windowList,argument[1]):
            success = 1
            arguments = 1
    if success == 1:
        if (arguments == 2):
            # NOTE(review): argument[0] appears twice here while the
            # one-argument branch leads with argument[1] — confirm this
            # matches the wire protocol; it looks like a possible typo.
            msg_temp = argument[0] + " " + argument[0] + " " + argument[1] + '\n'
        else:
            msg_temp = argument[1] + " " + argument[0] + " " + argument[1] + '\n'
        sendToServer(gui,msg_temp)
    else:
        TabUtils.writeMessage(gui,"You are already a member of that room!")
    gui.message.delete(0,END)
def leave(argument, gui):
    """Leave a chat room: close its tab locally and notify the server.

    ``argument[0]`` is the command name, ``argument[1]`` the room name.
    Leaving the mandatory 'global' room is refused.
    """
    if argument[1] == "global":
        # Bug fix: writeMessage was called without the gui argument, which
        # would raise a TypeError (every other call site passes gui first).
        TabUtils.writeMessage(gui, "You cannot leave global!")
        gui.message.delete(0, END)
    else:
        # noDuplicate is truthy when the room is NOT open, so 'not' means
        # the user is currently a member.
        if not MiscUtils.noDuplicate(gui.windowList, argument[1]):
            TabUtils.deleteTab(gui, argument[1])
            gui.windowList.pop(argument[1], None)
            gui.menues.updateRoomList(gui.windowList)
            msg_temp = "global" + " " + argument[0] + " " + argument[1] + '\n'
            sendToServer(gui, msg_temp)
        else:
            # Bug fix: was gui.writeMessage(...); messages are written via
            # TabUtils.writeMessage(gui, ...) everywhere else in this module.
            TabUtils.writeMessage(gui, "You are not a member of the room " +
                                  argument[1] + "!")
            gui.message.delete(0, END)
def run_management_script_update(self):
    """Update the management scripts from git (hard reset, clean, pull)."""
    print(Constants.header + " " + Constants.management_script_upgrade)
    # Cache git credentials for an hour so the three commands below do not
    # each prompt for a password.
    os.system("git config --global credential.helper \"cache --timeout=3600\"")
    os.system("cd " + MiscUtils.get_python_dir() + " && sudo git reset --hard HEAD")
    os.system("cd " + MiscUtils.get_python_dir() + " && sudo git clean -f")
    os.system("cd " + MiscUtils.get_python_dir() + " && sudo git pull")
    print(Constants.header + Constants.done)
    log_entry = (MiscUtils.get_current_user() + " " +
                 Constants.management_script_upgrade_log)
    MiscUtils.write_to_log(Constants.mgmt_system_log, log_entry)
def add_user(self, username, email, isStaff, isSuperUser):
    """Create an Open edX user via the manage.edxapp management command.

    Staff/superuser flags are appended when requested; the command runs
    only if the email address passes validation.
    """
    # NOTE(review): this uses the 'manage' subcommand while del_user uses
    # 'manage_user' — confirm which one the installed edX release expects.
    cmd = ("sudo /edx/bin/python.edxapp /edx/bin/manage.edxapp lms manage "
           + username + " " + email)
    if isStaff:
        cmd += " --staff"
    if isSuperUser:
        cmd += " --superuser"
    cmd += " --settings=aws"
    if not MiscUtils.isValidEmail(email):
        print(Constants.openedx_user_invalidemail)
        return
    os.system(cmd)
def propose_new_beta(self, mutation_matrix, mask_matrix, log_scores,
                     best_tree_log_score, jump_stdev, beta_params):
    """Construct a beta move in the Metropolis-Hastings algorithm and accept
    or reject it.

    Args:
        mutation_matrix (np.ndarray): Mutation matrix (D matrix).
        mask_matrix (np.ndarray): 'Count of probabilities' matrix for
            P(D_ij = x|E_ij = 0) and P(D_ij = x|E_ij = 1) from which the
            fast_matrix can be derived once the log scores are known.
        log_scores (np.ndarray): Log scores matrix for the error probabilities.
        best_tree_log_score (float): Best tree log score so far.
        jump_stdev (float): Std-dev of the normal random walk on beta.
        beta_params (list): Beta distribution parameters for beta.

    Returns:
        np.ndarray: Log scores matrix corresponding to error probabilities
            (the proposed one if accepted, otherwise the original).
    """
    # Gaussian random-walk proposal, mirrored back into [0, 1].
    candidate_beta = MiscUtils.get_mirrored_beta(
        self.beta + np.random.normal(0, jump_stdev))
    candidate_beta_log_score = ScoreUtils.get_beta_score(
        candidate_beta, beta_params)
    candidate_log_scores = ScoreUtils.get_updated_log_scores_matrix(
        log_scores, candidate_beta)
    # Re-score the current tree under the proposed error rate.
    candidate_tree_log_score, candidate_fast_matrix = ScoreUtils.get_fast_tree_score(
        mutation_matrix, mask_matrix, self.parent_vector, self.dft,
        candidate_log_scores, best_tree_log_score)
    accepted = ScoreUtils.check_accept_move(
        candidate_beta_log_score + candidate_tree_log_score,
        self.combined_log_score)
    if accepted:
        self.update_beta(candidate_beta, candidate_beta_log_score,
                         candidate_tree_log_score, candidate_fast_matrix)
        return candidate_log_scores
    return log_scores
def run_db_backup(self):
    """Back up the MongoDB and MySQL databases into the backup directory.

    Runs only once maintenance mode has been started and the backup
    directory exists; produces a timestamped zip under <backup_dir>/db/.
    """
    print(Constants.header + " " + Constants.backup_log)
    if self.__hasMaintenanceStarted__ is True:
        if MiscUtils.check_for_backup_dir():
            cur_logtime = MiscUtils.get_logtime_format()
            # Dump Mongo into a timestamped working directory...
            os.system("mongodump -h 127.0.0.1:" + Constants.mongodb_port +
                      " -o " + MiscUtils.get_backup_dir() + cur_logtime +
                      "_mongo-backup")
            # ...and a second copy into the fixed edX backup location.
            os.system("mongodump -h 127.0.0.1:" + Constants.mongodb_port +
                      " -o " + Constants.edx_backup_loc)
            # MySQL dump goes into the same working directory so both land
            # in one archive. -p makes mysqldump prompt for a password.
            os.system("mysqldump -u " + Constants.mysql_db_backup_user +
                      " -p --all-databases > " + MiscUtils.get_backup_dir() +
                      cur_logtime + "_mongo-backup/" + cur_logtime +
                      "_mysql-backup.sql")
            # Zip the working directory, remove it, then move the zip into
            # the db/ backup subdirectory.
            shutil.make_archive(cur_logtime, "zip",
                                MiscUtils.get_backup_dir() + cur_logtime +
                                "_mongo-backup")
            os.system("sudo rm -rf " + MiscUtils.get_backup_dir() +
                      cur_logtime + "_mongo-backup")
            os.system("sudo cp " + cur_logtime + ".zip " +
                      MiscUtils.get_backup_dir() + "db/")
            os.system("sudo rm " + cur_logtime + ".zip")
            print(Constants.header + " " + Constants.backup_log_complete +
                  MiscUtils.get_backup_dir())
    else:
        print(Constants.maintenance_error)
def run_mark_packages(self):
    """Hold the configured packages at their current version via apt-mark,
    logging whether the step was performed."""
    if not self.do_mark_packages:
        print(Constants.header + Constants.done)
        MiscUtils.write_to_log(
            Constants.mgmt_system_log,
            MiscUtils.get_current_user() + " " +
            Constants.system_updates_mark_package_log_unsuccessful)
        return
    print(Constants.header + Constants.system_updates_mark_package)
    for package_name in self.packages:
        os.system("sudo apt-mark hold " + package_name)
    print(Constants.header + Constants.done)
    MiscUtils.write_to_log(
        Constants.mgmt_system_log,
        MiscUtils.get_current_user() + " " +
        Constants.system_updates_mark_package_log)
def run_upgrades(self):
    """Run apt update/upgrade/autoremove when upgrades are enabled, and log
    the outcome either way."""
    if self.do_upgrade:
        print(Constants.header + Constants.system_updates_run_upgrades)
        # Bug fix: only the first command in the chain ran under sudo — the
        # upgrade/autoremove steps would fail for a non-root user. Also added
        # -y to autoremove so the unattended run cannot block on a prompt.
        os.system(
            "sudo apt-get update -y && sudo apt-get upgrade -y && sudo apt-get autoremove -y"
        )
        print(Constants.header + Constants.done)
        MiscUtils.write_to_log(
            Constants.mgmt_system_log,
            MiscUtils.get_current_user() + " " +
            Constants.system_updates_run_upgrades_successful)
    else:
        print(Constants.header + Constants.done)
        MiscUtils.write_to_log(
            Constants.mgmt_system_log,
            MiscUtils.get_current_user() + " " +
            Constants.system_updates_run_upgrades_unsuccessful)
def __init__(self,
             archive,
             nov_estimator,
             mutator,
             problem,
             selector,
             n_pop,
             n_offspring,
             agent_factory,
             visualise_bds_flag,
             map_type="scoop",
             logs_root="/tmp/ns_log/",
             compute_parent_child_stats=False):
    """
    archive                    Archive object implementing the Archive interface.
                               Can be None if novelty is LearnedNovelty1d/LearnedNovelty2d.
    nov_estimator              NoveltyEstimator object implementing the NoveltyEstimator interface.
    problem                    Problem object that provides
                                   - __call__ function taking individual_index returning
                                     (fitness, behavior_descriptors, task_solved_or_not)
                                   - a dist_thresh (that is determined from its bds) which specifies
                                     the minimum distance that should separate a point x from its
                                     nearest neighbour in the archive+pop in order for the point to
                                     be considered as novel. It is also used as a threshold on
                                     novelty when updating the archive.
                                   - optionally, a visualise_bds function.
    mutator                    Mutator
    selector                   function
    n_pop                      int
    n_offspring                int
    agent_factory              function
    visualise_bds_flag         int
    map_type                   string; different options for sequential/parallel mapping functions.
                               supported values currently are
                                   "scoop"   distributed map from futures.map
                                   "std"     builtin python map
    logs_root                  str; the logs directory will be created inside logs_root
    """
    self.archive = archive
    if archive is not None:
        self.archive.reset()
    self.nov_estimator = nov_estimator
    self.problem = problem
    # scoop's futures.map distributes evaluation across workers;
    # plain map keeps everything in-process.
    self.map_type = map_type
    self._map = futures.map if map_type == "scoop" else map
    print(
        colored("[NS info] Using map_type " + map_type,
                "green",
                attrs=["bold"]))
    self.mutator = mutator
    self.selector = selector
    self.n_offspring = n_offspring
    self.agent_factory = agent_factory
    # Generation-0 population, kept as a deep copy so __call__ can restart
    # from the same initial state.
    initial_pop = [self.agent_factory() for i in range(n_pop)]
    initial_pop = self.generate_new_agents(initial_pop, generation=0)
    self._initial_pop = copy.deepcopy(initial_pop)
    assert n_offspring >= len(
        initial_pop), "n_offspring should be larger or equal to n_pop"
    self.visualise_bds_flag = visualise_bds_flag
    if os.path.isdir(logs_root):
        self.logs_root = logs_root
        # Per-run log directory, suffixed with the process id.
        self.log_dir_path = MiscUtils.create_directory_with_pid(
            dir_basename=logs_root + "/NS_log_",
            remove_if_exists=True,
            no_pid=False)
        print(
            colored("[NS info] NS log directory was created: " +
                    self.log_dir_path,
                    "green",
                    attrs=["bold"]))
    else:
        raise Exception(
            "Root dir for logs not found. Please ensure that it exists before launching the script."
        )
    # Maps generation index -> list of agents that solved the task there.
    self.task_solvers = {}
    self.compute_parent_child_stats = compute_parent_child_stats
    self.save_archive_to_file = True
    self.disable_tqdm = False
def main():
    """Entry point: parse the yaml config, assemble the NS components
    (problem, novelty estimator, archive, selector, agent factory, mutator)
    and run the Novelty Search loop.
    """
    parser = argparse.ArgumentParser(description='Novelty Search.')
    parser.add_argument('--config',
                        type=str,
                        help="yaml config file for ns",
                        default="")
    args = parser.parse_args()
    if not len(args.config):
        raise Exception("You need to provide a yaml config file")
    if len(args.config):
        with open(args.config, "r") as fl:
            config = yaml.load(fl, Loader=yaml.FullLoader)

        # --- problem selection ---
        if config["problem"]["name"] == "hardmaze":
            max_steps = config["problem"]["max_steps"]
            bd_type = config["problem"]["bd_type"]
            assets = config["problem"]["assets"]
            import HardMaze
            problem = HardMaze.HardMaze(bd_type=bd_type,
                                        max_steps=max_steps,
                                        assets=assets)
        elif config["problem"]["name"] == "large_ant_maze" or config[
                "problem"]["name"] == "huge_ant_maze":
            max_steps = config["problem"]["max_steps"]
            bd_type = config["problem"]["bd_type"]
            assets = config["problem"]["assets"]
            pb_type = "huge" if config["problem"][
                "name"] == "huge_ant_maze" else "large"
            import LargeAntMaze
            problem = LargeAntMaze.LargeAntMaze(pb_type=pb_type,
                                                bd_type=bd_type,
                                                max_steps=max_steps,
                                                assets=assets)
        else:
            raise NotImplementedError("Problem type")

        # --- novelty estimator (and archive, if archive-based) ---
        if config["novelty_estimator"]["type"] == "archive_based":
            nov_estimator = NoveltyEstimators.ArchiveBasedNoveltyEstimator(
                k=config["hyperparams"]["k"])
            arch_types = {"list_based": Archives.ListArchive}
            arch = arch_types[config["archive"]["type"]](
                max_size=config["archive"]["max_size"],
                growth_rate=config["archive"]["growth_rate"],
                growth_strategy=config["archive"]["growth_strategy"],
                removal_strategy=config["archive"]["removal_strategy"])
        elif config["novelty_estimator"]["type"] == "learned":
            # Learned novelty needs no archive; embedding dim is 2x bd dims.
            bd_dims = problem.get_bd_dims()
            embedding_dims = 2 * bd_dims
            nov_estimator = NoveltyEstimators.LearnedNovelty1d(
                in_dim=bd_dims,
                emb_dim=embedding_dims,
                pb_limits=problem.get_behavior_space_boundaries())
            arch = None

        # --- selector ---
        if config["selector"]["type"] == "elitist_with_thresh":
            selector = functools.partial(
                MiscUtils.selBest, k=config["hyperparams"]["population_size"])
        elif config["selector"]["type"] == "roulette_with_thresh":
            roulette_msg = "Usage currently not supported: it ends up chosing the same element many times, this duplicates agent._ids etc"
            roulette_msg += " fixing this bug is not a priority since selBest with thresholding actually works well"
            raise Exception(roulette_msg)
        elif config["selector"]["type"] == "nsga2_with_thresh":
            selector = MiscUtils.NSGA2(
                k=config["hyperparams"]["population_size"])
        elif config["selector"]["type"] == "elitist":
            selector = functools.partial(
                MiscUtils.selBest,
                k=config["hyperparams"]["population_size"],
                automatic_threshold=False)
        else:
            raise NotImplementedError("selector")

        # --- agent factory ---
        in_dims = problem.dim_obs
        out_dims = problem.dim_act
        num_pop = config["hyperparams"]["population_size"]
        if config["population"]["individual_type"] == "simple_fw_fc":
            normalise_output_with = ""
            num_hidden = 3
            hidden_dim = 10
            if "large_ant_maze" == config["problem"]["name"]:
                # Ant maze uses a slightly deeper net with tanh outputs.
                normalise_output_with = "tanh"
                num_hidden = 4
                hidden_dim = 10

            def make_ag():
                return Agents.SmallFC_FW(
                    in_d=in_dims,
                    out_d=out_dims,
                    num_hidden=num_hidden,
                    hidden_dim=hidden_dim,
                    output_normalisation=normalise_output_with)
        elif config["population"]["individual_type"] == "agent1d":

            def make_ag():
                return Agents.Agent1d(min(problem.env.phi_vals),
                                      max(problem.env.phi_vals))

        # --- mutation operator ---
        mutator_type = config["mutator"]["type"]
        genotype_len = make_ag().get_genotype_len()
        if mutator_type == "gaussian_same":
            # Same gaussian parameters for every gene.
            mutator_conf = config["mutator"]["gaussian_params"]
            mu, sigma, indpb = mutator_conf["mu"], mutator_conf[
                "sigma"], mutator_conf["indpb"]
            mus = [mu] * genotype_len
            sigmas = [sigma] * genotype_len
            mutator = functools.partial(deap_tools.mutGaussian,
                                        mu=mus,
                                        sigma=sigmas,
                                        indpb=indpb)
        elif mutator_type == "poly_same":
            mutator_conf = config["mutator"]["poly_params"]
            eta, low, up, indpb = mutator_conf["eta"], mutator_conf[
                "low"], mutator_conf["up"], mutator_conf["indpb"]
            if config["population"]["individual_type"] == "agent1d":
                # 1d agents carry their own value bounds.
                dummy_ag = make_ag()
                low = dummy_ag.min_val
                up = dummy_ag.max_val
            mutator = functools.partial(deap_tools.mutPolynomialBounded,
                                        eta=eta,
                                        low=low,
                                        up=up,
                                        indpb=indpb)
        else:
            raise NotImplementedError("mutation type")

        # --- assemble and run NS ---
        map_t = "scoop" if config["use_scoop"] else "std"
        visualise_bds = config["visualise_bds"]
        ns = NoveltySearch(
            archive=arch,
            nov_estimator=nov_estimator,
            mutator=mutator,
            problem=problem,
            selector=selector,
            n_pop=num_pop,
            n_offspring=config["hyperparams"]["offspring_size"],
            agent_factory=make_ag,
            visualise_bds_flag=visualise_bds,
            map_type=map_t,
            logs_root=config["ns_log_root"],
            compute_parent_child_stats=config["compute_parent_child_stats"])
        # Keep a copy of the config next to the logs for reproducibility.
        MiscUtils.bash_command(
            ["cp", args.config, ns.log_dir_path + "/config.yaml"])
        stop_on_reaching_task = config["stop_when_task_solved"]
        nov_estimator.log_dir = ns.log_dir_path
        ns.disable_tqdm = config["disable_tqdm"]
        ns.save_archive_to_file = config["archive"]["save_to_file"]
        if ns.disable_tqdm:
            print(
                colored("[NS info] tqdm is disabled.",
                        "magenta",
                        attrs=["bold"]))
        final_pop, solutions = ns(
            iters=config["hyperparams"]["num_generations"],
            stop_on_reaching_task=stop_on_reaching_task,
            save_checkpoints=config["save_checkpoints"])
def __call__(self, iters, stop_on_reaching_task=True, reinit=False, save_checkpoints=0):
    """Run the Novelty Search loop.

    Args:
        iters (int): number of generations to run.
        stop_on_reaching_task (bool): stop as soon as task solvers appear.
        reinit (bool): reset the archive before starting.
        save_checkpoints (int): not implemented yet; must be falsy.

    Returns:
        tuple: (final parent population,
                dict mapping generation index -> task solvers found there).
    """
    print(
        f"Starting NS with pop_sz={len(self._initial_pop)}, offspring_sz={self.n_offspring}"
    )
    print("Evaluation will take time.")
    if save_checkpoints:
        raise NotImplementedError(
            "checkpoint save/load not implemented yet")
    if reinit and self.archive is not None:
        self.archive.reset()
    # Evaluate and score the initial parents (empty archive at this point).
    parents = copy.deepcopy(self._initial_pop)
    self.eval_agents(parents)
    self.nov_estimator.update(archive=[], pop=parents)
    novs = self.nov_estimator()
    for ag_i in range(len(parents)):
        parents[ag_i]._nov = novs[ag_i]
    tqdm_gen = tqdm.trange(iters,
                           desc='',
                           leave=True,
                           disable=self.disable_tqdm)
    for it in tqdm_gen:
        offsprings = self.generate_new_agents(parents, generation=it + 1)
        task_solvers, _ = self.eval_agents(offsprings)
        pop = parents + offsprings
        # Age bookkeeping: -1 marks agents that have never been aged.
        for x in pop:
            if x._age == -1:
                x._age = it + 1 - x._created_at_gen
            else:
                x._age += 1
        # Novelty of the merged parents+offspring population.
        self.nov_estimator.update(archive=self.archive, pop=pop)
        novs = self.nov_estimator()
        for ag_i in range(len(pop)):
            pop[ag_i]._nov = novs[ag_i]
        parents_next = self.selector(individuals=pop, fit_attr="_nov")
        if self.compute_parent_child_stats:
            # Behavior-space distance between each survivor and its parent.
            for x in parents_next:
                if x._bd_dist_to_parent_bd == -1 and x._created_at_gen > 0:
                    xp = next((s for s in pop if s._idx == x._parent_idx),
                              None)
                    if xp is None:
                        raise Exception("this shouldn't happen")
                    x._bd_dist_to_parent_bd = self.problem.bd_extractor.distance(
                        x._behavior_descr, xp._behavior_descr)
        parents = parents_next
        # Learned novelty estimators are trained on the surviving parents.
        if hasattr(self.nov_estimator, "train"):
            self.nov_estimator.train(parents)
        if self.archive is not None:
            self.archive.update(parents,
                                offsprings,
                                thresh=self.problem.dist_thresh,
                                boundaries=[0, 600],
                                knn_k=15)
            if self.save_archive_to_file:
                self.archive.dump(self.log_dir_path + f"/archive_{it}")
        self.visualise_bds(parents +
                           [x for x in offsprings if x._solved_task])
        MiscUtils.dump_pickle(self.log_dir_path + f"/population_gen_{it}",
                              parents)
        if len(task_solvers):
            print(
                colored("[NS info] found task solvers (generation " +
                        str(it) + ")",
                        "magenta",
                        attrs=["bold"]))
            self.task_solvers[it] = task_solvers
            if stop_on_reaching_task:
                break
        tqdm_gen.set_description(
            f"Generation {it}/{iters}, archive_size=={len(self.archive) if self.archive is not None else -1}"
        )
        tqdm_gen.refresh()
    return parents, self.task_solvers
def printMessage(gui, message):
    """Append a timestamped message to the tab named in its first token."""
    parts = MiscUtils.messageSplit(message)
    target = gui.windowList[parts[0]]
    target.config(state=NORMAL)
    target.insert(END, MiscUtils.GetTime() + parts[1])
    target.yview(END)
    target.config(state=DISABLED)
def run_logs_backup(self):
    """Zip the platform logs and move the archive into the backup directory."""
    print(Constants.header + " " + Constants.logs_backup)
    timestamp = MiscUtils.get_logtime_format()
    archive_name = "logs_backup_" + timestamp
    shutil.make_archive(archive_name, "zip", Constants.logs_loc)
    os.system("mv " + archive_name + ".zip " + MiscUtils.get_backup_dir() + "logs/")
    MiscUtils.write_to_log(Constants.mgmt_system_log,
                           MiscUtils.get_current_user() + " " + Constants.logs_loc)
def run(self):
    """Invoke supervisorctl with the stored sub-command and log the call."""
    os.system(self.__sv__ + " " + self.cmd)
    log_entry = (MiscUtils.get_current_user() + " " +
                 Constants.supervisor_log + " " + self.cmd)
    MiscUtils.write_to_log(Constants.mgmt_system_log, log_entry)
def do_command(self):
    """Dispatch the parsed user-management command.

    Expects self.args[3] (username) and, for most commands, self.args[4]
    (email). Each branch performs the action, prints progress, and records
    the operation in the management log.
    """
    if self.command == "add_edx_superuser":
        # NOTE(review): only this branch validates the argument count
        # (totalLen > 4) — the other branches index args[3]/args[4]
        # unconditionally; confirm the caller guarantees them.
        if self.totalLen > 4:
            print(Constants.header + Constants.add_edx_user)
            self.add_user(self.args[3], self.args[4], True, True)
            print(Constants.header + Constants.done)
            MiscUtils.write_to_log(
                Constants.mgmt_system_log,
                MiscUtils.get_current_user() + " " +
                Constants.add_edx_user_log + " " + self.args[3] + " " +
                self.args[4])
    elif self.command == "add_edx_user":
        print(Constants.header + Constants.add_edx_user)
        self.add_user(self.args[3], self.args[4], False, False)
        print(Constants.header + Constants.done)
        MiscUtils.write_to_log(
            Constants.mgmt_system_log,
            MiscUtils.get_current_user() + " " +
            Constants.add_edx_user_log + " " + self.args[3] + " " +
            self.args[4])
    elif self.command == "add_edx_staff":
        print(Constants.header + Constants.add_edx_user)
        self.add_user(self.args[3], self.args[4], True, False)
        print(Constants.header + Constants.done)
        MiscUtils.write_to_log(
            Constants.mgmt_system_log,
            MiscUtils.get_current_user() + " " +
            Constants.add_edx_user_log + " " + self.args[3] + " " +
            self.args[4])
    elif self.command == "delete_edx_user":
        print(Constants.header + Constants.del_edx_user)
        self.del_user(self.args[3], self.args[4])
        print(Constants.header + Constants.done)
        MiscUtils.write_to_log(
            Constants.mgmt_system_log,
            MiscUtils.get_current_user() + " " +
            Constants.del_edx_user_log + " " + self.args[3] + " " +
            self.args[4])
    elif self.command == "change_password":
        print(Constants.header + Constants.change_password_edx_user)
        self.change_password(self.args[3])
        print(Constants.header + Constants.done)
        MiscUtils.write_to_log(
            Constants.mgmt_system_log,
            MiscUtils.get_current_user() + " " +
            Constants.change_password_edx_user_log + " " + self.args[3])
    else:
        print(Constants.command_not_found)
def invited(gui, arguments):
    """Open a tab for a room invitation unless one already exists."""
    room_is_new = MiscUtils.noDuplicate(gui.windowList, arguments)
    if room_is_new:
        TabUtils.addTab(gui, arguments)
def compare_execution_time(min_d_e, max_d_e, pop_sz=50, offspring_sz=100, archive_size=8000,num_tests=50):
    """Benchmark BR-NS (learned-encoder novelty) against archive/KDTree novelty.

    For each behavior-descriptor dimensionality 2**min_d_e .. 2**max_d_e, times
    num_tests "generations" of BR-NS (novelty forward passes + encoder training)
    and of archive-based NS (KDTree build + k-NN query over a simulated archive),
    then plots the mean per-generation times in milliseconds.

    Args:
        min_d_e, max_d_e: exponents; descriptor dims swept are 2**min_d_e..2**max_d_e.
        pop_sz: population size.
        offspring_sz: offspring generated per generation.
        archive_size: number of points in the simulated NS archive.
        num_tests: number of generations timed per dimensionality.
    """
    global_time_networks = []  # per-dim mean generation time for BR-NS (seconds)
    global_time_archive = []   # per-dim mean generation time for archive NS (seconds)
    bd_size_range = [2**x for x in range(min_d_e,max_d_e+1,1)]
    print(bd_size_range)
    for descr_dim in bd_size_range:
        num_gens = num_tests
        emb_dim = descr_dim * 2  # embedding space is twice the descriptor dim
        # Random behavior descriptors standing in for the current parents + offspring.
        pop_bds = torch.rand(offspring_sz + pop_sz, descr_dim)
        # Frozen random target encoder — never trained.
        frozen = MiscUtils.SmallEncoder1d(descr_dim,
                                          emb_dim,
                                          num_hidden=3,
                                          non_lin="leaky_relu",
                                          use_bn=False)
        frozen.eval()
        # Learned predictor; novelty is its prediction error w.r.t. the frozen net.
        learnt = MiscUtils.SmallEncoder1d(descr_dim,
                                          emb_dim,
                                          num_hidden=5,
                                          non_lin="leaky_relu",
                                          use_bn=False)
        time_hist = []
        frozen.eval()
        optimizer = torch.optim.Adam(
            [x for x in learnt.parameters() if x.requires_grad], lr=1e-2)
        batch_sz = 256
        for i in range(num_gens):
            t1_n = time.time()
            # --- novelty computation (inference only) ---
            learnt.eval()
            for batch_i in range(0, pop_bds.shape[0], batch_sz):
                batch = pop_bds[batch_i:batch_i + batch_sz]
                with torch.no_grad():
                    e_frozen = frozen(batch)
                    e_pred = learnt(batch)
                    nov = (e_pred - e_frozen).norm(dim=1)
            # --- training the predictor on the same descriptors ---
            learnt.train()
            # This is how training is done; note that runtime could be further
            # reduced by reusing the frozen forward passes already computed
            # above when evaluating novelty.
            for _ in range(5):
                # In the paper's experiments this was usually 3, so BR-NS as
                # reported there should be slightly faster than measured here.
                for batch_i in range(0, pop_bds.shape[0], batch_sz):
                    batch = pop_bds[batch_i:batch_i + batch_sz]
                    with torch.no_grad():
                        e_frozen = frozen(batch)
                    optimizer.zero_grad()
                    e_l = learnt(batch)
                    loss = (e_l - e_frozen).norm()**2
                    loss /= batch_sz
                    loss.backward()
                    optimizer.step()
            t2_n = time.time()
            time_hist.append(t2_n - t1_n)
        mean_t_nets = np.array(time_hist).mean()
        print(descr_dim, mean_t_nets)
        global_time_networks.append(mean_t_nets)
        if 1:
            knn_k = 15  # neighbors used for archive-based novelty
            kdt_bds = np.random.rand(archive_size, descr_dim)
            times = []
            for i in range(num_gens):
                t1_a = time.time()
                # The KDTree has to be rebuilt every generation: after adding
                # elements you can't just reuse the same tree (some cells might
                # have become much more dense).
                kdt = KDTree(kdt_bds, leaf_size=20, metric='euclidean')
                # NOTE(review): pop_bds is a torch tensor queried against a
                # numpy-built KDTree — presumably accepted as array-like; confirm.
                dists, ids = kdt.query(pop_bds, knn_k, return_distance=True)
                t2_a = time.time()
                times.append(t2_a - t1_a)
            mean_t_arch = np.array(times).mean()
            global_time_archive.append(mean_t_arch)
            print(descr_dim, mean_t_arch)
    if 1:
        # Convert seconds to milliseconds for the plot.
        gt_arc_ms = [x * 1000 for x in global_time_archive]
        gt_net_ms = [x * 1000 for x in global_time_networks]
        plt.plot(bd_size_range,
                 gt_arc_ms,
                 "r",
                 label=f"Archive-based NS (size=={archive_size})",
                 linewidth=5)
        plt.plot(bd_size_range, gt_net_ms, "b", label="BR-NS", linewidth=5)
        plt.grid("on")
        plt.legend(fontsize=28)
        plt.xlabel("behavior descriptor dimensionality", fontsize=28)
        plt.ylabel("time (ms)", fontsize=28)
        plt.xticks(fontsize=28)
        plt.yticks(fontsize=28)
        plt.xlim(0, 2**max_d_e)
        plt.show()
def end_maintenance(self):
    """End maintenance mode: log the event, restart nginx, clear the flag.

    No-op when maintenance was never started.
    """
    # Truthiness check instead of the original `is True` identity test —
    # the flag is only ever assigned booleans, and `is True` is an anti-idiom.
    if self.__hasMaintenanceStarted__:
        # TODO: Create Constant for this log message
        MiscUtils.write_to_log(
            Constants.maintenance_log,
            MiscUtils.get_current_user() + " has ended Maintenance")
        os.system("sudo service nginx start")
        self.__hasMaintenanceStarted__ = False
def username(gui, arguments):
    """Store the new user name in the config, update the menu, rename the GUI."""
    new_name = arguments
    gui.configList["userName"] = new_name
    gui.menues.setName(new_name)
    MiscUtils.rename(gui)
def run_maintenance_script(self, user):
    """Run the per-user maintenance shell script.

    Use only if you do something special with Maintenance (though you
    shouldn't need to!).
    """
    # TODO: Create Constant for this log message
    log_line = (MiscUtils.get_current_user()
                + " is running User Maintenance script : " + user)
    MiscUtils.write_to_log(Constants.mgmt_system_log, log_line)
    script = shell_functions.Shell_Script(
        shell_functions.get_script_dir(),
        user + "-maintenance-script.sh")
    script.run_script()
def success(gui, arguments):
    """Register the room announced in *arguments* and open a tab for it."""
    parts = MiscUtils.messageSplit(arguments)
    room_name = parts[0]
    gui.rooms[room_name] = [room_name, parts[1]]
    TabUtils.addTab(gui, room_name)
def get_reports_dir():
    """Return the 'reports' directory under the data dir, creating it if needed.

    Raises:
        OSError: if the directory still does not exist after the create attempt.
    """
    thedir = os.path.join(get_data_dir(), 'reports')
    ut.createdir(thedir)
    # Explicit check instead of `assert`: asserts are stripped under `-O`,
    # which would silently skip this validation.
    if not os.path.exists(thedir):
        raise OSError("failed to create reports directory: " + thedir)
    return thedir
def restart_service(self):
    """Restart the service selected by ``self.service``.

    "all" restarts the whole Open edX stack plus the supporting services.
    Unknown service names print an error; the completion banner and the
    management-log entry are emitted in every case (original behavior).
    """
    print(Constants.header + self.service + " " + Constants.restart)

    # Service name (including aliases) -> restart handler. This replaces the
    # original elif chain, which contained a duplicated, unreachable second
    # "nginx" branch (removed here).
    dispatch = {
        "forum": self.__openedx__forum__res,
        "mongodb": self.__mongodb__res,
        "mongo": self.__mongodb__res,
        "mongod": self.__mongodb__res,
        "ssh": self.__ssh__res,
        "nginx": self.__nginx__res,
        "denyhosts": self.__denyhosts__res,
        "mysql": self.__mysql__res,
        "edxapp": self.__openedx__edxapp__res,
        "analytics_api": self.__openedx__analytics__res,
        "certs": self.__openedx__certs__res,
        "ecommerce": self.__openedx__ecommerce__res,
        "ecommerce_workers": self.__openedx__ecomworker__res,
        "insights": self.__openedx__insights__res,
        "notifier-celery-workers": self.__openedx__notifier_celery__res,
        "notifier-scheduler": self.__openedx__notifier_scheduler__res,
        "programs": self.__openedx__programs__res,
        "xqueue": self.__openedx__xqueue__res,
        "xqueue_consumer": self.__openedx__xqueue_consumer__res,
        "rabbitmq": self.__rabbitmq_server__res,
        "edxapp-worker": self.__openedx__edxapp_worker__res,
        "edxapp-workers": self.__openedx__edxapp_worker__res,
        "edxapp_worker": self.__openedx__edxapp_worker__res,
        "edxapp_workers": self.__openedx__edxapp_worker__res,
    }

    if self.service == "all":
        # Order preserved from the original chain. The original restarted
        # nginx twice in this branch — fixed to a single restart.
        for restart in (
            self.__openedx__res,
            self.__openedx__analytics__res,
            self.__openedx__certs__res,
            self.__openedx__ecommerce__res,
            self.__openedx__ecomworker__res,
            self.__openedx__insights__res,
            self.__openedx__notifier_celery__res,
            self.__openedx__notifier_scheduler__res,
            self.__openedx__programs__res,
            self.__openedx__xqueue__res,
            self.__openedx__xqueue_consumer__res,
            self.__mongodb__res,
            self.__ssh__res,
            self.__nginx__res,
            self.__denyhosts__res,
            self.__mysql__res,
        ):
            restart()
    elif self.service in dispatch:
        dispatch[self.service]()
    else:
        print(Constants.option_not_found)

    print(Constants.header + Constants.done)
    MiscUtils.write_to_log(
        Constants.mgmt_system_log,
        MiscUtils.get_current_user() + " " + Constants.restart_log + " " +
        self.service)