def get_status(self): """Get status""" if not self.type: return get_status('http://ws.audioscrobbler.com/1.0/user/%s/recenttracks.rss' % (self.name)) elif self.type == 1: return get_status('http://twitter.com/statuses/user_timeline/%s.rss' % (self.name)) else: return get_status('http://rss.juick.com/%s/blog' % (self.name))
def get_status(self): """Get status""" if not self.type: return get_status( 'http://ws.audioscrobbler.com/1.0/user/%s/recenttracks.rss' % (self.name)) elif self.type == 1: return get_status( 'http://twitter.com/statuses/user_timeline/%s.rss' % (self.name)) else: return get_status('http://rss.juick.com/%s/blog' % (self.name))
def main(demo_mode, real_engine, setter=None):
    start_sumo("cfg/freeway.sumo.cfg", False)
    plexe = Plexe()
    traci.addStepListener(plexe)
    step = 0
    state_left = None
    state_right = None
    random.seed(1)
    while running(demo_mode, step, 6000):

        # when in demo mode, restart the simulation once it is over
        if demo_mode and step == 6000:
            start_sumo("cfg/freeway.sumo.cfg", True)
            step = 0
            random.seed(1)

        traci.simulationStep()

        if step == 1:
            add_platooning_vehicle(plexe, "p0", 150, 0, 25, 5, real_engine)
            add_vehicle(plexe, "v0", 140, 1, 25, "passenger")
            add_vehicle(plexe, "v1", 250, 0, 20, "passenger2")
            traci.gui.trackVehicle("View #0", "p0")
            traci.gui.setZoom("View #0", 50000)
            plexe.set_active_controller("p0", ACC)
            plexe.set_cc_desired_speed("p0", 25)
            traci.vehicle.setSpeedMode("p0", 0)

        if step > 1:
            state = traci.vehicle.getLaneChangeState("p0", 1)[0]
            if state_left != state:
                state_left = state
                str_status = get_status(state)
                print("Step %d, vehicle p0. Lane change status (LEFT ): %s" % (step, str_status))
            state = traci.vehicle.getLaneChangeState("p0", -1)[0]
            if state_right != state:
                state_right = state
                str_status = get_status(state)
                print("Step %d, vehicle p0. Lane change status (RIGHT): %s" % (step, str_status))

        if real_engine and setter is not None:
            # if we are running with the dashboard, update its values
            tracked_id = traci.gui.getTrackedVehicle("View #0")
            if tracked_id != "":
                ed = plexe.get_engine_data(tracked_id)
                vd = plexe.get_vehicle_data(tracked_id)
                setter(ed[RPM], ed[GEAR], vd.speed, vd.acceleration)

        step += 1

    traci.close()
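# get_status() above turns the bitmask returned by
# traci.vehicle.getLaneChangeState() into a readable string. A minimal
# sketch, assuming the LCA_* bit masks exposed by traci.constants; check the
# constants available in your SUMO release:
import traci.constants as tc

def get_status(state):
    """Translate a lane-change state bitmask into a comma-separated summary."""
    labels = []
    for mask, label in ((tc.LCA_LEFT, "left"),
                        (tc.LCA_RIGHT, "right"),
                        (tc.LCA_SPEEDGAIN, "speed gain"),
                        (tc.LCA_KEEPRIGHT, "keep right"),
                        (tc.LCA_STRATEGIC, "strategic")):
        if state & mask:
            labels.append(label)
    return ", ".join(labels) if labels else "none"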
def get_status():
    '''
    Returns either a status object or its text representation when given a
    valid token string.

    curl localhost:8081/status?token=4f7b38cf02f0ba5c38000000
    curl 'localhost:8081/status?token=4f7b38cf02f0ba5c38000000&human_readable=true'
    '''
    token = utils.str_to_obj(request.query.get('token'))
    if not token:
        raise HTTPResponse('Please specify a valid token.\n', 400)

    # Output formatting
    status = utils.get_status(token, no_id=True)
    human_readable = request.query.get('human_readable')
    if status:
        if human_readable:
            # Return text depending on the deployment's status
            # (the key spellings follow the producer of the status object)
            if status['Deployment finished']:
                return 'Deployment finished.\n'
            elif status['Error occured']:
                return 'Error occurred during deployment.\n'
            else:
                try:
                    return '%s.\n' % status['running step']
                except KeyError:
                    return 'Error occurred before the beginning of deployment.\n'
        else:
            # Just return the whole status object
            return utils.stringify(status)
    else:
        raise HTTPResponse('No status found for this token\n', 404)
def add(host_id=None):
    db = getdb()
    data = {}
    if request.method == "POST":
        action = None  # guard against a form without a "submit" field
        for item in request.form:
            if item == "submit":
                action = request.form[item]
            else:
                data[item] = request.form[item]
        data["status"] = get_status(data["inputIP"])
        if host_id:
            if action == "delete":
                db.remove(eids=[host_id])
                return redirect(url_for("index"))
            else:
                db.update(data, eids=[host_id])
        else:
            host_id = db.insert(data)
        if action == "save":
            return redirect(url_for("add", host_id=host_id))
        else:
            return redirect(url_for("index"))
    else:
        if host_id:
            data = db.get(eid=host_id)
        return Menu.render("add.html", data=data, host_id=host_id)
def case(case_num, n_count=10):
    status_dict = {'case_num': [], 'case_date': [], 'form': [], 'status': []}
    int_case_num = int(case_num[3:])
    prefix = case_num[:3]
    try:
        passed_n_count = int(request.args['n_count'])
        n_count = min(passed_n_count, n_count)
    except (KeyError, ValueError):
        pass
    for cnum in range(int_case_num - n_count, int_case_num + n_count + 1):
        cnum, cdate, form, status = get_status(case_num=prefix + str(cnum))
        status_dict['case_num'].append(cnum)
        status_dict['case_date'].append(cdate)
        status_dict['form'].append(form)
        status_dict['status'].append(status)
    status_dict = zip(status_dict['case_num'], status_dict['case_date'],
                      status_dict['form'], status_dict['status'])
    return render_template('index.html', status=status_dict)
def print_day(name, t_beg, t_end, minutes, data):
    """
    Print the inline day

    :param name:
    :param t_beg:
    :param t_end:
    :param minutes:
    :param data:
    :return:

    ```
              07 08 09 10 11 12 13 14 15 16 17 18 19 20 21
    20200407T || || || || || || || || || || || || || || ||
    ```
    """
    my_date = datetime.strptime(name.split('.')[0], "%Y%m%d")
    day_week = calendar.day_name[my_date.weekday()][0]
    print(name.split('.')[0] + day_week, end=" ")
    total = 0
    for h in range(t_beg, t_end):
        for m in range(0, 60, minutes):
            status, _, _ = get_status(h, m, data)
            print_minute(m, status)
            if status == "ACTIVE":
                total += minutes
        print(" ", end="")
    # print(total)
    print(f" {time_from_mins(total)}")
    return total
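# print_day() relies on two helpers that are not shown here. A plausible
# minimal sketch, assuming each "ACTIVE" slot renders as '|' and totals are
# displayed as H:MM (both names are taken from the calls above):
def print_minute(minute, status):
    print("|" if status == "ACTIVE" else " ", end="")

def time_from_mins(mins):
    return "%d:%02d" % divmod(mins, 60)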
def update_status(host_id=None):
    db = getdb()
    if host_id:
        hosts = [db.get(eid=host_id)]
    else:
        hosts = db.all()
    for host in hosts:
        status = get_status(host["inputIP"])
        db.update({"status": status}, eids=[host.eid])
    return redirect(url_for("index"))
def display_instances(request):
    # Get existing containers
    status = utils.get_status()
    # Field names, taken from the first container's status entry
    # (dict views are not indexable on Python 3, hence next(iter(...)))
    keys = next(iter(status.values())).keys()

    # Set variables for template
    context = {}
    context['keys'] = keys
    context['containers'] = status
    return render(request, 'manager/monitor_instances.html', context)
def report_results(self):
    logging.info("** Final results using Benders Decomposition **")
    status = self.model.getAttr(grb.GRB.Attr.Status)
    logging.info('Final problem status %s.', utils.get_status(status))
    obj_val = self.model.getAttr(grb.GRB.Attr.ObjVal)
    logging.info("Objective value: %f.", obj_val)
    logging.info("The facilities at the following locations should be built:")
    for var in self.model.getVars():
        if utils.is_non_zero(var.x):
            facility_name = var.varName
            if facility_name:
                logging.info(f"  {facility_name} ... {var.x}")
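# utils.get_status() above presumably maps Gurobi's integer status code to
# its symbolic name. A minimal sketch of that mapping, assuming gurobipy is
# available; the reverse lookup over GRB.Status is an implementation guess:
import gurobipy as grb

def get_status(code):
    """Return the symbolic name (e.g. 'OPTIMAL') for a Gurobi status code."""
    names = {getattr(grb.GRB.Status, name): name
             for name in dir(grb.GRB.Status) if not name.startswith("_")}
    return names.get(code, "UNKNOWN({})".format(code))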
def get_status_and_title(link, x):
    # title
    title = utils.get_title(x, selectors)
    if title.text.find("shared a memory") != -1:
        x = x.find_element_by_xpath(selectors.get("title_element"))
        title = utils.get_title(x, selectors)
    status = utils.get_status(x, selectors)
    if title.text == driver.find_element_by_id(selectors.get("title_text")).text:
        if status == "":
            temp = utils.get_div_links(x, "img", selectors)
            if temp == "":  # no image tag, which means it is not a life event
                link = utils.get_div_links(x, "a", selectors).get_attribute("href")
                post_type = "status update without text"
            else:
                post_type = "life event"
                link = utils.get_div_links(x, "a", selectors).get_attribute("href")
                status = utils.get_div_links(x, "a", selectors).text
        else:
            post_type = "status update"
            if utils.get_div_links(x, "a", selectors) != "":
                link = utils.get_div_links(x, "a", selectors).get_attribute("href")
    elif title.text.find(" shared ") != -1:
        x1, link = utils.get_title_links(title)
        post_type = "shared " + x1
    elif title.text.find(" at ") != -1 or title.text.find(" in ") != -1:
        if title.text.find(" at ") != -1:
            x1, link = utils.get_title_links(title)
            post_type = "check in"
        elif title.text.find(" in ") != -1:  # was "!= 1", an apparent typo
            status = utils.get_div_links(x, "a", selectors).text
    elif title.text.find(" added ") != -1 and title.text.find("photo") != -1:
        post_type = "added photo"
        link = utils.get_div_links(x, "a", selectors).get_attribute("href")
    elif title.text.find(" added ") != -1 and title.text.find("video") != -1:
        post_type = "added video"
        link = utils.get_div_links(x, "a", selectors).get_attribute("href")
    else:
        post_type = "others"
    if not isinstance(title, str):
        title = title.text
    status = status.replace("\n", " ")
    title = title.replace("\n", " ")
    return link, status, title, post_type
def test_vtgate(self):
    # do a few vtgate topology queries to prime the cache
    vtgate_client = zkocc.ZkOccConnection("localhost:%u" % vtgate_port,
                                          "test_nj", 30.0)
    vtgate_client.dial()
    vtgate_client.get_srv_keyspace_names("test_nj")
    vtgate_client.get_srv_keyspace("test_nj", "test_keyspace")
    vtgate_client.get_end_points("test_nj", "test_keyspace", "-80", "master")
    vtgate_client.close()

    status = utils.get_status(vtgate_port)
    self.assertIn('</html>', status)  # end of page
    self.assertIn('/serving_graph/test_nj">test_nj', status)  # vtctld link

    utils.pause("You can now run a browser and connect to http://localhost:%u%s to manually check vtgate status page" % (vtgate_port, environment.status_url))
def save_model(self, request, obj, form, change):
    ### additional helpers to speed up data entry
    obj.scmurl = utils.normalize_checkout_url(obj.scmurl) or \
        utils.get_checkout_url(obj.website)
    obj.scmtype = utils.get_scm_type(obj.scmurl, obj.scmtype)
    obj.bugurl = bugs.normalize_bug_format_string(obj.bugurl) or \
        bugs.get_bug_format_string(obj.website)
    obj.bugtype = bugs.get_bug_type(obj.bugurl, obj.bugtype)
    try:
        obj.changelog = obj.changelog or utils.get_changelog(obj.scmurl)
    except Exception:
        pass
    ### end helpers

    # perform checks before moving to VERIFIED
    result = utils.test_if_package_verified(obj)

    # print any messages from the test
    for (msg_type, msg_text) in result['messages']:
        messages.add_message(request, msg_type, msg_text)

    obj.status = utils.get_status(result['scores'],
                                  utils.SCORES_PACKAGE_VERIFIED,
                                  STATUS_VERIFIED, STATUS_MODIFIED)

    if obj.status == STATUS_VERIFIED:
        messages.success(request, "All data successfully VERIFIED")
    elif obj.status == STATUS_MODIFIED:
        messages.warning(request, "Status is MODIFIED")

    obj.assigned_to = request.user.username
    if obj.status == STATUS_MODIFIED:
        obj.status = STATUS_ASSIGNED

    # NB: using this ugly .update() instead of save() because it allows for
    # data partitioning. However, object fields are set above to please the
    # test_if_package_verified() function so that it doesn't have to hit the
    # DB again.
    Package.objects.filter(pk=obj.pk).update(
        website=obj.website,
        scmurl=obj.scmurl,
        scmtype=obj.scmtype,
        bugurl=obj.bugurl,
        bugtype=obj.bugtype,
        changelog=obj.changelog,
        subpackage_path=obj.subpackage_path,
        status=obj.status)
def __init__(self, env, obs_space, action_space, model_dir, ignoreLTL,
             progression_mode, gnn, recurrence=1, dumb_ac=False, device=None,
             argmax=False, num_envs=1):
    try:
        print(model_dir)
        status = utils.get_status(model_dir)
    except OSError:
        status = {"num_frames": 0, "update": 0}

    using_gnn = (gnn != "GRU" and gnn != "LSTM")
    obs_space, self.preprocess_obss = utils.get_obss_preprocessor(
        env, using_gnn, progression_mode)
    if "vocab" in status and self.preprocess_obss.vocab is not None:
        self.preprocess_obss.vocab.load_vocab(status["vocab"])

    if recurrence > 1:
        self.acmodel = RecurrentACModel(env, obs_space, action_space,
                                        ignoreLTL, gnn, dumb_ac, True)
        self.memories = torch.zeros(num_envs, self.acmodel.memory_size,
                                    device=device)
    else:
        self.acmodel = ACModel(env, obs_space, action_space, ignoreLTL, gnn,
                               dumb_ac, True)

    self.device = device
    self.argmax = argmax
    self.num_envs = num_envs

    self.acmodel.load_state_dict(utils.get_model_state(model_dir))
    self.acmodel.to(self.device)
    self.acmodel.eval()
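# The torch-ac style snippets in this section all call
# utils.get_status(model_dir) and utils.save_status(status, model_dir).
# A minimal sketch matching that contract, assuming the checkpoint dict is
# stored at <model_dir>/status.pt (the file name is an assumption):
import os
import torch

def get_status(model_dir):
    return torch.load(os.path.join(model_dir, "status.pt"))

def save_status(status, model_dir):
    torch.save(status, os.path.join(model_dir, "status.pt"))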
def get_group_post_as_line(post_id, photos_dir):
    try:
        data = driver.find_element_by_xpath(selectors.get("single_post"))
        time = utils.get_time(data)
        title = utils.get_title(data, selectors).text
        # link, status, title, type = get_status_and_title(title, data)
        link = utils.get_div_links(data, "a", selectors)
        if link != "":
            link = link.get_attribute("href")
        post_type = ""
        status = '"' + utils.get_status(data, selectors).replace("\r\n", " ") + '"'
        photos = utils.get_post_photos_links(data, selectors, photos_small_size)
        comments = get_comments()
        photos = image_downloader(photos, photos_dir)
        line = (str(time) + "||" + str(post_type) + "||" + str(title) + "||" +
                str(status) + "||" + str(link) + "||" + str(post_id) + "||" +
                str(photos) + "||" + str(comments) + "\n")
        return line
    except Exception:
        return ""
def save_model(self, request, obj, form, change):
    # flip ASSIGNED to MODIFIED after doing manual inspection
    # so that later we can change to VERIFIED
    if obj.status == STATUS_ASSIGNED:
        obj.status = STATUS_MODIFIED

    # perform checks before moving to VERIFIED
    result = utils.test_if_package_version_verified(obj)

    # print any messages from the test
    for (msg_type, msg_text) in result['messages']:
        messages.add_message(request, msg_type, msg_text)

    obj.status = utils.get_status(result['scores'],
                                  utils.SCORES_PACKAGE_VERSION_VERIFIED,
                                  STATUS_VERIFIED, STATUS_MODIFIED)

    if obj.status == STATUS_VERIFIED:
        messages.success(request, "All data successfully VERIFIED")
    elif obj.status == STATUS_MODIFIED:
        messages.warning(request, "Status is MODIFIED")

    obj.assigned_to = request.user.username

    # NB: For PVs ASSIGNED is used only to indicate that manual inspection is
    # needed, not to indicate a user is working on this object record
    # if obj.status == STATUS_MODIFIED:
    #     obj.status = STATUS_ASSIGNED

    # NB: using this ugly thing b/c test_if_package_version_verified needs an
    # object and we don't want to make it hit the DB again
    PackageVersion.objects.filter(pk=obj.pk).update(
        status=obj.status,
        version=obj.version,
        scmid=obj.scmid,
        assigned_to=obj.assigned_to,
        released_on=obj.released_on,
        download_url=obj.download_url)
def __init__(self, env, obs_space, action_space, model_dir, device=None,
             argmax=False, num_envs=1, use_memory=False, use_text=False):
    obs_space, self.preprocess_obs_goals = utils.get_obs_goals_preprocessor(
        obs_space)
    self.acmodel = ACModel(obs_space, action_space, use_memory=use_memory,
                           use_text=use_text)
    self.device = device
    self.argmax = argmax
    self.num_envs = num_envs

    status = utils.get_status(model_dir)
    self.goals = list(status['agent_goals'].values())
    # for goal in self.goals:
    #     goal = env.unwrapped.get_obs_render(goal, tile_size=32)
    #     plt.imshow(goal)
    #     plt.show()

    if self.acmodel.recurrent:
        self.memories = torch.zeros(self.num_envs, self.acmodel.memory_size,
                                    device=self.device)

    self.acmodel.load_state_dict(status["model_state"])
    self.acmodel.to(self.device)
    self.acmodel.eval()

    if hasattr(self.preprocess_obs_goals, "vocab"):
        self.preprocess_obs_goals.vocab.load_vocab(status["vocab"])
def calculate_day(data, minutes, t_beg, t_end, log_data=[]):
    """
    This function computes the data used later on

    :param data:
    :param minutes:
    :param t_beg:
    :param t_end:
    :param log_data:
    :return:
        - day_data: data of the day (sleep/present)
        - hourly_data: the data for each hour (e.g. action a (sleep present))
        - minute_data: each minute, what was the action/status
        - time_spent: spent time per action
    """
    present = 0
    away = 0
    # this is present+away+other
    total = 0
    time_spent = dict(NOCAT=dict(minutes=0, detail="", index='-', away=0))
    hourly_data = dict()
    start_time = None
    end_time = None

    # loop over all the hours and minutes (in steps of 60/delta minutes)
    i = 0
    minute_data = dict()
    for h in range(t_beg, t_end):
        hourly_data[str(h)] = dict()  # detail has hours on the left side
        for m in range(0, 60, minutes):
            # find the status of that time and print it
            status, _, _ = get_status(h, m, data)
            # find what's the log, if any
            log, _, log_det = get_status(h, m, log_data, True)

            # in case it starts with '-', we default it to sleep
            if log.startswith('-'):
                # only when status exists; it may be NOCAT
                if status != "NOCAT":
                    status = "SLEEP"
                log = log[1:]
                try:
                    time_spent[log]['hidden'] = True
                except KeyError:
                    pass
            if log.startswith('+'):
                # only when status exists; it may be NOCAT
                # if status != "NOCAT":
                status = "ACTIVE"
                log = log[1:]

            # init time_spent if it does not exist
            if log not in time_spent:
                if log != "NOCAT":
                    index = chr(i + 65)
                    i += 1
                    time_spent[log] = dict(minutes=0, away=0, index=index,
                                           detail="")

            if status == "ACTIVE":
                if not start_time:
                    start_time = f"{h}:{m}"
                end_time = f"{h}:{m}"
                present += minutes
                time_spent[log]['minutes'] = time_spent[log]['minutes'] + 1
                if log_det not in time_spent[log]['detail']:
                    time_spent[log]['detail'] += log_det
                try:
                    hourly_data[str(h)][time_spent[log]['index']]['active'] += 1
                except KeyError:
                    hourly_data[str(h)][time_spent[log]['index']] = dict(
                        active=1, sleep=0)
            elif status == "SLEEP":
                # we keep track of the time of the task as well;
                # maybe it is a task away from the pc
                away += minutes
                time_spent[log]['away'] = time_spent[log]['away'] + 1
                if log_det not in time_spent[log]['detail']:
                    time_spent[log]['detail'] += log_det
                try:
                    hourly_data[str(h)][time_spent[log]['index']]['sleep'] += 1
                except KeyError:
                    hourly_data[str(h)][time_spent[log]['index']] = dict(
                        sleep=1, active=0)
            elif status != 'NOCAT':
                # any other category
                total += minutes

            k = "%s:%s" % (h, m)
            minute_data[k] = dict()
            minute_data[k]['status'] = status
            minute_data[k]['cat'] = log
            if log_det:
                minute_data[k]['detail'] = log_det

    active = away = 0
    for status, item in time_spent.items():
        active += item['minutes']
        away += item.get('away', 0)

    day_data = dict(active=active, away=away, total=total + active + away,
                    start_time=start_time, end_time=end_time)
    return day_data, hourly_data, minute_data, time_spent
turnInit = False

# Render the game.
# screen.fill(utils.BACKGROUND)
if trump_card is not None:
    screen.blit(utils.load_trump_card(trump_card), utils.TRUMP_POSITION)
if len(deck) > 0:
    screen.blit(utils.load_card_back(), utils.DECK_POSITION)
if len(discard) > 0:
    screen.blit(utils.load_card(discard[len(discard) - 1]),
                utils.DISCARD_POSITION)
if len(cardsInPlay) > 0:
    for index in range(0, len(cardsInPlay)):
        screen.blit(utils.load_card(cardsInPlay[index]),
                    utils.get_play_position(index, len(cardsInPlay)))
for index in range(0, len(players[0].hand)):
    screen.blit(utils.load_card(players[0].hand[index]),
                utils.get_card_position(index, len(players[0].hand), False))
for index in range(0, len(players[1].hand)):
    screen.blit(utils.load_card_back(),
                utils.get_card_position(index, len(players[1].hand), True))
if utils.get_status() is not None:
    screen.blit(utils.get_status_message(), utils.STATUS_POSITION)

pygame.display.flip()
def get_status(self):
    return utils.get_status(self.port)
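# In this wrapper and the test cases below, utils.get_status(port) is assumed
# to fetch a local server's HTML status page and return the body as a string
# (the tests only assert that '</html>' appears). A minimal sketch; the
# /debug/status path is an assumption:
import urllib.request

def get_status(port, path="/debug/status"):
    url = "http://localhost:%d%s" % (port, path)
    with urllib.request.urlopen(url) as resp:
        return resp.read().decode("utf-8")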
def getStatus(self):
    print("=== Getting Status ===")
    for address in self.__addresses:
        utils.get_status(address)
def test_status(self):
    try:
        port = self.env.tablet.port
    except AttributeError:
        port = self.env.vtoccport
    self.assertIn('</html>', utils.get_status(port))
def tuner(icm_lr, reward_weighting, normalise_rewards, args):
    import argparse
    import datetime
    import torch
    import torch_ac
    import tensorboardX
    import sys
    import numpy as np

    from model import ACModel
    from .a2c import A2CAlgo
    # from .ppo import PPOAlgo

    frames_to_visualise = 200

    # Parse arguments
    args.mem = args.recurrence > 1

    def make_exploration_heatmap(args, plot_title):
        import numpy as np
        import matplotlib.pyplot as plt

        visitation_counts = np.load(
            f"{args.model}_visitation_counts.npy", allow_pickle=True)
        plot_title = str(np.count_nonzero(visitation_counts)) + args.model
        plt.imshow(np.log(visitation_counts))
        plt.colorbar()
        plt.title(plot_title)
        plt.savefig(f"{plot_title}_visitation_counts.png")

    # Set run dir
    date = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
    default_model_name = f"{args.env}_{args.algo}_seed{args.seed}_{date}"
    model_name = args.model or default_model_name
    model_dir = utils.get_model_dir(model_name)

    # Load loggers and Tensorboard writer
    txt_logger = utils.get_txt_logger(model_dir)
    csv_file, csv_logger = utils.get_csv_logger(model_dir)
    tb_writer = tensorboardX.SummaryWriter(model_dir)

    # Log command and all script arguments
    txt_logger.info("{}\n".format(" ".join(sys.argv)))
    txt_logger.info("{}\n".format(args))

    # Set seed for all randomness sources
    utils.seed(args.seed)

    # Set device
    device = "cpu"  # torch.device("cuda" if torch.cuda.is_available() else "cpu")
    txt_logger.info(f"Device: {device}\n")

    # Load environments
    envs = []
    for i in range(16):
        an_env = utils.make_env(
            args.env, int(args.frames_before_reset), int(args.environment_seed))
        envs.append(an_env)
    txt_logger.info("Environments loaded\n")

    # Load training status
    try:
        status = utils.get_status(model_dir)
    except OSError:
        status = {"num_frames": 0, "update": 0}
    txt_logger.info("Training status loaded\n")

    # Load observations preprocessor
    obs_space, preprocess_obss = utils.get_obss_preprocessor(envs[0].observation_space)
    if "vocab" in status:
        preprocess_obss.vocab.load_vocab(status["vocab"])
    txt_logger.info("Observations preprocessor loaded")

    # Load model
    acmodel = ACModel(obs_space, envs[0].action_space, args.mem, args.text)
    if "model_state" in status:
        acmodel.load_state_dict(status["model_state"])
    acmodel.to(device)
    txt_logger.info("Model loaded\n")
    txt_logger.info("{}\n".format(acmodel))

    # Load algo (adapted from impact-driven RL)
    from .models import AutoencoderWithUncertainty

    autoencoder = AutoencoderWithUncertainty(observation_shape=(7, 7, 3)).to(device)
    autoencoder_opt = torch.optim.Adam(
        autoencoder.parameters(), lr=icm_lr, weight_decay=0)
    if args.algo == "a2c":
        algo = A2CAlgo(
            envs, acmodel, autoencoder, autoencoder_opt, args.uncertainty,
            args.noisy_tv, args.curiosity, args.randomise_env,
            args.uncertainty_budget, args.environment_seed, reward_weighting,
            normalise_rewards, args.frames_before_reset, device,
            args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
            args.entropy_coef, args.value_loss_coef, args.max_grad_norm,
            args.recurrence, args.optim_alpha, args.optim_eps, preprocess_obss,
            None, args.random_action)
    elif args.algo == "ppo":
        algo = PPOAlgo(
            envs, acmodel, autoencoder, autoencoder_opt, args.uncertainty,
            args.noisy_tv, args.curiosity, args.randomise_env,
            args.uncertainty_budget, args.environment_seed, reward_weighting,
            normalise_rewards, device, args.frames_per_proc, args.discount,
            args.lr, args.gae_lambda, args.entropy_coef, args.value_loss_coef,
            args.max_grad_norm, args.recurrence, args.optim_eps, args.clip_eps,
            args.epochs, args.batch_size, preprocess_obss)
    else:
        raise ValueError("Incorrect algorithm name: {}".format(args.algo))

    if "optimizer_state" in status:
        algo.optimizer.load_state_dict(status["optimizer_state"])
    txt_logger.info("Optimizer loaded\n")

    # Train model
    num_frames = status["num_frames"]
    update = status["update"]
    start_time = time.time()

    while num_frames < args.frames:
        # Update model parameters
        update_start_time = time.time()
        exps, logs1 = algo.collect_experiences()
        logs2 = algo.update_parameters(exps)
        logs = {**logs1, **logs2}
        update_end_time = time.time()

        num_frames += logs["num_frames"]
        update += 1

        log_to_wandb(logs, start_time, update_start_time, update_end_time)

        # Print logs
        if update % args.log_interval == 0:
            fps = logs["num_frames"] / (update_end_time - update_start_time)
            duration = int(time.time() - start_time)
            return_per_episode = utils.synthesize(logs["return_per_episode"])
            rreturn_per_episode = utils.synthesize(logs["reshaped_return_per_episode"])
            num_frames_per_episode = utils.synthesize(logs["num_frames_per_episode"])

            header = ["update", "frames", "FPS", "duration"]
            data = [update, num_frames, fps, duration]
            header += ["rreturn_" + key for key in rreturn_per_episode.keys()]
            data += rreturn_per_episode.values()
            header += ["num_frames_" + key for key in num_frames_per_episode.keys()]
            data += num_frames_per_episode.values()
            header += [
                "intrinsic_rewards", "uncertainties", "novel_states_visited",
                "entropy", "value", "policy_loss", "value_loss", "grad_norm",
            ]
            data += [
                logs["intrinsic_rewards"].mean().item(),
                logs["uncertainties"].mean().item(),
                logs["novel_states_visited"].mean().item(),
                logs["entropy"],
                logs["value"],
                logs["policy_loss"],
                logs["value_loss"],
                logs["grad_norm"],
            ]

            txt_logger.info(
                "U {} | F {:06} | FPS {:04.0f} | D {} | rR:μσmM {:.2f} {:.2f} {:.2f} {:.2f} | F:μσmM {:.1f} {:.1f} {} {} | H {:.3f} | V {:.3f} | pL {:.3f}".format(*data))

        # Save status
        if args.save_interval > 0 and update % args.save_interval == 0:
            status = {
                "num_frames": num_frames,
                "update": update,
                "model_state": acmodel.state_dict(),
                "optimizer_state": algo.optimizer.state_dict(),
            }
            if hasattr(preprocess_obss, "vocab"):
                status["vocab"] = preprocess_obss.vocab.vocab
            utils.save_status(status, model_dir)
    return
def test_status(self):
    port = self.env.port
    self.assertIn('</html>', utils.get_status(port))
def __get_run_status_private(self, lastest_job, job_dir, job_jss):
    if type(lastest_job) is str:
        if os.path.isfile(os.path.join(job_dir, "status/summary.txt")):
            return JobCenter.FIN
        elif lastest_job.strip() == "-1":
            return None
        elif job_jss is None:
            # the job was submitted directly
            pid = int(lastest_job)
            if pid < 0:
                return None
            else:
                try:
                    tmp = utils.get_status(None, pid)
                    return JobCenter.RUN
                except Exception:
                    errlog = glob.glob(os.path.join(job_dir, "*.err"))
                    if len(errlog) == 0:
                        return JobCenter.PRE
                    else:
                        with open(errlog[0]) as fp:
                            for line in fp.readlines():
                                if "traceback" in line.lower():
                                    return JobCenter.ERR
                                elif "terminated" in line.lower():
                                    return JobCenter.TER
                        return JobCenter.TER
        else:
            # the job was submitted using jss
            pid = lastest_job
            try:
                stat = utils.get_status(job_jss, pid)
                return self.namespace['process_status'][stat]
            except Exception:
                errlog = glob.glob(os.path.join(job_dir, "*.err"))
                if len(errlog) == 0:
                    return JobCenter.PRE
                else:
                    with open(errlog[0]) as fp:
                        for line in fp.readlines():
                            if "traceback" in line.lower():
                                return JobCenter.ERR
                            elif "terminated" in line.lower():
                                return JobCenter.TER
                    return JobCenter.TER
    elif type(lastest_job) == subprocess.Popen:
        # submitted directly as a subprocess
        recode = lastest_job.poll()
        if recode is None:
            return JobCenter.RUN
        elif recode < 0:
            return JobCenter.TER
        elif recode > 0:
            if recode >= 128:
                return JobCenter.TER
            else:
                return JobCenter.ERR
        else:
            return JobCenter.FIN
    else:
        return None
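# utils.get_status(jss, pid) above is called with jss=None for directly
# spawned processes and with a job-scheduler handle otherwise, and a raised
# exception is treated as "not running". A minimal sketch of the jss=None
# branch; query_status() on the scheduler handle is hypothetical:
import os

def get_status(jss, pid):
    if jss is None:
        os.kill(pid, 0)  # raises OSError if no process with this pid exists
        return "running"
    return jss.query_status(pid)  # hypothetical scheduler API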
def main(raw_args=None):
    # Parse arguments
    parser = argparse.ArgumentParser()

    ## General parameters
    parser.add_argument("--algo", required=True,
                        help="algorithm to use: a2c | ppo | ipo (REQUIRED)")
    parser.add_argument("--domain1", required=True,
                        help="name of the first domain to train on (REQUIRED)")
    parser.add_argument("--domain2", required=True,
                        help="name of the second domain to train on (REQUIRED)")
    parser.add_argument("--p1", required=True, type=float,
                        help="Proportion of training environments from first domain (REQUIRED)")
    parser.add_argument("--model", required=True, help="name of the model")
    parser.add_argument("--seed", type=int, default=1,
                        help="random seed (default: 1)")
    parser.add_argument("--log-interval", type=int, default=1,
                        help="number of updates between two logs (default: 1)")
    parser.add_argument("--save-interval", type=int, default=10,
                        help="number of updates between two saves (default: 10, 0 means no saving)")
    parser.add_argument("--procs", type=int, default=16,
                        help="number of processes (default: 16)")
    parser.add_argument("--frames", type=int, default=10**7,
                        help="number of frames of training (default: 1e7)")

    ## Parameters for main algorithm
    parser.add_argument("--epochs", type=int, default=4,
                        help="number of epochs for PPO (default: 4)")
    parser.add_argument("--batch-size", type=int, default=256,
                        help="batch size for PPO (default: 256)")
    parser.add_argument("--frames-per-proc", type=int, default=None,
                        help="number of frames per process before update (default: 5 for A2C and 128 for PPO)")
    parser.add_argument("--discount", type=float, default=0.99,
                        help="discount factor (default: 0.99)")
    parser.add_argument("--lr", type=float, default=0.001,
                        help="learning rate (default: 0.001)")
    parser.add_argument("--gae-lambda", type=float, default=0.95,
                        help="lambda coefficient in GAE formula (default: 0.95, 1 means no gae)")
    parser.add_argument("--entropy-coef", type=float, default=0.01,
                        help="entropy term coefficient (default: 0.01)")
    parser.add_argument("--value-loss-coef", type=float, default=0.5,
                        help="value loss term coefficient (default: 0.5)")
    parser.add_argument("--max-grad-norm", type=float, default=0.5,
                        help="maximum norm of gradient (default: 0.5)")
    parser.add_argument("--optim-eps", type=float, default=1e-8,
                        help="Adam and RMSprop optimizer epsilon (default: 1e-8)")
    parser.add_argument("--optim-alpha", type=float, default=0.99,
                        help="RMSprop optimizer alpha (default: 0.99)")
    parser.add_argument("--clip-eps", type=float, default=0.2,
                        help="clipping epsilon for PPO (default: 0.2)")
    parser.add_argument("--recurrence", type=int, default=1,
                        help="number of time-steps gradient is backpropagated (default: 1). "
                             "If > 1, a LSTM is added to the model to have memory.")
    parser.add_argument("--text", action="store_true", default=False,
                        help="add a GRU to the model to handle text input")

    args = parser.parse_args(raw_args)
    args.mem = args.recurrence > 1

    # Check PyTorch version
    if torch.__version__ != '1.2.0':
        raise ValueError(
            "PyTorch version must be 1.2.0 (see README). Your version is {}.".format(
                torch.__version__))

    if args.mem:
        raise ValueError("Policies with memory not supported.")

    # Set run dir
    date = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
    default_model_name = args.model
    model_name = args.model or default_model_name
    model_dir = utils.get_model_dir(model_name)

    # Load loggers and Tensorboard writer
    txt_logger = utils.get_txt_logger(model_dir)
    csv_file, csv_logger = utils.get_csv_logger(model_dir)
    tb_writer = tensorboardX.SummaryWriter(model_dir)

    # Log command and all script arguments
    txt_logger.info("{}\n".format(" ".join(sys.argv)))
    txt_logger.info("{}\n".format(args))

    # Set seed for all randomness sources
    torch.backends.cudnn.deterministic = True
    utils.seed(args.seed)

    # Set device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    txt_logger.info(f"Device: {device}\n")

    # Load environments from different domains
    domain1 = args.domain1  # e.g., 'MiniGrid-ColoredKeysRed-v0'
    domain2 = args.domain2  # e.g., 'MiniGrid-ColoredKeysYellow-v0'
    p1 = args.p1  # Proportion of environments from domain1

    num_envs_total = args.procs  # Total number of environments
    num_domain1 = math.ceil(p1 * num_envs_total)  # Number of environments in domain1
    num_domain2 = num_envs_total - num_domain1  # Number of environments in domain2

    # Environments from domain1
    envs1 = []
    for i in range(num_domain1):
        envs1.append(utils.make_env(domain1, args.seed + 10000 * i))

    # Environments from domain2
    envs2 = []
    for i in range(num_domain2):
        envs2.append(utils.make_env(domain2, args.seed + 10000 * i))

    # All environments
    envs = envs1 + envs2
    txt_logger.info("Environments loaded\n")

    # Load training status
    try:
        status = utils.get_status(model_dir)
    except OSError:
        status = {"num_frames": 0, "update": 0}
    txt_logger.info("Training status loaded\n")

    # Load observations preprocessor
    obs_space, preprocess_obss = utils.get_obss_preprocessor(envs[0].observation_space)
    if "vocab" in status:
        preprocess_obss.vocab.load_vocab(status["vocab"])
    txt_logger.info("Observations preprocessor loaded")

    if args.algo == "ipo":
        # Load model for IPO game
        acmodel = ACModel_average(obs_space, envs[0].action_space, args.mem, args.text)
        if "model_state" in status:
            acmodel.load_state_dict(status["model_state"])
        acmodel.to(device)
        txt_logger.info("Model loaded\n")
        txt_logger.info("{}\n".format(acmodel))
    else:
        # Load model (for standard PPO or A2C)
        acmodel = ACModel(obs_space, envs[0].action_space, args.mem, args.text)
        if "model_state" in status:
            acmodel.load_state_dict(status["model_state"])
        acmodel.to(device)
        txt_logger.info("Model loaded\n")
        txt_logger.info("{}\n".format(acmodel))

    # Load algo
    if args.algo == "a2c":
        algo = torch_ac.A2CAlgo(envs, acmodel, device, args.frames_per_proc,
                                args.discount, args.lr, args.gae_lambda,
                                args.entropy_coef, args.value_loss_coef,
                                args.max_grad_norm, args.recurrence,
                                args.optim_alpha, args.optim_eps, preprocess_obss)
        if "optimizer_state" in status:
            algo.optimizer.load_state_dict(status["optimizer_state"])
            txt_logger.info("Optimizer loaded\n")
    elif args.algo == "ppo":
        algo = torch_ac.PPOAlgo(envs, acmodel, device, args.frames_per_proc,
                                args.discount, args.lr, args.gae_lambda,
                                args.entropy_coef, args.value_loss_coef,
                                args.max_grad_norm, args.recurrence,
                                args.optim_eps, args.clip_eps, args.epochs,
                                args.batch_size, preprocess_obss)
        if "optimizer_state" in status:
            algo.optimizer.load_state_dict(status["optimizer_state"])
            txt_logger.info("Optimizer loaded\n")
    elif args.algo == "ipo":
        # One algo per domain. These have different environments, but a
        # shared acmodel.
        algo1 = torch_ac.IPOAlgo(envs1, acmodel, 1, device, args.frames_per_proc,
                                 args.discount, args.lr, args.gae_lambda,
                                 args.entropy_coef, args.value_loss_coef,
                                 args.max_grad_norm, args.recurrence,
                                 args.optim_eps, args.clip_eps, args.epochs,
                                 args.batch_size, preprocess_obss)
        algo2 = torch_ac.IPOAlgo(envs2, acmodel, 2, device, args.frames_per_proc,
                                 args.discount, args.lr, args.gae_lambda,
                                 args.entropy_coef, args.value_loss_coef,
                                 args.max_grad_norm, args.recurrence,
                                 args.optim_eps, args.clip_eps, args.epochs,
                                 args.batch_size, preprocess_obss)
        if "optimizer_state1" in status:
            algo1.optimizer.load_state_dict(status["optimizer_state1"])
            txt_logger.info("Optimizer 1 loaded\n")
        if "optimizer_state2" in status:
            algo2.optimizer.load_state_dict(status["optimizer_state2"])
            txt_logger.info("Optimizer 2 loaded\n")
    else:
        raise ValueError("Incorrect algorithm name: {}".format(args.algo))

    # Train model
    num_frames = status["num_frames"]
    update = status["update"]
    start_time = time.time()

    while num_frames < args.frames:
        # Update model parameters
        update_start_time = time.time()

        if args.algo == "ipo":
            # Collect experiences on first domain
            exps1, logs_exps1 = algo1.collect_experiences()
            # Update params of model corresponding to first domain
            logs_algo1 = algo1.update_parameters(exps1)

            # Collect experiences on second domain
            exps2, logs_exps2 = algo2.collect_experiences()
            # Update params of model corresponding to second domain
            logs_algo2 = algo2.update_parameters(exps2)

            # Update end time
            update_end_time = time.time()

            # Combine logs
            logs_exps = {
                'return_per_episode':
                    logs_exps1["return_per_episode"] +
                    logs_exps2["return_per_episode"],
                'reshaped_return_per_episode':
                    logs_exps1["reshaped_return_per_episode"] +
                    logs_exps2["reshaped_return_per_episode"],
                'num_frames_per_episode':
                    logs_exps1["num_frames_per_episode"] +
                    logs_exps2["num_frames_per_episode"],
                'num_frames':
                    logs_exps1["num_frames"] + logs_exps2["num_frames"]
            }
            logs_algo = {
                'entropy': (num_domain1 * logs_algo1["entropy"] +
                            num_domain2 * logs_algo2["entropy"]) / num_envs_total,
                'value': (num_domain1 * logs_algo1["value"] +
                          num_domain2 * logs_algo2["value"]) / num_envs_total,
                'policy_loss': (num_domain1 * logs_algo1["policy_loss"] +
                                num_domain2 * logs_algo2["policy_loss"]) / num_envs_total,
                'value_loss': (num_domain1 * logs_algo1["value_loss"] +
                               num_domain2 * logs_algo2["value_loss"]) / num_envs_total,
                'grad_norm': (num_domain1 * logs_algo1["grad_norm"] +
                              num_domain2 * logs_algo2["grad_norm"]) / num_envs_total
            }
            logs = {**logs_exps, **logs_algo}
            num_frames += logs["num_frames"]
        else:
            # Standard method
            exps, logs1 = algo.collect_experiences()
            logs2 = algo.update_parameters(exps)
            logs = {**logs1, **logs2}
            update_end_time = time.time()
            num_frames += logs["num_frames"]

        update += 1

        # Print logs
        if update % args.log_interval == 0:
            fps = logs["num_frames"] / (update_end_time - update_start_time)
            duration = int(time.time() - start_time)
            return_per_episode = utils.synthesize(logs["return_per_episode"])
            rreturn_per_episode = utils.synthesize(logs["reshaped_return_per_episode"])
            num_frames_per_episode = utils.synthesize(logs["num_frames_per_episode"])

            header = ["update", "frames", "FPS", "duration"]
            data = [update, num_frames, fps, duration]
            header += ["rreturn_" + key for key in rreturn_per_episode.keys()]
            data += rreturn_per_episode.values()
            header += ["num_frames_" + key for key in num_frames_per_episode.keys()]
            data += num_frames_per_episode.values()
            header += ["entropy", "value", "policy_loss", "value_loss", "grad_norm"]
            data += [logs["entropy"], logs["value"], logs["policy_loss"],
                     logs["value_loss"], logs["grad_norm"]]

            txt_logger.info(
                "U {} | F {:06} | FPS {:04.0f} | D {} | rR:μσmM {:.2f} {:.2f} {:.2f} {:.2f} | F:μσmM {:.1f} {:.1f} {} {} | H {:.3f} | V {:.3f} | pL {:.3f} | vL {:.3f} | ∇ {:.3f}"
                .format(*data))

            header += ["return_" + key for key in return_per_episode.keys()]
            data += return_per_episode.values()

            # header += ["debug_last_env_reward"]
            # data += [logs["debug_last_env_reward"]]

            header += ["total_loss"]
            data += [logs["policy_loss"] - args.entropy_coef * logs["entropy"] +
                     args.value_loss_coef * logs["value_loss"]]

            if status["num_frames"] == 0:
                csv_logger.writerow(header)
            csv_logger.writerow(data)
            csv_file.flush()

            for field, value in zip(header, data):
                tb_writer.add_scalar(field, value, num_frames)

        # Save status
        if args.save_interval > 0 and update % args.save_interval == 0:
            if args.algo == "ipo":
                status = {
                    "num_frames": num_frames,
                    "update": update,
                    "model_state": acmodel.state_dict(),
                    "optimizer_state1": algo1.optimizer.state_dict(),
                    "optimizer_state2": algo2.optimizer.state_dict()
                }
            else:
                status = {
                    "num_frames": num_frames,
                    "update": update,
                    "model_state": acmodel.state_dict(),
                    "optimizer_state": algo.optimizer.state_dict()
                }
            if hasattr(preprocess_obss, "vocab"):
                status["vocab"] = preprocess_obss.vocab.vocab
            utils.save_status(status, model_dir)
            txt_logger.info("Status saved")
def main():
    # Parse arguments
    parser = argparse.ArgumentParser()

    ## General parameters
    parser.add_argument("--algo", required=True,
                        help="algorithm to use: a2c | ppo | ppo_intrinsic (REQUIRED)")
    parser.add_argument("--env", required=True,
                        help="name of the environment to train on (REQUIRED)")
    parser.add_argument("--model", default=None,
                        help="name of the model (default: {ENV}_{ALGO}_{TIME})")
    parser.add_argument("--seed", type=int, default=1,
                        help="random seed (default: 1)")
    parser.add_argument("--log-interval", type=int, default=1,
                        help="number of updates between two logs (default: 1)")
    parser.add_argument("--save-interval", type=int, default=10,
                        help="number of updates between two saves (default: 10, 0 means no saving)")
    parser.add_argument("--procs", type=int, default=16,
                        help="number of processes (default: 16)")
    parser.add_argument("--frames", type=int, default=10**7,
                        help="number of frames of training (default: 1e7)")

    ## Parameters for main algorithm
    parser.add_argument("--epochs", type=int, default=4,
                        help="number of epochs for PPO (default: 4)")
    parser.add_argument("--batch-size", type=int, default=256,
                        help="batch size for PPO (default: 256)")
    parser.add_argument("--frames-per-proc", type=int, default=None,
                        help="number of frames per process before update (default: 5 for A2C and 128 for PPO)")
    parser.add_argument("--discount", type=float, default=0.99,
                        help="discount factor (default: 0.99)")
    parser.add_argument("--lr", type=float, default=0.001,
                        help="learning rate (default: 0.001)")
    parser.add_argument("--gae-lambda", type=float, default=0.95,
                        help="lambda coefficient in GAE formula (default: 0.95, 1 means no gae)")
    parser.add_argument("--entropy-coef", type=float, default=0.01,
                        help="entropy term coefficient (default: 0.01)")
    parser.add_argument("--value-loss-coef", type=float, default=0.5,
                        help="value loss term coefficient (default: 0.5)")
    parser.add_argument("--max-grad-norm", type=float, default=0.5,
                        help="maximum norm of gradient (default: 0.5)")
    parser.add_argument("--optim-eps", type=float, default=1e-8,
                        help="Adam and RMSprop optimizer epsilon (default: 1e-8)")
    parser.add_argument("--optim-alpha", type=float, default=0.99,
                        help="RMSprop optimizer alpha (default: 0.99)")
    parser.add_argument("--clip-eps", type=float, default=0.2,
                        help="clipping epsilon for PPO (default: 0.2)")
    parser.add_argument("--recurrence", type=int, default=1,
                        help="number of time-steps gradient is backpropagated (default: 1). "
                             "If > 1, a LSTM is added to the model to have memory.")
    parser.add_argument("--text", action="store_true", default=False,
                        help="add a GRU to the model to handle text input")
    parser.add_argument("--visualize", default=False,
                        help="show real time CNN layer weight changes")

    args = parser.parse_args()
    args.mem = args.recurrence > 1

    # Set run dir
    date = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
    default_model_name = f"{args.env}_{args.algo}_seed{args.seed}_{date}"
    model_name = args.model or default_model_name
    model_dir = utils.get_model_dir(model_name)

    # Load loggers and Tensorboard writer
    txt_logger = utils.get_txt_logger(model_dir)
    csv_file, csv_logger = utils.get_csv_logger(model_dir)
    tb_writer = tensorboardX.SummaryWriter(model_dir)

    # Log command and all script arguments
    txt_logger.info("{}\n".format(" ".join(sys.argv)))
    txt_logger.info("{}\n".format(args))

    # Set seed for all randomness sources
    utils.seed(args.seed)

    # Set device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    txt_logger.info(f"Device: {device}\n")

    # Load environments
    envs = []
    for i in range(args.procs):
        envs.append(utils.make_env(args.env, args.seed + 10000 * i))
    txt_logger.info("Environments loaded\n")

    # Load training status
    try:
        status = utils.get_status(model_dir)
    except OSError:
        status = {"num_frames": 0, "update": 0}
    txt_logger.info("Training status loaded\n")

    # Load observations preprocessor
    obs_space, preprocess_obss = utils.get_obss_preprocessor(envs[0].observation_space)
    if "vocab" in status:
        preprocess_obss.vocab.load_vocab(status["vocab"])
    txt_logger.info("Observations preprocessor loaded")

    # Load model
    acmodel = ACModel(obs_space, envs[0].action_space, args.mem, args.text)
    if "model_state" in status:
        acmodel.load_state_dict(status["model_state"])
    acmodel.to(device)
    txt_logger.info("Model loaded\n")
    txt_logger.info("{}\n".format(acmodel))

    # Load algo
    if args.algo == "a2c":
        algo = torch_ac.A2CAlgo(envs, acmodel, device, args.frames_per_proc,
                                args.discount, args.lr, args.gae_lambda,
                                args.entropy_coef, args.value_loss_coef,
                                args.max_grad_norm, args.recurrence,
                                args.optim_alpha, args.optim_eps, preprocess_obss)
    elif args.algo == "ppo":
        algo = torch_ac.PPOAlgo(envs, acmodel, device, args.frames_per_proc,
                                args.discount, args.lr, args.gae_lambda,
                                args.entropy_coef, args.value_loss_coef,
                                args.max_grad_norm, args.recurrence,
                                args.optim_eps, args.clip_eps, args.epochs,
                                args.batch_size, preprocess_obss)
    elif args.algo == "ppo_intrinsic":
        algo = torch_ac.PPOAlgoIntrinsic(envs, acmodel, device,
                                         args.frames_per_proc, args.discount,
                                         args.lr, args.gae_lambda,
                                         args.entropy_coef, args.value_loss_coef,
                                         args.max_grad_norm, args.recurrence,
                                         args.optim_eps, args.clip_eps,
                                         args.epochs, args.batch_size,
                                         preprocess_obss)
    elif args.algo == "a2c_intrinsic":
        algo = torch_ac.A2CAlgoIntrinsic(envs, acmodel, device,
                                         args.frames_per_proc, args.discount,
                                         args.lr, args.gae_lambda,
                                         args.entropy_coef, args.value_loss_coef,
                                         args.max_grad_norm, args.recurrence,
                                         args.optim_alpha, args.optim_eps,
                                         preprocess_obss)
    else:
        raise ValueError("Incorrect algorithm name: {}".format(args.algo))

    if "optimizer_state" in status:
        algo.optimizer.load_state_dict(status["optimizer_state"])
    txt_logger.info("Optimizer loaded\n")

    # Train model
    num_frames = status["num_frames"]
    update = status["update"]
    start_time = time.time()

    print_visual = args.visualize
    if print_visual:
        fig, axs = plt.subplots(1, 3)
        fig.suptitle('Convolution Layer Weights Normalized Difference')

    while num_frames < args.frames:
        # Store copies of s_t model params
        old_parameters = {}
        for name, param in acmodel.named_parameters():
            old_parameters[name] = param.detach().numpy().copy()

        # Update model parameters
        update_start_time = time.time()
        exps, logs1 = algo.collect_experiences()
        logs2 = algo.update_parameters(exps)
        logs = {**logs1, **logs2}
        update_end_time = time.time()

        # Store copies of s_t+1 model params
        new_parameters = {}
        for name, param in acmodel.named_parameters():
            new_parameters[name] = param.detach().numpy().copy()

        # Compute L2 norm of model state differences and
        # print model weight change visualization
        for index in range(len(old_parameters.keys())):
            if index == 0 or index == 2 or index == 4:
                key = list(old_parameters.keys())[index]
                old_weights = old_parameters[key]
                new_weights = new_parameters[key]
                norm_diff = numpy.linalg.norm(new_weights - old_weights)
                diff_matrix = abs(new_weights - old_weights)
                diff_matrix[:, :, 0, 0] = normalize(diff_matrix[:, :, 0, 0],
                                                    norm='max', axis=0)
                if print_visual:
                    axs[int(index / 2)].imshow(diff_matrix[:, :, 0, 0],
                                               cmap='Greens',
                                               interpolation='nearest')

        # This allows the plots to update as the model trains
        if print_visual:
            plt.ion()
            plt.show()
            plt.pause(0.001)

        num_frames += logs["num_frames"]
        update += 1

        # Print logs
        if update % args.log_interval == 0:
            fps = logs["num_frames"] / (update_end_time - update_start_time)
            duration = int(time.time() - start_time)
            return_per_episode = utils.synthesize(logs["return_per_episode"])
            rreturn_per_episode = utils.synthesize(logs["reshaped_return_per_episode"])
            num_frames_per_episode = utils.synthesize(logs["num_frames_per_episode"])

            header = ["update", "frames", "FPS", "duration"]
            data = [update, num_frames, fps, duration]
            header += ["rreturn_" + key for key in rreturn_per_episode.keys()]
            data += rreturn_per_episode.values()
            header += ["num_frames_" + key for key in num_frames_per_episode.keys()]
            data += num_frames_per_episode.values()
            header += ["entropy", "value", "policy_loss", "value_loss", "grad_norm"]
            data += [logs["entropy"], logs["value"], logs["policy_loss"],
                     logs["value_loss"], logs["grad_norm"]]

            txt_logger.info(
                "U {} | F {:06} | FPS {:04.0f} | D {} | rR:μσmM {:.2f} {:.2f} {:.2f} {:.2f} | F:μσmM {:.1f} {:.1f} {} {} | H {:.3f} | V {:.3f} | pL {:.3f} | vL {:.3f} | ∇ {:.3f}"
                .format(*data))

            header += ["return_" + key for key in return_per_episode.keys()]
            data += return_per_episode.values()

            if status["num_frames"] == 0:
                csv_logger.writerow(header)
            csv_logger.writerow(data)
            csv_file.flush()

            for field, value in zip(header, data):
                tb_writer.add_scalar(field, value, num_frames)

        # Save status
        if args.save_interval > 0 and update % args.save_interval == 0:
            status = {
                "num_frames": num_frames,
                "update": update,
                "model_state": acmodel.state_dict(),
                "optimizer_state": algo.optimizer.state_dict()
            }
            if hasattr(preprocess_obss, "vocab"):
                status["vocab"] = preprocess_obss.vocab.vocab
            utils.save_status(status, model_dir)
            txt_logger.info("Status saved")
def extract_and_write_posts(elements, filename):
    try:
        f = open(filename, "w", newline="\r\n")
        f.writelines(" TIME || TYPE || TITLE || STATUS || "
                     "LINKS(Shared Posts/Shared Links etc) " + "\n" + "\n")
        for x in elements:
            try:
                title = " "
                status = " "
                link = ""
                time = " "

                # time
                time = utils.get_time(x)
                # title
                title = utils.get_title(x, selectors)
                if title.text.find("shared a memory") != -1:
                    x = x.find_element_by_xpath(selectors.get("title_element"))
                    title = utils.get_title(x, selectors)
                status = utils.get_status(x, selectors)

                if (title.text ==
                        driver.find_element_by_id(selectors.get("title_text")).text):
                    if status == "":
                        temp = utils.get_div_links(x, "img", selectors)
                        if temp == "":  # no image tag, which means it is not a life event
                            link = utils.get_div_links(x, "a", selectors).get_attribute("href")
                            type = "status update without text"
                        else:
                            type = "life event"
                            link = utils.get_div_links(x, "a", selectors).get_attribute("href")
                            status = utils.get_div_links(x, "a", selectors).text
                    else:
                        type = "status update"
                        if utils.get_div_links(x, "a", selectors) != "":
                            link = utils.get_div_links(x, "a", selectors).get_attribute("href")
                elif title.text.find(" shared ") != -1:
                    x1, link = utils.get_title_links(title)
                    type = "shared " + x1
                elif title.text.find(" at ") != -1 or title.text.find(" in ") != -1:
                    if title.text.find(" at ") != -1:
                        x1, link = utils.get_title_links(title)
                        type = "check in"
                    elif title.text.find(" in ") != -1:  # was "!= 1", an apparent typo
                        status = utils.get_div_links(x, "a", selectors).text
                elif title.text.find(" added ") != -1 and title.text.find("photo") != -1:
                    type = "added photo"
                    link = utils.get_div_links(x, "a", selectors).get_attribute("href")
                elif title.text.find(" added ") != -1 and title.text.find("video") != -1:
                    type = "added video"
                    link = utils.get_div_links(x, "a", selectors).get_attribute("href")
                else:
                    type = "others"

                if not isinstance(title, str):
                    title = title.text
                status = status.replace("\n", " ")
                title = title.replace("\n", " ")
                line = (str(time) + " || " + str(type) + " || " + str(title) +
                        " || " + str(status) + " || " + str(link) + "\n")

                try:
                    f.writelines(line)
                except Exception:
                    print("Posts: Could not map encoded characters")
            except Exception:
                pass
        f.close()
    except Exception:
        print("Exception (extract_and_write_posts)", "Status =", sys.exc_info()[0])
    return
for i in range(args.procs):
    envs.append(utils.make_env(args.env, progression_mode, args.ltl_sampler,
                               args.seed, args.int_reward, args.noLTL))

# Sync environments
envs[0].reset()
if isinstance(envs[0].env, LetterEnv):
    txt_logger.info("Using fixed maps.")
    for env in envs:
        env.env.map = envs[0].env.map
txt_logger.info("Environments loaded\n")

# Load training status
try:
    status = utils.get_status(model_dir + "/train")
except OSError:
    status = {"num_frames": 0, "update": 0}
txt_logger.info("Training status loaded.\n")

if pretrained_model_dir is not None:
    try:
        pretrained_status = utils.get_status(pretrained_model_dir)
    except Exception:
        txt_logger.info("Failed to load pretrained model.\n")
        exit(1)

# Load observations preprocessor
using_gnn = (args.gnn != "GRU" and args.gnn != "LSTM")
obs_space, preprocess_obss = utils.get_obss_preprocessor(envs[0], using_gnn,
                                                         progression_mode)
if "vocab" in status and preprocess_obss.vocab is not None:
args = argparser.parse_args()

minutes = args.min or 5
detail = args.detail
beg = args.begin or 7
end = args.end or 22
dh = args.detail_hour
dc = args.detail_category
daily_log = args.daily_log

os.chdir(cfg['FOLDER'])

if args.cron or args.croncheck:
    day = datetime.now()
    day = day.strftime('%Y%m%d')
    data = _load_file("%s.txt" % (day), today=True)
    log_data = _load_file("%s_log.txt" % (day), log=True)
    rightnow = datetime.now()
    log = get_status(rightnow.hour, rightnow.minute, log_data, True)[0]
    start = data[0]['time']
    if args.cron:
        percent(start, log)
    if args.croncheck:
        # if the log is missing
        if not log or log == "NOCAT":
            notify("Missing log", "add it")
elif args.all:
    # if all, print all the files it finds
    print_h_inline(minutes, t_beg=beg, t_end=end)

total = 0
counted = 0
def test_status(self):
    self.assertIn('</html>', utils.get_status(vtgate_port))
def __init__(self, env, model_dir, model_type='PPO2', logger=None,
             argmax=False, use_memory=False, use_text=False, num_cpu=1,
             frames_per_proc=None, discount=0.99, lr=0.001, gae_lambda=0.95,
             entropy_coef=0.01, value_loss_coef=0.5, max_grad_norm=0.5,
             recurrence=1, optim_eps=1e-8, optim_alpha=None, clip_eps=0.2,
             epochs=4, batch_size=256):
    """
    Initialize the Agent object. This primarily includes storing of the
    configuration parameters, but there is some other logic for correctly
    initializing the agent.

    :param env: the environment for training
    :param model_dir: the save directory (appended with the goal_id in initialization)
    :param model_type: the type of model {'PPO2', 'A2C'}
    :param logger: existing text logger
    :param argmax: if we use deterministic or probabilistic action selection
    :param use_memory: if we are using an LSTM
    :param use_text: if we are using NLP to parse the goal
    :param num_cpu: the number of parallel instances for training
    :param frames_per_proc: max time_steps per process (versus constant)
    :param discount: the discount factor (gamma)
    :param lr: the learning rate
    :param gae_lambda: the generalized advantage estimator lambda parameter
        (training smoothing parameter)
    :param entropy_coef: relative weight for entropy loss
    :param value_loss_coef: relative weight for value function loss
    :param max_grad_norm: max scaling factor for the gradient
    :param recurrence: number of recurrent steps
    :param optim_eps: minimum value to prevent numerical instability
    :param optim_alpha: RMSprop decay parameter (A2C only)
    :param clip_eps: clipping parameter for the advantage and value function (PPO2 only)
    :param epochs: number of epochs in the parameter update (PPO2 only)
    :param batch_size: number of samples for the parameter update (PPO2 only)
    """
    if hasattr(env, 'goal') and env.goal:
        # if the environment has a goal, set the model_dir to the goal folder
        self.model_dir = model_dir + env.goal.goalId + '/'
    else:
        # otherwise just use the model_dir as is
        self.model_dir = model_dir

    # store all of the input parameters
    self.model_type = model_type
    self.num_cpu = num_cpu
    self.frames_per_proc = frames_per_proc
    self.discount = discount
    self.lr = lr
    self.gae_lambda = gae_lambda
    self.entropy_coef = entropy_coef
    self.value_loss_coef = value_loss_coef
    self.max_grad_norm = max_grad_norm
    self.recurrence = recurrence
    self.optim_eps = optim_eps
    self.optim_alpha = optim_alpha
    self.clip_eps = clip_eps
    self.epochs = epochs
    self.batch_size = batch_size

    # use the existing logger and create two new ones
    self.txt_logger = logger
    self.csv_file, self.csv_logger = utils.get_csv_logger(self.model_dir)
    self.tb_writer = tensorboardX.SummaryWriter(self.model_dir)

    # set the environment, with some additional checks and init of training_envs
    self.set_env(env)

    # we don't initialize the algorithm until we call init_training_algo()
    self.algo = None

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.txt_logger.info(f"Device: {device}\n")

    try:
        # if we have a saved model, load it
        self.status = utils.get_status(self.model_dir)
    except OSError:
        # otherwise initialize the status
        print('error loading saved model. initializing empty model...')
        self.status = {"num_frames": 0, "update": 0}
    if self.txt_logger:
        self.txt_logger.info("Training status loaded\n")

    if "vocab" in self.status:
        preprocess_obss.vocab.load_vocab(self.status["vocab"])
    if self.txt_logger:
        self.txt_logger.info("Observations preprocessor loaded")

    # get the obs_space and the observation pre-processor
    # (for manipulating gym observations into a torch-friendly format)
    obs_space, self.preprocess_obss = utils.get_obss_preprocessor(
        self.env.observation_space)
    self.acmodel = ACModel(obs_space, self.env.action_space,
                           use_memory=use_memory, use_text=use_text)
    self.device = device  # store the device {'cpu', 'cuda:N'}
    # if we are using greedy action selection,
    # or are we using probabilistic action selection
    self.argmax = argmax

    if self.acmodel.recurrent:
        # initialize the memories
        self.memories = torch.zeros(num_cpu, self.acmodel.memory_size,
                                    device=self.device)

    if "model_state" in self.status:
        # if we have a saved model ('model_state') in the status,
        # load that into the initialized model
        self.acmodel.load_state_dict(self.status["model_state"])
    self.acmodel.to(device)  # make sure the model is located on the correct device
    self.txt_logger.info("Model loaded\n")
    self.txt_logger.info("{}\n".format(self.acmodel))

    # some redundant code. uncomment if there are issues and delete after enough testing
    # if 'model_state' in self.status:
    #     self.acmodel.load_state_dict(self.status['model_state'])
    # self.acmodel.to(self.device)

    self.acmodel.eval()
    if hasattr(self.preprocess_obss, "vocab"):
        self.preprocess_obss.vocab.load_vocab(utils.get_vocab(model_dir))
txt_logger.info(f"Device: {device}\n") # Load environments envs = [] for i in range(args.procs): envs.append(utils.make_env(args.env, args.obj_type, args.obj_color, seed=args.seed + 10000 * i)) txt_logger.info("Environments loaded\n") # Load training status try: if not args.load: status = {"num_frames": 0, "update": 0} else: status = utils.get_status(model_dir) txt_logger.info("Training status loaded\n") except OSError: status = {"num_frames": 0, "update": 0} # Load observations preprocessor obs_space, preprocess_obs_goals = utils.get_obs_goals_preprocessor(envs[0].observation_space) if "vocab" in status: preprocess_obs_goals.vocab.load_vocab(status["vocab"]) txt_logger.info("observations preprocessor loaded") # Load model acmodel = ACModel(obs_space, envs[0].action_space, args.mem, args.text) if "model_state" in status:
# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
txt_logger.info(f"Device: {device}\n")

# Load environments
envs = []
for i in range(args.procs):
    envs.append(utils.make_env(args.env, args.seed + 10000 * i))
txt_logger.info("Environments loaded\n")

# Load training status
try:
    status = utils.get_status(model_dir, args.seed)
except OSError:
    status = {"num_frames": 0, "update": 0}
txt_logger.info("Training status loaded\n")

# Load observations preprocessor
obs_space, preprocess_obss = utils.get_obss_preprocessor(
    envs[0].observation_space)
if "vocab" in status:
    preprocess_obss.vocab.load_vocab(status["vocab"])
txt_logger.info("Observations preprocessor loaded")

# Load model
if args.use_pnn == 'True':
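Unlike the previous script, this variant passes a seed to `utils.get_status(model_dir, args.seed)`, which suggests per-seed checkpoints. One way that could look, with the file-naming scheme purely an assumption:

import os
import torch

def get_status(model_dir, seed):
    # per-seed checkpoint file; "status_seed<N>.pt" is a hypothetical name
    return torch.load(os.path.join(model_dir,
                                   "status_seed{}.pt".format(seed)))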
def start(self):
    # temp
    advance_dirs = {
        'Merged_vcf': '{analydir}/Advance/{newjob}/Merged_vcf',
        'ACMG': '{analydir}/Advance/{newjob}/ACMG',
        'FilterSV': '{analydir}/Advance/{newjob}/FilterSV',
        'FilterCNV': '{analydir}/Advance/{newjob}/FilterCNV',
        'Noncoding': '{analydir}/Advance/{newjob}/Noncoding',
        'ModelF': '{analydir}/Advance/{newjob}/ModelF',
        'Share': '{analydir}/Advance/{newjob}/Share',
        'Denovo': '{analydir}/Advance/{newjob}/Denovo',
        'Linkage': '{analydir}/Advance/{newjob}/Linkage',
        'ROH': '{analydir}/Advance/{newjob}/ROH',
        'Network': '{analydir}/Advance/{newjob}/Network',
        'Pathway': '{analydir}/Advance/{newjob}/Pathway',
        'PPI': '{analydir}/Advance/{newjob}/PPI',
        'HLA': '{analydir}/Advance/{newjob}/HLA',
        'SiteAS': '{analydir}/Advance/{newjob}/SiteAS',
        'GeneAS': '{analydir}/Advance/{newjob}/GeneAS',
        'IntegrateResult': '{analydir}/Advance/{newjob}/IntegrateResult',
        'Disease': '{analydir}/Advance/{newjob}/Disease',
        'BriefResults': '{analydir}/Advance/{newjob}/BriefResults',
    }

    for k, v in advance_dirs.iteritems():
        self.args.update({k: v.format(**self.args)})

    print 'hello, {}'.format(self.username)

    # Require rawdata or not
    qc_status = utils.get_status('qc', self.startpoint,
                                 config.ANALYSIS_POINTS)
    mapping_status = utils.get_status('bwa_mem', self.startpoint,
                                      config.ANALYSIS_POINTS)

    print 'qc status:', qc_status
    print 'mapping status:', mapping_status

    ANALY_DICT = utils.get_analysis_dict(self.analy_array,
                                         config.ANALYSIS_CODE)
    self.args.update({'ANALY_DICT': ANALY_DICT})

    softwares = utils.get_softwares(self.analy_array,
                                    self.args['ANALY_DICT'], self.args,
                                    self.seqstrag)
    self.args.update({'softwares': softwares})

    # check inputs
    self.queues = utils.check_queues(self.queues, self.username)
    self.args.update({'queues': self.queues})

    # use sentieon-specific queues if needed
    if 'sentieon' in softwares.values():
        print 'add sentieon_queues'
        sentieon_queues = self.queues
        if config.CONFIG.has_option('resource', 'sentieon_queues'):
            sentieon_queues = config.CONFIG.get(
                'resource', 'sentieon_queues').split(',')
            sentieon_queues = utils.check_queues(sentieon_queues,
                                                 self.username)
            if not sentieon_queues:
                sentieon_queues = self.queues
        self.args.update({'sentieon_queues': sentieon_queues})

    utils.check_analy_array(self.seqstrag, self.analy_array,
                            config.ANALYSIS_CODE)
    utils.check_files(self.pn, self.samp_info, self.samp_list)

    newTR = utils.check_target_region(config.CONFIG, self.seqstrag,
                                      self.refgenome, self.rawTR)
    self.args.update({'TR': newTR})

    print 'analysis items:'
    for analysis_code in self.analy_array:
        print utils.color_text(
            '{:4} {}'.format(analysis_code,
                             config.ANALYSIS_CODE[analysis_code][0]),
            'yellow')

    # Analysis start point
    if self.startpoint:
        if self.startpoint in config.ANALYSIS_POINTS:
            print 'start point: {}'.format(
                utils.color_text(self.startpoint))
        else:
            print '[error] invalid startpoint: {}'.format(
                utils.color_text(self.startpoint))
            print 'maybe you want to choose: {}'.format(
                utils.color_text(
                    process.extractOne(
                        self.startpoint,
                        config.ANALYSIS_POINTS.keys())[0], 'cyan'))
            print 'available startpoints are as follows:\n {}'.format(
                ' '.join(config.ANALYSIS_POINTS.keys()))
            exit(1)

    is_advance = max(self.analy_array) > 6.1
    project = utils.Project(self.analydir, self.samp_info,
                            self.samp_info_done, self.samp_list,
                            self.qc_list, qc_status, mapping_status,
                            is_advance)

    # Extract sample_info
    print 'extract sample information...'
    fenqi, tissue, disease_name, sample_infos, sample_infos_all, sample_done = \
        project.get_sample_infos(self.samp_list, self.samp_info,
                                 self.samp_info_done, is_advance)

    database = '{}/project/DisGeNet.json'.format(
        config.CONFIG.get('software', 'soft_dir'))
    disease_ids = utils.get_disease_id(disease_name, database)
    self.args.update({
        'disease_name': disease_name,
        'disease_ids': disease_ids,
    })

    sample_infos_waiting = {
        sampleid: infos
        for sampleid, infos in sample_infos.iteritems()
        if sampleid not in sample_done
    }
    self.args.update({'sample_infos_waiting': sample_infos_waiting})

    sample_lists = project.get_sample_lists

    # Update qc_list and extract sample_list
    if mapping_status == 'waiting':
        sample_lists = project.update_qc_list()

    print ' report number: {}'.format(utils.color_text(fenqi))
    if disease_name:
        print ' disease name: {}'.format(utils.color_text(disease_name))
        print ' disease id: {}'.format(utils.color_text(disease_ids))
    if tissue:
        print ' tissue: {}'.format(utils.color_text(tissue))
    print ' samples ({}): {}'.format(
        len(sample_infos), utils.color_text(sample_infos.keys()))
    if sample_done:
        print ' samples done({}): {}'.format(
            len(sample_done), utils.color_text(sample_done))

    # set memory according to seqstrag
    print 'set analysis memory...'
    if self.seqstrag == 'WGS':
        print 'update memory for WGS...'
        for analysis, memory in config.ANALYSIS_MEM_WGS.items():
            if analysis in config.ANALYSIS_POINTS:
                config.ANALYSIS_POINTS[analysis][0] = memory

    # ===========================================================
    # ===========================================================
    print '>>> pipeline start...'
    mutation_soft, sv_soft, cnv_soft, denovo_soft = [
        softwares[each] for each in ('mutation', 'sv', 'cnv', 'denovo')
    ]
    print ' mutation_soft:{}, sv_soft:{}, cnv_soft:{}, denovo_soft:{}'.format(
        mutation_soft, sv_soft, cnv_soft, denovo_soft)

    # QC
    if ANALY_DICT['quality_control'] and qc_status == 'waiting':
        utils.print_color('> QC', 'white')
        QC(self.args, self.jobs, self.orders, sample_lists, config).start()

    # Mapping
    if ANALY_DICT['mapping']:
        utils.print_color('> Mapping', 'white')
        Mapping(self.args, self.jobs, self.orders, sample_lists,
                sample_infos, config, qc_status, mapping_status).start()

    # Mutation
    if ANALY_DICT['snpindel_call']:
        utils.print_color('> Mutation', 'white')
        Mutation(self.args, self.jobs, self.orders, sample_lists,
                 sample_infos, config).start()

    # SV
    if ANALY_DICT['sv_call']:
        utils.print_color('> SV', 'white')
        SV(self.args, self.jobs, self.orders, sample_infos, config).start()

    # CNV
    if ANALY_DICT['cnv_call']:
        utils.print_color('> CNV', 'white')
        CNV(self.args, self.jobs, self.orders, sample_infos, config).start()

    # FilterDB
    if ANALY_DICT['filter']:
        utils.print_color('> FilterDB', 'white')
        FilterDB(self.args, self.jobs, self.orders, mutation_soft, sv_soft,
                 cnv_soft, sample_infos, config, disease_name, tissue,
                 ANALY_DICT).start()

    # ModelF
    if ANALY_DICT['filter_model']:
        utils.print_color('> Model', 'white')
        FilterModel(self.args, self.jobs, self.orders, mutation_soft,
                    sv_soft, cnv_soft, sample_infos, config).start()

    # Denovo
    if ANALY_DICT['denovo']:
        utils.print_color('> Denovo', 'white')
        Denovo(self.args, self.jobs, self.orders, mutation_soft, sv_soft,
               cnv_soft, denovo_soft, sample_infos, config,
               ANALY_DICT).start()

    # Linkage
    if ANALY_DICT['linkage']:
        utils.print_color('> Linkage', 'white')
        Linkage(self.args, self.jobs, self.orders, mutation_soft, sv_soft,
                cnv_soft, denovo_soft, sample_infos_all, config,
                ANALY_DICT).start()

    # IntegrateResult
    if any(ANALY_DICT[analysis] for analysis in
           ['filter', 'filter_model', 'denovo', 'phenolyzer']):
        utils.print_color('> IntegrateResult', 'white')
        IntegrateResult(self.args, self.jobs, self.orders, config).start()

    # ROH
    if ANALY_DICT['roh']:
        utils.print_color('> ROH', 'white')
        ROH(self.args, self.jobs, self.orders, sample_infos, mutation_soft,
            config).start()

    # OTHER
    other = Other(self.args, self.jobs, self.orders, config, disease_name)

    # IBD
    if any(ANALY_DICT[each] for each in
           ['filter_model', 'linkage', 'denovo']) and \
            len(sample_infos_waiting) > 1:
        utils.print_color('> IBD', 'white')
        other.ibd()

    # Network
    if ANALY_DICT['phenolyzer']:
        utils.print_color('> Phenolyzer', 'white')
        other.phenolyzer()

    # Pathway
    if ANALY_DICT['pathway']:
        utils.print_color('> Pathway', 'white')
        other.pathway()

    # PPI
    if ANALY_DICT['ppi']:
        utils.print_color('> PPI', 'white')
        other.ppi()

    # SiteAS
    if ANALY_DICT['site_association']:
        utils.print_color('> SiteAS', 'white')
        Association(self.args, self.jobs, self.orders,
                    config).site_association()

    # GeneAS
    if ANALY_DICT['gene_association']:
        utils.print_color('> GeneAS', 'white')
        Association(self.args, self.jobs, self.orders,
                    config).gene_association()

    # HLA
    if ANALY_DICT['hla']:
        utils.print_color('> HLA', 'white')
        HLA(self.args, self.jobs, self.orders, sample_lists, sample_infos,
            config, qc_status).start()

    # result and report
    utils.print_color('> Result', 'white')
    Result(self.args, self.jobs, self.orders, config).start()

    utils.print_color('> Report', 'white')
    Report(self.args, self.jobs, self.orders, config).start()

    # job summary
    print 'length of jobs waiting/total: {}/{}'.format(
        len([job for job in self.jobs if job.get('status') == 'waiting']),
        len(self.jobs))

    utils.write_job(self.analydir, self.newjob, self.jobs, self.orders)

    print '{:-^80}'.format(' all done ')
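In this pipeline, `utils.get_status` answers a different question from the checkpoint loaders above: given the user's chosen start point, does a step such as 'qc' or 'bwa_mem' still need to run? A hedged sketch of that contract, assuming each `ANALYSIS_POINTS` entry carries an ordering field at index 1 (only index 0, the memory field, is visible in the code above):

def get_status(step, startpoint, analysis_points):
    # Hypothetical sketch: a step is 'waiting' (still to run) when no start
    # point is given or when it sits at or after the start point in pipeline
    # order; otherwise it is treated as already 'done'.
    if not startpoint:
        return 'waiting'
    if analysis_points[step][1] >= analysis_points[startpoint][1]:
        return 'waiting'
    return 'done'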