def header(self, message=logger.colors.fg.pink + logger.colors.bold + 'Onionr'
           + logger.colors.reset + logger.colors.fg.pink + ' has started.'):
    """Print the ASCII-art startup banner (if present) and a startup message.

    The banner file `static-data/header.txt` uses single-letter placeholders
    that are substituted before printing: P (pink), W (reset+bold), G (green),
    B (bold), A (API_VERSION), V (ONIONR_VERSION); newlines get a color reset.

    :param message: startup line logged after the banner; pass ``None``
        to suppress it. Default announces that Onionr has started.
    """
    # Only show the banner at INFO verbosity or lower.
    if os.path.exists('static-data/header.txt') and logger.get_level() <= logger.LEVEL_INFO:
        with open('static-data/header.txt', 'rb') as file:
            # Written to stderr so the banner never ends up in logs or
            # in stdout that callers may be piping/parsing.
            sys.stderr.write(
                file.read().decode()
                .replace('P', logger.colors.fg.pink)
                .replace('W', logger.colors.reset + logger.colors.bold)
                .replace('G', logger.colors.fg.green)
                .replace('\n', logger.colors.reset + '\n')
                .replace('B', logger.colors.bold)
                .replace('A', '%s' % API_VERSION)
                .replace('V', ONIONR_VERSION))
    # Idiomatic identity test (was: `if not message is None`).
    if message is not None:
        logger.info(logger.colors.fg.lightgreen + '-> ' + str(message)
                    + logger.colors.reset + logger.colors.fg.lightgreen + ' <-\n',
                    sensitive=True)
def args_parse():
    """Parse master command-line options and configure master logging.

    Side effects: sets the global logger level from ``-v``, rotates any
    existing master log to ``*.bak``, and attaches a FileHandler writing
    to ``out_dir/logfile``.

    :return: ``(opts, args)`` as produced by ``OptionParser.parse_args``.
    """
    p = OptionParser()
    p.add_option('-e', '--expr', dest='expr', action='append', type='string',
                 help='specify expression')  # fixed typo: was 'experssion'
    p.add_option('-v', '--verbose', dest='verbose', default='2',
                 help='specify verbose level')
    p.add_option('-o', '--out_dir', dest='out_dir', default='logs', type='string',
                 help='specify output directory')
    p.add_option('-l', '--logfile', dest='logfile', default='dvpy_master.log',
                 type='string', help='specify log filename')
    p.add_option('-m', '--max_agents', dest='max_agents', default=1, type='int',
                 help='specify max agent number')
    p.add_option('-s', '--suite', dest='suite', action='callback', type='string',
                 callback=add_suite, help='specify suite')
    p.add_option('-t', '--test', action='callback', dest='test', type='string',
                 callback=add_test, help='specify test')
    p.add_option('-w', '--where', action='store', dest='where', type='string',
                 help='specify test selector')
    p.add_option('-a', '--action', action='append', dest='action', type='string',
                 help='specify action')
    p.add_option('-f', '--patchfile', action='append', dest='patchfile',
                 type='string', help='specify patch file')
    p.add_option('-g', '--gcf', dest='gcf', type='string', default='local',
                 help='specify gcf')
    (opts, args) = p.parse_args()

    logger.setLevel(get_level(opts.verbose))

    master_log = path.abspath(path.join(opts.out_dir, opts.logfile))
    try:
        makedirs(path.dirname(master_log))
    except OSError:
        # Directory already exists; any other OS failure will surface
        # when FileHandler tries to open the log below.
        pass
    if path.exists(master_log):
        # Keep the previous run's log around as a .bak.
        rename(master_log, master_log + '.bak')

    fh = logging.FileHandler(master_log)
    fh.setFormatter(logging.Formatter(FORMAT))
    fh.setLevel(get_level(opts.verbose))
    logger.addHandler(fh)

    return (opts, args)
def main():
    """Agent entry point: parse CLI options, set up per-agent file logging,
    then hand control to run()."""
    global host, port, agent_id, out_dir

    parser = OptionParser()
    parser.add_option('-m', '--host', dest='host', help='specify host name')
    parser.add_option('-p', '--port', dest='port', help='specify port')
    parser.add_option('-i', '--id', dest='id', help='specify id name')
    parser.add_option('-o', '--out_dir', dest='out_dir', help='specify output directory')
    parser.add_option('-v', '--verbose', dest='verbose', default='1',
                      help='specify verbose level')
    opts, _ = parser.parse_args()

    host = opts.host
    port = int(opts.port)
    agent_id = opts.id
    out_dir = path.abspath(opts.out_dir)

    logger.setLevel(get_level(opts.verbose))

    # Each agent logs to <out_dir>/agents/<agent_id>; rotate an old file to .bak.
    agent_log = path.abspath(path.join(out_dir, 'agents', agent_id))
    try:
        os.makedirs(path.dirname(agent_log))
    except Exception:
        pass  # directory already present
    if path.exists(agent_log):
        rename(agent_log, agent_log + '.bak')

    file_handler = logging.FileHandler(agent_log)
    file_handler.setLevel(get_level(opts.verbose))
    file_handler.setFormatter(logging.Formatter(FORMAT))
    logger.addHandler(file_handler)

    run()
def start_agent_server(_out_q, _in_q, out_dir, verbose):
    """Run the agent TCP server on this host's IP and publish the bound
    (host, port) pair to callers through out_q; blocks in run_forever()."""
    global in_q, out_q
    in_q, out_q = _in_q, _out_q

    logger.setLevel(get_level(verbose))

    # Server log lives at <out_dir>/agent_server.log; rotate any old one.
    server_log = path.abspath(path.join(out_dir, 'agent_server.log'))
    try:
        os.makedirs(path.dirname(server_log))
    except Exception:
        pass  # directory already present
    if path.exists(server_log):
        os.rename(server_log, server_log + '.bak')

    handler = logging.FileHandler(server_log)
    handler.setFormatter(logging.Formatter(FORMAT))
    handler.setLevel(get_level(verbose))
    logger.addHandler(handler)

    event_loop = asyncio.get_event_loop()
    server_host = socket.gethostname()
    server_ip = socket.gethostbyname(server_host)
    # Port 0 is implied: the OS picks a free port, read back from the socket.
    server = event_loop.run_until_complete(
        event_loop.create_server(AgentServerProtocal, server_ip))
    _, server_port = server.sockets[0].getsockname()

    logger.info('agent server started on {}:{}'.format(server_host, server_port))
    out_q.put((server_host, server_port))

    event_loop.run_forever()
    server.close()
    event_loop.run_until_complete(server.wait_closed())
    event_loop.close()
def run():
    """Configure the werkzeug logger from conf, then launch the web app."""
    # Wire the embedded web server's logger to our file logging config.
    logger_name = "webserver"
    web_logger = logging.getLogger('werkzeug')
    level = logger.get_level(conf["logging"][logger_name]["level"])
    web_logger.setLevel(level)
    web_logger.addHandler(logger.get_file_logger(logger_name))

    # Start the application; binds on all interfaces.
    gui_port = conf["gui"]["port"]
    log.info("Starting web server on port " + str(gui_port))
    app.run(debug=True,
            use_reloader=conf["constants"]["web_use_reloader"],
            host='0.0.0.0',
            port=gui_port)
def main():
    """Parse agent command-line options, attach a per-agent log file,
    and start the agent via run()."""
    global host, port, agent_id, out_dir

    option_parser = OptionParser()
    option_parser.add_option('-m', '--host', dest='host', help='specify host name')
    option_parser.add_option('-p', '--port', dest='port', help='specify port')
    option_parser.add_option('-i', '--id', dest='id', help='specify id name')
    option_parser.add_option('-o', '--out_dir', dest='out_dir',
                             help='specify output directory')
    option_parser.add_option('-v', '--verbose', dest='verbose', default='1',
                             help='specify verbose level')
    parsed, _unused = option_parser.parse_args()

    host, agent_id = parsed.host, parsed.id
    port = int(parsed.port)
    out_dir = path.abspath(parsed.out_dir)

    logger.setLevel(get_level(parsed.verbose))

    # Per-agent log file: <out_dir>/agents/<agent_id> (previous run -> .bak).
    agent_log = path.abspath(path.join(out_dir, 'agents', agent_id))
    try:
        os.makedirs(path.dirname(agent_log))
    except Exception:
        pass  # already created
    if path.exists(agent_log):
        rename(agent_log, agent_log + '.bak')

    handler = logging.FileHandler(agent_log)
    handler.setFormatter(logging.Formatter(FORMAT))
    handler.setLevel(get_level(parsed.verbose))
    logger.addHandler(handler)

    run()
def parallel_collect_samples(env_pool, policy, num_samples):
    """
    Collect trajectories in parallel using a pool of workers. Actions are
    computed using the provided policy. Collection will continue until at
    least num_samples environment steps (samples) are collected. It will
    exceed this amount by at most env_pool.n_envs, since every step advances
    all environments at once. This means that some of the trajectories will
    not be executed until termination. These partial trajectories will have
    their "finished" entry set to False.

    When starting, it will first check if env_pool.last_obs is set, and if so,
    it will start from there rather than resetting all environments. This is
    useful for reusing the same episode.

    :param env_pool: An instance of EnvPool.
    :param policy: The policy used to select actions.
    :param num_samples: The minimum number of samples to collect.
    :return: A list of trajectory dicts with keys: observations, actions,
        rewards, distributions, last_observation, finished.
    """
    trajs = []
    # One in-progress trajectory slot per environment; None = no open episode.
    partial_trajs = [None] * env_pool.n_envs
    num_collected = 0
    # Resume from a previous call's observations when available.
    if env_pool.last_obs is not None:
        obs = env_pool.last_obs
    else:
        obs = env_pool.reset()
    # Progress bar only at INFO verbosity or lower.
    if logger.get_level() <= logger.INFO:
        progbar = tqdm(total=num_samples)
    else:
        progbar = None
    while num_collected < num_samples:
        actions, dists = policy.get_actions(obs)
        next_obs, rews, dones, infos = env_pool.step(actions)
        for idx in range(env_pool.n_envs):
            # Lazily open a fresh trajectory for this environment.
            if partial_trajs[idx] is None:
                partial_trajs[idx] = dict(
                    observations=[],
                    actions=[],
                    rewards=[],
                    distributions=[],
                )
            traj = partial_trajs[idx]
            traj["observations"].append(obs[idx])
            traj["actions"].append(actions[idx])
            traj["rewards"].append(rews[idx])
            traj_dists = traj["distributions"]
            # dists maps name -> batched values; keep this env's slice per step.
            traj_dists.append({k: v[idx] for k, v in dists.items()})
            if dones[idx]:
                # Episode ended: freeze lists into arrays and close the slot.
                trajs.append(
                    dict(
                        observations=np.asarray(traj["observations"]),
                        actions=np.asarray(traj["actions"]),
                        rewards=np.asarray(traj["rewards"]),
                        distributions={
                            # Re-batch per-step dicts into name -> array.
                            k: np.asarray([d[k] for d in traj_dists])
                            for k in traj_dists[0].keys()
                        },
                        # True terminal observation comes from step info,
                        # since next_obs may already be a reset observation.
                        last_observation=infos[idx]["last_observation"],
                        finished=True,
                    ))
                partial_trajs[idx] = None
        obs = next_obs
        # Every step yields one sample per environment.
        num_collected += env_pool.n_envs
        if progbar is not None:
            progbar.update(env_pool.n_envs)
    if progbar is not None:
        progbar.close()
    # Flush unfinished episodes as partial trajectories (finished=False).
    for idx in range(env_pool.n_envs):
        if partial_trajs[idx] is not None:
            traj = partial_trajs[idx]
            traj_dists = traj["distributions"]
            trajs.append(
                dict(
                    observations=np.asarray(traj["observations"]),
                    actions=np.asarray(traj["actions"]),
                    rewards=np.asarray(traj["rewards"]),
                    distributions={
                        k: np.asarray([d[k] for d in traj_dists])
                        for k in traj_dists[0].keys()
                    },
                    # Episode not done: the current observation stands in
                    # for the last observation.
                    last_observation=obs[idx],
                    finished=False,
                ))
    return trajs
# handle scheduler errors def on_job_missed(event): scheduler_error(apscheduler.events.EVENT_JOB_MISSED, event) def on_job_error(event): scheduler_error(apscheduler.events.EVENT_JOB_ERROR, event) # configure logging logger_name = "scheduler" scheduler_logger = logging.getLogger('apscheduler.executors.default') scheduler_logger.setLevel( logger.get_level(conf["logging"][logger_name]["level"])) scheduler_logger.addHandler(logger.get_console_logger(logger_name)) scheduler_logger.addHandler(logger.get_file_logger(logger_name)) scheduler_logger = logging.getLogger('apscheduler.scheduler') scheduler_logger.setLevel( logger.get_level(conf["logging"][logger_name]["level"])) scheduler_logger.addHandler(logger.get_console_logger(logger_name)) scheduler_logger.addHandler(logger.get_file_logger(logger_name)) # handle errors and exceptions scheduler.add_listener(on_job_missed, apscheduler.events.EVENT_JOB_MISSED) scheduler.add_listener(on_job_error, apscheduler.events.EVENT_JOB_ERROR) # return the scheduler object
def parallel_collect_samples(env_pool, policy, num_samples):
    """
    Collect trajectories in parallel using a pool of workers. Actions are
    computed using the provided policy. Collection will continue until at
    least num_samples environment steps (samples) are collected. It will
    exceed this amount by at most env_pool.n_envs, since every step advances
    all environments at once. This means that some of the trajectories will
    not be executed until termination. These partial trajectories will have
    their "finished" entry set to False.

    When starting, it will first check if env_pool.last_obs is set, and if so,
    it will start from there rather than resetting all environments. This is
    useful for reusing the same episode.

    :param env_pool: An instance of EnvPool.
    :param policy: The policy used to select actions.
    :param num_samples: The minimum number of samples to collect.
    :return: A list of trajectory dicts with keys: observations, actions,
        rewards, distributions, last_observation, finished.
    """
    trajs = []
    # One in-progress trajectory per environment; None means no open episode.
    partial_trajs = [None] * env_pool.n_envs
    num_collected = 0
    # Resume from where the previous call left off, if possible.
    if env_pool.last_obs is not None:
        obs = env_pool.last_obs
    else:
        obs = env_pool.reset()
    # Only show a progress bar at INFO verbosity or lower.
    if logger.get_level() <= logger.INFO:
        progbar = tqdm(total=num_samples)
    else:
        progbar = None
    while num_collected < num_samples:
        actions, dists = policy.get_actions(obs)
        next_obs, rews, dones, infos = env_pool.step(actions)
        for idx in range(env_pool.n_envs):
            # Open a fresh trajectory for this environment if needed.
            if partial_trajs[idx] is None:
                partial_trajs[idx] = dict(
                    observations=[],
                    actions=[],
                    rewards=[],
                    distributions=[],
                )
            traj = partial_trajs[idx]
            traj["observations"].append(obs[idx])
            traj["actions"].append(actions[idx])
            traj["rewards"].append(rews[idx])
            traj_dists = traj["distributions"]
            # dists maps name -> batched values; store this env's slice.
            traj_dists.append({k: v[idx] for k, v in dists.items()})
            if dones[idx]:
                # Episode finished: convert lists to arrays and close the slot.
                trajs.append(
                    dict(
                        observations=np.asarray(traj["observations"]),
                        actions=np.asarray(traj["actions"]),
                        rewards=np.asarray(traj["rewards"]),
                        distributions={
                            # Re-batch per-step dicts into name -> array.
                            k: np.asarray([d[k] for d in traj_dists])
                            for k in traj_dists[0].keys()
                        },
                        # Terminal observation comes from step info, since
                        # next_obs may already be a reset observation.
                        last_observation=infos[idx]["last_observation"],
                        finished=True,
                    )
                )
                partial_trajs[idx] = None
        obs = next_obs
        # Each step produces one sample per environment.
        num_collected += env_pool.n_envs
        if progbar is not None:
            progbar.update(env_pool.n_envs)
    if progbar is not None:
        progbar.close()
    # Emit unfinished episodes as partial trajectories (finished=False).
    for idx in range(env_pool.n_envs):
        if partial_trajs[idx] is not None:
            traj = partial_trajs[idx]
            traj_dists = traj["distributions"]
            trajs.append(
                dict(
                    observations=np.asarray(traj["observations"]),
                    actions=np.asarray(traj["actions"]),
                    rewards=np.asarray(traj["rewards"]),
                    distributions={
                        k: np.asarray([d[k] for d in traj_dists])
                        for k in traj_dists[0].keys()
                    },
                    # Episode not done: current observation stands in for
                    # the last observation.
                    last_observation=obs[idx],
                    finished=False,
                )
            )
    return trajs
def args_parse():
    """Parse master command-line options and configure master logging.

    Side effects: sets the global logger level from ``-v``, rotates any
    existing master log to ``*.bak``, and attaches a FileHandler writing
    to ``out_dir/logfile``.

    :return: ``(opts, args)`` as produced by ``OptionParser.parse_args``.
    """
    p = OptionParser()
    p.add_option('-e', '--expr', dest='expr', action='append', type='string',
                 help='specify expression')  # fixed typo: was 'experssion'
    p.add_option('-v', '--verbose', dest='verbose', default='2',
                 help='specify verbose level')
    p.add_option('-o', '--out_dir', dest='out_dir', default='logs', type='string',
                 help='specify output directory')
    p.add_option('-l', '--logfile', dest='logfile', default='dvpy_master.log',
                 type='string', help='specify log filename')
    p.add_option('-m', '--max_agents', dest='max_agents', default=1, type='int',
                 help='specify max agent number')
    p.add_option('-s', '--suite', dest='suite', action='callback', type='string',
                 callback=add_suite, help='specify suite')
    p.add_option('-t', '--test', action='callback', dest='test', type='string',
                 callback=add_test, help='specify test')
    p.add_option('-w', '--where', action='store', dest='where', type='string',
                 help='specify test selector')
    p.add_option('-a', '--action', action='append', dest='action', type='string',
                 help='specify action')
    p.add_option('-f', '--patchfile', action='append', dest='patchfile',
                 type='string', help='specify patch file')
    p.add_option('-g', '--gcf', dest='gcf', type='string', default='local',
                 help='specify gcf')
    (opts, args) = p.parse_args()

    logger.setLevel(get_level(opts.verbose))

    master_log = path.abspath(path.join(opts.out_dir, opts.logfile))
    try:
        makedirs(path.dirname(master_log))
    except OSError:
        # Directory already exists; any other OS failure will surface
        # when FileHandler tries to open the log below.
        pass
    if path.exists(master_log):
        # Keep the previous run's log around as a .bak.
        rename(master_log, master_log + '.bak')

    fh = logging.FileHandler(master_log)
    fh.setFormatter(logging.Formatter(FORMAT))
    fh.setLevel(get_level(opts.verbose))
    logger.addHandler(fh)

    return (opts, args)
################################################################################ if __name__ == "__main__": print('go') #----------------- # get arguments : log.info('get args') args = get_integrous_arguments_values() make_sure_path_exists(args.out_path) #----------------- # set verbosity : level = logger.get_level(args.verbosity) log.setLevel(level) #----------------- # run minimi + velocities generation if needed : if args.minimisation: log.info('run minimi') MD_minimi = MolecularDynamics( mdp_filename=args.minimisation_mdp_filename, gro_filename=args.gro_filename, top_filename=args.top_filename, out_path=args.out_path, out_name=args.minimisation_outname, maxwarn=args.maxwarn) MD_minimi.run()