def get_navbar(request):
    """Build the navbar template context for *request*.

    Selects the intranet or internet navbar item set based on get_geo(),
    caches the queryset for 10 minutes, and merges the entries into the
    common environment context.
    """
    # The two branches were copy-paste duplicates differing only in the
    # cache key and the filter field; parameterize them instead.
    if get_geo(request)[0]:
        cache_key, filter_field = 'navbar_items__intranet', 'intranet'
    else:
        cache_key, filter_field = 'navbar_items__internet', 'internet'
    items = cache.get(cache_key)
    if not items:
        # NOTE(review): an empty-but-cached queryset re-queries here (the
        # check is falsy, not `is None`) — kept as in the original.
        items = NavbarItem.objects.filter(
            **{filter_field: True}).order_by('order')
        cache.set(cache_key, items, 600)  # 600 s = 10 minute TTL
    navbar_items = [
        {'key': item.key,
         'title': item.title,
         'url': reverse(item.key + ':index')}
        for item in items
    ]
    result = get_environment(request)
    result.update({'navbar_items': navbar_items})
    # Highlight the navbar item whose URL namespace matches the current path.
    result.update({'active_item': resolve(request.path).namespace})
    return result
def main(cfg):
    """Build environment, agent, replay buffer and algorithm from *cfg*, then run."""
    print(cfg.pretty(), end="\n\n\n")
    # Each component is constructed from its own config sub-section.
    env = get_environment(**cfg["environment"])
    actor = Agent.from_conf(env, **cfg["agent"])
    buffer_ = ReplayBufferBase.from_conf(**cfg["replay_buffer"])
    runner = Algorithm.from_conf(env, actor, buffer_, **cfg["algorithm"])
    runner()
def main(args):
    """Print the project's status, using the environment's scheduler when available."""
    # Environments without a scheduler raise AttributeError; fall back to None.
    try:
        sched = get_environment().get_scheduler()
    except AttributeError:
        sched = None
    proj = get_project()
    with Pool() as workers:
        proj.print_status(sched, pool=workers, **vars(args))
def main(args):
    """Submit project operations to the environment's scheduler.

    Resolves a default processors-per-node (ppn) from the environment when
    the user did not pass ``--ppn``; raises ValueError if no default exists.
    """
    env = environment.get_environment(test=args.test)
    if args.ppn is None:
        try:
            args.ppn = env.cores_per_node
        except AttributeError:
            # Bug fix: the original implicitly-concatenated string literals
            # were missing a separating space ("...(ppn).Please provide...").
            raise ValueError(
                "Did not find a default value for the processors-per-node "
                "(ppn). Please provide `--ppn` argument")
    project = get_project()
    project.submit(env, **vars(args))
def main(argv=sys.argv):
    """Bootstrap the VOLTTRON-lite platform.

    Parses options, loads configuration, wires the agent-exchange socket
    addresses, configures logging, autostarts agents, then runs the
    exchange and control greenlets until shutdown.
    """
    # Parse options
    parser = OptionParser(os.path.basename(argv[0]))
    opts = parser.parse_args(argv[1:])
    # Load configuration
    env = get_environment()
    env.config.parser_load(parser, opts.config, opts.extra_config)
    sub_addr = env.config['agent-exchange']['subscribe-address']
    append_pid = env.config['agent-exchange']['append-pid']
    # For ipc:// sockets, optionally append the PID so concurrent platform
    # instances do not collide on the same socket file.
    if append_pid and sub_addr.startswith('ipc://'):
        sub_addr += '.{}'.format(os.getpid())
        env.config['agent-exchange']['subscribe-address'] = sub_addr
    pub_addr = env.config['agent-exchange']['publish-address']
    if append_pid and pub_addr.startswith('ipc://'):
        pub_addr += '.{}'.format(os.getpid())
        env.config['agent-exchange']['publish-address'] = pub_addr
    # Resolve the resource monitor and agent platform implementations via
    # setuptools entry points and instantiate them against the environment.
    env.resmon = load_entry_point(
        'volttronlite', 'volttron.switchboard.resmon', 'lite')(env)
    env.aip = load_entry_point(
        'volttronlite', 'volttron.switchboard.aip', 'lite')(env)
    # Configure logging: verboseness shifts the threshold in steps of 10
    # from WARNING, clamped at 0 (positive values mean a higher threshold).
    level = max(0, logging.WARNING + opts.verboseness * 10)
    if opts.log is None:
        log_to_file(sys.stderr, level)
    elif opts.log == '-':
        log_to_file(sys.stdout, level)
    elif opts.log:
        log_to_file(opts.log, level, handler_class=handlers.WatchedFileHandler)
    else:
        # A falsy-but-not-None --log value (e.g. empty string) disables
        # logging entirely via a NullHandler at level 100.
        log_to_file(None, 100, handler_class=lambda x: logging.NullHandler())
    if opts.log_config:
        logging.config.fileConfig(opts.log_config)
    env.aip.setup()
    if not opts.skip_autostart:
        # autostart() yields (agent_name, error) pairs for failed starts.
        for name, error in env.aip.autostart():
            _log.error('error starting {!r}: {}\n'.format(name, error))
    # Main loops: if the exchange greenlet dies, kill the control loop;
    # the exchange is always killed on exit and AIP cleanup always runs.
    try:
        exchange = gevent.spawn(agent_exchange, pub_addr, sub_addr)
        try:
            control = gevent.spawn(control_loop, env.config)
            exchange.link(lambda *a: control.kill())
            control.join()
        finally:
            exchange.kill()
    finally:
        env.aip.finish()
def main(cfg):
    """Reload a finished experiment's config, restore its model, and evaluate it."""
    print(cfg.pretty(), end="\n\n\n")
    # The original run's Hydra config lives under the experiment directory.
    exp_cfg = omegaconf.OmegaConf.load(cfg.experiment_path + "/.hydra/config.yaml")
    # Force environment monitoring on for evaluation.
    exp_cfg.environment.monitor = "always"
    env = get_environment(**exp_cfg["environment"])
    actor = Agent.from_conf(env, **exp_cfg["agent"])
    buffer_ = ReplayBuffer.from_conf(**exp_cfg["replay_buffer"])
    algo = Algorithm.from_conf(env, actor, buffer_, **exp_cfg["algorithm"])
    algo.restore_model(cfg.experiment_path + "/checkpoints")
    for episode in range(cfg["n_episodes"]):
        # In-place progress counter (carriage return, no newline).
        print("{: 3d}/{: 3d}".format(episode + 1, cfg["n_episodes"]), end='\r')
        algo.evaluate()
def get_navbar(request):
    """Assemble the navbar context: cached item set plus environment data."""
    # Geo lookup decides whether the visitor sees the intranet menu.
    if get_geo(request)[0]:
        items = cache.get('navbar_items__intranet')
        if not items:
            items = NavbarItem.objects.filter(intranet=True).order_by('order')
            cache.set('navbar_items__intranet', items, 600)
    else:
        items = cache.get('navbar_items__internet')
        if not items:
            items = NavbarItem.objects.filter(internet=True).order_by('order')
            cache.set('navbar_items__internet', items, 600)
    entries = [
        {'key': entry.key,
         'title': entry.title,
         'url': reverse(entry.key + ':index')}
        for entry in items
    ]
    context = get_environment(request)
    context.update({'navbar_items': entries})
    context.update({'active_item': resolve(request.path).namespace})
    return context
def handler(agent_name):
    """Stop the named agent via the platform's AIP."""
    env = get_environment()
    env.aip.stop_agent(agent_name)
def handler(agent_name):
    """Disable the named agent via the platform's AIP."""
    env = get_environment()
    env.aip.disable_agent(agent_name)
def handler():
    """Return the agents known to the platform's AIP."""
    aip = get_environment().aip
    return aip.list_agents()
def handler(agent_name):
    """Unload the named agent via the platform's AIP."""
    env = get_environment()
    env.aip.unload_agent(agent_name)
def handler(agent_config, name=None, force=False):
    """Load an agent from *agent_config*, optionally under *name*, forcing if asked."""
    env = get_environment()
    env.aip.load_agent(agent_config, name, force)
def handler(exe_path, name=None, force=False):
    """Install the executable at *exe_path*, optionally under *name*, forcing if asked."""
    env = get_environment()
    env.aip.install_executable(exe_path, name, force)
# Scrape agent start coordinates from the course web page, build the agents,
# and run the movement simulation.
r = requests.get(
    'http://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part9/data.html'
)
content = r.text
soup = bs4.BeautifulSoup(content, 'html.parser')
# Elements with class "y" / "x" carry the per-agent start coordinates.
td_ys = soup.find_all(attrs={"class": "y"})
td_xs = soup.find_all(attrs={"class": "x"})
print(td_ys)
print(td_xs)
num_of_agents = 10
# make our agent coordinates change an arbitrary number of times
num_of_iterations = 100
neighbourhood = 20
agents = []
# NOTE(review): this rebinds the module name `environment` to the returned
# environment object — any later use of the module under this name breaks.
environment = environment.get_environment()
# size of the pop up window
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 200, 200])
# Let's create a list with num_of_agents agent coordinates.
# Each agent also receives the shared agents list and the neighbourhood size.
for i in range(num_of_agents):
    y = int(td_ys[i].text)
    x = int(td_xs[i].text)
    agents.append(
        agentframework.Agent(environment, agents, neighbourhood, y, x))
# Move every agent once per iteration.
for j in range(num_of_iterations):
    for i in range(num_of_agents):
        agents[i].move()
def __init__(self, *args, **kwargs):
    """Register the full simulation workflow as flow operations.

    Pipeline: initialize -> fix_overlaps (LAMMPS) -> lammps_to_gmx ->
    minimize -> equilibrate -> compress -> shear at 5/15/25 nN, each shear
    force with grompp/mdrun, extend, and rerun stages chained by pre/post
    conditions.
    """
    super(MyProject, self).__init__(*args, **kwargs)
    # NOTE(review): `env` is assigned but never referenced below — confirm
    # whether get_environment() is needed for side effects only.
    env = environment.get_environment()

    # Local factories: each wraps self.add_operation with the launcher/cmd
    # boilerplate for one kind of GROMACS step, run inside the job workspace.

    def add_grompp_op(op_name, name, gro, sys, **kwargs):
        # Single-core grompp preprocessing step.
        self.add_operation(
            name=op_name,
            cmd=('-n 1 --w-cd {{job.ws}} -- {}'
                 ''.format(_grompp_str(self.root_directory(), name, gro, sys))),
            **kwargs)

    def add_mdrun_op(op_name, name, **kwargs):
        # 16-rank mdrun; '0'*16 passes a GPU-id string of sixteen zeros
        # (all ranks pinned to GPU 0 — presumably one GPU per node).
        self.add_operation(
            name=op_name,
            cmd=('-n 16 -N 16 --w-cd {{job.ws}} -- {} -gpu_id '
                 '{}'.format(_mdrun_str(name), '0'*16)),
            **kwargs)

    def add_grompp_extend_op(op_name, name, **kwargs):
        # grompp step that extends an existing run.
        self.add_operation(
            name=op_name,
            cmd=('-n 1 --w-cd {{job.ws}} -- {}'
                 ''.format(_grompp_extend_str(name))),
            **kwargs)

    def add_mdrun_extend_op(op_name, name, **kwargs):
        # mdrun continuation of an extended run (same GPU pinning as above).
        self.add_operation(
            name=op_name,
            cmd=('-n 16 -N 16 --w-cd {{job.ws}} -- {} -gpu_id '
                 '{}'.format(_mdrun_extend_str(name), '0'*16)),
            **kwargs)

    def add_grompp_rerun_op(op_name, name, gro, sys, **kwargs):
        # grompp step preparing a trajectory rerun.
        self.add_operation(
            name=op_name,
            cmd=('-n 1 --w-cd {{job.ws}} -- {}'
                 ''.format(_grompp_rerun_str(self.root_directory(), name, gro, sys))),
            **kwargs)

    def add_mdrun_rerun_op(op_name, name, **kwargs):
        # Single-rank mdrun rerun over an existing trajectory.
        self.add_operation(
            name=op_name,
            cmd=('-n 1 --w-cd {{job.ws}} -- {}'
                 ''.format(_mdrun_rerun_str(name))),
            **kwargs)

    # --- setup stages -----------------------------------------------------
    self.add_operation(
        name='initialize',
        cmd='python src/operations.py initialize {job._id}',
        post=[self.initialized])
    self.add_operation(
        name='fix_overlaps',
        cmd=('-n 16 --w-cd {{job.ws}} -- lmp_titan -in '
             '{0}/src/util/mdp_files/in.minimize -log minimize.log'
             ''.format(self.root_directory())),
        np=16,
        pre=[self.initialized],
        post=[self.fixed_overlaps])
    # Convert the LAMMPS minimized frame (t = 1.0) into a GROMACS .gro file.
    self.add_operation(
        name='lammps_to_gmx',
        cmd=('echo 0 | aprun gmx_mpi trjconv -s {job.ws}/init.gro -f '
             '{job.ws}/minimize.xtc -o {job.ws}/minimized.gro -b 1.0 -e 1.0'),
        pre=[self.fixed_overlaps],
        post=[self.lmp_to_gmx])
    # --- minimize / equilibrate / compress --------------------------------
    add_grompp_op(
        op_name='minimize_grompp', name='em', gro='minimized', sys='init',
        pre=[self.lmp_to_gmx], post=[self.ready_to_minimize])
    add_mdrun_op(
        op_name='minimize', name='em',
        pre=[self.ready_to_minimize], post=[self.minimized])
    add_grompp_op(
        op_name='equilibrate_grompp', name='nvt', gro='em', sys='init',
        pre=[self.minimized], post=[self.ready_to_equilibrate])
    add_mdrun_op(
        op_name='equilibrate', name='nvt',
        pre=[self.ready_to_equilibrate], post=[self.equilibrated])
    add_grompp_op(
        op_name='compress_grompp', name='compress', gro='nvt', sys='init',
        pre=[self.equilibrated], post=[self.ready_to_compress])
    add_mdrun_op(
        op_name='compress', name='compress',
        pre=[self.ready_to_compress], post=[self.compressed])
    # --- shear at 5 nN ----------------------------------------------------
    add_grompp_op(
        op_name='shear_5nN_grompp', name='shear_5nN', gro='compress', sys='init',
        pre=[self.compressed], post=[self.ready_to_shear_at_5nN])
    add_mdrun_op(
        op_name='shear_5nN', name='shear_5nN',
        pre=[self.ready_to_shear_at_5nN], post=[self.sheared_at_5nN])
    # NOTE(review): this 5 nN extend stage is gated on sheared_at_25nN —
    # a cross-chain dependency. Either the extend stages are deliberately
    # serialized after all initial shears, or this should be sheared_at_5nN;
    # confirm with the workflow owner.
    add_grompp_extend_op(
        op_name='shear_5nN_extend_grompp', name='shear_5nN',
        pre=[self.sheared_at_25nN], post=[self.ready_to_extend_shear_at_5nN])
    add_mdrun_extend_op(
        op_name='shear_5nN_extend', name='shear_5nN',
        pre=[self.ready_to_extend_shear_at_5nN], post=[self.extended_shear_at_5nN])
    add_grompp_rerun_op(
        op_name='shear_5nN_rerun_grompp', name='shear_5nN', gro='compress', sys='init',
        pre=[self.extended_shear_at_5nN], post=[self.ready_to_rerun_shear_at_5nN])
    add_mdrun_rerun_op(
        op_name='shear_5nN_rerun', name='shear_5nN',
        pre=[self.ready_to_rerun_shear_at_5nN], post=[self.shear_reran_at_5nN])
    # --- shear at 15 nN ---------------------------------------------------
    add_grompp_op(
        op_name='shear_15nN_grompp', name='shear_15nN', gro='compress', sys='init',
        pre=[self.sheared_at_5nN], post=[self.ready_to_shear_at_15nN])
    add_mdrun_op(
        op_name='shear_15nN', name='shear_15nN',
        pre=[self.ready_to_shear_at_15nN], post=[self.sheared_at_15nN])
    # NOTE(review): gated on the 5 nN rerun, not on sheared_at_15nN —
    # same cross-chain pattern as above; confirm intent.
    add_grompp_extend_op(
        op_name='shear_15nN_extend_grompp', name='shear_15nN',
        pre=[self.shear_reran_at_5nN], post=[self.ready_to_extend_shear_at_15nN])
    add_mdrun_extend_op(
        op_name='shear_15nN_extend', name='shear_15nN',
        pre=[self.ready_to_extend_shear_at_15nN], post=[self.extended_shear_at_15nN])
    add_grompp_rerun_op(
        op_name='shear_15nN_rerun_grompp', name='shear_15nN', gro='compress', sys='init',
        pre=[self.extended_shear_at_15nN], post=[self.ready_to_rerun_shear_at_15nN])
    add_mdrun_rerun_op(
        op_name='shear_15nN_rerun', name='shear_15nN',
        pre=[self.ready_to_rerun_shear_at_15nN], post=[self.shear_reran_at_15nN])
    # --- shear at 25 nN ---------------------------------------------------
    add_grompp_op(
        op_name='shear_25nN_grompp', name='shear_25nN', gro='compress', sys='init',
        pre=[self.shear_reran_at_15nN], post=[self.ready_to_shear_at_25nN])
    add_mdrun_op(
        op_name='shear_25nN', name='shear_25nN',
        pre=[self.ready_to_shear_at_25nN], post=[self.sheared_at_25nN])
    # NOTE(review): gated on extended_shear_at_15nN rather than
    # sheared_at_25nN — confirm intent (see notes above).
    add_grompp_extend_op(
        op_name='shear_25nN_extend_grompp', name='shear_25nN',
        pre=[self.extended_shear_at_15nN], post=[self.ready_to_extend_shear_at_25nN])
    add_mdrun_extend_op(
        op_name='shear_25nN_extend', name='shear_25nN',
        pre=[self.ready_to_extend_shear_at_25nN], post=[self.extended_shear_at_25nN])
    add_grompp_rerun_op(
        op_name='shear_25nN_rerun_grompp', name='shear_25nN', gro='compress', sys='init',
        pre=[self.shear_reran_at_15nN], post=[self.ready_to_rerun_shear_at_25nN])
    add_mdrun_rerun_op(
        op_name='shear_25nN_rerun', name='shear_25nN',
        pre=[self.ready_to_rerun_shear_at_25nN], post=[self.shear_reran_at_25nN])
def authorize(request):
    """OAuth authorization endpoint: show the login form or handle its submission.

    On a non-login request, verifies the client and renders the login page;
    on a valid login form, authenticates the user and issues an
    authorization code (``response_type == 'code'`` only).
    """
    result = get_environment(request)
    result.update(csrf(request))
    # Check whether this is a login-form submission.
    # TODO: needs rework once the system has entry points other than
    # initialization and login (SunFulong@2014-1-7)
    form = AuthenticationForm(request.POST)
    # Not a login form: verify the client and show the login page.
    if not form.is_valid():
        # Verify the client application's identity.
        # NOTE: verify_client returns an HttpResponse on failure, which is
        # returned directly to the user.
        form, client = verify_client(request.REQUEST)
        if issubclass(form.__class__, HttpResponse):
            return form
        result.update({'name': client.name})
        # Session expires when the browser closes.
        request.session.set_expiry(0)
        request.session.update(form.cleaned_data)
        return render_to_response('oauth/authorize.html', result)
    # It is a login form: perform the relevant validation.
    else:
        action = form.cleaned_data['action']
        username = form.cleaned_data['username']
        domain = form.cleaned_data['domain']
        password = form.cleaned_data['password']
        # Verify the client application's identity.
        form, client = verify_client(request.REQUEST)
        if issubclass(form.__class__, HttpResponse):
            return form
        result.update({'name': client.name})
        response_type = form.cleaned_data['response_type']
        # NOTE(review): client_id and scope are read but never used below.
        client_id = form.cleaned_data['client_id']
        redirect_uri = request.session['redirect_uri']
        scope = request.session['scope']
        state = request.session['state']
        # Handle the authorization-code request.
        if response_type == 'code':
            # Any action other than login is an access denial.
            if not action.lower() == 'login'.lower():
                return callback_client(redirect_uri + '?error=access_denied', state)
            # Validate form completeness.
            if not username or not domain or not password:
                # Error string (Chinese): "please enter email address and password"
                result.update({'error': '请输入邮箱地址及密码'})
                return render_to_response('oauth/authorize.html', result)
            # Validate the user's credentials.
            user = authenticate(email=username + '@' + domain, password=password)
            if not user:
                # Error string (Chinese): "email address or password wrong, retry"
                result.update({'error': '邮箱地址或密码错误,请重新输入'})
                return render_to_response('oauth/authorize.html', result)
            # Generate the authorization code (valid for 10 minutes).
            code = AuthorizationCode(
                client=client, user=user, redirect_uri=redirect_uri,
                expire_time=datetime.datetime.now() + datetime.timedelta(minutes=10))
            code.save()
            return callback_client(
                redirect_uri + '?code=' + urlsafe_base64_encode(code.code.bytes), state)
        else:
            # NOTE(review): this branch returns a (response, None) tuple,
            # unlike every other branch — confirm callers expect a tuple.
            return callback_client(
                redirect_uri + '?error=unsupported_response_type', state), None
def authorize(request):
    """OAuth authorization endpoint: show the login form or handle its submission.

    On a non-login request, verifies the client and renders the login page;
    on a valid login form, authenticates the user and issues an
    authorization code (``response_type == 'code'`` only).
    """
    result = get_environment(request)
    result.update(csrf(request))
    # Check whether this is a login-form submission.
    # TODO: needs rework once the system has entry points other than
    # initialization and login (SunFulong@2014-1-7)
    form = AuthenticationForm(request.POST)
    # Not a login form: verify the client and show the login page.
    if not form.is_valid():
        # Verify the client application's identity.
        # NOTE: verify_client returns an HttpResponse on failure, which is
        # returned directly to the user.
        form, client = verify_client(request.REQUEST)
        if issubclass(form.__class__, HttpResponse):
            return form
        result.update({'name': client.name})
        # Session expires when the browser closes.
        request.session.set_expiry(0)
        request.session.update(form.cleaned_data)
        return render_to_response('oauth/authorize.html', result)
    # It is a login form: perform the relevant validation.
    else:
        action = form.cleaned_data['action']
        username = form.cleaned_data['username']
        domain = form.cleaned_data['domain']
        password = form.cleaned_data['password']
        # Verify the client application's identity.
        form, client = verify_client(request.REQUEST)
        if issubclass(form.__class__, HttpResponse):
            return form
        result.update({'name': client.name})
        response_type = form.cleaned_data['response_type']
        # NOTE(review): client_id and scope are read but never used below.
        client_id = form.cleaned_data['client_id']
        redirect_uri = request.session['redirect_uri']
        scope = request.session['scope']
        state = request.session['state']
        # Handle the authorization-code request.
        if response_type == 'code':
            # Any action other than login is an access denial.
            if not action.lower() == 'login'.lower():
                return callback_client(redirect_uri + '?error=access_denied', state)
            # Validate form completeness.
            if not username or not domain or not password:
                # Error string (Chinese): "please enter email address and password"
                result.update({'error': '请输入邮箱地址及密码'})
                return render_to_response('oauth/authorize.html', result)
            # Validate the user's credentials.
            user = authenticate(email=username + '@' + domain, password=password)
            if not user:
                # Error string (Chinese): "email address or password wrong, retry"
                result.update({'error': '邮箱地址或密码错误,请重新输入'})
                return render_to_response('oauth/authorize.html', result)
            # Generate the authorization code (valid for 10 minutes).
            code = AuthorizationCode(
                client=client, user=user, redirect_uri=redirect_uri,
                expire_time=datetime.datetime.now() + datetime.timedelta(minutes=10))
            code.save()
            return callback_client(
                redirect_uri + '?code=' + urlsafe_base64_encode(code.code.bytes),
                state)
        else:
            # NOTE(review): this branch returns a (response, None) tuple,
            # unlike every other branch — confirm callers expect a tuple.
            return callback_client(
                redirect_uri + '?error=unsupported_response_type', state), None
def handler(config_path):
    """Launch an agent from the configuration file at *config_path*."""
    env = get_environment()
    env.aip.launch_agent(config_path)
def handler():
    """Shut down the platform via its AIP."""
    env = get_environment()
    env.aip.shutdown()
def handler():
    """Return the executables installed on the platform's AIP."""
    aip = get_environment().aip
    return aip.list_executables()
def handler(exe_name, force=False):
    """Remove the named executable from the platform, forcing if asked."""
    env = get_environment()
    env.aip.remove_executable(exe_name, force)
def main():
    """Run one consolidation experiment (Python 2 script).

    Builds an environment (random, from file, or live OpenStack), records
    its state, then applies split and consolidate phases, saving each phase
    and its migration actions to the database.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', help='input file containing environment specs', nargs=1)
    parser.add_argument(
        '-r', help='generates random environment with \
specified number of hosts [0] and VMs [1]', nargs=2)
    args = parser.parse_args()
    # Environment source priority: random (-r), then file (-f), then live.
    if args.r:
        env = env_mod.create_random_environment(int(args.r[0]), int(args.r[1]))
        description = ("%s virtual machine and %s physical machines"
                       % (args.r[1], args.r[0]))
    elif args.f:
        env = env_mod.parse_environment(args.f[0])
        description = "input file: %s" % args.f[0]
    else:
        env = env_mod.get_environment()
        description = "OpenStack"
    # Record the experiment run; phases below reuse this cons_id/timestamp.
    ts = dbops.get_ts_local()
    cons_id = dbops.save_cons_rec(ts, True, description)
    actions = []
    # --- initial state ---
    print "----------------Environment---------------------------------"
    env_mod.print_env(env)
    print("Environment Energy Consumption: %s W"
          % str(env_mod.compute_env_consumption(env)))
    print len(env_mod.get_unused_hypervisors(env))
    print len(env_mod.get_used_hypervisors(env))
    print host_mod.compute_pm_util_avg_rel(env)
    dbops.save_environment(cons_id, ts, env, "init", actions)
    # --- split phase: returns the migration actions it performed ---
    actions = cc_mod.split(env)
    env_mod.print_env_available_res(env)
    env_mod.print_env_available_res_rel(env)
    print "----------------Evironment after split----------------------"
    env_mod.print_env(env)
    print "No migrations: " + str(len(actions))
    # mig_mod.print_actions(actions)
    # env_mod.print_env(env)
    print("Environment Energy Consumption: %s W"
          % str(env_mod.compute_env_consumption(env)))
    print host_mod.compute_pm_util_avg_rel(env)
    print len(env_mod.get_unused_hypervisors(env))
    print len(env_mod.get_used_hypervisors(env))
    dbops.save_environment(cons_id, ts, env, "split", actions)
    # --- consolidation phase ---
    actions = cc_mod.consolidate(env)
    env_mod.print_env_available_res(env)
    env_mod.print_env_available_res_rel(env)
    print "----------------Environment after consolidation-------------"
    env_mod.print_env(env)
    print "No migrations: " + str(len(actions))
    # mig_mod.print_actions(actions)
    # env_mod.print_env(env)
    print("Environment Energy Consumption: %s W"
          % str(env_mod.compute_env_consumption(env)))
    print host_mod.compute_pm_util_avg_rel(env)
    print len(env_mod.get_unused_hypervisors(env))
    print len(env_mod.get_used_hypervisors(env))
    dbops.save_environment(cons_id, ts, env, "cons", actions)