def get_ssh_keys():
    """Return the stored (private_key, public_key) pair, generating and
    persisting a fresh 2048-bit RSA pair on first use."""
    private_key = get_config("private_key")
    public_key = get_config("public_key")

    if private_key and public_key:
        return private_key, public_key

    # No stored pair yet: generate one and persist both halves.
    key = RSA.generate(2048)
    private_key = key.exportKey("PEM")
    public_key = key.publickey().exportKey("OpenSSH")

    with app.app_context():
        db.session.add(Config("private_key", private_key))
        db.session.add(Config("public_key", public_key))
        db.session.commit()
        db.session.close()

    return private_key, public_key
def get(self):
    """Render the admin system page with the user list and global config.

    Non-admin users (uid above MAX_ADMIN_UID) get a 404.
    """
    user = self.get_current_user()
    if user:
        # NOTE(review): logging the current user at ERROR level looks like a
        # debug leftover — confirm before lowering the level.
        logger.error(str(user))
        if user['uid'] > MAX_ADMIN_UID:
            raise tornado.web.HTTPError(404, 'Page Not Found!')
    users_total = yield self.get_users_total()
    users = User.select()
    try:
        # Global config lives in the row with id == 1; fall back to an
        # unsaved blank Config when it does not exist yet.
        config = Config.select().where(Config.id == 1).get()
    except Config.DoesNotExist:
        config = Config()
    msg = self.get_argument('msg', '')
    # NOTE(review): page / page_size / offset are computed but never applied
    # to the User query nor passed to the template — pagination appears
    # unfinished here.
    page = int(self.get_argument('page', '1'))
    page_size = 100
    offset = page_size * (page - 1)
    kwargs = {
        'msg': msg,
        'users': users,
        'users_total': users_total,
        'config': config,
    }
    self.render("admin/system.html", **kwargs)
def main():
    """Train DenseConnectBiLSTM on the configured task's train/dev/test splits."""
    data_folder = os.path.join('.', 'dataset', 'data')
    # locate the task's data directory and build its configuration
    source_dir = os.path.join(data_folder, task)
    config = Config(task)
    # read the three dataset splits
    trainset = load_json(os.path.join(source_dir, 'train.json'))
    devset = load_json(os.path.join(source_dir, 'dev.json'))
    testset = load_json(os.path.join(source_dir, 'test.json'))
    # build the model, optionally resuming a previous run
    model = DenseConnectBiLSTM(config, resume_training=resume_training)
    # training hyper-parameters
    batch_size = 200
    epochs = 30
    if has_devset:
        model.train(trainset, devset, testset, batch_size=batch_size,
                    epochs=epochs, shuffle=True)
    else:
        # no dev split: fold dev data into training and skip validation
        model.train(trainset + devset, None, testset, batch_size=batch_size,
                    epochs=epochs, shuffle=True)
def check_config():
    '''
    On first run, seed the Config table with an initial row.

    :return: False when the row had to be created, True when an existing
             row was refreshed with a new create_time.
    '''
    conf = session.query(Config).first()
    now_time = int(time.time())
    # Midnight of today plus the configured loop interval.
    # NOTE(review): eval() on a config value is dangerous if the config
    # source is not fully trusted — consider int() / ast.literal_eval.
    spider_time = int(
        time.mktime(
            time.strptime(time.strftime("%Y-%m-%d", time.localtime(now_time)),
                          "%Y-%m-%d")) + eval(config.LOOP_TIME))
    if not conf:
        conf = Config(create_time=time.strftime("%Y-%m-%d %H:%M:%S",
                                                time.localtime(spider_time)), )
        session.add(conf)
        session.commit()
        return False
    else:
        # Existing row: recompute using the per-row loop_time value instead
        # of the module-level default.
        spider_time = int(
            time.mktime(
                time.strptime(
                    time.strftime("%Y-%m-%d", time.localtime(now_time)),
                    "%Y-%m-%d")) + eval(conf.loop_time))
        conf.create_time = time.strftime("%Y-%m-%d %H:%M:%S",
                                         time.localtime(spider_time))
        session.commit()
        return True
def seed_config(db):
    """Seed the Config table with a default row for this instance.

    :param db: the Flask-SQLAlchemy handle whose session is used.
    """
    print("++ Seeding config")
    a = Config(app_name="My reel2bits instance")
    a.app_description = """This is a reel2bits instance"""
    db.session.add(a)
    # One commit suffices; the original issued a redundant second commit.
    db.session.commit()
def config(self, api_port):
    """Initialise on-disk state and persist the daemon configuration,
    creating a default Config record on first run."""
    logger.info('Configuring alcazard with state at {}', self.state_path)
    api_port = api_port or DEFAULT_PORT
    os.makedirs(self.state_path, exist_ok=True)
    DB.init(self.db_path)
    with DB:
        self._init_db()
        current = Config.select().first()
        if current is None:
            # First run: build a fully-configured default record.
            from transmission.params import DEFAULT_TRANSMISSION_SETTINGS_TEMPLATE
            current = Config(
                is_fully_configured=True,
                transmission_settings=DEFAULT_TRANSMISSION_SETTINGS_TEMPLATE,
                is_dht_enabled=False,
                local_port_pools_fmt='9091-9291',
                peer_port_pools_fmt='21413-21613',
            )
        current.api_port = api_port
        current.save()
    logger.info('Saved configuration - done.')
def r_before_request(): try: g.ts = ji.Common.ts() if not is_not_need_to_auth(request.endpoint) and request.blueprint is not None and request.method != 'OPTIONS': g.config = Config() g.config.id = 1 g.config.get() token = session.get('token', '') g.token = Utils.verify_token(token) user = User() user.id = g.token['uid'] try: user.get() except ji.PreviewingError, e: # 如果该用户获取失败,则清除该用户对应的session。因为该用户可能已经被删除。 for key in session.keys(): session.pop(key=key) return json.loads(e.message) except ji.JITError, e: ret = json.loads(e.message) if ret['state']['code'] == '404': return redirect(location=url_for('v_config.create'), Response=Response) if ret['state']['sub']['code'] in ['41208']: return redirect(location=url_for('v_misc.login'), Response=Response) return ret
def newclusterconfig():
    """Create a new cluster Config row with a fresh etcd discovery token.

    Returns an OK JSON payload with the stored cluster fields on success;
    a Fail payload for non-POST requests or when fetching the token /
    persisting the record fails.  (Removed a leftover debug `print`.)
    """
    tokenUrl = "https://discovery.etcd.io/new"
    if request.method == 'POST':
        # save a new config
        nc = Config()
        nc.cluster_name = request.form['cluster_name']
        try:
            # fetch a fresh discovery token for the new cluster
            r = requests.get(tokenUrl)
            nc.cluster_etcd_locator_url = r.text
            db.session.add(nc)
            db.session.commit()
            return json.dumps({
                'status': 'OK',
                'cluster': {
                    'id': nc.id,
                    'cluster_name': nc.cluster_name,
                    'cluster_etcd_locator_url': nc.cluster_etcd_locator_url
                }
            })
        except Exception:
            # The original swallowed the error and still reported OK with a
            # half-initialised record; report failure instead.
            return json.dumps({'status': 'Fail'})
    else:
        return json.dumps({'status': 'Fail'})
def r_migrate(uuids, destination_host):
    """Ask each guest in `uuids` (comma separated) to migrate to
    `destination_host`, skipping guests on dead nodes or already there."""
    args_rules = [
        Rules.UUIDS.value,
        Rules.DESTINATION_HOST.value
    ]
    try:
        ji.Check.previewing(args_rules, {'uuids': uuids, 'destination_host': destination_host})
        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        # Fetch every live host
        available_hosts = Host.get_available_hosts(nonrandom=None)
        if available_hosts.__len__() == 0:
            ret['state'] = ji.Common.exchange_state(50351)
            return ret
        # Index live hosts by node_id for the per-guest checks below.
        available_hosts_mapping_by_node_id = dict()
        for host in available_hosts:
            if host['node_id'] not in available_hosts_mapping_by_node_id:
                available_hosts_mapping_by_node_id[host['node_id']] = host
        guest = Guest()
        # First pass only validates that every guest exists.
        for uuid in uuids.split(','):
            guest.uuid = uuid
            guest.get_by('uuid')
        config = Config()
        config.id = 1
        config.get()
        for uuid in uuids.split(','):
            guest.uuid = uuid
            guest.get_by('uuid')
            # Skip migration requests for guests on a downed compute node,
            # and requests whose destination equals the guest's current node.
            if guest.node_id not in available_hosts_mapping_by_node_id or \
                    available_hosts_mapping_by_node_id[guest.node_id]['hostname'] == destination_host:
                continue
            message = {
                '_object': 'guest',
                'action': 'migrate',
                'uuid': uuid,
                'node_id': guest.node_id,
                'storage_mode': config.storage_mode,
                'duri': 'qemu+ssh://' + destination_host + '/system'
            }
            Utils.emit_instruction(message=json.dumps(message))
        return ret
    except ji.PreviewingError, e:
        return json.loads(e.message)
def set_config(key, value):
    """Upsert a Config row for `key`, commit, and return the row."""
    entry = Config.query.filter_by(key=key).first()
    if entry is None:
        # No row yet — create one.
        entry = Config(key, value)
        db.session.add(entry)
    else:
        entry.value = value
    db.session.commit()
    return entry
async def post(self, request, _):
    """Create or update the singleton Config row from the request JSON."""
    payload = request.json
    try:
        config = await Config.get_by_id(1)
        await config.update_from_dict(payload)
    except IndexError:
        # No row with id 1 yet — create it from the payload.
        config = Config(**payload)
        await config.create()
    return json({'success': True, 'msg': 'Config updated'}, sort_keys=True)
def r_delete(ids):
    """Delete the OS template images in `ids` (comma separated).

    Public images are removed from the DB only; custom images additionally
    get a delete instruction sent to a compute node so the underlying file
    is removed from storage.
    """
    ret = dict()
    ret['state'] = ji.Common.exchange_state(20000)
    config = Config()
    config.id = 1
    config.get()
    # Fetch every live host
    available_hosts = Host.get_available_hosts(nonrandom=None)
    if available_hosts.__len__() == 0:
        ret['state'] = ji.Common.exchange_state(50351)
        return ret
    chosen_host = available_hosts[0]
    node_id = chosen_host['node_id']
    os_template_image = OSTemplateImage()
    # TODO: add a check for references held by a Guest.
    # First pass only validates that every image exists.
    for _id in ids.split(','):
        os_template_image.id = _id
        os_template_image.get()
    for _id in ids.split(','):
        os_template_image.id = _id
        os_template_image.get()
        # Deleting public images from compute nodes is not supported yet,
        # so they are only removed from the database.
        if os_template_image.kind == OSTemplateImageKind.public.value:
            os_template_image.delete()
            continue
        elif os_template_image.kind == OSTemplateImageKind.custom.value:
            # presumably 254 marks the image as being deleted — TODO confirm
            os_template_image.progress = 254
        message = {
            '_object': 'os_template_image',
            'action': 'delete',
            'storage_mode': config.storage_mode,
            'dfs_volume': config.dfs_volume,
            'template_path': os_template_image.path,
            # uuid carries no real meaning here; it only satisfies the
            # JimV-C command format.
            'uuid': None,
            'node_id': node_id,
            'os_template_image_id': os_template_image.id,
            'passback_parameters': {
                'id': os_template_image.id
            }
        }
        Utils.emit_instruction(message=json.dumps(message))
        os_template_image.update()
    return ret
def setup():
    """First-run setup page: create the admin user plus placeholder syslog
    config rows, then mark the install as complete."""
    if utils.is_setup():
        return redirect('/')
    if request.method != "POST":
        return render_template('register.html')
    # Create the admin account from the submitted form.
    username = request.form['name']
    password = bcrypt.hashpw(request.form.get('password').encode('utf-8'),
                             bcrypt.gensalt())
    admin = User(username=username, password=password, role="all")
    db.session.add(admin)
    # Placeholder syslog settings, filled in later via the UI.
    db.session.add(Config(key="syslog_ip", value=None))
    db.session.add(Config(key="syslog_port", value=None))
    db.session.commit()
    utils.set_config('installed', True)
    return redirect('/')
def r_detach_disk(disk_uuid):
    """Detach the disk identified by `disk_uuid` from its guest by emitting
    a detach_disk instruction, then mark the disk as unloading."""
    args_rules = [Rules.DISK_UUID.value]
    try:
        ji.Check.previewing(args_rules, {'disk_uuid': disk_uuid})
        disk = Disk()
        disk.uuid = disk_uuid
        disk.get_by('uuid')
        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        if disk.state != DiskState.mounted.value or disk.sequence == 0:
            # Not mounted: already detached from any guest.
            # sequence 0 marks a guest's system disk, which must never be
            # detached.
            # TODO: give the system disk its own dedicated state range.
            return ret
        guest = Guest()
        guest.uuid = disk.guest_uuid
        guest.get_by('uuid')
        # Bail out unless the guest is in a usable state.
        if guest.status in (status.GuestState.no_state.value, status.GuestState.dirty.value):
            ret['state'] = ji.Common.exchange_state(41259)
            return ret
        config = Config()
        config.id = 1
        config.get()
        guest_xml = GuestXML(guest=guest, disk=disk, config=config)
        message = {
            '_object': 'guest',
            'action': 'detach_disk',
            'uuid': disk.guest_uuid,
            'node_id': guest.node_id,
            'xml': guest_xml.get_disk(),
            'passback_parameters': {
                'disk_uuid': disk.uuid
            }
        }
        Utils.emit_instruction(message=json.dumps(message))
        disk.state = DiskState.unloading.value
        disk.update()
        return ret
    except ji.PreviewingError, e:
        return json.loads(e.message)
def r_resize(uuid, size):
    """Grow the disk `uuid` to `size` by emitting a resize instruction.

    Only enlarging is allowed; a target no bigger than the current size
    returns state 41257.
    """
    args_rules = [
        Rules.UUID.value,
        Rules.DISK_SIZE_STR.value
    ]
    try:
        ji.Check.previewing(args_rules, {'uuid': uuid, 'size': size})
        disk = Disk()
        disk.uuid = uuid
        disk.get_by('uuid')
        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        if disk.size >= int(size):
            ret['state'] = ji.Common.exchange_state(41257)
            return ret
        config = Config()
        config.id = 1
        config.get()
        disk.size = int(size)
        disk.quota(config=config)
        # The disk row in the DB is updated later by the event-return layer
        # (around models/event_processor.py:224).
        message = {
            '_object': 'disk',
            'action': 'resize',
            'uuid': disk.uuid,
            'guest_uuid': disk.guest_uuid,
            'storage_mode': config.storage_mode,
            'size': disk.size,
            'dfs_volume': config.dfs_volume,
            'node_id': disk.node_id,
            'image_path': disk.path,
            'disks': [disk.__dict__],
            'passback_parameters': {'size': disk.size}
        }
        if config.storage_mode in [StorageMode.shared_mount.value, StorageMode.ceph.value,
                                   StorageMode.glusterfs.value]:
            # Shared storage: any node can resize — pick the lightest.
            message['node_id'] = Host.get_lightest_host()['node_id']
        if disk.guest_uuid.__len__() == 36:
            # Disk is attached to a guest — include its device node.
            message['device_node'] = dev_table[disk.sequence]
        Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))
        return ret
    except ji.PreviewingError, e:
        return json.loads(e.message)
def __init__(self, notifiers: bool = True):
    """Load configuration, log in to TGTG, and set up metrics/notifiers.

    :param notifiers: when True, enable metrics (if configured) and create
        the Notifiers, optionally sending a test notification.
    :raises TGTGConfigurationError: when login fails with a config Error.
    :raises TgtgAPIError: propagated unchanged from the TGTG client.
    """
    self.config = Config(config_file) if path.isfile(config_file) else Config()
    if self.config.debug:
        # pylint: disable=E1103
        loggers = [
            logging.getLogger(name)
            for name in logging.root.manager.loggerDict
        ]
        # pylint: enable=E1103
        for logger in loggers:
            logger.setLevel(logging.DEBUG)
        log.info("Debugging mode enabled")
    self.metrics = Metrics()
    self.item_ids = self.config.item_ids
    self.amounts = {}
    try:
        self.tgtg_client = TgtgClient(
            email=self.config.tgtg["username"],
            timeout=self.config.tgtg["timeout"],
            access_token_lifetime=self.config.tgtg["access_token_lifetime"],
            max_polling_tries=self.config.tgtg["max_polling_tries"],
            polling_wait_time=self.config.tgtg["polling_wait_time"],
            access_token=self.config.tgtg["access_token"],
            refresh_token=self.config.tgtg["refresh_token"],
            user_id=self.config.tgtg["user_id"])
        self.tgtg_client.login()
        # Persist refreshed tokens so the next run skips a full login.
        self.config.save_tokens(self.tgtg_client.access_token,
                                self.tgtg_client.refresh_token,
                                self.tgtg_client.user_id)
    except TgtgAPIError:
        # Bare `raise` keeps the original traceback; `raise err` truncated it.
        raise
    except Error as err:
        log.error(err)
        raise TGTGConfigurationError() from err
    if notifiers:
        if self.config.metrics:
            self.metrics.enable_metrics()
        self.notifiers = Notifiers(self.config)
        if not self.config.disable_tests:
            log.info("Sending test Notifications ...")
            self.notifiers.send(self._test_item)
def init_db():
    """Seed the database with a sample Point and a default Config row."""
    # NOTE: 'longitud' presumably matches the Point model's field name —
    # do not "fix" the spelling here without changing the model.
    sample_point = Point(name='John Smith',
                         latitude='0',
                         longitud='0.1',
                         phone='+79000000000')
    sample_point.save()
    default_config = Config(version=1, allowedRadius=1000,
                            isDayPeriodAllowed=True)
    default_config.save()
def setup():
    """First-run setup: validate the submitted form, create the admin user
    and the TEAM_NAME / SETUP config rows.

    Returns a JSON list of validation errors on failure, otherwise
    redirects to the login page (or home when already set up).
    """
    if is_setup():
        return redirect(url_for('core.home'))
    if request.method == 'POST' and len(request.form) == 5:
        errors = []
        # Username: must be non-empty and unused.  The original accepted an
        # empty username (the taken-check only ran for non-empty names).
        if len(request.form['username']) == 0:
            errors.append('Username cannot be empty')
        elif User.query.filter_by(name=request.form['username']).first():
            errors.append('This username is taken')
        else:
            name = request.form['username']
        # Email: must match a basic address pattern and be unused.
        if re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)",
                    request.form['email']):
            if User.query.filter_by(email=request.form['email']).first():
                errors.append('This email has already been used')
            else:
                email = request.form['email']
        else:
            errors.append('Invalid email')
        if len(request.form['team_name']) > 0:
            team = request.form['team_name']
        else:
            errors.append('Team name cannot be empty')
        if len(request.form['password']) > 0:
            password = bcrypt.hash(request.form['password'])
        else:
            errors.append('Password cannot be blank')
        if len(errors) > 0:
            return jsonify(errors)
        user = User(name, email, password)
        user.admin = True
        db.session.add(user)
        db.session.add(Config('TEAM_NAME', team))
        db.session.add(Config('SETUP', "True"))
        db.session.commit()
        return redirect(url_for('auth.login'))
    return render_template('setup.html')
def r_delete(uuids):
    """Delete the disks in `uuids` (comma separated) by emitting a delete
    instruction per disk; refuses any disk still attached to a guest."""
    args_rules = [
        Rules.UUIDS.value
    ]
    try:
        ji.Check.previewing(args_rules, {'uuids': uuids})
        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        disk = Disk()
        # Verify that every given UUID refers to an existing disk.
        for uuid in uuids.split(','):
            disk.uuid = uuid
            disk.get_by('uuid')
            # The disk must be detached from any guest (idle or dirty).
            if disk.state not in [DiskState.idle.value, DiskState.dirty.value]:
                ret['state'] = ji.Common.exchange_state(41256)
                return ret
        config = Config()
        config.id = 1
        config.get()
        # Emit the actual delete instructions.
        for uuid in uuids.split(','):
            disk.uuid = uuid
            disk.get_by('uuid')
            message = {
                '_object': 'disk',
                'action': 'delete',
                'uuid': disk.uuid,
                'storage_mode': config.storage_mode,
                'dfs_volume': config.dfs_volume,
                'node_id': disk.node_id,
                'image_path': disk.path
            }
            if config.storage_mode in [StorageMode.shared_mount.value, StorageMode.ceph.value,
                                       StorageMode.glusterfs.value]:
                # Shared storage: any node can delete — pick the lightest.
                message['node_id'] = Host.get_lightest_host()['node_id']
            Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))
        return ret
    except ji.PreviewingError, e:
        return json.loads(e.message)
def r_delete(uuids): args_rules = [ Rules.UUIDS.value ] # TODO: 加入是否删除使用的数据磁盘开关,如果为True,则顺便删除使用的磁盘。否则解除该磁盘被使用的状态。 try: ji.Check.previewing(args_rules, {'uuids': uuids}) guest = Guest() # 检测所指定的 UUDIs 实例都存在 for uuid in uuids.split(','): guest.uuid = uuid guest.get_by('uuid') config = Config() config.id = 1 config.get() # 执行删除操作 for uuid in uuids.split(','): guest.uuid = uuid guest.get_by('uuid') message = { '_object': 'guest', 'action': 'delete', 'uuid': uuid, 'storage_mode': config.storage_mode, 'dfs_volume': config.dfs_volume, 'node_id': guest.node_id } Utils.emit_instruction(message=json.dumps(message)) # 删除创建失败的 Guest if guest.status == status.GuestState.dirty.value: disk = Disk() disk.uuid = guest.uuid disk.get_by('uuid') if disk.state == status.DiskState.pending.value: disk.delete() guest.delete() SSHKeyGuestMapping.delete_by_filter(filter_str=':'.join(['guest_uuid', 'eq', guest.uuid])) ret = dict() ret['state'] = ji.Common.exchange_state(20000) return ret except ji.PreviewingError, e: return json.loads(e.message)
def admin_setup_init():
    """Generate and persist a fresh setup-verification code, replacing any
    previously stored codes.  Fails when setup is already complete."""
    if utils.is_setup_complete():
        raise WebException("Setup has already been complete.")
    verification = Config("setup_verification", utils.generate_string().lower())
    with app.app_context():
        # Drop stale verification codes before storing the new one.
        stale = Config.query.filter_by(key="setup_verification").all()
        for item in stale:
            db.session.delete(item)
        db.session.add(verification)
        db.session.commit()
        db.session.close()
    return {"success": 1}
def process(self, commit, *args, **kwargs):
    """Persist a commit and mirror its payload into a Config record keyed
    by the payload's id."""
    payload = commit.data
    config = Config()
    config.id = payload["id"]
    del payload["id"]
    # 'cuota' must be stored as a string.
    payload["cuota"] = str(payload["cuota"])
    config.data = payload
    # config.commits.append(commit)
    commit.save()
    config.save()
def seed_config(db):
    """Seed the Config table with default LoTW/eQSL endpoint settings.

    :param db: the Flask-SQLAlchemy handle whose session is used.
    """
    a = Config(
        lotw_download_url="https://p1k.arrl.org/lotwuser/lotwreport.adi",
        lotw_upload_url="https://p1k.arrl.org/lotwuser/upload",
        lotw_rcvd_mark="Y",
        lotw_login_url="https://p1k.arrl.org/lotwuser/default",
        eqsl_download_url="https://www.eqsl.cc/qslcard/DownloadInBox.cfm",
        eqsl_upload_url="https://www.eqsl.cc/qslcard/ImportADIF.cfm",
        eqsl_rcvd_mark="Y",
    )
    db.session.add(a)
    # One commit suffices; the original committed twice back to back.
    db.session.commit()
def r_delete(uuids):
    """Delete the disks in `uuids` (comma separated); only idle disks may
    be deleted."""
    args_rules = [Rules.UUIDS.value]
    try:
        ji.Check.previewing(args_rules, {'uuids': uuids})
        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        disk = Disk()
        # Verify that every given UUID refers to an existing idle disk.
        for uuid in uuids.split(','):
            disk.uuid = uuid
            disk.get_by('uuid')
            if disk.state != DiskState.idle.value:
                ret['state'] = ji.Common.exchange_state(41256)
                return ret
        config = Config()
        config.id = 1
        config.get()
        # Emit the actual delete instructions.
        for uuid in uuids.split(','):
            disk.uuid = uuid
            disk.get_by('uuid')
            message = {
                '_object': 'disk',
                'action': 'delete',
                'uuid': disk.uuid,
                'storage_mode': config.storage_mode,
                'dfs_volume': config.dfs_volume,
                'hostname': disk.on_host,
                'image_path': disk.path
            }
            if disk.on_host == 'shared_storage':
                # Shared storage: route to the least-loaded host instead.
                message['hostname'] = Guest.get_lightest_host()['hostname']
            Guest.emit_instruction(
                message=json.dumps(message, ensure_ascii=False))
        return ret
    except ji.PreviewingError, e:
        return json.loads(e.message)
def r_get(): config = Config() try: config.id = 1 config.get() ret = dict() ret['state'] = ji.Common.exchange_state(20000) ret['data'] = config.__dict__ return ret except ji.PreviewingError, e: return json.loads(e.message)
def r_create():
    """Create the global configuration (row id 1) from the request JSON,
    initialise derived resources (IP set, VNC ports) and return the stored
    config.  Returns state 40901 when a config already exists."""
    args_rules = [
        Rules.JIMV_EDITION.value,
        Rules.STORAGE_MODE.value,
        Rules.DFS_VOLUME.value,
        Rules.STORAGE_PATH.value,
        Rules.VM_NETWORK.value,
        Rules.VM_MANAGE_NETWORK.value,
        Rules.START_IP.value,
        Rules.END_IP.value,
        Rules.START_VNC_PORT.value,
        Rules.NETMASK.value,
        Rules.GATEWAY.value,
        Rules.DNS1.value,
        Rules.DNS2.value
    ]
    # Populate the config object from the request body before validation.
    config = Config()
    config.id = 1
    config.jimv_edition = int(request.json.get('jimv_edition', 0))
    config.storage_mode = int(request.json.get('storage_mode', 0))
    config.dfs_volume = request.json.get('dfs_volume', '')
    config.storage_path = request.json.get('storage_path')
    config.vm_network = request.json.get('vm_network')
    config.vm_manage_network = request.json.get('vm_manage_network')
    config.start_ip = request.json.get('start_ip')
    config.end_ip = request.json.get('end_ip')
    config.start_vnc_port = int(request.json.get('start_vnc_port'))
    config.netmask = request.json.get('netmask')
    config.gateway = request.json.get('gateway')
    config.dns1 = request.json.get('dns1')
    config.dns2 = request.json.get('dns2')
    try:
        ji.Check.previewing(args_rules, config.__dict__)
        ret = dict()
        ret['state'] = ji.Common.exchange_state(20000)
        # Only one global config row may exist.
        if config.exist():
            ret['state'] = ji.Common.exchange_state(40901)
            return ret
        config.check_ip()
        config.generate_available_ip2set()
        config.generate_available_vnc_port()
        config.create()
        config.update_global_config()
        # Re-read so the response reflects what was actually stored.
        config.id = 1
        config.get()
        ret['data'] = config.__dict__
        return ret
    except ji.PreviewingError, e:
        return json.loads(e.message)
class TestUrlStatsCalculator(unittest.TestCase):
    """ Class for testing calculator of url stats """

    # Fixed configuration shared by every test in this class.
    TEST_CONFIG = Config(report_size=50,
                         report_dir="./reports",
                         log_dir="./nginx_logs",
                         log_file="./script_logs/test.log",
                         failures_percent_threshold=50.0)
    # Sample nginx log fixture the parser runs over.
    LOG_FILE = LatestLogFile(path="./nginx_logs/test_sample.txt",
                             date_of_creation=datetime.date(year=2019, month=11, day=5),
                             extension=".txt")

    def test_url_stats_size(self):
        """
        Tests if url stats size coincides with report size from config
        :return:
        """
        parsed_line_gen = parse_log_file(
            log_file=TestUrlStatsCalculator.LOG_FILE, log_file_opener=open)
        url_stats = calculate_url_stats(parsed_line_gen=parsed_line_gen,
                                        cfg=TestUrlStatsCalculator.TEST_CONFIG)
        # The report must contain exactly report_size entries ...
        with self.subTest():
            self.assertEqual(TestUrlStatsCalculator.TEST_CONFIG.report_size,
                             len(url_stats))
        urls = set()
        for single_url_stat in url_stats:
            urls.add(single_url_stat["url"])
        # ... all for distinct URLs ...
        with self.subTest():
            self.assertEqual(TestUrlStatsCalculator.TEST_CONFIG.report_size,
                             len(urls))
        # ... and every entry must carry the full set of stat fields.
        with self.subTest():
            for single_url_stat in url_stats:
                self.assertIn("url", single_url_stat)
                self.assertIn("count", single_url_stat)
                self.assertIn("count_perc", single_url_stat)
                self.assertIn("time_sum", single_url_stat)
                self.assertIn("time_perc", single_url_stat)
                self.assertIn("time_avg", single_url_stat)
                self.assertIn("time_max", single_url_stat)
                self.assertIn("time_med", single_url_stat)
def r_resize(uuid, size): args_rules = [Rules.UUID.value, Rules.DISK_SIZE_STR.value] try: ji.Check.previewing(args_rules, {'uuid': uuid, 'size': size}) disk = Disk() disk.uuid = uuid disk.get_by('uuid') ret = dict() ret['state'] = ji.Common.exchange_state(20000) if disk.size >= size: ret['state'] = ji.Common.exchange_state(41257) return ret config = Config() config.id = 1 config.get() message = { '_object': 'disk', 'action': 'resize', 'uuid': disk.uuid, 'guest_uuid': disk.guest_uuid, 'storage_mode': config.storage_mode, 'size': int(size), 'dfs_volume': config.dfs_volume, 'hostname': disk.on_host, 'image_path': disk.path, 'passback_parameters': { 'size': size } } if disk.on_host == 'shared_storage': message['hostname'] = Guest.get_lightest_host()['hostname'] if disk.guest_uuid.__len__() == 36: message['device_node'] = dev_table[disk.sequence] Guest.emit_instruction(message=json.dumps(message, ensure_ascii=False)) return ret except ji.PreviewingError, e: return json.loads(e.message)
def reload_config(self):
    """Copy every persisted Config field (except bookkeeping columns) onto
    this object, falling back to built-in defaults when no row exists."""
    try:
        config = Config.select().where(Config.id == 1).get()
    except Exception:
        # No stored config (or the query failed): use defaults.
        # Narrowed from a bare `except:` so Ctrl-C still works.
        config = Config(sitename='ABCcms', siteurl='http://localhost')
    config_keys = config._meta.fields.keys()
    config_keys.remove('id')
    config_keys.remove('created_at')
    config_keys.remove('updated_at')
    # The original branched on hasattr(self, key) but both branches ran the
    # same setattr, so the branch was collapsed.  A leftover debug
    # `print config_keys` was also dropped.
    for key in config_keys:
        setattr(self, key, getattr(config, key))
def main():
    """Load the league config and player pool, mark positional eligibility,
    and run the batting/pitching auction optimizers.

    Returns a dict with 'batting' and 'pitching' optimization results.
    """
    cfg_data = json.load(open('league_config.json'))
    cfg = Config().load(cfg_data)
    player_pool = json.load(open('player-pool.json'))
    random.shuffle(player_pool)
    # mark each player's positional eligibility before splitting the pool
    player_pool = builders.mark_positional_eligibility(player_pool,
                                                       cfg['lg_pos'],
                                                       cfg['eligibility'])
    batters = [p for p in player_pool if p['player_type'] == 'b']
    pitchers = [p for p in player_pool if p['player_type'] == 'p']
    context = {
        'b': {
            'players': batters,
            'components': const.BATTING_COMPONENTS,
            'rates': const.BATTING_RATES,
            'scoring': cfg['batting_stats'],
            'n_draftable': cfg['draftable_b'],
            'positions': sort_positions(cfg['batting_pos']),
            'budget': cfg['batting_budget'],
            'split': cfg['split']['b'],
        },
        'p': {
            'players': pitchers,
            'components': const.PITCHING_COMPONENTS,
            'rates': const.PITCHING_RATES,
            'scoring': cfg['pitching_stats'],
            'n_draftable': cfg['draftable_p'],
            'positions': sort_positions(cfg['pitching_pos']),
            'budget': cfg['pitching_budget'],
            'split': cfg['split']['p'],
        },
        'teams': cfg['teams'],
        'eligibility': cfg['eligibility'],
        'lineup': cfg['positions'],
        'budget': cfg['budget'],
        'total_draftable': cfg['total_draftable'],
    }
    return {
        'batting': optimize(context, 'b'),
        'pitching': optimize(context, 'p'),
    }