def build_config(args, build_params):
    """Populate the nested *build_params* config from parsed CLI *args*.

    String flags are coerced to bool via ``utils.str2bool`` and numeric
    options via ``int()``/``float()``; everything else is copied verbatim.
    Mutates *build_params* in place and returns it for convenience.
    """
    # Top-level settings.
    build_params.gpu = args.gpu
    build_params.logdir = args.log

    # Dataset selection and augmentation switches.
    build_params.dataset.name = args.dataset
    build_params.dataset.flip = utils.str2bool(args.flip)
    build_params.dataset.crop = utils.str2bool(args.crop)

    # Training-loop hyperparameters.
    build_params.training.log_steps = int(args.log_steps)
    build_params.training.idx = args.idx
    build_params.training.epochs = int(args.epochs)
    build_params.training.batch_size = int(args.batch)
    build_params.training.steps = int(args.steps)
    build_params.training.log = utils.str2bool(args.t_log)
    build_params.training.whiten = args.whiten

    # Model architecture and normalization options.
    build_params.model.name = args.model
    build_params.model.pool = args.pool
    build_params.model.layer_num = int(args.layer_num)
    build_params.model.in_norm = utils.str2bool(args.in_norm)
    build_params.model.in_norm_fn = args.in_norm_fn
    build_params.model.fm_norm = args.fm_norm
    build_params.model.out_norm = utils.str2bool(args.out_norm)
    build_params.model.norm_scale = utils.str2bool(args.norm_scale)
    build_params.model.bn_relu = utils.str2bool(args.bn_relu)
    build_params.model.resnet = args.resnet

    # Capsule routing parameters.
    build_params.routing.temper = float(args.temper)
    build_params.routing.iter_num = int(args.iter_num)

    # Capsule dimensions.
    build_params.caps.atoms = int(args.atoms)
    build_params.caps.pre_atoms = int(args.pre_atoms)

    # Reconstruction-loss configuration.
    build_params.recons.balance_factor = float(args.balance_factor)
    build_params.recons.threshold = float(args.activate_threshold)
    build_params.recons.conv = utils.str2bool(args.recons_conv)
    build_params.recons.share = utils.str2bool(args.recons_share)

    return build_params
def build_config(args, build_params):
    """Fill *build_params* from parsed CLI *args* and return it.

    Boolean-like string flags go through ``utils.str2bool``; numeric
    options are converted with ``int()``/``float()``. The object is
    mutated in place.
    """
    build_params.logdir = args.log

    # Dataset: name plus augmentation toggles.
    build_params.dataset.name = args.dataset
    build_params.dataset.flip = utils.str2bool(args.flip)
    build_params.dataset.crop = utils.str2bool(args.crop)

    # Training schedule and bookkeeping.
    build_params.training.batch_size = int(args.batch)
    build_params.training.epochs = int(args.epochs)
    build_params.training.lr = float(args.lr)
    build_params.training.idx = args.idx
    build_params.training.save_frequency = args.save

    # Model selection.
    build_params.model.arch = args.arch
    build_params.model.type = args.type

    # Normalization-layer configuration.
    build_params.normalize.method = args.method
    build_params.normalize.m = int(args.m)
    build_params.normalize.iter = int(args.iter)
    build_params.normalize.affine = utils.str2bool(args.affine)

    return build_params
def build_config(args):
    """Populate the module-level ``config`` object from parsed CLI *args*.

    Boolean-like string flags are coerced via ``utils.str2bool`` and
    numeric options via ``int()``/``float()``.

    Returns the mutated ``config`` — added for consistency with the
    sibling ``build_config(args, build_params)`` variants, which return
    the config object they fill; existing callers that ignore the
    return value are unaffected.
    """
    # Dataset selection and augmentation switches.
    config.dataset.name = args.dataset
    config.dataset.flip = utils.str2bool(args.flip)
    config.dataset.crop = utils.str2bool(args.crop)
    config.logdir = args.log
    # Training-loop hyperparameters.
    config.training.log_steps = int(args.log_steps)
    config.training.idx = args.idx
    config.training.epochs = int(args.epochs)
    config.training.lr = float(args.lr)
    config.training.batch_size = int(args.batch)
    config.training.steps = int(args.steps)
    config.training.log = utils.str2bool(args.t_log)
    # Model architecture.
    config.model.name = args.model
    config.model.layer_num = int(args.layer_num)
    # Normalization (DBN) settings.
    config.normalize.type = args.normalize
    config.normalize.m = int(args.dbn_m)
    config.normalize.iter = int(args.iter)
    config.normalize.affine = utils.str2bool(args.dbn_affine)
    return config
def build_parse(height, width, channel, image_standardization=True, flip=True, crop=True, brightness=False, contrast=False):
    """Return a ``(image, label)`` map function for a tf.data pipeline.

    The returned closure casts the image to float32, normalizes it
    (per-image standardization or divide-by-255), then applies the
    enabled random augmentations. All flag arguments are passed through
    ``utils.str2bool`` first, so string values such as ``"True"`` are
    accepted as well as booleans.
    """
    # Normalize every flag in one pass.
    image_standardization, flip, crop, brightness, contrast = (
        utils.str2bool(v)
        for v in (image_standardization, flip, crop, brightness, contrast)
    )

    def _parse(img, label):
        img = tf.cast(img, tf.float32)
        # Either zero-mean/unit-variance per image, or simple [0, 1] scaling.
        if image_standardization:
            img = tf.image.per_image_standardization(img)
        else:
            img = tf.divide(img, 255.)
        if flip:
            img = tf.image.random_flip_left_right(img)
        if crop:
            # Pad by 8px on each side, then take a random crop back to size.
            img = tf.image.resize_with_crop_or_pad(img, height + 8, width + 8)
            img = tf.image.random_crop(img, [height, width, channel])
        if brightness:
            img = tf.image.random_brightness(img, max_delta=63)
        if contrast:
            img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
        return img, label

    return _parse
# NOTE(review): this chunk begins mid-statement — the
# `parser = argparse.ArgumentParser(` opener lives above the visible region;
# the first line below closes that call.
    description='OP Benchmark of PaddlePaddle')
# Positional: path to the per-operator benchmark script to launch.
parser.add_argument(
    "benchmark_script",
    type=str,
    help="The full path to operator's benchmark script file. If the task "
    "the speed and GPU is used, nvprof will be used to get the GPU kernel time."
)
# Everything after the script path is captured verbatim and forwarded
# to the benchmark script itself.
parser.add_argument('benchmark_script_args', nargs=argparse.REMAINDER)
args = parser.parse_args()

# The forwarded arg list doubles as a key=value dict for this launcher's
# own dispatch flags (task, use_gpu, profiler, repeat).
benchmark_args_dict = _args_list_to_dict(args.benchmark_script_args)
task = benchmark_args_dict.get("task", "speed")
use_gpu = utils.str2bool(benchmark_args_dict.get("use_gpu", "False"))
profiler = benchmark_args_dict.get("profiler", "none")
# NOTE(review): `repeat` is read but not used in this visible chunk.
repeat = benchmark_args_dict.get("repeat", "1")

utils.check_commit()

if use_gpu and task == "speed" and profiler == "none":
    # First pass under nvprof to measure total GPU kernel time; the result
    # is appended as `--gpu_time` for the real run below.
    total_gpu_time = launch(args.benchmark_script,
                            args.benchmark_script_args,
                            with_nvprof=True)
    args.benchmark_script_args.append(" --gpu_time ")
    args.benchmark_script_args.append(str(total_gpu_time))
# Actual benchmark run (always executed).
# NOTE(review): indentation reconstructed from a collapsed source line — this
# final launch is placed at top level so non-GPU tasks still run; confirm
# against the original file.
launch(args.benchmark_script, args.benchmark_script_args, with_nvprof=False)
# Script preamble: imports, config parsing, environment construction, and
# per-agent meta policy/value network setup for PPO training.
import numpy as np
from keras.utils import to_categorical
import copy
from common.utils import eligibility_traces, default_config, make_env, RunningMeanStd, str2bool, discount_rewards
from common.ppo_independant import PPOPolicyNetwork, ValueNetwork

render = False
normalize_inputs = True

# Agent hyperparameters from the [agent] section of the config file.
config = default_config()
LAMBDA = float(config['agent']['lambda'])
lr_actor = float(config['agent']['lr_actor'])
meta_skip_etrace = str2bool(config['agent']['meta_skip_etrace'])
communication_round = int(config['agent']['fen_communication_round'])

# Environment carries most run parameters (episode count, horizon, etc.).
env=make_env(config, normalize_inputs)
env.toggle_compute_neighbors()
n_agent=env.n_agent
T = env.T
GAMMA = env.GAMMA
n_episode = env.n_episode
max_steps = env.max_steps
n_actions = env.n_actions
n_signal = env.n_signal
max_u = env.max_u
i_episode = 0

# One meta policy network per agent; meta_V is presumably filled the same
# way further down (outside this visible chunk).
meta_Pi=[]
meta_V=[]
for i in range(n_agent):
    # +2 input features on top of the env observation — assumed to be the
    # meta-controller's extra signals; confirm against the caller.
    meta_Pi.append(PPOPolicyNetwork(num_features=env.input_size+2, num_actions=n_signal,layer_size=128,epsilon=0.1,learning_rate=lr_actor))
def update(self, request, *args, **kwargs):
    """Update a friend relation (A->B edge) based on which key is present
    in ``request.data``:

    - ``remark``: only A (``from_user``) may set a display remark for B.
    - ``is_block``: only A may block/unblock B; the change is mirrored to
      the RongCloud IM blacklist.
    - ``state``: only B (``to_user``) may accept/reject a pending request;
      accepting also creates/updates the reverse B->A edge and pushes a
      notification to A.

    Returns a success/error response object in every branch; any
    unexpected exception is logged and reported as a generic error.
    """
    friend = self.get_object()
    try:
        if 'remark' in request.data:
            # A->B: only A has permission to edit the remark.
            if request.user == friend.from_user:
                friend.remark = request.data['remark']
                friend.save()
                return success_response('设置备注成功')
            else:
                return error_response(3, '无此权限')
        elif 'is_block' in request.data:
            # A->B: only A has permission to block/unblock.
            if request.user == friend.from_user:
                is_block = str2bool(request.data['is_block'])
                # str2bool returns None for an unparseable value.
                if is_block is not None:
                    friend.is_block = is_block
                    friend.save()
                    if is_block:
                        # Mirror the block on RongCloud.
                        friend.from_user.operate_black_list(
                            friend.to_user.id, 'add')
                        return success_response('拉黑用户成功')
                    else:
                        # Mirror the unblock on RongCloud.
                        friend.from_user.operate_black_list(
                            friend.to_user.id, 'remove')
                        return success_response('取消拉黑成功')
                else:
                    return error_response(4, '参数错误(请输入合法布尔值)')
            else:
                return error_response(3, '无此权限')
        elif 'state' in request.data:
            # A->B: only B may accept/reject the request.
            if request.user == friend.to_user:
                if friend.state == FriendState.Pending:
                    # NOTE(review): `isdigit` is called as a free function —
                    # confirm it is imported somewhere (e.g. curses.ascii);
                    # otherwise this would raise NameError, caught below.
                    if isdigit(request.data['state']):
                        state = int(request.data['state'])
                        # Accept request.
                        if state == FriendState.Agree:
                            friend.state = state
                            friend.agree_time = timezone.now()
                            friend.save()
                            # Create/refresh the reverse edge B->A.
                            friend_from, is_created = self.get_queryset(
                            ).get_or_create(from_user=friend.to_user,
                                            to_user=friend.from_user)
                            friend_from.state = state
                            friend_from.agree_time = timezone.now()
                            friend_from.remark = friend.from_user.get_full_name(
                            )
                            friend_from.save()
                            # TODO push to user A that B accepted the request
                            try:
                                jpush.audience(
                                    friend.from_user.id, '请求通过',
                                    '用户{}通过了你的好友请求'.format(
                                        request.user.get_full_name()),
                                    {'operation': 'friend_pass'})
                            except PushError as e:
                                # Push failure is non-fatal; log and continue.
                                logging.error('{} {}'.format(
                                    e.code, e.message))
                            return success_response('添加好友成功')
                        # Reject request.
                        elif state == FriendState.Reject:
                            friend.state = state
                            friend.save()
                            return success_response('拒绝请求成功')
                        else:
                            return error_response(4, '参数错误')
                    else:
                        return error_response(4, '参数错误(state为数字)')
                else:
                    # Already handled once; cannot process again.
                    return error_response(5, '不可再次处理该请求')
            else:
                return error_response(3, '无此权限')
        else:
            return error_response(4, '参数错误')
    except Exception as e:
        # Top-level boundary handler: log the traceback, return generic error.
        import traceback
        traceback.print_exc()
        return error_response(1, str(e))