def forward(img_a, img_b):
    img_a /= 255.
    img_b /= 255.

    # Dual generators: map a -> b and b -> a, plus the cycle reconstructions a -> b -> a and b -> a -> b.
    img_ab = generator(img_a, name='atob', reuse=False)
    img_ba = generator(img_b, name='btoa', reuse=False)
    img_aba = generator(img_ab, name='btoa', reuse=True)
    img_bab = generator(img_ba, name='atob', reuse=True)

    logit_fake_a = discriminator(img_ba, name='a', reuse=False)
    logit_fake_b = discriminator(img_ab, name='b', reuse=False)
    score_fake_a = O.sigmoid(logit_fake_a)
    score_fake_b = O.sigmoid(logit_fake_b)

    for name in ['img_a', 'img_b', 'img_ab', 'img_ba', 'img_aba', 'img_bab',
                 'score_fake_a', 'score_fake_b']:
        dpc.add_output(locals()[name], name=name)

    if env.phase is env.Phase.TRAIN:
        logit_real_a = discriminator(img_a, name='a', reuse=True)
        logit_real_b = discriminator(img_b, name='b', reuse=True)
        score_real_a = O.sigmoid(logit_real_a)
        score_real_b = O.sigmoid(logit_real_b)

        all_g_loss = 0.
        all_d_loss = 0.
        r_loss_ratio = 0.9

        for pair_name, (real, fake), (logit_real, logit_fake), (score_real, score_fake) in zip(
                ['lossa', 'lossb'],
                [(img_a, img_aba), (img_b, img_bab)],
                [(logit_real_a, logit_fake_a), (logit_real_b, logit_fake_b)],
                [(score_real_a, score_fake_a), (score_real_b, score_fake_b)]):

            with env.name_scope(pair_name):
                d_loss_real = O.sigmoid_cross_entropy_with_logits(
                    logits=logit_real, labels=O.ones_like(logit_real)).mean(name='d_loss_real')
                d_loss_fake = O.sigmoid_cross_entropy_with_logits(
                    logits=logit_fake, labels=O.zeros_like(logit_fake)).mean(name='d_loss_fake')
                g_loss = O.sigmoid_cross_entropy_with_logits(
                    logits=logit_fake, labels=O.ones_like(logit_fake)).mean(name='g_loss')

                d_acc_real = (score_real > 0.5).astype('float32').mean(name='d_acc_real')
                d_acc_fake = (score_fake < 0.5).astype('float32').mean(name='d_acc_fake')
                g_accuracy = (score_fake > 0.5).astype('float32').mean(name='g_accuracy')

                d_accuracy = O.identity(.5 * (d_acc_real + d_acc_fake), name='d_accuracy')
                d_loss = O.identity(.5 * (d_loss_real + d_loss_fake), name='d_loss')

                # r_loss = O.raw_l2_loss('raw_r_loss', real, fake).flatten2().sum(axis=1).mean(name='r_loss')
                r_loss = O.raw_l2_loss('raw_r_loss', real, fake).mean(name='r_loss')
                # r_loss = O.raw_cross_entropy_prob('raw_r_loss', real, fake).flatten2().sum(axis=1).mean(name='r_loss')

                # Generator loss: weighted sum of the adversarial loss and the cycle-reconstruction loss.
                # all_g_loss += g_loss + r_loss
                all_g_loss += (1 - r_loss_ratio) * g_loss + r_loss_ratio * r_loss
                all_d_loss += d_loss

                for v in [d_loss_real, d_loss_fake, g_loss, d_acc_real, d_acc_fake,
                          g_accuracy, d_accuracy, d_loss, r_loss]:
                    dpc.add_output(v, name=re.sub(r'^tower/\d+/', '', v.name)[:-2], reduce_method='sum')

        dpc.add_output(all_g_loss, name='g_loss', reduce_method='sum')
        dpc.add_output(all_d_loss, name='d_loss', reduce_method='sum')
def make_network(env):
    with env.create_network() as net:
        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                h, w, c = 28, 28, 1
                img = O.placeholder('img', shape=(None, h, w, c))
                return [img]

            def forward(img):
                _ = img
                _ = O.conv2d('conv1', _, 16, (3, 3), padding='SAME', nonlin=O.identity)
                _ = O.batch_norm('bn1', _)
                _ = O.relu(_)
                _ = O.pooling2d('pool1', _, kernel=2)
                _ = O.conv2d('conv2', _, 32, (3, 3), padding='SAME', nonlin=O.identity)
                _ = O.batch_norm('bn2', _)
                _ = O.relu(_)
                _ = O.pooling2d('pool2', _, kernel=2)
                dpc.add_output(_, name='feature')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        _ = dpc.outputs['feature']
        _ = O.fc('fc1', _, 64)
        _ = O.fc('fc2', _, 10)

        prob = O.softmax(_, name='prob')
        pred = _.argmax(axis=1).astype('int32', name='pred')
        net.add_output(prob)
        net.add_output(pred)

        if env.phase is env.Phase.TRAIN:
            label = O.placeholder('label', shape=(None, ), dtype='int32')
            loss = O.sparse_softmax_cross_entropy_with_logits(logits=_, labels=label).mean()
            loss = O.identity(loss, name='loss')
            net.set_loss(loss)

            accuracy = O.eq(label, pred).astype('float32').mean()
            error = 1. - accuracy

            summary.scalar('accuracy', accuracy)
            summary.scalar('error', error)
            summary.inference.scalar('loss', loss)
            summary.inference.scalar('accuracy', accuracy)
            summary.inference.scalar('error', error)
def make_network(env):
    with env.create_network() as net:
        nr_classes = get_env('dataset.nr_classes')

        conv_bn_relu = functools.partial(O.conv2d, nonlin=O.bn_relu)
        conv2d = conv_bn_relu

        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                h, w, c = 32, 32, 3
                img = O.placeholder('img', shape=(None, h, w, c))
                return [img]

            def forward(img):
                _ = img
                _ = conv2d('conv1.1', _, 16, (3, 3), padding='SAME')
                _ = conv2d('conv1.2', _, 16, (3, 3), padding='SAME')
                _ = O.pooling2d('pool1', _, kernel=3, stride=2)
                _ = conv2d('conv2.1', _, 32, (3, 3), padding='SAME')
                _ = conv2d('conv2.2', _, 32, (3, 3), padding='SAME')
                _ = O.pooling2d('pool2', _, kernel=3, stride=2)
                _ = conv2d('conv3.1', _, 64, (3, 3), padding='VALID')
                _ = conv2d('conv3.2', _, 64, (3, 3), padding='VALID')
                _ = conv2d('conv3.3', _, 64, (3, 3), padding='VALID')
                dpc.add_output(_, name='feature')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        _ = dpc.outputs['feature']
        _ = O.fc('fc1', _, 128, nonlin=O.relu)
        _ = O.fc('fc2', _, 64, nonlin=O.relu)
        _ = O.fc('linear', _, nr_classes)

        prob = O.softmax(_, name='prob')
        pred = _.argmax(axis=1).astype('int32', name='pred')
        net.add_output(prob)
        net.add_output(pred)

        if env.phase is env.Phase.TRAIN:
            label = O.placeholder('label', shape=(None, ), dtype='int32')
            loss = O.sparse_softmax_cross_entropy_with_logits(logits=_, labels=label).mean()
            loss = O.identity(loss, name='loss')
            net.set_loss(loss)

            accuracy = O.eq(label, pred).astype('float32').mean()
            error = 1. - accuracy

            summary.scalar('accuracy', accuracy)
            summary.scalar('error', error)
            summary.inference.scalar('loss', loss)
            summary.inference.scalar('accuracy', accuracy)
            summary.inference.scalar('error', error)
def make_network(env):
    with env.create_network() as net:
        net.dist = O.distrib.GaussianDistribution('policy', size=get_action_shape()[0], fixed_std=False)

        state = O.placeholder('state', shape=(None, ) + get_input_shape())
        batch_size = state.shape[0]

        # We have to define the variable scope here for later optimization.
        with env.variable_scope('policy'):
            _ = state
            _ = O.fc('fc1', _, 64, nonlin=O.relu)
            _ = O.fc('fc2', _, 64, nonlin=O.relu)
            mu = O.fc('fc_mu', _, net.dist.sample_size, nonlin=O.tanh)
            logstd = O.variable('logstd', O.truncated_normal_initializer(stddev=0.01),
                                shape=(net.dist.sample_size, ), trainable=True)
            logstd = O.tile(logstd.add_axis(0), [batch_size, 1])

            theta = O.concat([mu, logstd], axis=1)

            policy = net.dist.sample(batch_size=batch_size, theta=theta, process_theta=True)
            policy = O.clip_by_value(policy, -1, 1)

            net.add_output(theta, name='theta')
            net.add_output(policy, name='policy')

        if env.phase == env.Phase.TRAIN:
            theta_old = O.placeholder('theta_old', shape=(None, net.dist.param_size))
            action = O.placeholder('action', shape=(None, net.dist.sample_size))
            advantage = O.placeholder('advantage', shape=(None, ))
            entropy_beta = O.scalar('entropy_beta', g.entropy_beta)

            log_prob = net.dist.log_likelihood(action, theta, process_theta=True)
            log_prob_old = net.dist.log_likelihood(action, theta_old, process_theta=True)
            ratio = O.exp(log_prob - log_prob_old)
            epsilon = get_env('ppo.epsilon')
            surr1 = ratio * advantage  # surrogate from conservative policy iteration
            surr2 = O.clip_by_value(ratio, 1.0 - epsilon, 1.0 + epsilon) * advantage
            policy_loss = -O.reduce_mean(O.min(surr1, surr2))  # PPO's pessimistic surrogate (L^CLIP)
            entropy = net.dist.entropy(theta, process_theta=True).mean()
            entropy_loss = -entropy_beta * entropy

            net.add_output(policy_loss, name='policy_loss')
            net.add_output(entropy_loss, name='entropy_loss')

            summary.scalar('policy_entropy', entropy)

        with env.variable_scope('value'):
            _ = state
            _ = O.fc('fc1', _, 64, nonlin=O.relu)
            _ = O.fc('fc2', _, 64, nonlin=O.relu)
            value = O.fc('fcv', _, 1)
            value = value.remove_axis(1)
            net.add_output(value, name='value')

        if env.phase == env.Phase.TRAIN:
            value_label = O.placeholder('value_label', shape=(None, ))
            value_old = O.placeholder('value_old', shape=(None, ))

            value_surr1 = O.raw_l2_loss('raw_value_loss_surr1', value, value_label)
            value_clipped = value_old + O.clip_by_value(value - value_old, -epsilon, epsilon)
            value_surr2 = O.raw_l2_loss('raw_value_loss_surr2', value_clipped, value_label)
            value_loss = O.reduce_mean(O.max(value_surr1, value_surr2))
            net.add_output(value_loss, name='value_loss')

        if env.phase == env.Phase.TRAIN:
            loss = O.identity(policy_loss + entropy_loss + value_loss, name='total_loss')
            net.set_loss(loss)
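# A minimal NumPy sketch (not part of the framework above; names and the epsilon default are
# illustrative) of the two clipped objectives that the PPO graph builds symbolically: the
# pessimistic surrogate L^CLIP and the clipped value loss. A plain squared error stands in for
# the elementwise O.raw_l2_loss.
import numpy as np

def ppo_losses(log_prob, log_prob_old, advantage, value, value_old, value_label, epsilon=0.2):
    ratio = np.exp(log_prob - log_prob_old)
    surr1 = ratio * advantage
    surr2 = np.clip(ratio, 1.0 - epsilon, 1.0 + epsilon) * advantage
    policy_loss = -np.mean(np.minimum(surr1, surr2))      # maximize the pessimistic surrogate

    value_clipped = value_old + np.clip(value - value_old, -epsilon, epsilon)
    value_surr1 = (value - value_label) ** 2               # unclipped squared error
    value_surr2 = (value_clipped - value_label) ** 2       # clipped squared error
    value_loss = np.mean(np.maximum(value_surr1, value_surr2))
    return policy_loss, value_loss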
def make_network(env):
    is_train = env.phase is env.Phase.TRAIN

    # device control: always use the master device only for the training session
    if is_train:
        slave_devices = env.slave_devices
        env.set_slave_devices([])

    with env.create_network() as net:
        input_length, = get_input_shape()
        action_length, = get_action_shape()

        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                state = O.placeholder('state', shape=(None, input_length))
                return [state]

            # forward the policy network and the value network separately (actor-critic)
            def forward(x):
                _ = x
                _ = O.fc('fcp1', _, 512, nonlin=O.relu)
                _ = O.fc('fcp2', _, 256, nonlin=O.relu)
                dpc.add_output(_, name='feature_p')

                _ = x
                _ = O.fc('fcv1', _, 512, nonlin=O.relu)
                _ = O.fc('fcv2', _, 256, nonlin=O.relu)
                dpc.add_output(_, name='feature_v')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        _ = dpc.outputs['feature_p']

        # mu and std, assuming spherical covariance
        policy_mu = O.fc('fc_policy_mu', _, action_length)
        # In this example, we do not use a learned variance; instead, we use a fixed value.
        # policy_var = O.fc('fc_policy_var', _, 1, nonlin=O.softplus)
        # policy_var = O.tile(policy_var, [1, action_length], name='policy_var')
        # policy_std = O.sqrt(policy_var, name='policy_std')

        actor_space = get_env('a3c.actor_space')
        nr_bins = actor_space.shape[1]

        # Instead of a normal distribution, we use a Laplacian distribution for the policy, and we sample
        # from a truncated Laplacian (only values inside the action space matter). To simplify the
        # computation, we discretize the action space.
        actor_space = O.constant(actor_space)
        actor_space = O.tile(actor_space.add_axis(0), [policy_mu.shape[0], 1, 1])
        policy_mu3 = O.tile(policy_mu.add_axis(2), [1, 1, nr_bins])
        # policy_std3 = O.tile(policy_std.add_axis(2), [1, 1, nr_bins])
        # logits = O.abs(actor_space - policy_mu3) / (policy_std3 + 1e-2)

        # Here, we force the std of the policy to be 1.
        logits_explore = -O.abs(actor_space - policy_mu3)
        policy_explore = O.softmax(logits_explore)

        # Clip the policy for output.
        action_range = get_action_range()
        action_range = tuple(map(O.constant, action_range))
        action_range = tuple(map(lambda x: O.tile(x.add_axis(0), [policy_mu.shape[0], 1]), action_range))
        policy_output = O.clip_by_value(policy_mu, *action_range)

        _ = dpc.outputs['feature_v']
        value = O.fc('fc_value', _, 1)
        value = value.remove_axis(1, name='value')

        # Note that policy_explore is a discrete policy, while policy is the continuous one.
        net.add_output(policy_explore, name='policy_explore')
        net.add_output(policy_output, name='policy')
        net.add_output(value, name='value')

        if is_train:
            action = O.placeholder('action', shape=(None, action_length), dtype='int64')
            future_reward = O.placeholder('future_reward', shape=(None, ))
            entropy_beta = O.scalar('entropy_beta', 0.1, trainable=False)  # unused while the entropy term below is disabled

            # Since we discretized the action space, use cross entropy here.
            log_policy = O.log(policy_explore + 1e-4)
            log_pi_a_given_s = (log_policy * O.one_hot(action, nr_bins)).sum(axis=2).sum(axis=1)
            advantage = (future_reward - O.zero_grad(value)).rename('advantage')
            # Important trick: use only positive advantages to perform gradient ascent. This stabilizes training.
            advantage = advantage * O.zero_grad((advantage > 0.).astype('float32'))
            policy_loss = O.identity(-(log_pi_a_given_s * advantage).mean(), name='policy_loss')

            # As mentioned, there is no trainable variance, so the entropy term is disabled.
            # entropy_loss = O.identity(-entropy_beta * (policy_std ** 2.).sum(axis=1).mean(), name='entropy_loss')
            value_loss = O.raw_smooth_l1_loss('raw_value_loss', future_reward, value).mean(name='value_loss')

            loss = O.add_n([policy_loss, value_loss], name='loss')
            net.set_loss(loss)

            for v in [policy_loss, value_loss,
                      value.mean(name='predict_value'),
                      advantage.rms(name='rms_advantage'), loss]:
                summary.scalar(v)

    if is_train:
        env.set_slave_devices(slave_devices)
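# A tiny NumPy sketch (independent of the framework; shapes are illustrative) of the discretized
# policy-gradient loss above: log-probability of the chosen bins, weighted by the advantage, with
# negative advantages masked to zero (the "only positive advantage" trick).
import numpy as np

def discretized_policy_loss(policy_explore, action, future_reward, value, eps=1e-4):
    # policy_explore: (batch, action_length, nr_bins); action: (batch, action_length) bin indices.
    nr_bins = policy_explore.shape[2]
    one_hot = np.eye(nr_bins)[action]
    log_pi_a_given_s = (np.log(policy_explore + eps) * one_hot).sum(axis=(1, 2))
    advantage = future_reward - value          # value is treated as a constant (cf. O.zero_grad)
    advantage = advantage * (advantage > 0.)   # keep only positive advantages
    return -(log_pi_a_given_s * advantage).mean()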
def make_network(env):
    with env.create_network() as net:
        n = 2
        nr_classes = get_env('dataset.nr_classes')

        conv2d = functools.partial(O.conv2d, kernel=3, use_bias=False, padding='SAME')
        conv_bn_relu = functools.partial(conv2d, nonlin=O.bn_relu)

        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                h, w, c = 32, 32, 3
                img = O.placeholder('img', shape=(None, h, w, c))
                return [img]

            def residual(name, x, first=False, inc_dim=False):
                in_channel = x.static_shape[3]
                out_channel = in_channel
                stride = 1
                if inc_dim:
                    out_channel = in_channel * 2
                    stride = 2

                with env.variable_scope(name):
                    _ = x if first else O.bn_relu(x)
                    _ = conv_bn_relu('conv1', _, out_channel, stride=stride)
                    _ = conv2d('conv2', _, out_channel)
                    if inc_dim:
                        x = O.pooling2d('pool', x, kernel=2)
                        x = O.pad(x, [[0, 0], [0, 0], [0, 0], [in_channel // 2, in_channel // 2]])
                    print(name, x.static_shape)
                    _ = _ + x
                return _

            def forward(img):
                _ = img / 128.0 - 1.0
                _ = conv_bn_relu('conv0', _, 16)
                _ = residual('res1.0', _, first=True)
                for i in range(1, n):
                    _ = residual('res1.{}'.format(i), _)
                _ = residual('res2.0', _, inc_dim=True)
                for i in range(1, n):
                    _ = residual('res2.{}'.format(i), _)
                _ = residual('res3.0', _, inc_dim=True)
                for i in range(1, n):
                    _ = residual('res3.{}'.format(i), _)
                _ = O.batch_norm('bn_last', _)
                _ = O.relu(_)
                _ = _.mean(axis=[1, 2])  # global avg pool
                dpc.add_output(_, name='feature')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        _ = dpc.outputs['feature']
        _ = O.fc('linear', _, nr_classes)

        prob = O.softmax(_, name='prob')
        pred = _.argmax(axis=1).astype('int32', name='pred')
        net.add_output(prob)
        net.add_output(pred)

        if env.phase is env.Phase.TRAIN:
            label = O.placeholder('label', shape=(None, ), dtype='int32')
            loss = O.sparse_softmax_cross_entropy_with_logits(logits=_, labels=label).mean()
            loss = O.identity(loss, name='loss')
            net.set_loss(loss)

            accuracy = O.eq(label, pred).astype('float32').mean()
            error = 1. - accuracy

            summary.scalar('accuracy', accuracy)
            summary.scalar('error', error)
            summary.inference.scalar('loss', loss)
            summary.inference.scalar('accuracy', accuracy)
            summary.inference.scalar('error', error)
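# A small NumPy sketch (independent of the framework) of the shortcut used when a residual block
# doubles its channel count: downsample spatially, then zero-pad the channel axis so the shortcut
# matches the conv branch. The concrete shapes below are illustrative.
import numpy as np

x = np.zeros((8, 32, 32, 16))                                   # NHWC feature map, 16 channels
shortcut = x[:, ::2, ::2, :]                                    # stride-2 downsample (stands in for pooling2d)
shortcut = np.pad(shortcut, [(0, 0), (0, 0), (0, 0), (8, 8)])   # pad 16 -> 32 channels (in_channel // 2 per side)
assert shortcut.shape == (8, 16, 16, 32)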
def make_network(env):
    is_train = env.phase is env.Phase.TRAIN

    if is_train:
        slave_devices = env.slave_devices
        env.set_slave_devices([])

    with env.create_network() as net:
        h, w, c = get_input_shape()

        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                state = O.placeholder('state', shape=(None, h, w, c))
                return [state]

            def forward(x):
                _ = x / 255.0
                with O.argscope(O.conv2d, nonlin=O.relu):
                    _ = O.conv2d('conv0', _, 32, 5)
                    _ = O.max_pooling2d('pool0', _, 2)
                    _ = O.conv2d('conv1', _, 32, 5)
                    _ = O.max_pooling2d('pool1', _, 2)
                    _ = O.conv2d('conv2', _, 64, 4)
                    _ = O.max_pooling2d('pool2', _, 2)
                    _ = O.conv2d('conv3', _, 64, 3)
                dpc.add_output(_, name='feature')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        _ = dpc.outputs['feature']
        _ = O.fc('fc0', _, 512, nonlin=O.p_relu)
        policy = O.fc('fc_policy', _, get_player_nr_actions())
        value = O.fc('fc_value', _, 1)

        expf = O.scalar('explore_factor', 1, trainable=False)
        policy_explore = O.softmax(policy * expf, name='policy_explore')
        policy = O.softmax(policy, name='policy')
        value = value.remove_axis(1, name='value')

        net.add_output(policy_explore, name='policy_explore')
        net.add_output(policy, name='policy')
        net.add_output(value, name='value')

        if is_train:
            action = O.placeholder('action', shape=(None, ), dtype='int64')
            future_reward = O.placeholder('future_reward', shape=(None, ))
            entropy_beta = O.scalar('entropy_beta', 0.01, trainable=False)

            log_policy = O.log(policy + 1e-6)
            log_pi_a_given_s = (log_policy * O.one_hot(action, get_player_nr_actions())).sum(axis=1)
            advantage = (future_reward - O.zero_grad(value)).rename('advantage')
            policy_loss = O.identity(-(log_pi_a_given_s * advantage).mean(), name='policy_loss')
            entropy_loss = O.identity(-entropy_beta * (-policy * log_policy).sum(axis=1).mean(), name='entropy_loss')
            value_loss = O.raw_l2_loss('raw_value_loss', future_reward, value).mean(name='value_loss')

            loss = O.add_n([policy_loss, entropy_loss, value_loss], name='loss')
            net.set_loss(loss)

            for v in [policy_loss, entropy_loss, value_loss,
                      value.mean(name='predict_value'),
                      advantage.rms(name='rms_advantage'), loss]:
                summary.scalar(v)

    if is_train:
        env.set_slave_devices(slave_devices)