def discriminator(x):
    _ = x
    with O.argscope(O.fc, nonlin=O.tanh):
        _ = O.fc('fc1', _, 500)
        _ = O.fc('fc3', _, 1)
    logits = _
    return logits

def make_network(env):
    with env.create_network() as net:
        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                h, w, c = 28, 28, 1
                img = O.placeholder('img', shape=(None, h, w, c))
                return [img]

            def forward(img):
                _ = img
                _ = O.conv2d('conv1', _, 16, (3, 3), padding='SAME', nonlin=O.identity)
                _ = O.batch_norm('bn1', _)
                _ = O.relu(_)
                _ = O.pooling2d('pool1', _, kernel=2)
                _ = O.conv2d('conv2', _, 32, (3, 3), padding='SAME', nonlin=O.identity)
                _ = O.batch_norm('bn2', _)
                _ = O.relu(_)
                _ = O.pooling2d('pool2', _, kernel=2)
                dpc.add_output(_, name='feature')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        _ = dpc.outputs['feature']
        _ = O.fc('fc1', _, 64)
        _ = O.fc('fc2', _, 10)

        prob = O.softmax(_, name='prob')
        pred = _.argmax(axis=1).astype('int32', name='pred')
        net.add_output(prob)
        net.add_output(pred)

        if env.phase is env.Phase.TRAIN:
            label = O.placeholder('label', shape=(None, ), dtype='int32')
            loss = O.sparse_softmax_cross_entropy_with_logits(
                logits=_, labels=label).mean()
            loss = O.identity(loss, name='loss')
            net.set_loss(loss)

            accuracy = O.eq(label, pred).astype('float32').mean()
            error = 1. - accuracy

            summary.scalar('accuracy', accuracy)
            summary.scalar('error', error)
            summary.inference.scalar('loss', loss)
            summary.inference.scalar('accuracy', accuracy)
            summary.inference.scalar('error', error)

def phi_fc(feature):
    _ = feature
    _ = O.fc('fc0', _, 512, nonlin=functools.partial(O.leaky_relu, alpha=0.01))
    q_pred = O.fc('fcq', _, get_player_nr_actions())
    q_max = q_pred.max(axis=1)
    q_argmax = q_pred.argmax(axis=1)
    return q_pred, q_max, q_argmax

def forward(x):
    _ = x
    _ = O.fc('fcp1', _, 512, nonlin=O.relu)
    _ = O.fc('fcp2', _, 256, nonlin=O.relu)
    dpc.add_output(_, name='feature_p')

    _ = x
    _ = O.fc('fcv1', _, 512, nonlin=O.relu)
    _ = O.fc('fcv2', _, 256, nonlin=O.relu)
    dpc.add_output(_, name='feature_v')

def make_network(env):
    with env.create_network() as net:
        nr_classes = get_env('dataset.nr_classes')

        conv_bn_relu = functools.partial(O.conv2d, nonlin=O.bn_relu)
        conv2d = conv_bn_relu

        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                h, w, c = 32, 32, 3
                img = O.placeholder('img', shape=(None, h, w, c))
                return [img]

            def forward(img):
                _ = img
                _ = conv2d('conv1.1', _, 16, (3, 3), padding='SAME')
                _ = conv2d('conv1.2', _, 16, (3, 3), padding='SAME')
                _ = O.pooling2d('pool1', _, kernel=3, stride=2)
                _ = conv2d('conv2.1', _, 32, (3, 3), padding='SAME')
                _ = conv2d('conv2.2', _, 32, (3, 3), padding='SAME')
                _ = O.pooling2d('pool2', _, kernel=3, stride=2)
                _ = conv2d('conv3.1', _, 64, (3, 3), padding='VALID')
                _ = conv2d('conv3.2', _, 64, (3, 3), padding='VALID')
                _ = conv2d('conv3.3', _, 64, (3, 3), padding='VALID')
                dpc.add_output(_, name='feature')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        _ = dpc.outputs['feature']
        _ = O.fc('fc1', _, 128, nonlin=O.relu)
        _ = O.fc('fc2', _, 64, nonlin=O.relu)
        _ = O.fc('linear', _, nr_classes)

        prob = O.softmax(_, name='prob')
        pred = _.argmax(axis=1).astype('int32', name='pred')
        net.add_output(prob)
        net.add_output(pred)

        if env.phase is env.Phase.TRAIN:
            label = O.placeholder('label', shape=(None, ), dtype='int32')
            loss = O.sparse_softmax_cross_entropy_with_logits(
                logits=_, labels=label).mean()
            loss = O.identity(loss, name='loss')
            net.set_loss(loss)

            accuracy = O.eq(label, pred).astype('float32').mean()
            error = 1. - accuracy

            summary.scalar('accuracy', accuracy)
            summary.scalar('error', error)
            summary.inference.scalar('loss', loss)
            summary.inference.scalar('accuracy', accuracy)
            summary.inference.scalar('error', error)

def forward(img):
    g_batch_size = get_env('trainer.batch_size') if env.phase is env.Phase.TRAIN else 1
    z = O.as_varnode(tf.random_normal([g_batch_size, code_length]))

    with env.variable_scope(GANGraphKeys.GENERATOR_VARIABLES):
        _ = z
        with O.argscope(O.fc, nonlin=O.tanh):
            _ = O.fc('fc1', _, 500)
            _ = O.fc('fc3', _, 784, nonlin=O.sigmoid)
        x_given_z = _.reshape(-1, 28, 28, 1)

    def discriminator(x):
        _ = x
        with O.argscope(O.fc, nonlin=O.tanh):
            _ = O.fc('fc1', _, 500)
            _ = O.fc('fc3', _, 1)
        logits = _
        return logits

    if is_train:
        with env.variable_scope(GANGraphKeys.DISCRIMINATOR_VARIABLES):
            logits_real = discriminator(img).flatten()
            score_real = O.sigmoid(logits_real)

    with env.variable_scope(GANGraphKeys.DISCRIMINATOR_VARIABLES, reuse=is_train):
        logits_fake = discriminator(x_given_z).flatten()
        score_fake = O.sigmoid(logits_fake)

    if is_train:
        # build loss
        with env.variable_scope('loss'):
            d_loss_real = O.sigmoid_cross_entropy_with_logits(
                logits=logits_real, labels=O.ones_like(logits_real)).mean()
            d_loss_fake = O.sigmoid_cross_entropy_with_logits(
                logits=logits_fake, labels=O.zeros_like(logits_fake)).mean()
            g_loss = O.sigmoid_cross_entropy_with_logits(
                logits=logits_fake, labels=O.ones_like(logits_fake)).mean()

            d_acc_real = (score_real > 0.5).astype('float32').mean()
            d_acc_fake = (score_fake < 0.5).astype('float32').mean()
            g_accuracy = (score_fake > 0.5).astype('float32').mean()

            d_accuracy = .5 * (d_acc_real + d_acc_fake)
            d_loss = .5 * (d_loss_real + d_loss_fake)

        dpc.add_output(d_loss, name='d_loss', reduce_method='sum')
        dpc.add_output(d_accuracy, name='d_accuracy', reduce_method='sum')
        dpc.add_output(d_acc_real, name='d_acc_real', reduce_method='sum')
        dpc.add_output(d_acc_fake, name='d_acc_fake', reduce_method='sum')
        dpc.add_output(g_loss, name='g_loss', reduce_method='sum')
        dpc.add_output(g_accuracy, name='g_accuracy', reduce_method='sum')

    dpc.add_output(x_given_z, name='output')
    dpc.add_output(score_fake, name='score')

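# For reference: with these labels, sigmoid_cross_entropy_with_logits yields the
# standard non-saturating GAN objective,
#   d_loss = -1/2 * (E_x[log D(x)] + E_z[log(1 - D(G(z)))]),
#   g_loss = -E_z[log D(G(z))].
# The accuracy outputs are diagnostics only: at the theoretical equilibrium
# D(.) = 0.5 everywhere, so a d_accuracy near 1 usually indicates the
# discriminator is overpowering the generator.
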
def generator(z):
    w_init = O.truncated_normal_initializer(stddev=0.02)
    with O.argscope(O.conv2d, O.deconv2d, kernel=4, stride=2, W=w_init), \
            O.argscope(O.fc, W=w_init):
        _ = z
        _ = O.fc('fc1', _, 1024, nonlin=O.bn_relu)
        _ = O.fc('fc2', _, 128 * 7 * 7, nonlin=O.bn_relu)
        _ = O.reshape(_, [-1, 7, 7, 128])
        _ = O.deconv2d('deconv1', _, 64, nonlin=O.bn_relu)
        _ = O.deconv2d('deconv2', _, 1)
        _ = O.sigmoid(_, 'out')
    return _

def discriminator(img):
    w_init = O.truncated_normal_initializer(stddev=0.02)
    with O.argscope(O.conv2d, O.deconv2d, kernel=4, stride=2, W=w_init), \
            O.argscope(O.fc, W=w_init), \
            O.argscope(O.leaky_relu, alpha=0.2):
        _ = img
        _ = O.conv2d('conv1', _, 64, nonlin=O.leaky_relu)
        _ = O.conv2d('conv2', _, 128, nonlin=O.bn_nonlin)
        _ = O.leaky_relu(_)
        _ = O.fc('fc1', _, 1024, nonlin=O.bn_nonlin)
        _ = O.leaky_relu(_)
        _ = O.fc('fct', _, 1)
    return _

def discriminator(x, name, reuse):
    with env.variable_scope(GANGraphKeys.DISCRIMINATOR_VARIABLES, reuse=reuse):
        with env.variable_scope(name):
            z = encoder(x, nonlin=bn_leaky_relu)
            logit = O.fc('fccls', z, 1)
    return logit

def discriminator(img):
    w_init = O.truncated_normal_initializer(stddev=0.02)
    with O.argscope(O.conv2d, O.deconv2d, kernel=4, stride=2, W=w_init), \
            O.argscope(O.fc, W=w_init), \
            O.argscope(O.leaky_relu, alpha=0.2):
        _ = img
        _ = O.conv2d('conv1', _, 64, nonlin=O.leaky_relu)
        _ = O.conv2d('conv2', _, 128, nonlin=O.bn_nonlin)
        _ = O.leaky_relu(_)
        _ = O.fc('fc1', _, 1024, nonlin=O.bn_nonlin)
        _ = O.leaky_relu(_)

        with env.variable_scope('score'):
            logits = O.fc('fct', _, 1)

        with env.variable_scope('code'):
            _ = O.fc('fc1', _, 128, nonlin=O.bn_nonlin)
            _ = O.leaky_relu(_)
            code = O.fc('fc2', _, zc_distrib.param_size)

    return logits, code

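# Compared with the plain DCGAN discriminator above, this variant adds a second
# head ('code') that regresses the parameters of the latent-code distribution
# zc_distrib, in the style of InfoGAN's mutual-information auxiliary objective;
# the trunk up to 'fc1' is shared between the real/fake score and the code head.
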
def forward(img):
    _ = img
    _ = O.conv2d('conv1', _, 4, (3, 3), stride=2, padding='SAME', nonlin=O.relu)  # shape = (14, 14)
    _ = O.conv2d('conv2', _, 8, (3, 3), stride=2, padding='SAME', nonlin=O.relu)  # shape = (7, 7)
    _ = O.fc('fc', _, 392)
    _ = _.reshape([-1, 7, 7, 8])
    _ = O.deconv2d('deconv1', _, 4, (3, 3), stride=2, padding='SAME', nonlin=O.relu)  # shape = (14, 14)
    _ = O.deconv2d('deconv2', _, 1, (3, 3), stride=2, padding='SAME', nonlin=O.sigmoid)  # shape = (28, 28)
    out = _

    loss = O.raw_cross_entropy_prob('raw_loss', out, img)
    loss = O.get_pn_balanced_loss('loss', loss, img)
    dpc.add_output(out, name='output')
    dpc.add_output(loss, name='loss', reduce_method='sum')

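# This is a small convolutional autoencoder: two stride-2 convolutions compress
# the 28x28 input to a 7x7x8 code, and two stride-2 deconvolutions mirror them
# back to 28x28. The reconstruction loss is a pixelwise cross entropy;
# get_pn_balanced_loss presumably reweights it by the positive/negative pixel
# ratio so sparse foreground pixels are not dominated by the background.
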
def forward(x):
    if is_reconstruct or env.phase is env.Phase.TRAIN:
        with env.variable_scope('encoder'):
            _ = x
            _ = O.fc('fc1', _, 500, nonlin=O.tanh)
            _ = O.fc('fc2', _, 500, nonlin=O.tanh)
            mu = O.fc('fc3_mu', _, code_length)
            log_var = O.fc('fc3_sigma', _, code_length)
            var = O.exp(log_var)
            std = O.sqrt(var)
            epsilon = O.random_normal([x.shape[0], code_length])
            z_given_x = mu + std * epsilon
    else:
        z_given_x = O.random_normal([1, code_length])

    with env.variable_scope('decoder'):
        _ = z_given_x
        _ = O.fc('fc1', _, 500, nonlin=O.tanh)
        _ = O.fc('fc2', _, 500, nonlin=O.tanh)
        _ = O.fc('fc3', _, 784, nonlin=O.sigmoid)
        _ = _.reshape(-1, h, w, c)
        x_given_z = _

    if env.phase is env.Phase.TRAIN:
        with env.variable_scope('loss'):
            content_loss = O.raw_cross_entropy_prob(
                'raw_content', x_given_z.flatten2(), x.flatten2())
            content_loss = content_loss.sum(axis=1).mean(name='content')
            # distrib_loss = 0.5 * (O.sqr(mu) + O.sqr(std) - 2. * O.log(std + 1e-8) - 1.0).sum(axis=1)
            distrib_loss = -0.5 * (1. + log_var - O.sqr(mu) - var).sum(axis=1)
            distrib_loss = distrib_loss.mean(name='distrib')
            loss = content_loss + distrib_loss
        dpc.add_output(loss, name='loss', reduce_method='sum')

    dpc.add_output(x_given_z, name='output')

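# The distrib_loss above is the closed-form KL divergence between the encoder
# posterior q(z|x) = N(mu, var) and the standard-normal prior:
#   KL(N(mu, var) || N(0, I)) = -1/2 * sum_i (1 + log var_i - mu_i^2 - var_i).
# Sampling z as mu + std * epsilon (the reparameterization trick) keeps the
# graph differentiable with respect to mu and log_var.
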
def make_network(env):
    is_train = env.phase is env.Phase.TRAIN
    if is_train:
        slave_devices = env.slave_devices
        env.set_slave_devices([])

    with env.create_network() as net:
        h, w, c = get_input_shape()

        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                state = O.placeholder('state', shape=(None, h, w, c))
                return [state]

            def forward(x):
                _ = x / 255.0
                with O.argscope(O.conv2d, nonlin=O.relu):
                    _ = O.conv2d('conv0', _, 32, 5)
                    _ = O.max_pooling2d('pool0', _, 2)
                    _ = O.conv2d('conv1', _, 32, 5)
                    _ = O.max_pooling2d('pool1', _, 2)
                    _ = O.conv2d('conv2', _, 64, 4)
                    _ = O.max_pooling2d('pool2', _, 2)
                    _ = O.conv2d('conv3', _, 64, 3)
                dpc.add_output(_, name='feature')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        _ = dpc.outputs['feature']
        _ = O.fc('fc0', _, 512, nonlin=O.p_relu)
        policy = O.fc('fc_policy', _, get_player_nr_actions())
        value = O.fc('fc_value', _, 1)

        expf = O.scalar('explore_factor', 1, trainable=False)
        policy_explore = O.softmax(policy * expf, name='policy_explore')
        policy = O.softmax(policy, name='policy')
        value = value.remove_axis(1, name='value')

        net.add_output(policy_explore, name='policy_explore')
        net.add_output(policy, name='policy')
        net.add_output(value, name='value')

        if is_train:
            action = O.placeholder('action', shape=(None, ), dtype='int64')
            future_reward = O.placeholder('future_reward', shape=(None, ))

            log_policy = O.log(policy + 1e-6)
            log_pi_a_given_s = (
                log_policy * O.one_hot(action, get_player_nr_actions())).sum(axis=1)
            advantage = (future_reward - O.zero_grad(value)).rename('advantage')
            policy_cost = (log_pi_a_given_s * advantage).mean(name='policy_cost')
            xentropy_cost = (-policy * log_policy).sum(axis=1).mean(name='xentropy_cost')
            value_loss = O.raw_l2_loss('raw_value_loss', future_reward, value).mean(name='value_loss')
            entropy_beta = O.scalar('entropy_beta', 0.01, trainable=False)
            loss = O.add_n(
                [-policy_cost, -xentropy_cost * entropy_beta, value_loss],
                name='loss')

            net.set_loss(loss)

            for v in [policy_cost, xentropy_cost, value_loss,
                      value.mean(name='predict_value'),
                      advantage.rms(name='rms_advantage'), loss]:
                summary.scalar(v)

    if is_train:
        env.set_slave_devices(slave_devices)

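# The combined objective is the usual A3C loss,
#   loss = -E[log pi(a|s) * A] - entropy_beta * H(pi) + value_loss,
# with advantage A = R - V(s). O.zero_grad(value) stops the policy-gradient
# term from backpropagating into the value head, which is trained only through
# value_loss.
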
def forward(img=None):
    encoder = O.BasicLSTMCell(256)
    decoder = O.BasicLSTMCell(256)

    batch_size = img.shape[0] if is_train else 1
    canvas = O.zeros(shape=O.canonize_sym_shape([batch_size, h, w, c]), dtype='float32')
    enc_state = encoder.zero_state(batch_size, dtype='float32')
    dec_state = decoder.zero_state(batch_size, dtype='float32')
    enc_h, dec_h = enc_state[1], dec_state[1]

    def encode(x, state, reuse):
        with env.variable_scope('read_encoder', reuse=reuse):
            return encoder(x, state)

    def decode(x, state, reuse):
        with env.variable_scope('write_decoder', reuse=reuse):
            return decoder(x, state)

    all_sqr_mus, all_vars, all_log_vars = 0., 0., 0.

    for step in range(nr_glimpse):
        reuse = (step != 0)
        if is_reconstruct or env.phase is env.Phase.TRAIN:
            img_hat = draw_opr.image_diff(img, canvas)  # eq. 3

            # Note: here the input should be dec_h.
            with env.variable_scope('read', reuse=reuse):
                read_param = O.fc('fc_param', dec_h, 5)
            with env.name_scope('read_step{}'.format(step)):
                cx, cy, delta, var, gamma = draw_opr.split_att_params(h, w, att_dim, read_param)
                read_inp = O.concat([img, img_hat], axis=3)  # of shape: batch_size x h x w x (2c)
                read_out = draw_opr.att_read(att_dim, read_inp, cx, cy, delta, var)  # eq. 4
                enc_inp = O.concat([gamma * read_out.flatten2(), dec_h], axis=1)
            enc_h, enc_state = encode(enc_inp, enc_state, reuse)  # eq. 5

            with env.variable_scope('sample', reuse=reuse):
                _ = enc_h
                sample_mu = O.fc('fc_mu', _, code_length)
                sample_log_var = O.fc('fc_sigma', _, code_length)
            with env.name_scope('sample_step{}'.format(step)):
                sample_var = O.exp(sample_log_var)
                sample_std = O.sqrt(sample_var)
                sample_epsilon = O.random_normal([batch_size, code_length])
                z = sample_mu + sample_std * sample_epsilon  # eq. 6

                # accumulate for losses
                all_sqr_mus += sample_mu ** 2.
                all_vars += sample_var
                all_log_vars += sample_log_var
        else:
            z = O.random_normal([1, code_length])

        # z = O.callback_injector(z)

        dec_h, dec_state = decode(z, dec_state, reuse)  # eq. 7
        with env.variable_scope('write', reuse=reuse):
            write_param = O.fc('fc_param', dec_h, 5)
            write_in = O.fc('fc', dec_h, (att_dim * att_dim * c)).reshape(-1, att_dim, att_dim, c)
        with env.name_scope('write_step{}'.format(step)):
            cx, cy, delta, var, gamma = draw_opr.split_att_params(h, w, att_dim, write_param)
            write_out = draw_opr.att_write(h, w, write_in, cx, cy, delta, var)  # eq. 8

        canvas += write_out

        if env.phase is env.Phase.TEST:
            dpc.add_output(O.sigmoid(canvas), name='canvas_step{}'.format(step))

    canvas = O.sigmoid(canvas)

    if env.phase is env.Phase.TRAIN:
        with env.variable_scope('loss'):
            img, canvas = img.flatten2(), canvas.flatten2()
            content_loss = O.raw_cross_entropy_prob('raw_content', canvas, img)
            content_loss = content_loss.sum(axis=1).mean(name='content')
            # distrib_loss = 0.5 * (O.sqr(mu) + O.sqr(std) - 2. * O.log(std + 1e-8) - 1.0).sum(axis=1)
            distrib_loss = -0.5 * (float(nr_glimpse) + all_log_vars - all_sqr_mus - all_vars).sum(axis=1)
            distrib_loss = distrib_loss.mean(name='distrib')
            summary.scalar('content_loss', content_loss)
            summary.scalar('distrib_loss', distrib_loss)
            loss = content_loss + distrib_loss
        dpc.add_output(loss, name='loss', reduce_method='sum')

    dpc.add_output(canvas, name='output')

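# This follows the DRAW recurrence (Gregor et al., 2015): compute the error
# image (eq. 3), read with attention (eq. 4), encode (eq. 5), sample the latent
# (eq. 6), decode (eq. 7), and write onto the canvas (eq. 8). The per-step
# mu/var statistics are accumulated so the KL term over all glimpses can be
# computed in closed form after the loop, mirroring the VAE distrib_loss above.
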
def forward_fc(feature, action):
    action = O.one_hot(action, get_player_nr_actions())
    _ = O.concat([feature.flatten2(), action], axis=1)
    _ = O.fc('fc0', _, 512, nonlin=O.p_relu)
    reward = O.fc('fc_reward', _, 1)
    return reward

def make_network(env):
    with env.create_network() as net:
        state = O.placeholder('state', shape=(None, ) + get_input_shape())
        logits = O.fc('fc', state, get_action_shape())
        net.add_output(logits, name='policy')

def make_network(env):
    use_linear_vr = get_env('trpo.use_linear_vr')

    with env.create_network() as net:
        net.dist = O.distrib.GaussianDistribution('policy', size=get_action_shape()[0], fixed_std=False)
        if use_linear_vr:
            from tartist.app.rl.utils.math import LinearValueRegressor
            net.value_regressor = LinearValueRegressor()

        state = O.placeholder('state', shape=(None, ) + get_input_shape())
        # state = O.moving_average(state)
        # state = O.clip_by_value(state, -10, 10)
        batch_size = state.shape[0]

        # We have to define the variable scope here for later optimization.
        with env.variable_scope('policy'):
            _ = state

            with O.argscope(O.fc):
                _ = O.fc('fc1', _, 64, nonlin=O.relu)
                _ = O.fc('fc2', _, 64, nonlin=O.relu)
                mu = O.fc('fc_mu', _, net.dist.sample_size, nonlin=O.tanh)
                logstd = O.variable(
                    'logstd', O.truncated_normal_initializer(stddev=0.01),
                    shape=(net.dist.sample_size, ), trainable=True)

            logstd = O.tile(logstd.add_axis(0), [batch_size, 1])
            theta = O.concat([mu, logstd], axis=1)

            policy = net.dist.sample(batch_size=batch_size, theta=theta, process_theta=True)
            policy = O.clip_by_value(policy, -1, 1)

        net.add_output(theta, name='theta')
        net.add_output(policy, name='policy')

        if env.phase == env.Phase.TRAIN:
            theta_old = O.placeholder('theta_old', shape=(None, net.dist.param_size))
            action = O.placeholder('action', shape=(None, net.dist.sample_size))
            advantage = O.placeholder('advantage', shape=(None, ))

            log_prob = net.dist.log_likelihood(action, theta, process_theta=True)
            log_prob_old = net.dist.log_likelihood(action, theta_old, process_theta=True)

            # Importance sampling of surrogate loss (L in the paper).
            ratio = O.exp(log_prob - log_prob_old)
            policy_loss = -O.reduce_mean(ratio * advantage)

            kl = net.dist.kl(theta_p=theta_old, theta_q=theta, process_theta=True).mean()
            kl_self = net.dist.kl(theta_p=O.zero_grad(theta), theta_q=theta, process_theta=True).mean()
            entropy = net.dist.entropy(theta, process_theta=True).mean()

            net.add_output(policy_loss, name='policy_loss')
            net.add_output(kl, name='kl')
            net.add_output(kl_self, name='kl_self')

            summary.scalar('policy_entropy', entropy, collections=[rl.train.ACGraphKeys.POLICY_SUMMARIES])

        if not use_linear_vr:
            with env.variable_scope('value'):
                value = O.fc('fcv', state, 1)
                net.add_output(value, name='value')

            if env.phase == env.Phase.TRAIN:
                value_label = O.placeholder('value_label', shape=(None, ))
                value_loss = O.raw_l2_loss('raw_value_loss', value, value_label).mean(name='value_loss')
                net.add_output(value_loss, name='value_loss')

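# These outputs are what an external TRPO optimizer needs: policy_loss is the
# negated importance-sampled surrogate -E[pi(a|s) / pi_old(a|s) * A], kl is the
# mean KL(pi_old || pi) that the trust region constrains, and kl_self (the KL
# of the current policy against a gradient-stopped copy of itself) is
# presumably used to form Hessian-vector products for the conjugate-gradient
# step.
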
def make_network(env):
    with env.create_network() as net:
        n = 2
        nr_classes = get_env('dataset.nr_classes')

        conv2d = functools.partial(O.conv2d, kernel=3, use_bias=False, padding='SAME')
        conv_bn_relu = functools.partial(conv2d, nonlin=O.bn_relu)

        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                h, w, c = 32, 32, 3
                img = O.placeholder('img', shape=(None, h, w, c))
                return [img]

            def residual(name, x, first=False, inc_dim=False):
                in_channel = x.static_shape[3]
                out_channel = in_channel
                stride = 1
                if inc_dim:
                    out_channel = in_channel * 2
                    stride = 2
                with env.variable_scope(name):
                    _ = x if first else O.bn_relu(x)
                    _ = conv_bn_relu('conv1', _, out_channel, stride=stride)
                    _ = conv2d('conv2', _, out_channel)
                    if inc_dim:
                        x = O.pooling2d('pool', x, kernel=2)
                        x = O.pad(x, [[0, 0], [0, 0], [0, 0], [in_channel // 2, in_channel // 2]])
                    print(name, x.static_shape)
                    _ = _ + x
                return _

            def forward(img):
                _ = img / 128.0 - 1.0
                _ = conv_bn_relu('conv0', _, 16)
                _ = residual('res1.0', _, first=True)
                for i in range(1, n):
                    _ = residual('res1.{}'.format(i), _)
                _ = residual('res2.0', _, inc_dim=True)
                for i in range(1, n):
                    _ = residual('res2.{}'.format(i), _)
                _ = residual('res3.0', _, inc_dim=True)
                for i in range(1, n):
                    _ = residual('res3.{}'.format(i), _)
                _ = O.batch_norm('bn_last', _)
                _ = O.relu(_)
                _ = _.mean(axis=[1, 2])  # global avg pool
                dpc.add_output(_, name='feature')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        _ = dpc.outputs['feature']
        _ = O.fc('linear', _, nr_classes)

        prob = O.softmax(_, name='prob')
        pred = _.argmax(axis=1).astype('int32', name='pred')
        net.add_output(prob)
        net.add_output(pred)

        if env.phase is env.Phase.TRAIN:
            label = O.placeholder('label', shape=(None, ), dtype='int32')
            loss = O.sparse_softmax_cross_entropy_with_logits(
                logits=_, labels=label).mean()
            loss = O.identity(loss, name='loss')
            net.set_loss(loss)

            accuracy = O.eq(label, pred).astype('float32').mean()
            error = 1. - accuracy

            summary.scalar('accuracy', accuracy)
            summary.scalar('error', error)
            summary.inference.scalar('loss', loss)
            summary.inference.scalar('accuracy', accuracy)
            summary.inference.scalar('error', error)

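# Each residual block computes y = F(x) + x with pre-activation (BN + ReLU
# applied before the convolutions, skipped for the very first block); when a
# block doubles the channel count, the identity branch is downsampled with a
# stride-2 pool and zero-padded with in_channel // 2 channels on each side
# instead of using a 1x1 projection.
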
def make_network(env):
    is_train = env.phase is env.Phase.TRAIN

    # device control: always use the master device only for the training session
    if is_train:
        slave_devices = env.slave_devices
        env.set_slave_devices([])

    with env.create_network() as net:
        input_length, = get_input_shape()
        action_length, = get_action_shape()

        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                state = O.placeholder('state', shape=(None, input_length))
                return [state]

            # forward the policy network and value network separately (actor-critic)
            def forward(x):
                _ = x
                _ = O.fc('fcp1', _, 512, nonlin=O.relu)
                _ = O.fc('fcp2', _, 256, nonlin=O.relu)
                dpc.add_output(_, name='feature_p')

                _ = x
                _ = O.fc('fcv1', _, 512, nonlin=O.relu)
                _ = O.fc('fcv2', _, 256, nonlin=O.relu)
                dpc.add_output(_, name='feature_v')

            dpc.set_input_maker(inputs).set_forward_func(forward)

        _ = dpc.outputs['feature_p']

        # mu and std, assuming spherical covariance
        policy_mu = O.fc('fc_policy_mu', _, action_length)
        # In this example, we do not use a learned variance; we use a fixed value instead.
        # policy_var = O.fc('fc_policy_var', _, 1, nonlin=O.softplus)
        # policy_var = O.tile(policy_var, [1, action_length], name='policy_var')
        # policy_std = O.sqrt(policy_var, name='policy_std')

        actor_space = get_env('a3c.actor_space')
        nr_bins = actor_space.shape[1]

        # Instead of a normal distribution, we use a Laplacian distribution for the
        # policy, and we sample from a truncated Laplacian (we only care about values
        # inside the action space). To simplify the computation, we discretize the
        # action space.
        actor_space = O.constant(actor_space)
        actor_space = O.tile(actor_space.add_axis(0), [policy_mu.shape[0], 1, 1])
        policy_mu3 = O.tile(policy_mu.add_axis(2), [1, 1, nr_bins])
        # policy_std3 = O.tile(policy_std.add_axis(2), [1, 1, nr_bins])
        # logits = O.abs(actor_space - policy_mu3) / (policy_std3 + 1e-2)

        # Here, we force the std of the policy to be 1.
        logits_explore = -O.abs(actor_space - policy_mu3)
        policy_explore = O.softmax(logits_explore)

        # Clip the policy for output.
        action_range = get_action_range()
        action_range = tuple(map(O.constant, action_range))
        action_range = tuple(map(lambda x: O.tile(x.add_axis(0), [policy_mu.shape[0], 1]), action_range))
        policy_output = O.clip_by_value(policy_mu, *action_range)

        _ = dpc.outputs['feature_v']
        value = O.fc('fc_value', _, 1)
        value = value.remove_axis(1, name='value')

        # Note that policy_explore is a discrete policy,
        # while policy is the continuous one.
        net.add_output(policy_explore, name='policy_explore')
        net.add_output(policy_output, name='policy')
        net.add_output(value, name='value')

        if is_train:
            action = O.placeholder('action', shape=(None, action_length), dtype='int64')
            future_reward = O.placeholder('future_reward', shape=(None, ))
            entropy_beta = O.scalar('entropy_beta', 0.1, trainable=False)

            # Since we discretized the action space, use cross entropy here.
            log_policy = O.log(policy_explore + 1e-4)
            log_pi_a_given_s = (log_policy * O.one_hot(action, nr_bins)).sum(axis=2).sum(axis=1)
            advantage = (future_reward - O.zero_grad(value)).rename('advantage')
            # Important trick: use only positive advantages to perform gradient ascent.
            # This stabilizes the training.
            advantage = advantage * O.zero_grad((advantage > 0.).astype('float32'))
            policy_loss = O.identity(-(log_pi_a_given_s * advantage).mean(), name='policy_loss')

            # As mentioned above, there is no trainable variance, hence no entropy loss.
            # entropy_loss = O.identity(-entropy_beta * (policy_std ** 2.).sum(axis=1).mean(), name='entropy_loss')
            value_loss = O.raw_smooth_l1_loss('raw_value_loss', future_reward, value).mean(name='value_loss')

            loss = O.add_n([policy_loss, value_loss], name='loss')
            net.set_loss(loss)

            for v in [policy_loss, value_loss,
                      value.mean(name='predict_value'),
                      advantage.rms(name='rms_advantage'), loss]:
                summary.scalar(v)

    if is_train:
        env.set_slave_devices(slave_devices)

def make_network(env):
    with env.create_network() as net:
        net.dist = O.distrib.GaussianDistribution('policy', size=get_action_shape()[0], fixed_std=False)

        state = O.placeholder('state', shape=(None, ) + get_input_shape())
        batch_size = state.shape[0]

        # We have to define the variable scope here for later optimization.
        with env.variable_scope('policy'):
            _ = state
            _ = O.fc('fc1', _, 64, nonlin=O.relu)
            _ = O.fc('fc2', _, 64, nonlin=O.relu)
            mu = O.fc('fc_mu', _, net.dist.sample_size, nonlin=O.tanh)
            logstd = O.variable('logstd', O.truncated_normal_initializer(stddev=0.01),
                                shape=(net.dist.sample_size, ), trainable=True)

            logstd = O.tile(logstd.add_axis(0), [batch_size, 1])
            theta = O.concat([mu, logstd], axis=1)

            policy = net.dist.sample(batch_size=batch_size, theta=theta, process_theta=True)
            policy = O.clip_by_value(policy, -1, 1)

        net.add_output(theta, name='theta')
        net.add_output(policy, name='policy')

        if env.phase == env.Phase.TRAIN:
            theta_old = O.placeholder('theta_old', shape=(None, net.dist.param_size))
            action = O.placeholder('action', shape=(None, net.dist.sample_size))
            advantage = O.placeholder('advantage', shape=(None, ))
            entropy_beta = O.scalar('entropy_beta', g.entropy_beta)

            log_prob = net.dist.log_likelihood(action, theta, process_theta=True)
            log_prob_old = net.dist.log_likelihood(action, theta_old, process_theta=True)

            ratio = O.exp(log_prob - log_prob_old)
            epsilon = get_env('ppo.epsilon')
            surr1 = ratio * advantage  # surrogate from conservative policy iteration
            surr2 = O.clip_by_value(ratio, 1.0 - epsilon, 1.0 + epsilon) * advantage
            policy_loss = -O.reduce_mean(O.min(surr1, surr2))  # PPO's pessimistic surrogate (L^CLIP)
            entropy = net.dist.entropy(theta, process_theta=True).mean()
            entropy_loss = -entropy_beta * entropy

            net.add_output(policy_loss, name='policy_loss')
            net.add_output(entropy_loss, name='entropy_loss')

            summary.scalar('policy_entropy', entropy)

        with env.variable_scope('value'):
            _ = state
            _ = O.fc('fc1', _, 64, nonlin=O.relu)
            _ = O.fc('fc2', _, 64, nonlin=O.relu)
            value = O.fc('fcv', _, 1)
            value = value.remove_axis(1)
            net.add_output(value, name='value')

        if env.phase == env.Phase.TRAIN:
            value_label = O.placeholder('value_label', shape=(None, ))
            value_old = O.placeholder('value_old', shape=(None, ))

            value_surr1 = O.raw_l2_loss('raw_value_loss_surr1', value, value_label)
            value_clipped = value_old + O.clip_by_value(value - value_old, -epsilon, epsilon)
            value_surr2 = O.raw_l2_loss('raw_value_loss_surr2', value_clipped, value_label)
            value_loss = O.reduce_mean(O.max(value_surr1, value_surr2))

            net.add_output(value_loss, name='value_loss')

        if env.phase == env.Phase.TRAIN:
            loss = O.identity(policy_loss + entropy_loss + value_loss, name='total_loss')
            net.set_loss(loss)

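# Both heads use PPO-style clipping. The policy term is the pessimistic
# surrogate
#   L_CLIP = -E[min(r * A, clip(r, 1 - eps, 1 + eps) * A)],  r = pi / pi_old,
# and the value loss mirrors it: the value update is clipped to within eps of
# value_old and the elementwise max of the clipped and unclipped L2 terms is
# taken, so neither head can move too far from the behavior policy in a single
# update.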