def _create_merlin_algorithm(encoder_fc_layers=(3, ),
                             latent_dim=3,
                             lstm_size=(4, ),
                             memory_size=20,
                             learning_rate=1e-3,
                             debug_summaries=True):
    observation_spec = common.get_observation_spec()
    action_spec = common.get_action_spec()
    algorithm = MerlinAlgorithm(
        observation_spec=observation_spec,
        action_spec=action_spec,
        encoders=EncodingNetwork(
            input_tensor_spec=observation_spec,
            fc_layer_params=encoder_fc_layers,
            activation_fn=None,
            name="ObsEncoder"),
        decoders=DecodingAlgorithm(
            decoder=EncodingNetwork(
                input_tensor_spec=TensorSpec((latent_dim, ), dtype=tf.float32),
                fc_layer_params=encoder_fc_layers,
                activation_fn=None,
                name="ObsDecoder"),
            loss_weight=100.),
        latent_dim=latent_dim,
        lstm_size=lstm_size,
        memory_size=memory_size,
        optimizer=tf.optimizers.Adam(learning_rate=learning_rate),
        debug_summaries=debug_summaries)
    return algorithm
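
# Usage sketch (added comment; hypothetical): the deliberately tiny sizes in
# the defaults above (fc width 3, latent dim 3, LSTM size 4, memory size 20)
# keep the graph small enough for a fast unit test. A test would typically
# just build the algorithm with the defaults, e.g.:
#
#   algorithm = _create_merlin_algorithm(learning_rate=1e-3)
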
def _create_ddpg_algorithm():
    observation_spec = common.get_observation_spec()
    action_spec = common.get_action_spec()
    actor_net = ActorNetwork(
        observation_spec, action_spec, fc_layer_params=(16, 16))
    critic_net = CriticNetwork(
        (observation_spec, action_spec), joint_fc_layer_params=(16, 16))
    return DdpgAlgorithm(
        observation_spec=observation_spec,
        action_spec=action_spec,
        actor_network=actor_net,
        critic_network=critic_net,
        actor_optimizer=tf.optimizers.Adam(learning_rate=5e-3),
        critic_optimizer=tf.optimizers.Adam(learning_rate=1e-1),
        debug_summaries=True)
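
# Note (added comment): DDPG trains the actor and critic with separate
# optimizers, and here the critic's learning rate (1e-1) is much larger than
# the actor's (5e-3), which pushes the critic to fit values quickly on a tiny
# test task. A real training configuration would normally use far smaller and
# closer rates, e.g. (illustrative values only):
#
#   actor_optimizer=tf.optimizers.Adam(learning_rate=1e-4),
#   critic_optimizer=tf.optimizers.Adam(learning_rate=1e-3),
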
def _create_ac_algorithm():
    observation_spec = common.get_observation_spec()
    action_spec = common.get_action_spec()
    optimizer = tf.optimizers.Adam(learning_rate=5e-5)
    actor_net = ActorDistributionNetwork(
        observation_spec, action_spec, fc_layer_params=(8, ))
    value_net = ValueNetwork(observation_spec, fc_layer_params=(8, ))
    return ActorCriticAlgorithm(
        action_spec=action_spec,
        actor_network=actor_net,
        value_network=value_net,
        loss_class=ActorCriticLoss,
        optimizer=optimizer,
        debug_summaries=True)
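
# Note (added comment): unlike _create_ppo_algorithm below, which builds the
# RNN network variants, this factory uses plain feed-forward actor and value
# networks. A hypothetical wider configuration for a quick sanity run, rather
# than a unit test, might look like:
#
#   actor_net = ActorDistributionNetwork(
#       observation_spec, action_spec, fc_layer_params=(64, 64))
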
def _create_ppo_algorithm():
    observation_spec = common.get_observation_spec()
    action_spec = common.get_action_spec()
    optimizer = tf.optimizers.Adam(learning_rate=1e-3)
    actor_net = ActorDistributionRnnNetwork(
        observation_spec,
        action_spec,
        input_fc_layer_params=(),
        output_fc_layer_params=None)
    value_net = ValueRnnNetwork(
        observation_spec,
        input_fc_layer_params=(),
        output_fc_layer_params=None)
    return PPOAlgorithm(
        action_spec=action_spec,
        actor_network=actor_net,
        value_network=value_net,
        loss_class=PPOLoss,
        optimizer=optimizer,
        debug_summaries=True)
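
# Convenience mapping (hypothetical; not in the original file): lets a
# parameterized test iterate over every algorithm factory defined above
# instead of calling each one by hand.
_ALGORITHM_CTORS = {
    'merlin': _create_merlin_algorithm,
    'ddpg': _create_ddpg_algorithm,
    'ac': _create_ac_algorithm,
    'ppo': _create_ppo_algorithm,
}
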
def get_ac_networks(conv_layer_params=None,
                    num_embedding_dims=None,
                    fc_layer_params=None,
                    num_state_tiles=None,
                    num_sentence_tiles=None):
    """Generate the actor and value networks.

    Args:
        conv_layer_params (list[int 3 tuple]): optional convolution layer
            parameters, where each item is a length-three tuple indicating
            (filters, kernel_size, stride).
        num_embedding_dims (int): optional number of dimensions of the
            vocabulary embedding space.
        fc_layer_params (list[int]): optional fully-connected layer
            parameters, where each item is the number of units in the layer.
        num_state_tiles (int): optional number of times to repeat the internal
            state tensor before concatenation with other inputs. The rationale
            is to match the number of dimensions of the image input, so that
            the final concatenation has roughly equal representation from the
            different input sources. Without this, the image input, due to its
            large size, would typically dominate all other low-dimensional
            inputs.
        num_sentence_tiles (int): optional number of times to repeat the
            sentence embedding tensor before concatenation with other inputs,
            so that the sentence input is not dominated by high-dimensional
            inputs such as the image observation.

    Returns:
        tuple: (actor_network, value_network)
    """
    observation_spec = common.get_observation_spec()
    action_spec = common.get_action_spec()

    conv_layers = tf.keras.Sequential(
        tf_agents.networks.utils.mlp_layers(
            conv_layer_params=conv_layer_params))

    preprocessing_layers = {
        'image': conv_layers,
    }
    if common.get_states_shape():
        state_layers = get_identity_layer()
        # [image: (1, 12800), sentence: (1, 16 * 800), states: (1, 16 * 800)]
        # Here, we tile along the last dimension of the input.
        if num_state_tiles:
            state_layers = tf.keras.Sequential([
                tf.keras.layers.Lambda(
                    lambda x: tf.tile(x, multiples=[1, num_state_tiles]))
            ])
        preprocessing_layers['states'] = state_layers

    vocab_size = common.get_vocab_size()
    if vocab_size:
        sentence_layers = tf.keras.Sequential([
            tf.keras.layers.Embedding(vocab_size, num_embedding_dims),
            tf.keras.layers.GlobalAveragePooling1D()
        ])
        if num_sentence_tiles:
            sentence_layers.add(
                tf.keras.layers.Lambda(
                    lambda x: tf.tile(x, multiples=[1, num_sentence_tiles])))
        preprocessing_layers['sentence'] = sentence_layers

    preprocessing_combiner = tf.keras.layers.Concatenate()

    actor = ActorDistributionRnnNetwork(
        input_tensor_spec=observation_spec,
        output_tensor_spec=action_spec,
        preprocessing_layers=preprocessing_layers,
        preprocessing_combiner=preprocessing_combiner,
        input_fc_layer_params=fc_layer_params)
    value = ValueRnnNetwork(
        input_tensor_spec=observation_spec,
        preprocessing_layers=preprocessing_layers,
        preprocessing_combiner=preprocessing_combiner,
        input_fc_layer_params=fc_layer_params)
    return actor, value
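
# Example invocation (added comment; illustrative values only). This assumes
# an observation spec with 'image', 'sentence', and 'states' fields has been
# configured so that the corresponding preprocessing branches are built. The
# tile counts follow the shape comment in the function body, where a 16-dim
# input tiled 800 times roughly matches the flattened image size:
#
#   actor_net, value_net = get_ac_networks(
#       conv_layer_params=[(16, 3, 2), (32, 3, 2)],
#       num_embedding_dims=16,
#       fc_layer_params=(256, ),
#       num_state_tiles=800,
#       num_sentence_tiles=800)
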