def _():
    """Task: teacher-forced language modeling on lm1b_byte(128, 128).

    Pairs a 256-unit Sonnet GRU (with a 64-dim embedding) built by
    `teacher_force_language_modeling` with the byte-level LM1B dataset,
    wrapped as a `base.DatasetModelTask`.
    """
    dataset = lm1b_byte(128, 128)
    model_fn = teacher_force_language_modeling(
        lambda: snt.GRU(256), embed_dim=64)
    return base.DatasetModelTask(model_fn, dataset)
def _():
    """Task: RNN classification on imdb_subword(128, 32).

    Builds an `rnn_classification` model around a 64-unit Sonnet GRU
    (64-dim embedding, "avg" aggregation over time) and wraps it with the
    subword IMDB dataset as a `base.DatasetModelTask`.
    """
    dataset = imdb_subword(128, 32)
    model_fn = rnn_classification(
        lambda: snt.GRU(64), embed_dim=64, aggregate_method="avg")
    return base.DatasetModelTask(model_fn, dataset)
# NOTE(review): the first three statements below are the tail of a method whose
# definition starts outside this chunk; `presence` appears to be the last element
# of a list of per-step outputs returned alongside the RNN state — confirm
# against the enclosing class. Indentation here is reconstructed; the original
# chunk arrived with all whitespace collapsed.
        presence
    ]
    return output, state


if __name__ == '__main__':
    # Demo script: build a TF1 graph that unrolls an AIRCell for a fixed
    # number of steps over a batch of images and reads off the final canvas.
    learning_rate = 1e-4  # NOTE(review): defined but never used in the visible code
    batch_size = 10
    img_size = 50, 50     # (height, width) of the input image
    crop_size = 20, 20    # (height, width) of the attention crop
    n_latent = 10         # size of the per-step latent code
    n_steps = 3           # number of unroll steps

    # Graph-mode input placeholder (TF1 API): one image per batch element.
    x = tf.placeholder(tf.float32, (batch_size, ) + img_size, name='inpt')

    # Latent transition RNN core passed into the AIR cell.
    transition = snt.GRU(n_latent)
    air = AIRCell(img_size, crop_size, n_latent, transition)
    initial_state = air.initial_state(x)

    # dynamic_rnn needs an input sequence only to fix the number of steps;
    # the actual values are zeros (time-major: [n_steps, batch, 1]).
    dummy_sequence = tf.zeros((n_steps, batch_size, 1), name='dummy_sequence')
    outputs, state = tf.nn.dynamic_rnn(air,
                                       dummy_sequence,
                                       initial_state=initial_state,
                                       time_major=True)

    # Unpack the per-step output tensors emitted by AIRCell.
    canvas, crop, what, what_loc, what_scale, where, where_loc, where_scale, \
        presence_prob, presence = outputs

    # Restore the canvas to [n_steps, batch, height, width] and take the
    # canvas produced by the last step.
    canvas = tf.reshape(canvas, (
        n_steps,
        batch_size,
    ) + tuple(img_size))
    final_canvas = canvas[-1]