def __init__(self, ob_space, ac_space, subgoal_space, intrinsic_type):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space),
                                    name='x')
        self.action_prev = action_prev = tf.placeholder(tf.float32,
                                                        [None, ac_space],
                                                        name='action_prev')
        self.reward_prev = reward_prev = tf.placeholder(tf.float32, [None, 1],
                                                        name='reward_prev')
        self.subgoal = subgoal = tf.placeholder(tf.float32,
                                                [None, subgoal_space],
                                                name='subgoal')
        self.intrinsic_type = intrinsic_type

        with tf.variable_scope('encoder'):
            x = tf.image.resize_images(x, [84, 84])
            x = x / 255.0
            self.p = x
            x = tf.nn.relu(conv2d(x, 16, "l1", [8, 8], [4, 4]))
            x = tf.nn.relu(conv2d(x, 32, "l2", [4, 4], [2, 2]))
            self.f = tf.reduce_mean(x, axis=[1, 2])
            x = flatten(x)

        with tf.variable_scope('sub_policy'):
            x = tf.nn.relu(
                linear(x, 256, "fc", normalized_columns_initializer(0.01)))
            x = tf.concat([x, action_prev], axis=1)
            x = tf.concat([x, reward_prev], axis=1)
            x = tf.concat([x, subgoal], axis=1)

            # introduce a "fake" batch dimension of 1 after flatten
            # so that we can do LSTM over time dim
            x = tf.expand_dims(x, [0])

            size = 256
            lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)
            self.state_size = lstm.state_size
            step_size = tf.shape(self.x)[:1]

            c_init = np.zeros((1, lstm.state_size.c), np.float32)
            h_init = np.zeros((1, lstm.state_size.h), np.float32)
            self.state_init = [c_init, h_init]
            c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
            h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
            self.state_in = [c_in, h_in]

            state_in = rnn.LSTMStateTuple(c_in, h_in)

            lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
                lstm,
                x,
                initial_state=state_in,
                sequence_length=step_size,
                time_major=False)
            lstm_c, lstm_h = lstm_state
            lstm_outputs = tf.reshape(lstm_outputs, [-1, size])
            self.logits = linear(lstm_outputs, ac_space, "action",
                                 normalized_columns_initializer(0.01))
            self.vf = tf.reshape(
                linear(lstm_outputs, 1, "value",
                       normalized_columns_initializer(1.0)), [-1])
            self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
            self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          tf.get_variable_scope().name)
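A hedged usage sketch of stepping such a recurrent policy one frame at a time; `pi` (an instance of the class above), `sess`, `obs`, `a_prev`, `r_prev`, and `g` are assumed names, not defined in the snippet itself:

# Hedged sketch: carry the LSTM state across environment steps.
c, h = pi.state_init
action, value, c, h = sess.run(
    [pi.sample, pi.vf, pi.state_out[0], pi.state_out[1]],
    feed_dict={
        pi.x: [obs],                 # single observation -> batch of 1
        pi.action_prev: [a_prev],    # one-hot previous action
        pi.reward_prev: [[r_prev]],  # scalar previous reward
        pi.subgoal: [g],             # current subgoal vector
        pi.state_in[0]: c,           # LSTM cell state from the last step
        pi.state_in[1]: h,           # LSTM hidden state from the last step
    })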
Example #2
        def Demo_Encoder(s_h,
                         per,
                         seq_lengths,
                         scope='Demo_Encoder',
                         reuse=False):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warning(scope.name)
                state_features = tf.reshape(
                    State_Encoder(tf.reshape(s_h, [-1, self.h, self.w, depth]),
                                  tf.reshape(per, [-1, self.per_dim]),
                                  self.batch_size * max_demo_len,
                                  reuse=reuse),
                    [self.batch_size, max_demo_len, -1])
                if self.encoder_rnn_type == 'bilstm':
                    fcell = rnn.BasicLSTMCell(num_units=math.ceil(
                        self.num_lstm_cell_units),
                                              state_is_tuple=True)
                    bcell = rnn.BasicLSTMCell(num_units=math.floor(
                        self.num_lstm_cell_units),
                                              state_is_tuple=True)
                    new_h, cell_state = tf.nn.bidirectional_dynamic_rnn(
                        fcell,
                        bcell,
                        state_features,
                        sequence_length=seq_lengths,
                        dtype=tf.float32)
                    new_h = tf.reduce_sum(tf.stack(new_h, axis=2), axis=2)
                    cell_state = rnn.LSTMStateTuple(
                        tf.reduce_sum(tf.stack([cs.c for cs in cell_state],
                                               axis=1),
                                      axis=1),
                        tf.reduce_sum(tf.stack([cs.h for cs in cell_state],
                                               axis=1),
                                      axis=1))
                elif self.encoder_rnn_type == 'lstm':
                    cell = rnn.BasicLSTMCell(
                        num_units=self.num_lstm_cell_units,
                        state_is_tuple=True)
                    new_h, cell_state = tf.nn.dynamic_rnn(
                        cell=cell,
                        dtype=tf.float32,
                        sequence_length=seq_lengths,
                        inputs=state_features)
                elif self.encoder_rnn_type == 'rnn':
                    cell = rnn.BasicRNNCell(num_units=self.num_lstm_cell_units)
                    new_h, cell_state = tf.nn.dynamic_rnn(
                        cell=cell,
                        dtype=tf.float32,
                        sequence_length=seq_lengths,
                        inputs=state_features)
                elif self.encoder_rnn_type == 'gru':
                    cell = rnn.GRUCell(num_units=self.num_lstm_cell_units)
                    new_h, cell_state = tf.nn.dynamic_rnn(
                        cell=cell,
                        dtype=tf.float32,
                        sequence_length=seq_lengths,
                        inputs=state_features)
                else:
                    raise ValueError('Unknown encoder rnn type')

                if self.concat_state_feature_direct_prediction:
                    all_states = tf.concat([new_h, state_features], axis=-1)
                else:
                    all_states = new_h
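                # Note: the .h/.c lookups in the return below assume an
                # LSTMStateTuple; the 'rnn' and 'gru' branches above yield a
                # plain state tensor, for which this access would fail.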
                return all_states, cell_state.h, cell_state.c
Example #3
def cudnn_lstm_state_to_state_tuples(cudnn_lstm_state):
    """Convert CudnnLSTM format to tuple of LSTMStateTuples."""
    h, c = cudnn_lstm_state
    return tuple(
        rnn.LSTMStateTuple(h=h_i, c=c_i)
        for h_i, c_i in zip(tf.unstack(h), tf.unstack(c)))
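The packing direction is the obvious inverse; a minimal sketch of the `state_tuples_to_cudnn_lstm_state` counterpart referenced in Example #14 below, assuming each element is an `rnn.LSTMStateTuple`:

def state_tuples_to_cudnn_lstm_state(lstm_state_tuples):
    """Sketch: tuple of LSTMStateTuples -> CudnnLSTM (h, c) format."""
    # Stacking per-layer [batch, units] states restores the
    # [num_layers, batch, units] layout that CudnnLSTM expects.
    return (tf.stack([s.h for s in lstm_state_tuples]),
            tf.stack([s.c for s in lstm_state_tuples]))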
Example #4
    @property
    def state_size(self):
        return rnn.LSTMStateTuple(self._num_units, self._num_units)
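A property like this normally lives on a custom cell class; a minimal illustrative skeleton (every name except `state_size` is assumed, and the cell arithmetic is omitted):

class MinimalLSTMLikeCell(tf.nn.rnn_cell.RNNCell):
    """Illustrative skeleton only, showing where state_size sits."""

    def __init__(self, num_units):
        super(MinimalLSTMLikeCell, self).__init__()
        self._num_units = num_units

    @property
    def state_size(self):
        # dynamic_rnn reads this to size and route the (c, h) state pair.
        return rnn.LSTMStateTuple(self._num_units, self._num_units)

    @property
    def output_size(self):
        return self._num_units

    def call(self, inputs, state):
        raise NotImplementedError('cell arithmetic omitted in this sketch')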
Example #5
    def __init__(self,
                 reversed_dict,
                 article_max_len,
                 summary_max_len,
                 args,
                 forward_only=False):
        self.vocabulary_size = len(reversed_dict)
        self.embedding_size = args.embedding_size
        self.num_hidden = args.num_hidden
        self.num_layers = args.num_layers
        self.learning_rate = args.learning_rate
        self.beam_width = args.beam_width
        if not forward_only:
            self.keep_prob = args.keep_prob
        else:
            self.keep_prob = 1.0
        self.cell = tf.nn.rnn_cell.BasicLSTMCell
        with tf.variable_scope("decoder/projection"):
            self.projection_layer = tf.layers.Dense(self.vocabulary_size,
                                                    use_bias=False)

        self.batch_size = tf.placeholder(tf.int32, (), name="batch_size")
        self.X = tf.placeholder(tf.int32, [None, article_max_len])
        self.X_len = tf.placeholder(tf.int32, [None])
        self.decoder_input = tf.placeholder(tf.int32, [None, summary_max_len])
        self.decoder_len = tf.placeholder(tf.int32, [None])
        self.decoder_target = tf.placeholder(tf.int32, [None, summary_max_len])
        self.global_step = tf.Variable(0, trainable=False)

        with tf.name_scope("embedding"):
            if not forward_only and args.glove:
                init_embeddings = tf.constant(get_init_embedding(
                    reversed_dict, self.embedding_size),
                                              dtype=tf.float32)
            else:
                init_embeddings = tf.random_uniform(
                    [self.vocabulary_size, self.embedding_size], -1.0, 1.0)
            self.embeddings = tf.get_variable("embeddings",
                                              initializer=init_embeddings)
            self.encoder_emb_inp = tf.transpose(tf.nn.embedding_lookup(
                self.embeddings, self.X),
                                                perm=[1, 0, 2])
            self.decoder_emb_inp = tf.transpose(tf.nn.embedding_lookup(
                self.embeddings, self.decoder_input),
                                                perm=[1, 0, 2])

        with tf.name_scope("encoder"):
            fw_cells = [
                self.cell(self.num_hidden) for _ in range(self.num_layers)
            ]
            bw_cells = [
                self.cell(self.num_hidden) for _ in range(self.num_layers)
            ]
            fw_cells = [rnn.DropoutWrapper(cell) for cell in fw_cells]
            bw_cells = [rnn.DropoutWrapper(cell) for cell in bw_cells]

            encoder_outputs, encoder_state_fw, encoder_state_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
                fw_cells,
                bw_cells,
                self.encoder_emb_inp,
                sequence_length=self.X_len,
                time_major=True,
                dtype=tf.float32)
            self.encoder_output = tf.concat(encoder_outputs, 2)
            encoder_state_c = tf.concat(
                (encoder_state_fw[0].c, encoder_state_bw[0].c), 1)
            encoder_state_h = tf.concat(
                (encoder_state_fw[0].h, encoder_state_bw[0].h), 1)
            self.encoder_state = rnn.LSTMStateTuple(c=encoder_state_c,
                                                    h=encoder_state_h)

        with tf.name_scope("decoder"), tf.variable_scope(
                "decoder") as decoder_scope:
            decoder_cell = self.cell(self.num_hidden * 2)

            if not forward_only:
                attention_states = tf.transpose(self.encoder_output, [1, 0, 2])
                attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
                    self.num_hidden * 2,
                    attention_states,
                    memory_sequence_length=self.X_len,
                    normalize=True)
                decoder_cell = tf.contrib.seq2seq.AttentionWrapper(
                    decoder_cell,
                    attention_mechanism,
                    attention_layer_size=self.num_hidden * 2)
                initial_state = decoder_cell.zero_state(
                    dtype=tf.float32, batch_size=self.batch_size)
                initial_state = initial_state.clone(
                    cell_state=self.encoder_state)
                helper = tf.contrib.seq2seq.TrainingHelper(
                    self.decoder_emb_inp, self.decoder_len, time_major=True)
                decoder = tf.contrib.seq2seq.BasicDecoder(
                    decoder_cell, helper, initial_state)
                outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
                    decoder, output_time_major=True, scope=decoder_scope)
                self.decoder_output = outputs.rnn_output
                self.logits = tf.transpose(self.projection_layer(
                    self.decoder_output),
                                           perm=[1, 0, 2])
                self.logits_reshape = tf.concat([
                    self.logits,
                    tf.zeros([
                        self.batch_size, summary_max_len -
                        tf.shape(self.logits)[1], self.vocabulary_size
                    ])
                ],
                                                axis=1)
            else:
                tiled_encoder_output = tf.contrib.seq2seq.tile_batch(
                    tf.transpose(self.encoder_output, perm=[1, 0, 2]),
                    multiplier=self.beam_width)
                tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
                    self.encoder_state, multiplier=self.beam_width)
                tiled_seq_len = tf.contrib.seq2seq.tile_batch(
                    self.X_len, multiplier=self.beam_width)
                attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
                    self.num_hidden * 2,
                    tiled_encoder_output,
                    memory_sequence_length=tiled_seq_len,
                    normalize=True)
                decoder_cell = tf.contrib.seq2seq.AttentionWrapper(
                    decoder_cell,
                    attention_mechanism,
                    attention_layer_size=self.num_hidden * 2)
                initial_state = decoder_cell.zero_state(
                    dtype=tf.float32,
                    batch_size=self.batch_size * self.beam_width)
                initial_state = initial_state.clone(
                    cell_state=tiled_encoder_final_state)
                decoder = tf.contrib.seq2seq.BeamSearchDecoder(
                    cell=decoder_cell,
                    embedding=self.embeddings,
                    start_tokens=tf.fill([self.batch_size], tf.constant(2)),
                    end_token=tf.constant(3),
                    initial_state=initial_state,
                    beam_width=self.beam_width,
                    output_layer=self.projection_layer)
                outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
                    decoder,
                    output_time_major=True,
                    maximum_iterations=summary_max_len,
                    scope=decoder_scope)
                self.prediction = tf.transpose(outputs.predicted_ids,
                                               perm=[1, 2, 0])

        with tf.name_scope("loss"):
            if not forward_only:
                crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=self.logits_reshape, labels=self.decoder_target)
                weights = tf.sequence_mask(self.decoder_len,
                                           summary_max_len,
                                           dtype=tf.float32)
                self.loss = tf.reduce_sum(crossent * weights /
                                          tf.to_float(self.batch_size))

                params = tf.trainable_variables()
                gradients = tf.gradients(self.loss, params)
                clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
                optimizer = tf.train.AdamOptimizer(self.learning_rate)
                self.update = optimizer.apply_gradients(
                    zip(clipped_gradients, params),
                    global_step=self.global_step)
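A hedged sketch of one training step for the model above; `model` (an instance of this class), `sess`, and the `batch_*` arrays are assumed names prepared elsewhere:

# Hedged sketch: run one optimizer step on a prepared batch.
feed = {
    model.batch_size: len(batch_x),
    model.X: batch_x,                    # [batch, article_max_len] token ids
    model.X_len: batch_x_len,            # true article lengths
    model.decoder_input: batch_dec_in,   # [batch, summary_max_len] token ids
    model.decoder_len: batch_dec_len,    # true summary lengths
    model.decoder_target: batch_target,  # shifted summary token ids
}
_, step, loss = sess.run([model.update, model.global_step, model.loss], feed)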
Example #6
    def __init__(self, ob_space, ac_space):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        # CNN feature extraction
        x = layers.convolution2d(x,
                                 num_outputs=32,
                                 kernel_size=8,
                                 stride=4,
                                 padding='SAME',
                                 activation_fn=tf.nn.relu)
        x = layers.convolution2d(x,
                                 num_outputs=64,
                                 kernel_size=4,
                                 stride=2,
                                 padding='SAME',
                                 activation_fn=tf.nn.relu)
        x = layers.convolution2d(x,
                                 num_outputs=64,
                                 kernel_size=3,
                                 stride=1,
                                 padding='SAME',
                                 activation_fn=tf.nn.relu)
        x = layers.fully_connected(flatten(x),
                                   num_outputs=512,
                                   activation_fn=tf.nn.relu)
        # introduce a "fake" batch dimension of 1 after the fully connected
        # layer so that the LSTM below can run over the time dimension
        x = tf.expand_dims(x, [0])

        # LSTM
        size = 256
        if use_tf100_api:
            lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)
        else:
            lstm = rnn.rnn_cell.BasicLSTMCell(size, state_is_tuple=True)
        self.state_size = lstm.state_size
        step_size = tf.shape(self.x)[:1]

        c_init = np.zeros((1, lstm.state_size.c), np.float32)
        h_init = np.zeros((1, lstm.state_size.h), np.float32)
        self.state_init = [c_init, h_init]
        c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
        h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
        self.state_in = [c_in, h_in]

        if use_tf100_api:
            state_in = rnn.LSTMStateTuple(c_in, h_in)
        else:
            state_in = rnn.rnn_cell.LSTMStateTuple(c_in, h_in)

        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(lstm,
                                                     x,
                                                     initial_state=state_in,
                                                     sequence_length=step_size,
                                                     time_major=False)

        lstm_c, lstm_h = lstm_state
        x = tf.reshape(lstm_outputs, [-1, size])

        # Policy func
        dim_x = int(x.shape[1])
        policy_w0 = tf.Variable(np.random.randn(dim_x, ac_space) * 0.01,
                                dtype=tf.float32)
        policy_b0 = tf.Variable(tf.zeros([ac_space]), dtype=tf.float32)

        # Value func
        value_w0 = tf.Variable(np.random.randn(dim_x, 1) * 0.01,
                               dtype=tf.float32)
        value_b0 = tf.Variable(tf.zeros([1]), dtype=tf.float32)

        # Policy and value outputs
        self.logits = tf.matmul(x, policy_w0) + policy_b0
        self.vf = tf.reshape(tf.matmul(x, value_w0) + value_b0, [-1])

        self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          tf.get_variable_scope().name)
Example #7
    def __init__(self, dtype, *param, fn):
        super(LSTMMLP, self).__init__(dtype)
        nonlin_str = param[0]
        nonlin = getattr(tf.nn, nonlin_str)
        weight = float(param[1])

        check = 0
        for i, val in enumerate(param[2:]):
            if val == '/':
                check = i

        rnnDim = [int(i) for i in param[2:check + 2]]
        mlpDim = [int(i) for i in param[check + 3:]]

        self.input = tf.placeholder(
            dtype, shape=[None, None, rnnDim[0]],
            name=fn.input_names[0])  # [batch, time, dim]

        length_ = tf.placeholder(dtype, name='length')  # [batch]
        length_ = tf.cast(length_, dtype=tf.int32)
        self.seq_length = tf.reshape(length_, [-1])

        # LSTM cells
        cells = []
        state_size = []  # per-layer sizes of c and h (equal for an LSTM)
        recurrent_state_size = 0

        for size in rnnDim[1:]:
            cell = rnn.LSTMCell(
                size,
                state_is_tuple=True,
                initializer=tf.contrib.layers.xavier_initializer())
            cells.append(cell)
            recurrent_state_size += cell.state_size.c + cell.state_size.h
            state_size.append(cell.state_size.c)
            state_size.append(cell.state_size.h)
        cell = rnn.MultiRNNCell(cells, state_is_tuple=True)

        hiddenStateDim = tf.identity(tf.constant(value=[recurrent_state_size],
                                                 dtype=tf.int32),
                                     name='h_dim')
        init_states = tf.split(tf.placeholder(
            dtype=dtype, shape=[None, recurrent_state_size], name='h_init'),
                               num_or_size_splits=state_size,
                               axis=1)

        init_state_list = []
        for i in range(len(cells)):
            init_state_list.append(
                rnn.LSTMStateTuple(init_states[2 * i], init_states[2 * i + 1]))
        init_state_tuple = tuple(init_state_list)

        # LSTM output
        LSTMOutput, final_state = tf.nn.dynamic_rnn(
            cell=cell,
            inputs=self.input,
            sequence_length=self.seq_length,
            dtype=dtype,
            initial_state=init_state_tuple)
        # FCN
        top = tf.reshape(LSTMOutput, shape=[-1, rnnDim[-1]], name='fcIn')

        layer_n = 0
        for dim in mlpDim[:-1]:
            with tf.name_scope('hidden_layer' + repr(layer_n)):
                top = fully_connected(
                    activation_fn=nonlin,
                    inputs=top,
                    num_outputs=dim,
                    weights_initializer=tf.contrib.layers.xavier_initializer(),
                    trainable=True)
                layer_n += 1

        with tf.name_scope('output_layer'):
            wo = tf.Variable(
                tf.random_uniform(dtype=dtype,
                                  shape=[mlpDim[-2], mlpDim[-1]],
                                  minval=-float(weight),
                                  maxval=float(weight)))
            bo = tf.Variable(
                tf.random_uniform(dtype=dtype,
                                  shape=[mlpDim[-1]],
                                  minval=-float(weight),
                                  maxval=float(weight)))
            top = tf.matmul(top, wo) + bo

        self.output = tf.reshape(top,
                                 [-1, tf.shape(self.input)[1], mlpDim[-1]])

        final_state_list = []
        for state_tuple in final_state:
            final_state_list.append(state_tuple.c)
            final_state_list.append(state_tuple.h)

        hiddenState = tf.concat([state for state in final_state_list],
                                axis=1,
                                name='h_state')

        self.l_param_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        self.a_param_list = self.l_param_list
        self.net = None
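A hedged sketch of evaluating this network, with `net` (an instance of the class above), `sess`, `seqs`, `lens`, `batch`, and `rec_size` (the accumulated `recurrent_state_size`) all assumed names; the tensor lookups follow the `name=` arguments in `__init__` and assume the graph was built at the root scope:

# Hedged sketch: feed a zero recurrent state and run the MLP head.
graph = tf.get_default_graph()
length = graph.get_tensor_by_name('length:0')
h_init = graph.get_tensor_by_name('h_init:0')
out = sess.run(net.output, feed_dict={
    net.input: seqs,                      # [batch, time, rnnDim[0]]
    length: lens,                         # one length per sequence
    h_init: np.zeros((batch, rec_size)),  # fresh-episode zero state
})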
Example #8
def _create_decoder(n_neurons,
                    n_layers,
                    keep_prob,
                    dec_in_keep_prob,
                    batch_size,
                    encoder_outputs,
                    encoder_state,
                    encoder_lengths,
                    decoding_inputs,
                    decoding_lengths,
                    synth_length,
                    n_ch,
                    scope,
                    max_sequence_size,
                    n_mixtures,
                    wrapper,
                    cell_type,
                    input_layer,
                    use_attention=False,
                    n_code=256,
                    n_bijectors=10,
                    n_made_layers=3,
                    n_made_code_factor=2,
                    model='mdn',
                    variational='MAF'):
    if model == 'mdn':
        n_outputs = n_ch * n_mixtures + n_ch * n_mixtures + n_mixtures
    elif model == 'dml':
        n_outputs = n_mixtures * n_ch * 3
    else:
        n_outputs = n_ch
    output_layer = tfl.Dense(n_outputs, name='output_projection')

    with tf.variable_scope('forward'):
        cells = _create_rnn_cell(n_neurons, n_layers, keep_prob, wrapper,
                                 cell_type)

    if use_attention:
        attn_mech = tf.contrib.seq2seq.LuongAttention(cells.output_size,
                                                      encoder_outputs,
                                                      encoder_lengths,
                                                      scale=True)
        cells = tf.contrib.seq2seq.AttentionWrapper(
            cell=cells,
            attention_mechanism=attn_mech,
            attention_layer_size=cells.output_size,
            alignment_history=False)
        initial_state = cells.zero_state(dtype=tf.float32,
                                         batch_size=batch_size)
        initial_state = initial_state.clone(cell_state=encoder_state)
    else:
        initial_state = encoder_state

    losses = tf.Variable(0.0, trainable=False)
    if variational is not None:
        if use_attention:
            states = tf.concat(initial_state.cell_state, -1)
            # TODO: How do I properly combine attention and recreate the cells?
            # states = tf.concat([
            #     tf.concat(initial_state.cell_state, -1),
            #     initial_state.attention_state], -1)
            raise NotImplementedError(
                'not working yet... cannot combine attention and variational layers'
            )
        else:
            states = tf.concat(initial_state, -1)
        state_size = int(states.shape[-1])

        z_mus = tfl.dense(states, n_code, name='mus')
        z_log_sigmas = tf.minimum(
            5.0,
            tf.maximum(
                1e-3,
                tf.nn.softplus(tfl.dense(states, n_code, name='log_sigmas'))))
        if isinstance(initial_state[0], rnn.LSTMStateTuple):
            z_mus = tf.reshape(z_mus, (2 * batch_size, n_code))
            z_log_sigmas = tf.reshape(z_log_sigmas, (2 * batch_size, n_code))
        if variational == 'VAE':
            prior = tfd.MultivariateNormalDiag(loc=z_mus,
                                               scale_diag=tf.exp(z_log_sigmas))
            z_sample = tf.concat((states, prior.sample()), -1)
            # z_sample = prior.sample()
            losses -= tf.reduce_mean(prior.log_prob(z_mus))
        elif variational == 'IAF':
            prior = tfd.MultivariateNormalDiag(loc=z_mus,
                                               scale_diag=tf.exp(z_log_sigmas))
            n_made_code = max(n_code + 1, n_made_code_factor * n_code)
            bijectors = []
            for i in range(n_bijectors):
                bijector = tfb.Invert(
                    tfb.MaskedAutoregressiveFlow(
                        shift_and_log_scale_fn=tfb.
                        masked_autoregressive_default_template(
                            hidden_layers=[n_made_code] * n_made_layers)))
                # chain each IAF step with a permutation so successive
                # steps see the dimensions in a different order
                bijectors.append(bijector)
                bijectors.append(
                    tfb.Permute(permutation=list(range(n_code - 1, -1, -1))))
            flow_bijector = tfb.Chain(list(reversed(bijectors[:-1])))
            flow = tfd.TransformedDistribution(distribution=prior,
                                               bijector=flow_bijector)
            losses -= tf.reduce_mean(flow.log_prob(z_mus))
            z_sample = tf.concat((states, prior.sample()), -1)
            for bijector in reversed(flow.bijector.bijectors):
                z_sample = bijector.forward(z_sample)
        elif variational == 'MAF':
            prior = tfd.MultivariateNormalDiag(loc=z_mus,
                                               scale_diag=tf.exp(z_log_sigmas))
            n_made_code = max(n_code + 1, n_made_code_factor * n_code)
            bijectors = []
            for i in range(n_bijectors):
                bijector = tfb.MaskedAutoregressiveFlow(
                    shift_and_log_scale_fn=tfb.
                    masked_autoregressive_default_template(
                        hidden_layers=[n_made_code] * n_made_layers))
                # chain each MAF step with a permutation so successive
                # steps see the dimensions in a different order
                bijectors.append(bijector)
                bijectors.append(
                    tfb.Permute(permutation=list(range(n_code - 1, -1, -1))))
            flow_bijector = tfb.Chain(list(reversed(bijectors[:-1])))
            flow = tfd.TransformedDistribution(distribution=prior,
                                               bijector=flow_bijector)
            losses -= tf.reduce_mean(flow.log_prob(z_mus))
            z_sample = tf.concat((states, prior.sample()), -1)
            for bijector in reversed(flow.bijector.bijectors):
                z_sample = bijector.forward(z_sample)
        elif variational == 'VQ':
            raise NotImplementedError('Nope.')
        else:
            raise NotImplementedError('Nope.')
        z_dec = tfl.dense(z_sample, state_size)
        if isinstance(initial_state[0], rnn.LSTMStateTuple):
            z_dec_rsz = tf.reshape(z_dec, [2, batch_size, n_neurons, n_layers])
            z = tuple(
                rnn.LSTMStateTuple(*[
                    tf.reshape(c, [batch_size, n_neurons]) for c in tf.split(
                        tf.reshape(el, [2, batch_size, n_neurons]), 2, axis=0)
                ]) for el in tf.split(z_dec_rsz, n_layers, axis=-1))
        else:
            z_dec_rsz = tf.reshape(z_dec, [batch_size, n_neurons, n_layers])
            z = tuple(
                tf.reshape(el, [batch_size, n_neurons])
                for el in tf.split(z_dec_rsz, n_layers, axis=-1))
    else:
        z = initial_state

    decoding_inputs_embed = tf.nn.dropout(input_layer(decoding_inputs),
                                          dec_in_keep_prob)

    helper = tf.contrib.seq2seq.TrainingHelper(
        inputs=decoding_inputs_embed,
        sequence_length=decoding_lengths,
        time_major=False)
    decoder = tf.contrib.seq2seq.BasicDecoder(cell=cells,
                                              helper=helper,
                                              initial_state=z,
                                              output_layer=output_layer)
    outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder,
        output_time_major=False,
        impute_finished=True,
        maximum_iterations=max_sequence_size)

    if model == 'mdn':
        helper = MDNRegressionHelper(batch_size=batch_size,
                                     max_sequence_size=synth_length,
                                     n_ch=n_ch,
                                     n_mixtures=n_mixtures,
                                     embed=input_layer)
    elif model == 'dml':
        helper = DMLRegressionHelper(batch_size=batch_size,
                                     max_sequence_size=synth_length,
                                     n_ch=n_ch,
                                     n_mixtures=n_mixtures,
                                     embed=input_layer)
    else:
        raise NotImplementedError('Not implemented.')

    scope.reuse_variables()
    infer_decoder = tf.contrib.seq2seq.BasicDecoder(cell=cells,
                                                    helper=helper,
                                                    initial_state=z,
                                                    output_layer=output_layer)
    infer_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
        infer_decoder,
        output_time_major=False,
        impute_finished=True,
        maximum_iterations=synth_length)
    # infer_logits = tf.identity(infer_outputs.sample_id, name='infer_logits')
    return outputs, infer_outputs, losses
Example #9
    def build_encoder(self):
        print("building encoder..")
        with tf.variable_scope('encoder'):
            # Building encoder_cell
            self.encoder_cell = self.build_encoder_cell()

            # Initialize encoder_embeddings to have variance=1.
            sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
            initializer = tf.random_uniform_initializer(-sqrt3,
                                                        sqrt3,
                                                        dtype=self.dtype)

            self.encoder_embeddings = tf.get_variable(
                name='embedding',
                shape=[self.num_encoder_symbols, self.embedding_size],
                initializer=initializer,
                dtype=self.dtype)

            # Embedded_inputs: [batch_size, time_step, embedding_size]
            self.encoder_inputs_embedded = tf.nn.embedding_lookup(
                params=self.encoder_embeddings, ids=self.encoder_inputs)

            # Input projection layer to feed embedded inputs to the cell
            # ** Essential when use_residual=True to match input/output dims
            input_layer = Dense(self.encoder_hidden_units,
                                dtype=self.dtype,
                                name='input_projection')

            # Embedded inputs having gone through input projection layer
            self.encoder_inputs_embedded = input_layer(
                self.encoder_inputs_embedded)

            # Encode input sequences into context vectors:
            # encoder_outputs: [batch_size, max_time_step, cell_output_size]
            # encoder_state: [batch_size, cell_output_size]

            if self.bidirectional:
                with tf.variable_scope("BidirectionalEncoder"):
                    ((encoder_fw_outputs, encoder_bw_outputs), (
                        encoder_fw_state, encoder_bw_state
                    )) = (
                        tf.nn.bidirectional_dynamic_rnn(
                            # cell_fw=self.encoder_cell[0] if isinstance(self.encoder_cell, tuple) else self.encoder_cell,
                            # cell_bw=self.encoder_cell[1] if isinstance(self.encoder_cell, tuple) else self.encoder_cell,
                            cell_fw=self.encoder_cell,
                            cell_bw=self.encoder_cell,
                            inputs=self.encoder_inputs_embedded,
                            sequence_length=self.encoder_inputs_length,
                            time_major=False,
                            dtype=self.dtype))

                    self.outputs = tf.concat(
                        (encoder_fw_outputs, encoder_bw_outputs),
                        2,
                        name="bidirectional_output_concat")
                    # output = tf.concat([output_fw, output_bw], axis=-1)

                    if isinstance(encoder_fw_state,
                                  rnn.LSTMStateTuple):  # LstmCell
                        state_c = tf.concat(
                            (encoder_fw_state.c, encoder_bw_state.c),
                            1,
                            name="bidirectional_concat_c")
                        state_h = tf.concat(
                            (encoder_fw_state.h, encoder_bw_state.h),
                            1,
                            name="bidirectional_concat_h")
                        self.state = rnn.LSTMStateTuple(c=state_c, h=state_h)
                    elif isinstance(encoder_fw_state, tuple) \
                            and isinstance(encoder_fw_state[0], rnn.LSTMStateTuple):  # MultiLstmCell
                        self.state = tuple(
                            map(
                                lambda fw_state, bw_state: rnn.LSTMStateTuple(
                                    c=tf.concat((fw_state.c, bw_state.c),
                                                1,
                                                name="bidirectional_concat_c"),
                                    h=tf.concat(
                                        (fw_state.h, bw_state.h),
                                        1,
                                        name="bidirectional_concat_h")),
                                encoder_fw_state, encoder_bw_state))
                    else:
                        self.state = tf.concat(
                            (encoder_fw_state, encoder_bw_state),
                            1,
                            name="bidirectional_state_concat")
                self.encoder_outputs, self.encoder_last_state = self.outputs, self.state

            else:
                self.encoder_outputs, self.encoder_last_state = tf.nn.dynamic_rnn(
                    cell=self.encoder_cell,
                    inputs=self.encoder_inputs_embedded,
                    sequence_length=self.encoder_inputs_length,
                    dtype=self.dtype,
                    time_major=False)
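The snippet relies on a `build_encoder_cell` helper defined elsewhere in the class; a minimal sketch of what such a method typically returns (`self.depth` is an assumed attribute, and the real helper may add dropout or residual wrappers):

def build_encoder_cell(self):
    # Minimal sketch only, not the repository's actual helper.
    cells = [rnn.LSTMCell(self.encoder_hidden_units)
             for _ in range(self.depth)]
    return rnn.MultiRNNCell(cells) if len(cells) > 1 else cells[0]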
Example #10
    @property
    def state_size_flat(self):
        return contrib_rnn.LSTMStateTuple([self._param_count],
                                          [self._param_count])
Example #11
    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM) with bottlenecking.

    Args:
      inputs: Input tensor at the current timestep.
      state: Tuple of tensors, the state and output at the previous timestep.
      scope: Optional scope.

    Returns:
      A tuple where the first element is the LSTM output and the second is
      an LSTMStateTuple of the state at the current timestep.
    """
        scope = scope or 'conv_lstm_cell'
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            c, h = state

            # unflatten state if necessary
            if self._flatten_state:
                c = tf.reshape(c, [-1] + self.output_size)
                h = tf.reshape(h, [-1] + self.output_size)

            # summary of input passed into cell
            if self._viz_gates:
                slim.summaries.add_histogram_summary(inputs, 'cell_input')
            if self._pre_bottleneck:
                bottleneck = inputs
            else:
                bottleneck = contrib_layers.separable_conv2d(
                    tf.concat([inputs, h], 3),
                    self._num_units,
                    self._filter_size,
                    depth_multiplier=1,
                    activation_fn=self._activation,
                    normalizer_fn=None,
                    scope='bottleneck')

                if self._viz_gates:
                    slim.summaries.add_histogram_summary(
                        bottleneck, 'bottleneck')

            concat = contrib_layers.separable_conv2d(bottleneck,
                                                     4 * self._num_units,
                                                     self._filter_size,
                                                     depth_multiplier=1,
                                                     activation_fn=None,
                                                     normalizer_fn=None,
                                                     scope='gates')

            i, j, f, o = tf.split(concat, 4, 3)

            new_c = (c * tf.sigmoid(f + self._forget_bias) +
                     tf.sigmoid(i) * self._activation(j))
            if self._clip_state:
                new_c = tf.clip_by_value(new_c, -6, 6)
            new_h = self._activation(new_c) * tf.sigmoid(o)
            # summary of cell output and new state
            if self._viz_gates:
                slim.summaries.add_histogram_summary(new_h, 'cell_output')
                slim.summaries.add_histogram_summary(new_c, 'cell_state')

            output = new_h
            if self._output_bottleneck:
                output = tf.concat([new_h, bottleneck], axis=3)

            # reflatten state to store it
            if self._flatten_state:
                new_c = tf.reshape(new_c, [-1, self._param_count])
                new_h = tf.reshape(new_h, [-1, self._param_count])

            return output, contrib_rnn.LSTMStateTuple(new_c, new_h)
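A hedged sketch of stepping a cell like this by hand instead of through `tf.nn.dynamic_rnn`; `cell`, `batch`, `height`, `width`, `input_depth`, and `num_units` are illustrative names matching the spatial state the cell expects:

# Hedged sketch: one manual step of the convolutional LSTM cell.
inputs = tf.placeholder(tf.float32, [batch, height, width, input_depth])
state = contrib_rnn.LSTMStateTuple(
    tf.zeros([batch, height, width, num_units]),  # c
    tf.zeros([batch, height, width, num_units]))  # h
output, new_state = cell(inputs, state)  # new_state is an LSTMStateTuple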
Example #12
    @property
    def state_size(self):
        return contrib_rnn.LSTMStateTuple(
            self._output_size + [self._num_units],
            self._output_size + [self._num_units])
Example #13
    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM) with bottlenecking.

    Includes logic for quantization-aware training. Note that all concats and
    activations use fixed ranges unless stated otherwise.

    Args:
      inputs: Input tensor at the current timestep.
      state: Tuple of tensors, the state at the previous timestep.
      scope: Optional scope.

    Returns:
      A tuple where the first element is the LSTM output and the second is
      an LSTMStateTuple of the state at the current timestep.
    """
        scope = scope or 'conv_lstm_cell'
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            c, h = state

            # Set nodes to be under raw_inputs/ name scope for tfmini export.
            with tf.name_scope(None):
                c = tf.identity(c, name='raw_inputs/init_lstm_c')
                # When pre_bottleneck is enabled, input h handle is in rnn_decoder.py
                if not self._pre_bottleneck:
                    h = tf.identity(h, name='raw_inputs/init_lstm_h')

            # unflatten state if necessary
            if self._flatten_state:
                c = tf.reshape(c, [-1] + self.output_size)
                h = tf.reshape(h, [-1] + self.output_size)

            c_list = tf.split(c, self._groups, axis=3)
            if self._pre_bottleneck:
                inputs_list = tf.split(inputs, self._groups, axis=3)
            else:
                h_list = tf.split(h, self._groups, axis=3)
            out_bottleneck = []
            out_c = []
            out_h = []
            # summary of input passed into cell
            if self._viz_gates:
                slim.summaries.add_histogram_summary(inputs, 'cell_input')

            for k in range(self._groups):
                if self._pre_bottleneck:
                    bottleneck = inputs_list[k]
                else:
                    if self._use_batch_norm:
                        b_x = lstm_utils.quantizable_separable_conv2d(
                            inputs,
                            self._num_units // self._groups,
                            self._filter_size,
                            is_quantized=self._is_quantized,
                            depth_multiplier=1,
                            activation_fn=None,
                            normalizer_fn=None,
                            scope='bottleneck_%d_x' % k)
                        b_h = lstm_utils.quantizable_separable_conv2d(
                            h_list[k],
                            self._num_units // self._groups,
                            self._filter_size,
                            is_quantized=self._is_quantized,
                            depth_multiplier=1,
                            activation_fn=None,
                            normalizer_fn=None,
                            scope='bottleneck_%d_h' % k)
                        b_x = slim.batch_norm(b_x,
                                              scale=True,
                                              is_training=self._is_training,
                                              scope='BatchNorm_%d_X' % k)
                        b_h = slim.batch_norm(b_h,
                                              scale=True,
                                              is_training=self._is_training,
                                              scope='BatchNorm_%d_H' % k)
                        bottleneck = b_x + b_h
                    else:
                        # All concats use fixed quantization ranges to prevent rescaling
                        # at inference. Both |inputs| and |h_list| are tensors resulting
                        # from Relu6 operations so we fix the ranges to [0, 6].
                        bottleneck_concat = lstm_utils.quantizable_concat(
                            [inputs, h_list[k]],
                            axis=3,
                            is_training=False,
                            is_quantized=self._is_quantized,
                            scope='bottleneck_%d/quantized_concat' % k)

                        bottleneck = lstm_utils.quantizable_separable_conv2d(
                            bottleneck_concat,
                            self._num_units // self._groups,
                            self._filter_size,
                            is_quantized=self._is_quantized,
                            depth_multiplier=1,
                            activation_fn=self._activation,
                            normalizer_fn=None,
                            scope='bottleneck_%d' % k)

                concat = lstm_utils.quantizable_separable_conv2d(
                    bottleneck,
                    4 * self._num_units // self._groups,
                    self._filter_size,
                    is_quantized=self._is_quantized,
                    depth_multiplier=1,
                    activation_fn=None,
                    normalizer_fn=None,
                    scope='concat_conv_%d' % k)

                # Since there is no activation in the previous separable conv, we
                # quantize here. A starting range of [-6, 6] is used because the
                # tensors are input to a Sigmoid function that saturates at these
                # ranges.
                concat = lstm_utils.quantize_op(
                    concat,
                    is_training=self._is_training,
                    default_min=-6,
                    default_max=6,
                    is_quantized=self._is_quantized,
                    scope='gates_%d/act_quant' % k)

                # i = input_gate, j = new_input, f = forget_gate, o = output_gate
                i, j, f, o = tf.split(concat, 4, 3)

                f_add = f + self._forget_bias
                f_add = lstm_utils.quantize_op(
                    f_add,
                    is_training=self._is_training,
                    default_min=-6,
                    default_max=6,
                    is_quantized=self._is_quantized,
                    scope='forget_gate_%d/add_quant' % k)
                f_act = tf.sigmoid(f_add)
                # The quantization range is fixed for the sigmoid to ensure that zero
                # is exactly representable.
                f_act = lstm_utils.quantize_op(
                    f_act,
                    is_training=False,
                    default_min=0,
                    default_max=1,
                    is_quantized=self._is_quantized,
                    scope='forget_gate_%d/act_quant' % k)

                a = c_list[k] * f_act
                a = lstm_utils.quantize_op(a,
                                           is_training=self._is_training,
                                           is_quantized=self._is_quantized,
                                           scope='forget_gate_%d/mul_quant' %
                                           k)

                i_act = tf.sigmoid(i)
                # The quantization range is fixed for the sigmoid to ensure that zero
                # is exactly representable.
                i_act = lstm_utils.quantize_op(
                    i_act,
                    is_training=False,
                    default_min=0,
                    default_max=1,
                    is_quantized=self._is_quantized,
                    scope='input_gate_%d/act_quant' % k)

                j_act = self._activation(j)
                # The quantization range is fixed for the relu6 to ensure that zero
                # is exactly representable.
                j_act = lstm_utils.quantize_op(j_act,
                                               is_training=False,
                                               default_min=0,
                                               default_max=6,
                                               is_quantized=self._is_quantized,
                                               scope='new_input_%d/act_quant' %
                                               k)

                b = i_act * j_act
                b = lstm_utils.quantize_op(b,
                                           is_training=self._is_training,
                                           is_quantized=self._is_quantized,
                                           scope='input_gate_%d/mul_quant' % k)

                new_c = a + b
                # The quantization range is fixed to [0, 6] due to an optimization in
                # TFLite. The order of operations is as follows:
                #     Add -> FakeQuant -> Relu6 -> FakeQuant -> Concat.
                # The fakequant ranges to the concat must be fixed to ensure all inputs
                # to the concat have the same range, removing the need for rescaling.
                # The quantization ranges input to the relu6 are propagated to its
                # output. Any mismatch between these two ranges will cause an error.
                new_c = lstm_utils.quantize_op(new_c,
                                               is_training=False,
                                               default_min=0,
                                               default_max=6,
                                               is_quantized=self._is_quantized,
                                               scope='new_c_%d/add_quant' % k)

                if not self._is_quantized:
                    if self._scale_state:
                        normalizer = tf.maximum(
                            1.0,
                            tf.reduce_max(new_c, axis=(1, 2, 3)) / 6)
                        new_c /= tf.reshape(normalizer,
                                            [tf.shape(new_c)[0], 1, 1, 1])
                    elif self._clip_state:
                        new_c = tf.clip_by_value(new_c, -6, 6)

                new_c_act = self._activation(new_c)
                # The quantization range is fixed for the relu6 to ensure that zero
                # is exactly representable.
                new_c_act = lstm_utils.quantize_op(
                    new_c_act,
                    is_training=False,
                    default_min=0,
                    default_max=6,
                    is_quantized=self._is_quantized,
                    scope='new_c_%d/act_quant' % k)

                o_act = tf.sigmoid(o)
                # The quantization range is fixed for the sigmoid to ensure that zero
                # is exactly representable.
                o_act = lstm_utils.quantize_op(o_act,
                                               is_training=False,
                                               default_min=0,
                                               default_max=1,
                                               is_quantized=self._is_quantized,
                                               scope='output_%d/act_quant' % k)

                new_h = new_c_act * o_act
                # The quantization range is fixed since it is input to a concat.
                # A range of [0, 6] is used since |new_h| is a product of ranges [0, 6]
                # and [0, 1].
                new_h_act = lstm_utils.quantize_op(
                    new_h,
                    is_training=False,
                    default_min=0,
                    default_max=6,
                    is_quantized=self._is_quantized,
                    scope='new_h_%d/act_quant' % k)

                out_bottleneck.append(bottleneck)
                out_c.append(new_c_act)
                out_h.append(new_h_act)

            # Since all inputs to the below concats are already quantized, we can use
            # a regular concat operation.
            new_c = tf.concat(out_c, axis=3)
            new_h = tf.concat(out_h, axis=3)

            # |bottleneck| is input to a concat with |new_h|. We must use
            # quantizable_concat() with a fixed range that matches |new_h|.
            bottleneck = lstm_utils.quantizable_concat(
                out_bottleneck,
                axis=3,
                is_training=False,
                is_quantized=self._is_quantized,
                scope='out_bottleneck/quantized_concat')

            # summary of cell output and new state
            if self._viz_gates:
                slim.summaries.add_histogram_summary(new_h, 'cell_output')
                slim.summaries.add_histogram_summary(new_c, 'cell_state')

            output = new_h
            if self._output_bottleneck:
                output = lstm_utils.quantizable_concat(
                    [new_h, bottleneck],
                    axis=3,
                    is_training=False,
                    is_quantized=self._is_quantized,
                    scope='new_output/quantized_concat')

            # reflatten state to store it
            if self._flatten_state:
                new_c = tf.reshape(new_c, [-1, self._param_count],
                                   name='lstm_c')
                new_h = tf.reshape(new_h, [-1, self._param_count],
                                   name='lstm_h')

            # Set nodes to be under raw_outputs/ name scope for tfmini export.
            with tf.name_scope(None):
                new_c = tf.identity(new_c, name='raw_outputs/lstm_c')
                new_h = tf.identity(new_h, name='raw_outputs/lstm_h')
            states_and_output = contrib_rnn.LSTMStateTuple(new_c, new_h)

            return output, states_and_output
Example #14
def make_cudnn(inputs, rnn_layer_sizes, batch_size, mode,
               dropout_keep_prob=1.0, residual_connections=False):
  """Builds a sequence of cuDNN LSTM layers from the given hyperparameters.

  Args:
    inputs: A tensor of RNN inputs.
    rnn_layer_sizes: A list of integer sizes (in units) for each layer of the
        RNN.
    batch_size: The number of examples per batch.
    mode: 'train', 'eval', or 'generate'. For 'generate',
        CudnnCompatibleLSTMCell will be used.
    dropout_keep_prob: The float probability to keep the output of any given
        sub-cell.
    residual_connections: Whether or not to use residual connections.

  Returns:
    outputs: A tensor of RNN outputs, with shape
        `[batch_size, inputs.shape[1], rnn_layer_sizes[-1]]`.
    initial_state: The initial RNN states, a tuple with length
        `len(rnn_layer_sizes)` of LSTMStateTuples.
    final_state: The final RNN states, a tuple with length
        `len(rnn_layer_sizes)` of LSTMStateTuples.
  """
  cudnn_inputs = tf.transpose(inputs, [1, 0, 2])

  if len(set(rnn_layer_sizes)) == 1 and not residual_connections:
    initial_state = tuple(
        contrib_rnn.LSTMStateTuple(
            h=tf.zeros([batch_size, num_units], dtype=tf.float32),
            c=tf.zeros([batch_size, num_units], dtype=tf.float32))
        for num_units in rnn_layer_sizes)

    if mode != 'generate':
      # We can make a single call to CudnnLSTM since all layers are the same
      # size and we aren't using residual connections.
      cudnn_initial_state = state_tuples_to_cudnn_lstm_state(initial_state)
      cell = contrib_cudnn_rnn.CudnnLSTM(
          num_layers=len(rnn_layer_sizes),
          num_units=rnn_layer_sizes[0],
          direction='unidirectional',
          dropout=1.0 - dropout_keep_prob)
      cudnn_outputs, cudnn_final_state = cell(
          cudnn_inputs, initial_state=cudnn_initial_state,
          training=mode == 'train')
      final_state = cudnn_lstm_state_to_state_tuples(cudnn_final_state)

    else:
      # At generation time we use CudnnCompatibleLSTMCell.
      cell = contrib_rnn.MultiRNNCell([
          contrib_cudnn_rnn.CudnnCompatibleLSTMCell(num_units)
          for num_units in rnn_layer_sizes
      ])
      cudnn_outputs, final_state = tf.nn.dynamic_rnn(
          cell, cudnn_inputs, initial_state=initial_state, time_major=True,
          scope='cudnn_lstm/rnn')

  else:
    # We need to make multiple calls to CudnnLSTM, keeping the initial and final
    # states at each layer.
    initial_state = []
    final_state = []

    for i in range(len(rnn_layer_sizes)):
      # If we're using residual connections and this layer is not the same size
      # as the previous layer, we need to project into the new size so the
      # (projected) input can be added to the output.
      if residual_connections:
        if i == 0 or rnn_layer_sizes[i] != rnn_layer_sizes[i - 1]:
          cudnn_inputs = contrib_layers.linear(cudnn_inputs, rnn_layer_sizes[i])

      layer_initial_state = (contrib_rnn.LSTMStateTuple(
          h=tf.zeros([batch_size, rnn_layer_sizes[i]], dtype=tf.float32),
          c=tf.zeros([batch_size, rnn_layer_sizes[i]], dtype=tf.float32)),)

      if mode != 'generate':
        cudnn_initial_state = state_tuples_to_cudnn_lstm_state(
            layer_initial_state)
        cell = contrib_cudnn_rnn.CudnnLSTM(
            num_layers=1,
            num_units=rnn_layer_sizes[i],
            direction='unidirectional',
            dropout=1.0 - dropout_keep_prob)
        cudnn_outputs, cudnn_final_state = cell(
            cudnn_inputs, initial_state=cudnn_initial_state,
            training=mode == 'train')
        layer_final_state = cudnn_lstm_state_to_state_tuples(cudnn_final_state)

      else:
        # At generation time we use CudnnCompatibleLSTMCell.
        cell = contrib_rnn.MultiRNNCell(
            [contrib_cudnn_rnn.CudnnCompatibleLSTMCell(rnn_layer_sizes[i])])
        cudnn_outputs, layer_final_state = tf.nn.dynamic_rnn(
            cell, cudnn_inputs, initial_state=layer_initial_state,
            time_major=True,
            scope='cudnn_lstm/rnn' if i == 0 else 'cudnn_lstm_%d/rnn' % i)

      if residual_connections:
        cudnn_outputs += cudnn_inputs

      cudnn_inputs = cudnn_outputs

      initial_state += layer_initial_state
      final_state += layer_final_state

  outputs = tf.transpose(cudnn_outputs, [1, 0, 2])

  return outputs, tuple(initial_state), tuple(final_state)
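The helpers state_tuples_to_cudnn_lstm_state and cudnn_lstm_state_to_state_tuples used above are not shown. A minimal sketch of the conversion they plausibly perform (an assumption, not the original code): CudnnLSTM packs all layer states into two stacked [num_layers, batch_size, num_units] tensors, while the rest of the graph works with per-layer LSTMStateTuples.

import tensorflow as tf
from tensorflow.contrib import rnn as contrib_rnn

def state_tuples_to_cudnn_lstm_state(lstm_state_tuples):
    # Stack per-layer LSTMStateTuples into the (h, c) pair CudnnLSTM expects.
    h = tf.stack([s.h for s in lstm_state_tuples])
    c = tf.stack([s.c for s in lstm_state_tuples])
    return (h, c)

def cudnn_lstm_state_to_state_tuples(cudnn_lstm_state):
    # Unstack the stacked (h, c) pair back into per-layer LSTMStateTuples.
    h, c = cudnn_lstm_state
    return tuple(
        contrib_rnn.LSTMStateTuple(h=h_i, c=c_i)
        for h_i, c_i in zip(tf.unstack(h), tf.unstack(c)))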
Example #15
lstm0 = lstm_cell()
lstm1 = lstm_cell()
lstm2 = lstm_cell()

state_size0 = lstm0.state_size
state_size1 = lstm1.state_size
state_size2 = lstm2.state_size

c0 = tf.placeholder(tf.float32, [1, state_size0.c])
h0 = tf.placeholder(tf.float32, [1, state_size0.h])
c1 = tf.placeholder(tf.float32, [1, state_size1.c])
h1 = tf.placeholder(tf.float32, [1, state_size1.h])
c2 = tf.placeholder(tf.float32, [1, state_size2.c])
h2 = tf.placeholder(tf.float32, [1, state_size2.h])

step_size = tf.shape(x)[1:2]

state_in0 = rnn.LSTMStateTuple(c0, h0)
state_in1 = rnn.LSTMStateTuple(c1, h1)
state_in2 = rnn.LSTMStateTuple(c2, h2)

outputs0, state0 = tf.nn.dynamic_rnn(lstm0,
                                     x,
                                     initial_state=state_in0,
                                     sequence_length=step_size,
                                     time_major=False,
                                     scope='rnn0')
outputs0 = tf.reshape(outputs0, [1, -1, 15])

outputs1, state1 = tf.nn.dynamic_rnn(lstm1,
                                     outputs0,
                                     initial_state=state_in1,
                                     sequence_length=step_size,
                                     time_major=False,
                                     scope='rnn1')
Example #16
    def _build_layers_v2(self, input_dict, num_outputs, options):
        def spy(sequences, state_in, state_out, seq_lens):
            if len(sequences) == 1:
                return 0  # don't capture inference inputs
            # TF runs this function in an isolated context, so we have to use
            # redis to communicate back to our suite
            ray.experimental.internal_kv._internal_kv_put(
                "rnn_spy_in_{}".format(RNNSpyModel.capture_index),
                pickle.dumps({
                    "sequences": sequences,
                    "state_in": state_in,
                    "state_out": state_out,
                    "seq_lens": seq_lens
                }),
                overwrite=True)
            RNNSpyModel.capture_index += 1
            return 0

        features = input_dict["obs"]
        cell_size = 3
        last_layer = add_time_dimension(features, self.seq_lens)

        # Setup the LSTM cell
        lstm = rnn.BasicLSTMCell(cell_size, state_is_tuple=True)
        self.state_init = [
            np.zeros(lstm.state_size.c, np.float32),
            np.zeros(lstm.state_size.h, np.float32)
        ]

        # Setup LSTM inputs
        if self.state_in:
            c_in, h_in = self.state_in
        else:
            c_in = tf.placeholder(tf.float32, [None, lstm.state_size.c],
                                  name="c")
            h_in = tf.placeholder(tf.float32, [None, lstm.state_size.h],
                                  name="h")
        self.state_in = [c_in, h_in]

        # Setup LSTM outputs
        state_in = rnn.LSTMStateTuple(c_in, h_in)
        lstm_out, lstm_state = tf.nn.dynamic_rnn(lstm,
                                                 last_layer,
                                                 initial_state=state_in,
                                                 sequence_length=self.seq_lens,
                                                 time_major=False,
                                                 dtype=tf.float32)

        self.state_out = list(lstm_state)
        spy_fn = tf.py_func(spy, [
            last_layer,
            self.state_in,
            self.state_out,
            self.seq_lens,
        ],
                            tf.int64,
                            stateful=True)

        # Compute outputs
        with tf.control_dependencies([spy_fn]):
            last_layer = tf.reshape(lstm_out, [-1, cell_size])
            logits = linear(last_layer, num_outputs, "action",
                            normc_initializer(0.01))
        return logits, last_layer
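The spy above hinges on tf.py_func plus tf.control_dependencies: a side-effecting Python callback is forced to run before any op that consumes the inspected tensors. A self-contained sketch of that pattern (names here are illustrative, not from the original):

import numpy as np
import tensorflow as tf

def _spy(values):
    print("observed:", values)  # side effect only; must return the declared dtype
    return np.int64(0)

x = tf.placeholder(tf.float32, [None, 3])
spy_op = tf.py_func(_spy, [x], tf.int64, stateful=True)
with tf.control_dependencies([spy_op]):
    y = tf.identity(x * 2.0)  # evaluating y now always triggers the spy

with tf.Session() as sess:
    sess.run(y, feed_dict={x: np.ones((2, 3), np.float32)})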
Example #17
    def _build_layers(self, inputs, num_outputs, options):
        cell_size = options.get("lstm_cell_size", 256)
        use_tf100_api = (distutils.version.LooseVersion(tf.VERSION) >=
                         distutils.version.LooseVersion("1.0.0"))
        last_layer = add_time_dimension(inputs, self.seq_lens)

        # Setup the LSTM cell
        if use_tf100_api:
            lstm1 = rnn.BasicLSTMCell(cell_size, state_is_tuple=True)
            lstm2 = rnn.BasicLSTMCell(cell_size, state_is_tuple=True)
        else:
            lstm1 = rnn.rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
            lstm2 = rnn.rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
        self.state_init = [
            np.zeros(lstm1.state_size.c, np.float32),
            np.zeros(lstm1.state_size.h, np.float32),
            np.zeros(lstm2.state_size.c, np.float32),
            np.zeros(lstm2.state_size.h, np.float32)
        ]

        # Setup LSTM inputs
        if self.state_in:
            c1_in, h1_in, c2_in, h2_in = self.state_in
        else:
            c1_in = tf.placeholder(tf.float32, [None, lstm1.state_size.c],
                                   name="c1")
            h1_in = tf.placeholder(tf.float32, [None, lstm1.state_size.h],
                                   name="h1")
            c2_in = tf.placeholder(tf.float32, [None, lstm2.state_size.c],
                                   name="c2")
            h2_in = tf.placeholder(tf.float32, [None, lstm2.state_size.h],
                                   name="h2")
            self.state_in = [c1_in, h1_in, c2_in, h2_in]

        # Setup LSTM outputs
        if use_tf100_api:
            state1_in = rnn.LSTMStateTuple(c1_in, h1_in)
            state2_in = rnn.LSTMStateTuple(c2_in, h2_in)
        else:
            state1_in = rnn.rnn_cell.LSTMStateTuple(c1_in, h1_in)
            state2_in = rnn.rnn_cell.LSTMStateTuple(c2_in, h2_in)
        lstm1_out, lstm1_state = tf.nn.dynamic_rnn(
            lstm1,
            last_layer,
            initial_state=state1_in,
            sequence_length=self.seq_lens,
            time_major=False,
            dtype=tf.float32)

        with tf.variable_scope("value_function"):
            lstm2_out, lstm2_state = tf.nn.dynamic_rnn(
                lstm2,
                last_layer,
                sequence_length=self.seq_lens,
                time_major=False,
                dtype=tf.float32)

        self.value_function = tf.reshape(
            linear(tf.reshape(lstm2_out, [-1, cell_size]), 1, "vf",
                   normc_initializer(0.01)), [-1])

        self.state_out = list(lstm1_state) + list(lstm2_state)

        # Compute outputs
        last_layer = tf.reshape(lstm1_out, [-1, cell_size])
        logits = linear(last_layer, num_outputs, "action",
                        normc_initializer(0.01))

        return logits, last_layer
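The value-function LSTM sits in its own variable scope because two tf.nn.dynamic_rnn calls otherwise default to the same "rnn" scope and collide on variable creation. A self-contained sketch (sizes illustrative):

import tensorflow as tf
from tensorflow.contrib import rnn

x = tf.placeholder(tf.float32, [1, 5, 4])
out1, _ = tf.nn.dynamic_rnn(rnn.BasicLSTMCell(8), x, dtype=tf.float32)
with tf.variable_scope("value_function"):
    # without this scope the second call would fail with
    # "Variable rnn/basic_lstm_cell/kernel already exists"
    out2, _ = tf.nn.dynamic_rnn(rnn.BasicLSTMCell(8), x, dtype=tf.float32)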
Example #18
    def __init__(self, dtype, *param, fn):
        super(LSTMNet, self).__init__(dtype)
        nonlin_str = param[0]
        nonlin = getattr(tf.nn, nonlin_str)
        weight = float(param[1])

        dimension = [int(i) for i in param[2:]]

        self.input = tf.placeholder(
            dtype, shape=[None, None, dimension[0]],
            name=fn.input_names[0])  # [batch, time, dim]

        length_ = tf.placeholder(dtype, name='length')  # [batch]
        length_ = tf.cast(length_, dtype=tf.int32)
        self.seq_length = tf.reshape(length_, [-1])

        # LSTM
        cells = []
        state_size = []  # per-layer sizes of c and h (equal for an LSTM)
        recurrent_state_size = 0

        for size in dimension[1:-1]:
            cell = rnn.LSTMCell(
                size,
                state_is_tuple=True,
                initializer=tf.contrib.layers.xavier_initializer())
            # cell = rnn.LayerNormBasicLSTMCell(size, layer_norm=True)
            cells.append(cell)
            recurrent_state_size += cell.state_size.c + cell.state_size.h
            state_size.append(cell.state_size.c)
            state_size.append(cell.state_size.h)
        cell = rnn.MultiRNNCell(cells, state_is_tuple=True)

        hiddenStateDim = tf.identity(tf.constant(value=[recurrent_state_size],
                                                 dtype=tf.int32),
                                     name='h_dim')
        init_states = tf.split(tf.placeholder(
            dtype=dtype, shape=[None, recurrent_state_size], name='h_init'),
                               num_or_size_splits=state_size,
                               axis=1)

        init_state_list = []
        for i in range(len(cells)):
            init_state_list.append(
                rnn.LSTMStateTuple(init_states[2 * i], init_states[2 * i + 1]))
        init_state_tuple = tuple(init_state_list)

        # LSTM output
        LSTMOutput, final_state = tf.nn.dynamic_rnn(
            cell=cell,
            inputs=self.input,
            sequence_length=self.seq_length,
            dtype=dtype,
            initial_state=init_state_tuple)

        top = tf.reshape(LSTMOutput, shape=[-1, dimension[-2]], name='fcIn')

        with tf.name_scope('output_layer'):
            wo = tf.Variable(
                tf.random_uniform(dtype=dtype,
                                  shape=[dimension[-2], dimension[-1]],
                                  minval=-float(weight),
                                  maxval=float(weight)))
            bo = tf.Variable(
                tf.random_uniform(dtype=dtype,
                                  shape=[dimension[-1]],
                                  minval=-float(weight),
                                  maxval=float(weight)))
            top = tf.matmul(top, wo) + bo

        self.output = tf.reshape(
            top, [-1, tf.shape(self.input)[1], dimension[-1]])

        final_state_list = []
        for state_tuple in final_state:
            final_state_list.append(state_tuple.c)
            final_state_list.append(state_tuple.h)

        hiddenState = tf.concat([state for state in final_state_list],
                                axis=1,
                                name='h_state')

        self.l_param_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        self.a_param_list = self.l_param_list
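The flat recurrent state above round-trips through tf.split ('h_init') on the way in and tf.concat ('h_state') on the way out. A self-contained numpy sketch of that packing (sizes illustrative):

import numpy as np

state_size = [16, 16, 32, 32]  # c0, h0, c1, h1 widths for two LSTM layers
flat = np.arange(sum(state_size), dtype=np.float32)[None, :]
splits = np.split(flat, np.cumsum(state_size)[:-1], axis=1)
pairs = [(splits[2 * i], splits[2 * i + 1]) for i in range(len(splits) // 2)]
roundtrip = np.concatenate([s for pair in pairs for s in pair], axis=1)
assert np.array_equal(flat, roundtrip)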
Example #19
    @property
    def state_size(self):
        return (rnn.LSTMStateTuple(self._num_units, self._num_units)
                if self._state_is_tuple else 2 * self._num_units)
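state_size is what wrappers like MultiRNNCell and zero_state consult when allocating initial states. A self-contained sketch (sizes illustrative):

import tensorflow as tf
from tensorflow.contrib import rnn

cell = rnn.BasicLSTMCell(8, state_is_tuple=True)
zero = cell.zero_state(batch_size=2, dtype=tf.float32)
# zero is an LSTMStateTuple of two [2, 8] tensors, matching cell.state_size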
Example #20
def multi_dimentional_rnn(rnn_size,
                          input_data,
                          sh,
                          dims=None,
                          scope_name="layer1"):
    """Implements naive multidimentional recurent neural networks

  Args:
    rnn_size: int, the hidden units
    input_data: (num_images, height, width, depth) tensor,
    the data to process of shape
    sh: list, [heigth,width] of the windows
    dims: list, dimentions to reverse the input data
    scope_name: string,  the scope

  Returns:
    (batch,h/sh[0],w/sh[1],chanels*sh[0]*sh[1]) tensor
  """
    with vs.variable_scope("MultiDimentionalLSTMCell-" + scope_name):
        cell = MultiDimentionalLSTMCell(rnn_size)

        shape = input_data.get_shape().as_list()

        # pad if the dimensions are not a multiple of the block size
        if shape[1] % sh[0] != 0:
            offset = array_ops.zeros(
                [shape[0], sh[0] - (shape[1] % sh[0]), shape[2], shape[3]])
            input_data = array_ops.concat([input_data, offset], 1)
            shape = input_data.get_shape().as_list()
        if shape[2] % sh[1] != 0:
            offset = array_ops.zeros(
                [shape[0], shape[1], sh[1] - (shape[2] % sh[1]), shape[3]])
            input_data = array_ops.concat([input_data, offset], 2)
            shape = input_data.get_shape().as_list()

        h, w = int(shape[1] / sh[0]), int(shape[2] / sh[1])
        features = sh[1] * sh[0] * shape[3]
        batch_size = shape[0]

        lines = array_ops.split(input_data, h, axis=1)
        line_blocks = []
        for line in lines:
            line = array_ops.transpose(line, [0, 2, 3, 1])
            line = array_ops.reshape(line, [batch_size, w, features])
            line_blocks.append(line)
        x = array_ops.stack(line_blocks, axis=1)
        if dims is not None:
            x = array_ops.reverse(x, dims)
        x = array_ops.transpose(x, [1, 2, 0, 3])
        x = array_ops.reshape(x, [-1, features])
        x = array_ops.split(x, h * w, 0)

        inputs_ta = tensor_array_ops.TensorArray(dtype=dtypes.float32,
                                                 size=h * w,
                                                 name='input_ta')
        inputs_ta = inputs_ta.unstack(x)
        states_ta = tensor_array_ops.TensorArray(dtype=dtypes.float32,
                                                 size=h * w + 1,
                                                 name='state_ta',
                                                 clear_after_read=False)
        outputs_ta = tensor_array_ops.TensorArray(dtype=dtypes.float32,
                                                  size=h * w,
                                                  name='output_ta')

        states_ta = states_ta.write(
            h * w,
            rnn.LSTMStateTuple(
                array_ops.zeros([batch_size, rnn_size], dtypes.float32),
                array_ops.zeros([batch_size, rnn_size], dtypes.float32)))

        def get_index_state_up(t, w):
            """get_index_state_up"""
            return control_flow_ops.cond(
                math_ops.less_equal(array_ops.constant(w),
                                    t), lambda: t - array_ops.constant(w),
                lambda: array_ops.constant(h * w))

        def get_index_state_last(t, w):
            """get_index_state_last"""
            return control_flow_ops.cond(
                math_ops.less(array_ops.constant(0),
                              math_ops.mod(t, array_ops.constant(w))),
                lambda: t - array_ops.constant(1),
                lambda: array_ops.constant(h * w))

        time = array_ops.constant(0)

        def body(time, outputs_ta, states_ta):
            """Implements multi dimmentions lstm while_loop

      Args:
        time: int
        outputs_ta: tensor_array
        states_ta: tensor_array
      """
            constant_val = array_ops.constant(0)
            state_up = control_flow_ops.cond(
                math_ops.less_equal(array_ops.constant(w), time),
                lambda: states_ta.read(get_index_state_up(time, w)),
                lambda: states_ta.read(h * w))
            state_last = control_flow_ops.cond(
                math_ops.less(constant_val,
                              math_ops.mod(time, array_ops.constant(w))),
                lambda: states_ta.read(get_index_state_last(time, w)),
                lambda: states_ta.read(h * w))

            current_state = state_up[0], state_last[0], state_up[
                1], state_last[1]
            out, state = cell(inputs_ta.read(time), current_state)
            outputs_ta = outputs_ta.write(time, out)
            states_ta = states_ta.write(time, state)
            return time + 1, outputs_ta, states_ta

        def condition(time, outputs_ta, states_ta):
            return math_ops.less(time, array_ops.constant(h * w))

        _, outputs_ta, _ = control_flow_ops.while_loop(
            condition,
            body, [time, outputs_ta, states_ta],
            parallel_iterations=1)

        outputs = outputs_ta.stack()

        outputs = array_ops.reshape(outputs, [h, w, batch_size, rnn_size])
        outputs = array_ops.transpose(outputs, [2, 0, 1, 3])
        if dims is not None:
            outputs = array_ops.reverse(outputs, dims)

        return outputs
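A hedged usage sketch of the function above (assumes tf and MultiDimentionalLSTMCell are importable from the same module; sizes illustrative): an 8x8 single-channel batch with 2x2 windows yields a (batch, 4, 4, rnn_size) grid of outputs.

images = tf.placeholder(tf.float32, [4, 8, 8, 1])  # static batch size required
grid_out = multi_dimentional_rnn(rnn_size=16, input_data=images, sh=[2, 2])
# grid_out: (4, 4, 4, 16) -- one rnn_size vector per 2x2 block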
Example #21
    def __init__(self, ob_space, ac_space):
        # in python, "+" concatenates the lists. For Pong, it will give: [None, 210, 160, 3]. We add None because the tf.nn.conv2d function requires a 4-D tensor [batch, in_height, in_width, in_channels]
        # batch = 1 when the agent runs on the environment, batch = "number fo steps in the rollout" when called by the function process. This also are the values of all the "?" in the the comments below
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))
        # shape of x is (?, 42, 42, 1) for pong (image output is rescaled to 42x42x1 in the create_atari function of envs.py)
        ''' Unused (for the moment), required to avoid errors '''
        self.last_reward = tf.placeholder(tf.float32,
                                          shape=[None, 1],
                                          name="last_reward")
        self.last_action = tf.placeholder(
            tf.float32, shape=[None, ac_space], name="last_action"
        )  # the one-hot vector is of size ac_space = env.action_space.n
        self.local_time = tf.placeholder(tf.float32,
                                         shape=[None, 1],
                                         name="local_time")

        # x goes through CNN:
        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
        # shape of x is (?, 3, 3, 32) after CNN, for pong

        # (original comment) introduce a "fake" batch dimension of 1 after flatten so that we can do LSTM over time dim
        x = tf.expand_dims(flatten(x), [0])
        # x is a tensor of shape [1, ?, 288] = [batch_size, max_time, data]
        # shape of flatten(x) is (?, 288) for pong
        # shape of x is (1, ?, 288) for pong (after evaluation, "?" is 1 here)

        size = 256  # number of units in the LSTM cell, = output size
        if use_tf100_api:
            lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)
        else:
            lstm = rnn.rnn_cell.BasicLSTMCell(size, state_is_tuple=True)
        self.state_size = lstm.state_size
        # sequence_length parameter: [1] when the agent runs on the env,
        # the number of steps in the rollout otherwise
        step_size = tf.shape(self.x)[:1]
        self.step_size = step_size

        c_init = np.zeros((1, lstm.state_size.c), np.float32)
        h_init = np.zeros((1, lstm.state_size.h), np.float32)
        self.state_init = [c_init, h_init]
        c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
        h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
        self.state_in = [c_in, h_in]

        if use_tf100_api:
            state_in = rnn.LSTMStateTuple(c_in, h_in)
        else:
            state_in = rnn.rnn_cell.LSTMStateTuple(c_in, h_in)

        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(lstm,
                                                     x,
                                                     initial_state=state_in,
                                                     sequence_length=step_size,
                                                     time_major=False)
        # shape of lstm_outputs is (1, ?, 256), for pong

        lstm_c, lstm_h = lstm_state
        x = tf.reshape(lstm_outputs,
                       [-1, size])  # shape of x is (?, 256), for pong
        self.logits = linear(
            x, ac_space, "action", normalized_columns_initializer(
                0.01))  # shape of self.logits is (?, 6), for pong
        self.vf = tf.reshape(
            linear(x, 1, "value", normalized_columns_initializer(1.0)),
            [-1])  # reshape produces a 1-D vector here. shape of vf is (?,)
        self.state_out = [lstm_c[:1, :],
                          lstm_h[:1, :]]  # self.state_out is of type "list"
        self.sample = categorical_sample(
            self.logits, ac_space)[0, :]  # shape of self.sample is (6,)
        self.var_list = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES,
            tf.get_variable_scope().name)  # self.var_list is of type "list"
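A self-contained sketch of the state-threading pattern above: the LSTM runs one step at a time and the fetched (c, h) pair is fed back as the next initial state (names here are illustrative, not from the original).

import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn

size = 8
inp = tf.placeholder(tf.float32, [None, 4])
seq = tf.expand_dims(inp, [0])  # fake batch dimension of 1
cell = rnn.BasicLSTMCell(size, state_is_tuple=True)
c_in = tf.placeholder(tf.float32, [1, size])
h_in = tf.placeholder(tf.float32, [1, size])
out, state = tf.nn.dynamic_rnn(cell, seq,
                               initial_state=rnn.LSTMStateTuple(c_in, h_in),
                               sequence_length=tf.shape(inp)[:1],
                               time_major=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    c = np.zeros((1, size), np.float32)
    h = np.zeros((1, size), np.float32)
    for _ in range(3):  # three environment steps
        o, (c, h) = sess.run([out, state],
                             feed_dict={inp: np.ones((1, 4), np.float32),
                                        c_in: c, h_in: h})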

"""
start computational graph
"""
batchXHolder = tf.placeholder(tf.float32, [batchSize, backpropagationLength],
                              name="x_input")
batchYHolder = tf.placeholder(tf.int32, [batchSize, backpropagationLength],
                              name="y_input")

# rnn replace
#initState = tf.placeholder(tf.float32, [batchSize, stateSize], "rnn_init_state")

cellState = tf.placeholder(tf.float32, [batchSize, stateSize])
hiddenState = tf.placeholder(tf.float32, [batchSize, stateSize])
initState = rnn.LSTMStateTuple(cellState, hiddenState)

W = tf.Variable(np.random.rand(stateSize + 1, stateSize),
                dtype=tf.float32,
                name="weight1")
bias1 = tf.Variable(np.zeros((1, stateSize)), dtype=tf.float32)

W2 = tf.Variable(np.random.rand(stateSize, numClasses),
                 dtype=tf.float32,
                 name="weight2")
bias2 = tf.Variable(np.zeros((1, numClasses)), dtype=tf.float32)

tf.summary.histogram(name="weights", values=W)

# Unpack columns
inputsSeries = tf.split(axis=1,
                        num_or_size_splits=backpropagationLength,
                        value=batchXHolder)
Example #23
    def __init__(self,
                 data,
                 char_dim=100,
                 hidden_dim=256,
                 initializer='xavier',
                 rnn_cell='GRU',
                 bias_init=0,
                 rnn_class='single',
                 embedding_size=300):
        super(EmbeddingModel, self).__init__(data, initializer)

        embedding_matrix = tf.get_variable(
            name="embedding_matrix",
            initializer=self.initializer,
            shape=[self.data.vocab_size, char_dim],
            trainable=True)
        self.char_embedding = tf.nn.embedding_lookup(embedding_matrix,
                                                     self.data.word)

        if rnn_cell == 'GRU':
            cell_fn = partial(
                rnn.GRUCell,
                num_units=hidden_dim,
                kernel_initializer=self.initializer,
                bias_initializer=tf.constant_initializer(bias_init))
        elif rnn_cell == 'LSTM':
            cell_fn = partial(rnn.CoupledInputForgetGateLSTMCell,
                              num_units=hidden_dim,
                              initializer=self.initializer)
        elif rnn_cell == 'BasicRNN':
            cell_fn = partial(tf.nn.rnn_cell.BasicRNNCell,
                              num_units=hidden_dim)
        else:
            cell_fn = None

        if rnn_class == 'multi':
            lstm_cell_fw = tf.nn.rnn_cell.MultiRNNCell(
                [cell_fn() for _ in range(4)])
            lstm_cell_bw = tf.nn.rnn_cell.MultiRNNCell(
                [cell_fn() for _ in range(4)])
        else:
            lstm_cell_fw = cell_fn()
            lstm_cell_bw = cell_fn()

        init_fw_c_state = tf.get_variable("init_fw_c_state", [1, hidden_dim],
                                          tf.float32, self.initializer)
        init_fw_h_state = tf.get_variable("init_fw_h_state", [1, hidden_dim],
                                          tf.float32, self.initializer)
        fw_state = rnn.LSTMStateTuple(
            tf.tile(init_fw_c_state, [self.data.batch_size, 1]),
            tf.tile(init_fw_h_state, [self.data.batch_size, 1]))

        init_bw_c_state = tf.get_variable("init_bw_c_state", [1, hidden_dim],
                                          tf.float32, self.initializer)
        init_bw_h_state = tf.get_variable("init_bw_h_state", [1, hidden_dim],
                                          tf.float32, self.initializer)
        bw_state = rnn.LSTMStateTuple(
            tf.tile(init_bw_c_state, [self.data.batch_size, 1]),
            tf.tile(init_bw_h_state, [self.data.batch_size, 1]))
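        # The fw/bw initial states above are trainable [1, hidden_dim]
        # variables tiled across the batch, so every sequence starts from the
        # same learned state rather than from zeros.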

        self.rnn_outputs, self.rnn_final_state = tf.nn.bidirectional_dynamic_rnn(
            lstm_cell_fw, lstm_cell_bw, self.char_embedding, self.data.len,
            fw_state, bw_state, tf.float32)
        # init_fw_state, init_bw_state, tf.float32)
        # self.val_f, self.val_b = extract_axis_1(self.rnn_outputs[0],
        #                                         self.data.len - 1), \
        #                          extract_axis_1(self.rnn_outputs[1],
        #                                         np.zeros(2))
        # of, ob = o
        # of, ob0, ob1 = extract_axis_1(o[0], self.data.len - 1), extract_axis_1(o[1], np.zeros(64)), extract_axis_1(
        #     o[1], self.data.len - 1)
        # sf, sb = s

        self.fw, self.bw = self.rnn_final_state
        if rnn_class == 'multi':
            self.fw = tf.concat([t for t in self.fw], axis=1)
            self.bw = tf.concat([t for t in self.bw], axis=1)
        # self.fw_h, self.bw_h = tf.reshape(self.fw.h, [-1, 16, hidden_dim]),
        # tf.reshape(self.bw.h, [-1, 16, hidden_dim])
        self.fw_h, self.bw_h = self.fw.h, self.bw.h
        self.fc = tf.concat([self.fw_h, self.bw_h], axis=1)
        # self.fct = tf.transpose(self.fc, [1, 0, 2])
        # self.attention = tf.expand_dims(tf.transpose(
        #     layers.fully_connected(layers.flatten(self.fc), 16, tf.nn.softmax,
        #                            biases_initializer=tf.constant_initializer(args.bias_init)), [1, 0]), 2)
        self.attention = tf.expand_dims(
            tf.transpose(
                layers.fully_connected(
                    self.fc,
                    16,
                    tf.nn.softmax,
                    biases_initializer=tf.constant_initializer(bias_init)),
                [1, 0]), 2)

        output_list = []
        for i in range(16):
            with tf.variable_scope('Affine_Transform_%d' % i):
                if rnn_class == 'multi':
                    self.fc = layers.fully_connected(self.fc, 1024)
                fc2 = layers.fully_connected(
                    self.fc,
                    embedding_size,
                    activation_fn=tf.nn.tanh,
                    biases_initializer=tf.constant_initializer(bias_init))
                output_list.append(
                    layers.fully_connected(
                        fc2, embedding_size, activation_fn=None) *
                    self.attention[i])
        print(output_list[0].shape)
        self.output = tf.reduce_sum(tf.stack(output_list, 0), 0)
        print(self.output.shape)
Example #24
    def BuildNetwork(self, learningRate):
        self.dataInput = tensorflow.placeholder(dtype=tensorflow.float32,
                                                shape=[None, None, 40],
                                                name='dataInput')
        self.labelInput = tensorflow.placeholder(dtype=tensorflow.float32,
                                                 shape=None,
                                                 name='labelInput')
        self.seqInput = tensorflow.placeholder(dtype=tensorflow.int32,
                                               shape=[None],
                                               name='seqInput')

        ##########################################################################

        self.parameters['BatchSize'], self.parameters['TimeStep'], _ = \
            tensorflow.unstack(
                tensorflow.shape(input=self.dataInput, name='Parameter'))

        ##########################################################################

        with tensorflow.variable_scope('First_BLSTM'):
            self.parameters[
                'First_FW_Cell'] = tensorflow.nn.rnn_cell.MultiRNNCell(
                    cells=[
                        rnn.LSTMCell(num_units=self.hiddenNodules)
                        for _ in range(self.rnnLayers)
                    ],
                    state_is_tuple=True)
            self.parameters[
                'First_BW_Cell'] = tensorflow.nn.rnn_cell.MultiRNNCell(
                    cells=[
                        rnn.LSTMCell(num_units=self.hiddenNodules)
                        for _ in range(self.rnnLayers)
                    ],
                    state_is_tuple=True)

            self.parameters['First_Output'], self.parameters['First_FinalState'] = \
                tensorflow.nn.bidirectional_dynamic_rnn(cell_fw=self.parameters['First_FW_Cell'],
                                                        cell_bw=self.parameters['First_BW_Cell'], inputs=self.dataInput,
                                                        sequence_length=self.seqInput, dtype=tensorflow.float32)

        if self.firstAttention is None:
            self.parameters['First_FinalOutput'] = tensorflow.concat(
                [
                    self.parameters['First_FinalState'][self.rnnLayers - 1][0].h,
                    self.parameters['First_FinalState'][self.rnnLayers - 1][1].h
                ],
                axis=1)
        else:
            self.firstAttentionList = self.firstAttention(
                dataInput=self.parameters['First_Output'],
                scopeName=self.firstAttentionName,
                hiddenNoduleNumber=2 * self.hiddenNodules,
                attentionScope=self.firstAttentionScope,
                blstmFlag=True)
            self.parameters['First_FinalOutput'] = self.firstAttentionList[
                'FinalResult']

        ##########################################################################

        with tensorflow.variable_scope('Second_BLSTM'):
            self.parameters[
                'Second_FW_Cell'] = tensorflow.nn.rnn_cell.MultiRNNCell(
                    cells=[
                        rnn.LSTMCell(num_units=self.hiddenNodules)
                        for _ in range(self.rnnLayers)
                    ],
                    state_is_tuple=True)
            self.parameters[
                'Second_BW_Cell'] = tensorflow.nn.rnn_cell.MultiRNNCell(
                    cells=[
                        rnn.LSTMCell(num_units=self.hiddenNodules)
                        for _ in range(self.rnnLayers)
                    ],
                    state_is_tuple=True)

            self.parameters['Second_Output'], self.parameters['Second_FinalState'] = \
                tensorflow.nn.bidirectional_dynamic_rnn(
                    cell_fw=self.parameters['Second_FW_Cell'], cell_bw=self.parameters['Second_BW_Cell'],
                    inputs=self.parameters['First_FinalOutput'][tensorflow.newaxis, :, :],
                    dtype=tensorflow.float32)

        ##########################################################################

        if self.secondAttention is None:
            self.parameters['Decoder_InitalState'] = []
            for index in range(self.rnnLayers):
                self.parameters[
                    'Encoder_Cell_Layer%d' % index] = rnn.LSTMStateTuple(
                        c=tensorflow.concat([
                            self.parameters['Second_FinalState'][index][0].c,
                            self.parameters['Second_FinalState'][index][1].c
                        ], axis=1),
                        h=tensorflow.concat([
                            self.parameters['Second_FinalState'][index][0].h,
                            self.parameters['Second_FinalState'][index][1].h
                        ], axis=1))
                self.parameters['Decoder_InitalState'].append(
                    self.parameters['Encoder_Cell_Layer%d' % index])
            self.parameters['Decoder_InitalState_First'] = tuple(
                self.parameters['Decoder_InitalState'])
        else:
            self.attentionListSecond = self.secondAttention(
                dataInput=self.parameters['Second_Output'],
                scopeName=self.secondAttentionName,
                hiddenNoduleNumber=2 * self.hiddenNodules,
                attentionScope=self.secondAttentionScope,
                blstmFlag=True)
            self.parameters['Decoder_InitalState'] = []
            self.attentionListSecond['FinalResult'].set_shape(
                [1, 2 * self.hiddenNodules])
            self.parameters['FinalResult'] = self.attentionListSecond[
                'FinalResult']
            for index in range(self.rnnLayers):
                self.parameters[
                    'Encoder_Cell_Layer%d' % index] = rnn.LSTMStateTuple(
                        c=self.attentionListSecond['FinalResult'],
                        h=tensorflow.concat([
                            self.parameters['Second_FinalState'][index][0].h,
                            self.parameters['Second_FinalState'][index][1].h
                        ], axis=1))
                self.parameters['Decoder_InitalState'].append(
                    self.parameters['Encoder_Cell_Layer%d' % index])
            self.parameters['Decoder_InitalState_First'] = tuple(
                self.parameters['Decoder_InitalState'])

        #########################################################################

        with tensorflow.variable_scope('Decoder_First'):
            self.parameters['Decoder_Helper_First'] = seq2seq.TrainingHelper(
                inputs=self.parameters['First_FinalOutput'][
                    tensorflow.newaxis, :, :],
                sequence_length=[self.parameters['BatchSize']],
                name='Decoder_Helper_First')
            self.parameters['Decoder_FC_First'] = Dense(self.hiddenNodules * 2)
            self.parameters[
                'Decoder_Cell_First'] = tensorflow.nn.rnn_cell.MultiRNNCell(
                    cells=[
                        rnn.LSTMCell(num_units=self.hiddenNodules * 2)
                        for _ in range(self.rnnLayers)
                    ],
                    state_is_tuple=True)

            self.parameters['Decoder_First'] = seq2seq.BasicDecoder(
                cell=self.parameters['Decoder_Cell_First'],
                helper=self.parameters['Decoder_Helper_First'],
                initial_state=self.parameters['Decoder_InitalState_First'],
                output_layer=self.parameters['Decoder_FC_First'])

            self.parameters['Decoder_Logits_First'], self.parameters[
                'Decoder_FinalState_First'], self.parameters[
                    'Decoder_FinalSeq_First'] = seq2seq.dynamic_decode(
                        decoder=self.parameters['Decoder_First'])

        self.parameters['Decoder_First_Result'] = self.parameters[
            'Decoder_Logits_First'][0]

        self.parameters['Decoder_InitialState_Second_Media'] = []
        for index in range(self.rnnLayers):
            self.parameters['Decoder_Cell_Second_Layer%d' %
                            index] = rnn.LSTMStateTuple(
                                c=self.parameters['Decoder_First_Result'][0],
                                h=self.parameters['Decoder_First_Result'][0])
            self.parameters['Decoder_InitialState_Second_Media'].append(
                self.parameters['Decoder_Cell_Second_Layer%d' % index])
        self.parameters['Decoder_InitialState_Second'] = tuple(
            self.parameters['Decoder_InitialState_Second_Media'])

        with tensorflow.variable_scope('Decoder_Second'):
            self.parameters['Decoder_Helper_Second'] = seq2seq.TrainingHelper(
                inputs=self.dataInput,
                sequence_length=self.seqInput,
                name='Decoder_Helper_Second')
            self.parameters['Decoder_FC_Second'] = Dense(40)
            self.parameters[
                'Decoder_Cell_Second'] = tensorflow.nn.rnn_cell.MultiRNNCell(
                    cells=[
                        rnn.LSTMCell(num_units=self.hiddenNodules * 2)
                        for _ in range(self.rnnLayers)
                    ],
                    state_is_tuple=True)

            self.parameters['Decoder_Second'] = seq2seq.BasicDecoder(
                cell=self.parameters['Decoder_Cell_Second'],
                helper=self.parameters['Decoder_Helper_Second'],
                initial_state=self.parameters['Decoder_InitialState_Second'],
                output_layer=self.parameters['Decoder_FC_Second'])

            self.parameters['Decoder_Logits_Second'], self.parameters[
                'Decoder_FinalState_Second'], self.parameters[
                    'Decoder_FinalSeq_Second'] = seq2seq.dynamic_decode(
                        decoder=self.parameters['Decoder_Second'])

        with tensorflow.variable_scope('LossScope'):
            if self.lossType == 'frame':
                self.parameters['Loss'] = tensorflow.losses.absolute_difference(
                    labels=self.dataInput,
                    predictions=self.parameters['Decoder_Logits_Second'][0],
                    weights=100)
            if self.lossType == 'sentence':
                self.parameters[
                    'Loss'] = tensorflow.losses.absolute_difference(
                        labels=self.parameters['First_FinalOutput'][
                            tensorflow.newaxis, :, :],
                        predictions=self.parameters['Decoder_Logits_First'][0],
                        weights=100)
            self.train = tensorflow.train.AdamOptimizer(
                learning_rate=learningRate).minimize(self.parameters['Loss'])
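Both decoder scopes above follow the same TrainingHelper -> BasicDecoder -> dynamic_decode pipeline. A self-contained sketch of that skeleton (sizes illustrative, not the original model):

import tensorflow as tf
from tensorflow.contrib import rnn, seq2seq
from tensorflow.python.layers.core import Dense

inputs = tf.placeholder(tf.float32, [2, 5, 8])  # [batch, time, dim]
helper = seq2seq.TrainingHelper(inputs, sequence_length=[5, 5])
cell = rnn.LSTMCell(num_units=8)
decoder = seq2seq.BasicDecoder(
    cell=cell,
    helper=helper,
    initial_state=cell.zero_state(2, tf.float32),
    output_layer=Dense(8))
outputs, final_state, final_seq_len = seq2seq.dynamic_decode(decoder)
reconstruction = outputs.rnn_output  # [batch, <=5, 8]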
Example #25
    def build(self, is_train=True):
        # demo_len = self.demo_len
        if self.stack_subsequent_state:
            max_demo_len = self.max_demo_len - 1
            demo_len = self.demo_len - 1
            s_h = tf.stack([
                self.s_h[:, :, :max_demo_len, :, :, :], self.s_h[:, :,
                                                                 1:, :, :, :]
            ],
                           axis=-1)
            depth = self.depth * 2
        else:
            max_demo_len = self.max_demo_len
            demo_len = self.demo_len
            s_h = self.s_h
            depth = self.depth

        # s [bs, h, w, depth] -> feature [bs, v]
        # CNN
        def State_Encoder(s,
                          per,
                          batch_size,
                          scope='State_Encoder',
                          reuse=False):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warning(scope.name)
                _ = conv2d(s,
                           16,
                           is_train,
                           k_h=3,
                           k_w=3,
                           info=not reuse,
                           batch_norm=True,
                           name='conv1')
                _ = conv2d(_,
                           32,
                           is_train,
                           k_h=3,
                           k_w=3,
                           info=not reuse,
                           batch_norm=True,
                           name='conv2')
                _ = conv2d(_,
                           48,
                           is_train,
                           k_h=3,
                           k_w=3,
                           info=not reuse,
                           batch_norm=True,
                           name='conv3')
                if self.pixel_input:
                    _ = conv2d(_,
                               48,
                               is_train,
                               k_h=3,
                               k_w=3,
                               info=not reuse,
                               batch_norm=True,
                               name='conv4')
                    _ = conv2d(_,
                               48,
                               is_train,
                               k_h=3,
                               k_w=3,
                               info=not reuse,
                               batch_norm=True,
                               name='conv5')
                state_feature = tf.reshape(_, [batch_size, -1])
                if self.state_encoder_fc:
                    state_feature = fc(state_feature,
                                       512,
                                       is_train,
                                       info=not reuse,
                                       name='fc1')
                    state_feature = fc(state_feature,
                                       512,
                                       is_train,
                                       info=not reuse,
                                       name='fc2')
                state_feature = tf.concat([state_feature, per], axis=-1)
                if not reuse:
                    log.info('concat feature {}'.format(state_feature))
                return state_feature

        # s_h [bs, t, h, w, depth] -> feature [bs, v]
        # LSTM
        def Demo_Encoder(s_h,
                         per,
                         seq_lengths,
                         scope='Demo_Encoder',
                         reuse=False):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warning(scope.name)
                state_features = tf.reshape(
                    State_Encoder(tf.reshape(s_h, [-1, self.h, self.w, depth]),
                                  tf.reshape(per, [-1, self.per_dim]),
                                  self.batch_size * max_demo_len,
                                  reuse=reuse),
                    [self.batch_size, max_demo_len, -1])
                if self.encoder_rnn_type == 'bilstm':
                    fcell = rnn.BasicLSTMCell(num_units=math.ceil(
                        self.num_lstm_cell_units),
                                              state_is_tuple=True)
                    bcell = rnn.BasicLSTMCell(num_units=math.floor(
                        self.num_lstm_cell_units),
                                              state_is_tuple=True)
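                    # Forward and backward outputs and states are summed (not
                    # concatenated) below, keeping the feature width equal to
                    # num_lstm_cell_units.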
                    new_h, cell_state = tf.nn.bidirectional_dynamic_rnn(
                        fcell,
                        bcell,
                        state_features,
                        sequence_length=seq_lengths,
                        dtype=tf.float32)
                    new_h = tf.reduce_sum(tf.stack(new_h, axis=2), axis=2)
                    cell_state = rnn.LSTMStateTuple(
                        tf.reduce_sum(tf.stack([cs.c for cs in cell_state],
                                               axis=1),
                                      axis=1),
                        tf.reduce_sum(tf.stack([cs.h for cs in cell_state],
                                               axis=1),
                                      axis=1))
                elif self.encoder_rnn_type == 'lstm':
                    cell = rnn.BasicLSTMCell(
                        num_units=self.num_lstm_cell_units,
                        state_is_tuple=True)
                    new_h, cell_state = tf.nn.dynamic_rnn(
                        cell=cell,
                        dtype=tf.float32,
                        sequence_length=seq_lengths,
                        inputs=state_features)
                elif self.encoder_rnn_type == 'rnn':
                    cell = rnn.BasicRNNCell(num_units=self.num_lstm_cell_units)
                    new_h, cell_state = tf.nn.dynamic_rnn(
                        cell=cell,
                        dtype=tf.float32,
                        sequence_length=seq_lengths,
                        inputs=state_features)
                elif self.encoder_rnn_type == 'gru':
                    cell = rnn.GRUCell(num_units=self.num_lstm_cell_units)
                    new_h, cell_state = tf.nn.dynamic_rnn(
                        cell=cell,
                        dtype=tf.float32,
                        sequence_length=seq_lengths,
                        inputs=state_features)
                else:
                    raise ValueError('Unknown encoder rnn type')

                if self.concat_state_feature_direct_prediction:
                    all_states = tf.concat([new_h, state_features], axis=-1)
                else:
                    all_states = new_h
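                # NOTE: cell_state.h / cell_state.c in the return below assume
                # an LSTM-style state tuple; the 'rnn' and 'gru' branches
                # return a plain state tensor and would need cell_state itself.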
                return all_states, cell_state.h, cell_state.c

        # program token [bs, len] -> embedded tokens [len] list of [bs, dim]
        # tensors
        # Embedding
        def Token_Embedding(token_dim,
                            embedding_dim,
                            scope='Token_Embedding',
                            reuse=False):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warning(scope.name)
                # We use token_dim + 1 so the extra index can serve as the
                # start token <s>.
                embedding_map = tf.get_variable(
                    name="embedding_map",
                    shape=[token_dim + 1, embedding_dim],
                    initializer=tf.random_uniform_initializer(minval=-0.01,
                                                              maxval=0.01))

                def embedding_lookup(t):
                    embedding = tf.nn.embedding_lookup(embedding_map, t)
                    return embedding

                return embedding_lookup

        # program token feature [bs, u] -> program token [bs, dim_program_token]
        # MLP
        def Token_Decoder(f, token_dim, scope='Token_Decoder', reuse=False):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warning(scope.name)
                _ = fc(f,
                       token_dim,
                       is_train,
                       info=not reuse,
                       batch_norm=False,
                       activation_fn=None,
                       name='fc1')
                return _

        # Input {{{
        # =========
        # test_k list of [bs, ac, max_demo_len - 1] tensor
        self.gt_test_actions_onehot = [
            single_test_a_h for single_test_a_h in tf.unstack(
                tf.transpose(self.test_a_h, [0, 1, 3, 2]), axis=1)
        ]
        # test_k list of [bs, max_demo_len - 1] tensor
        self.gt_test_actions_tokens = [
            single_test_a_h_token
            for single_test_a_h_token in tf.unstack(self.test_a_h_tokens,
                                                    axis=1)
        ]

        # a_h = self.a_h
        # }}}

        # Graph {{{
        # =========
        # Demo -> Demo feature
        demo_h_list = []
        demo_c_list = []
        demo_feature_history_list = []
        for i in range(self.k):
            demo_feature_history, demo_h, demo_c = \
                Demo_Encoder(s_h[:, i], self.per[:, i],
                             demo_len[:, i], reuse=i > 0)
            demo_feature_history_list.append(demo_feature_history)
            demo_h_list.append(demo_h)
            demo_c_list.append(demo_c)
            if i == 0: log.warning(demo_feature_history)
        demo_h_stack = tf.stack(demo_h_list, axis=1)  # [bs, k, v]
        demo_c_stack = tf.stack(demo_c_list, axis=1)  # [bs, k, v]
        if self.demo_aggregation == 'concat':  # [bs, k*v]
            demo_h_summary = tf.reshape(demo_h_stack, [self.batch_size, -1])
            demo_c_summary = tf.reshape(demo_c_stack, [self.batch_size, -1])
        elif self.demo_aggregation == 'avgpool':  # [bs, v]
            demo_h_summary = tf.reduce_mean(demo_h_stack, axis=1)
            demo_c_summary = tf.reduce_mean(demo_c_stack, axis=1)
        elif self.demo_aggregation == 'maxpool':  # [bs, v]
            demo_h_summary = tf.squeeze(tf.layers.max_pooling1d(
                demo_h_stack,
                demo_h_stack.get_shape().as_list()[1],
                1,
                padding='valid',
                data_format='channels_last'),
                                        axis=1)
            demo_c_summary = tf.squeeze(tf.layers.max_pooling1d(
                demo_c_stack,
                demo_c_stack.get_shape().as_list()[1],
                1,
                padding='valid',
                data_format='channels_last'),
                                        axis=1)
        else:
            raise ValueError('Unknown demo aggregation type')

        def get_DecoderHelper(embedding_lookup,
                              seq_lengths,
                              token_dim,
                              gt_tokens=None,
                              unroll_type='teacher_forcing'):
            if unroll_type == 'teacher_forcing':
                if gt_tokens is None:
                    raise ValueError('teacher_forcing requires gt_tokens')
                embedding = embedding_lookup(gt_tokens)
                helper = seq2seq.TrainingHelper(embedding, seq_lengths)
            elif unroll_type == 'scheduled_sampling':
                if gt_tokens is None:
                    raise ValueError('scheduled_sampling requires gt_tokens')
                embedding = embedding_lookup(gt_tokens)
                # sample_prob 1.0: always sample from ground truth
                # sample_prob 0.0: always sample from prediction
                helper = seq2seq.ScheduledEmbeddingTrainingHelper(
                    embedding,
                    seq_lengths,
                    embedding_lookup,
                    1.0 - self.sample_prob,
                    seed=None,
                    scheduling_seed=None)
            elif unroll_type == 'greedy':
                # during evaluation, we perform greedy unrolling.
                start_token = tf.zeros([self.batch_size],
                                       dtype=tf.int32) + token_dim
                end_token = token_dim - 1
                helper = seq2seq.GreedyEmbeddingHelper(embedding_lookup,
                                                       start_token, end_token)
            else:
                raise ValueError('Unknown unroll type')
            return helper

        def LSTM_Decoder(visual_h,
                         visual_c,
                         gt_tokens,
                         lstm_cell,
                         unroll_type='teacher_forcing',
                         seq_lengths=None,
                         max_sequence_len=10,
                         token_dim=50,
                         embedding_dim=128,
                         init_state=None,
                         scope='LSTM_Decoder',
                         reuse=False):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warning(scope.name)
                # augmented embedding with token_dim + 1 (<s>) token
                s_token = tf.zeros([self.batch_size, 1],
                                   dtype=gt_tokens.dtype) + token_dim + 1
                gt_tokens = tf.concat([s_token, gt_tokens[:, :-1]], axis=1)

                embedding_lookup = Token_Embedding(token_dim,
                                                   embedding_dim,
                                                   reuse=reuse)

                # dynamic_decode implementation
                helper = get_DecoderHelper(embedding_lookup,
                                           seq_lengths,
                                           token_dim,
                                           gt_tokens=gt_tokens,
                                           unroll_type=unroll_type)
                projection_layer = layers_core.Dense(token_dim,
                                                     use_bias=False,
                                                     name="output_projection")
                if init_state is None:
                    init_state = rnn.LSTMStateTuple(visual_c, visual_h)
                decoder = seq2seq.BasicDecoder(lstm_cell,
                                               helper,
                                               init_state,
                                               output_layer=projection_layer)
                # pred_length [batch_size]: length of the predicted sequence
                outputs, final_context_state, pred_length = seq2seq.dynamic_decode(
                    decoder,
                    maximum_iterations=max_sequence_len,
                    scope='dynamic_decoder')
                pred_length = tf.expand_dims(pred_length, axis=1)

                # as dynamic_decode generates variable-length output, we pad it
                # dynamically to match the input embedding shape.
                rnn_output = outputs.rnn_output
                sz = tf.shape(rnn_output)
                dynamic_pad = tf.zeros(
                    [sz[0], max_sequence_len - sz[1], sz[2]],
                    dtype=rnn_output.dtype)
                pred_seq = tf.concat([rnn_output, dynamic_pad], axis=1)
                seq_shape = pred_seq.get_shape().as_list()
                pred_seq.set_shape(
                    [seq_shape[0], max_sequence_len, seq_shape[2]])

                pred_seq = tf.transpose(
                    tf.reshape(pred_seq,
                               [self.batch_size, max_sequence_len, -1]),
                    [0, 2, 1])  # make_dim: [bs, n, len]
                return pred_seq, pred_length, final_context_state

        if self.scheduled_sampling:
            train_unroll_type = 'scheduled_sampling'
        else:
            train_unroll_type = 'teacher_forcing'

        # Attn
        lstm_cell = rnn.BasicLSTMCell(num_units=self.num_lstm_cell_units)
        attn_mechanisms = []
        for j in range(self.test_k):
            attn_mechanisms_k = []
            for i in range(self.k):
                with tf.variable_scope('AttnMechanism', reuse=i > 0 or j > 0):
                    if self.attn_type == 'luong':
                        attn_mechanism = seq2seq.LuongAttention(
                            self.num_lstm_cell_units,
                            demo_feature_history_list[i],
                            memory_sequence_length=self.demo_len[:, i])
                    elif self.attn_type == 'luong_monotonic':
                        attn_mechanism = seq2seq.LuongMonotonicAttention(
                            self.num_lstm_cell_units,
                            demo_feature_history_list[i],
                            memory_sequence_length=self.demo_len[:, i])
                    else:
                        raise ValueError('Unknown attention type')
                attn_mechanisms_k.append(attn_mechanism)
            attn_mechanisms.append(attn_mechanisms_k)

        self.attn_cells = []
        for i in range(self.test_k):
            attn_cell = PoolingAttentionWrapper(
                lstm_cell,
                attn_mechanisms[i],
                attention_layer_size=self.num_lstm_cell_units,
                alignment_history=True,
                output_attention=True,
                pooling='avgpool')
            self.attn_cells.append(attn_cell)

        # Demo + current state -> action
        self.pred_action_list = []
        self.greedy_pred_action_list = []
        self.greedy_pred_action_len_list = []
        for i in range(self.test_k):
            attn_init_state = self.attn_cells[i].zero_state(
                self.batch_size,
                dtype=tf.float32).clone(cell_state=rnn.LSTMStateTuple(
                    demo_h_summary, demo_c_summary))
            embedding_dim = demo_h_summary.get_shape().as_list()[-1]
            pred_action, pred_action_len, action_state = LSTM_Decoder(
                demo_h_summary,
                demo_c_summary,
                self.gt_test_actions_tokens[i],
                self.attn_cells[i],
                unroll_type=train_unroll_type,
                seq_lengths=self.test_action_len[:, i],
                max_sequence_len=self.max_action_len,
                token_dim=self.action_space,
                embedding_dim=embedding_dim,
                init_state=attn_init_state,
                scope='Manipulation',
                reuse=i > 0)
            assert pred_action.get_shape() == \
                self.gt_test_actions_onehot[i].get_shape()
            self.pred_action_list.append(pred_action)

            greedy_attn_init_state = self.attn_cells[i].zero_state(
                self.batch_size,
                dtype=tf.float32).clone(cell_state=rnn.LSTMStateTuple(
                    demo_h_summary, demo_c_summary))
            greedy_pred_action, greedy_pred_action_len, \
                greedy_action_state = LSTM_Decoder(
                    demo_h_summary, demo_c_summary, self.gt_test_actions_tokens[i],
                    self.attn_cells[i], unroll_type='greedy',
                    seq_lengths=self.test_action_len[:, i],
                    max_sequence_len=self.max_action_len,
                    token_dim=self.action_space,
                    embedding_dim=embedding_dim,
                    init_state=greedy_attn_init_state,
                    scope='Manipulation', reuse=True
                )
            assert greedy_pred_action.get_shape() == \
                self.gt_test_actions_onehot[i].get_shape()
            self.greedy_pred_action_list.append(greedy_pred_action)
            self.greedy_pred_action_len_list.append(greedy_pred_action_len)
        # }}}

        # Build losses {{{
        # ================
        def Sequence_Loss(pred_sequence,
                          gt_sequence,
                          pred_sequence_lengths=None,
                          gt_sequence_lengths=None,
                          max_sequence_len=None,
                          token_dim=None,
                          sequence_type='program',
                          name=None):
            with tf.name_scope(name, "SequenceOutput") as scope:
                log.warning(scope)
                max_sequence_lengths = tf.maximum(pred_sequence_lengths,
                                                  gt_sequence_lengths)
                min_sequence_lengths = tf.minimum(pred_sequence_lengths,
                                                  gt_sequence_lengths)
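                # gt_mask normalizes the loss over ground-truth tokens;
                # min/max masks let token accuracy penalize both missing
                # and spurious predicted tokens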
                gt_mask = tf.sequence_mask(gt_sequence_lengths[:, 0],
                                           max_sequence_len,
                                           dtype=tf.float32,
                                           name='mask')
                max_mask = tf.sequence_mask(max_sequence_lengths[:, 0],
                                            max_sequence_len,
                                            dtype=tf.float32,
                                            name='max_mask')
                min_mask = tf.sequence_mask(min_sequence_lengths[:, 0],
                                            max_sequence_len,
                                            dtype=tf.float32,
                                            name='min_mask')
                labels = tf.reshape(
                    tf.transpose(gt_sequence, [0, 2, 1]),
                    [self.batch_size * max_sequence_len, token_dim])
                logits = tf.reshape(
                    tf.transpose(pred_sequence, [0, 2, 1]),
                    [self.batch_size * max_sequence_len, token_dim])
                # cross_entropy shape: [bs * max_sequence_len]
                cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
                    labels=labels, logits=logits)
                # normalize loss
                loss = tf.reduce_sum(cross_entropy * tf.reshape(gt_mask, [-1])) / \
                    tf.reduce_sum(gt_mask)
                output = [gt_sequence, pred_sequence]

                label_argmax = tf.argmax(labels, axis=-1)
                logit_argmax = tf.argmax(logits, axis=-1)

                # accuracy
                # token level acc
                correct_token_pred = tf.reduce_sum(
                    tf.to_float(tf.equal(label_argmax, logit_argmax)) *
                    tf.reshape(min_mask, [-1]))
                token_accuracy = correct_token_pred / tf.reduce_sum(max_mask)
                # seq level acc
                seq_equal = tf.equal(
                    tf.reshape(
                        tf.to_float(label_argmax) * tf.reshape(gt_mask, [-1]),
                        [self.batch_size, -1]),
                    tf.reshape(
                        tf.to_float(logit_argmax) * tf.reshape(gt_mask, [-1]),
                        [self.batch_size, -1]))
                len_equal = tf.equal(gt_sequence_lengths[:, 0],
                                     pred_sequence_lengths[:, 0])
                is_same_seq = tf.logical_and(tf.reduce_all(seq_equal, axis=-1),
                                             len_equal)
                seq_accuracy = tf.reduce_sum(
                    tf.to_float(is_same_seq)) / self.batch_size

                pred_tokens = None
                syntax_accuracy = None
                is_correct_syntax = None

                output_stat = SequenceLossOutput(
                    mask=gt_mask,
                    loss=loss,
                    output=output,
                    token_acc=token_accuracy,
                    seq_acc=seq_accuracy,
                    syntax_acc=syntax_accuracy,
                    is_correct_syntax=is_correct_syntax,
                    pred_tokens=pred_tokens,
                    is_same_seq=is_same_seq,
                )

                return output_stat

        self.loss = 0
        self.output = []

        # Manipulation network loss
        avg_action_loss = 0
        avg_action_token_acc = 0
        avg_action_seq_acc = 0
        seq_match = []
        for i in range(self.test_k):
            action_stat = Sequence_Loss(
                self.pred_action_list[i],
                self.gt_test_actions_onehot[i],
                pred_sequence_lengths=tf.expand_dims(
                    self.test_action_len[:, i], axis=1),
                gt_sequence_lengths=tf.expand_dims(
                    self.test_action_len[:, i], axis=1),
                max_sequence_len=self.max_action_len,
                token_dim=self.action_space,
                sequence_type='action',
                name="Action_Sequence_Loss_{}".format(i))
            avg_action_loss += action_stat.loss
            avg_action_token_acc += action_stat.token_acc
            avg_action_seq_acc += action_stat.seq_acc
            seq_match.append(action_stat.is_same_seq)
            self.output.extend(action_stat.output)
        avg_action_loss /= self.test_k
        avg_action_token_acc /= self.test_k
        avg_action_seq_acc /= self.test_k
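        # 'all' accuracy: an example counts as correct only when the
        # predicted sequences for all k demos match their ground truths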
        avg_action_seq_all_acc = tf.reduce_sum(
            tf.to_float(tf.reduce_all(tf.stack(seq_match, axis=1),
                                      axis=-1))) / self.batch_size
        self.loss += avg_action_loss

        greedy_avg_action_loss = 0
        greedy_avg_action_token_acc = 0
        greedy_avg_action_seq_acc = 0
        greedy_seq_match = []
        for i in range(self.test_k):
            greedy_action_stat = Sequence_Loss(
                self.greedy_pred_action_list[i],
                self.gt_test_actions_onehot[i],
                pred_sequence_lengths=self.greedy_pred_action_len_list[i],
                gt_sequence_lengths=tf.expand_dims(self.test_action_len[:, i],
                                                   axis=1),
                max_sequence_len=self.max_action_len,
                token_dim=self.action_space,
                sequence_type='action',
                name="Greedy_Action_Sequence_Loss_{}".format(i))
            greedy_avg_action_loss += greedy_action_stat.loss
            greedy_avg_action_token_acc += greedy_action_stat.token_acc
            greedy_avg_action_seq_acc += greedy_action_stat.seq_acc
            greedy_seq_match.append(greedy_action_stat.is_same_seq)
        greedy_avg_action_loss /= self.test_k
        greedy_avg_action_token_acc /= self.test_k
        greedy_avg_action_seq_acc /= self.test_k
        greedy_avg_action_seq_all_acc = tf.reduce_sum(
            tf.to_float(
                tf.reduce_all(tf.stack(greedy_seq_match, axis=1),
                              axis=-1))) / self.batch_size
        # }}}

        # Evaluation {{{
        # ==============
        self.report_loss = {}
        self.report_accuracy = {}
        self.report_hist = {}
        self.report_loss['avg_action_loss'] = avg_action_loss
        self.report_accuracy['avg_action_token_acc'] = avg_action_token_acc
        self.report_accuracy['avg_action_seq_acc'] = avg_action_seq_acc
        self.report_accuracy['avg_action_seq_all_acc'] = avg_action_seq_all_acc
        self.report_loss['greedy_avg_action_loss'] = greedy_avg_action_loss
        self.report_accuracy['greedy_avg_action_token_acc'] = \
            greedy_avg_action_token_acc
        self.report_accuracy['greedy_avg_action_seq_acc'] = \
            greedy_avg_action_seq_acc
        self.report_accuracy['greedy_avg_action_seq_all_acc'] = \
            greedy_avg_action_seq_all_acc
        self.report_output = []
        # dummy fetch values for evaler
        self.ground_truth_program = self.program
        self.pred_program = []
        self.greedy_pred_program = []
        self.greedy_pred_program_len = []
        self.greedy_program_is_correct_syntax = []
        self.program_is_correct_syntax = []
        self.program_num_execution_correct = []
        self.program_is_correct_execution = []
        self.greedy_num_execution_correct = []
        self.greedy_is_correct_execution = []


        # TensorBoard Summary {{{
        # =======================
        # Loss
        def train_test_scalar_summary(name, value):
            tf.summary.scalar(name, value, collections=['train'])
            tf.summary.scalar("test_{}".format(name),
                              value,
                              collections=['test'])

        train_test_scalar_summary("loss/loss", self.loss)

        if self.scheduled_sampling:
            train_test_scalar_summary("loss/sample_prob", self.sample_prob)
        train_test_scalar_summary("loss/avg_action_loss", avg_action_loss)
        train_test_scalar_summary("loss/avg_action_token_acc",
                                  avg_action_token_acc)
        train_test_scalar_summary("loss/avg_action_seq_acc",
                                  avg_action_seq_acc)
        train_test_scalar_summary("loss/avg_action_seq_all_acc",
                                  avg_action_seq_all_acc)
        tf.summary.scalar("test_loss/greedy_avg_action_loss",
                          greedy_avg_action_loss,
                          collections=['test'])
        tf.summary.scalar("test_loss/greedy_avg_action_token_acc",
                          greedy_avg_action_token_acc,
                          collections=['test'])
        tf.summary.scalar("test_loss/greedy_avg_action_seq_acc",
                          greedy_avg_action_seq_acc,
                          collections=['test'])
        tf.summary.scalar("test_loss/greedy_avg_action_seq_all_acc",
                          greedy_avg_action_seq_all_acc,
                          collections=['test'])

        def program2str(p_token, p_len):
            program_str = []
            for i in range(self.batch_size):
                program_str.append(
                    self.vocab.intseq2str(
                        np.argmax(p_token[i], axis=0)[:p_len[i, 0]]))
            program_str = np.stack(program_str, axis=0)
            return program_str
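        # tf.py_func wraps the numpy-side decoding above so token sequences
        # can be logged as human-readable text summaries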

        tf.summary.text('program_id/id',
                        self.program_id,
                        collections=['train'])
        tf.summary.text('program/ground_truth',
                        tf.py_func(program2str,
                                   [self.program, self.program_len],
                                   tf.string),
                        collections=['train'])
        tf.summary.text('test_program_id/id',
                        self.program_id,
                        collections=['test'])
        tf.summary.text('test_program/ground_truth',
                        tf.py_func(program2str,
                                   [self.program, self.program_len],
                                   tf.string),
                        collections=['test'])

        # Visualization
        def visualized_map(pred, gt):
            dummy = tf.expand_dims(tf.zeros_like(pred), axis=-1)
            pred = tf.expand_dims(tf.nn.softmax(pred, axis=1), axis=-1)
            gt = tf.expand_dims(gt, axis=-1)
            return tf.concat([pred, gt, dummy], axis=-1)
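        # visualized_map channel encoding: prediction softmax in red,
        # ground truth in green, blue left at zero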

        # Attention visualization
        def build_alignments(alignment_history):
            alignments = []
            for i in alignment_history:
                align = tf.expand_dims(tf.transpose(i.stack(), [1, 2, 0]),
                                       -1) * 255
                align_shape = tf.shape(align)
                alignments.append(align)
                alignments.append(
                    tf.zeros([align_shape[0], 1, align_shape[2], 1],
                             dtype=tf.float32) + 255)
            alignments_image = tf.reshape(
                tf.tile(tf.concat(alignments, axis=1), [1, 1, 1, self.k]),
                [align_shape[0], -1, align_shape[2] * self.k, 1])
            return alignments_image
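        # each alignment matrix (demo time steps x decode steps) gets a
        # white separator row, and columns are repeated k times so the
        # image is wide enough to inspect in TensorBoard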

        alignments = build_alignments(action_state.alignment_history)
        tf.summary.image("attn", alignments, collections=['train'])
        tf.summary.image("test_attn", alignments, collections=['test'])

        greedy_alignments = build_alignments(
            greedy_action_state.alignment_history)
        tf.summary.image("test_greedy_attn",
                         greedy_alignments,
                         collections=['test'])

        if self.pixel_input:
            tf.summary.image("state/initial_state",
                             self.s_h[:, 0, 0, :, :, :],
                             collections=['train'])
            tf.summary.image("state/demo_program_1",
                             self.s_h[0, 0, :, :, :, :],
                             max_outputs=self.max_demo_len,
                             collections=['train'])

        i = 0  # show only the first demo (among k)
        tf.summary.image("visualized_action/k_{}".format(i),
                         visualized_map(self.pred_action_list[i],
                                        self.gt_test_actions_onehot[i]),
                         collections=['train'])
        tf.summary.image("test_visualized_action/k_{}".format(i),
                         visualized_map(self.pred_action_list[i],
                                        self.gt_test_actions_onehot[i]),
                         collections=['test'])
        tf.summary.image("test_visualized_greedy_action/k_{}".format(i),
                         visualized_map(self.greedy_pred_action_list[i],
                                        self.gt_test_actions_onehot[i]),
                         collections=['test'])

        # Visualize demo features
        if self.debug:
            i = 0  # show only the first demo's features
            tf.summary.image("debug/demo_feature_history/k_{}".format(i),
                             tf.image.grayscale_to_rgb(
                                 tf.expand_dims(demo_feature_history_list[i],
                                                -1)),
                             collections=['train'])
        # }}}
        print('\033[93mSuccessfully loaded the model.\033[0m')
Example #26
    def BuildNetwork(self, learningRate):
        #############################################################################
        # Input Data
        #############################################################################

        self.dataInput = tensorflow.placeholder(
            dtype=tensorflow.float32,
            shape=[None, None, self.featureShape],
            name='dataInput')
        self.dataLenInput = tensorflow.placeholder(dtype=tensorflow.int32,
                                                   shape=[None],
                                                   name='dataLenInput')

        self.labelInputSR = tensorflow.placeholder(dtype=tensorflow.int32,
                                                   shape=[None, None],
                                                   name='labelInput')
        self.labelLenInputSR = tensorflow.placeholder(dtype=tensorflow.int32,
                                                      shape=[None],
                                                      name='labelLenInput')

        self.labelInputDR = tensorflow.placeholder(dtype=tensorflow.float32,
                                                   shape=None,
                                                   name='labelInputDR')

        self.sentenceDataInput = tensorflow.placeholder(
            dtype=tensorflow.float32,
            shape=[None, 2 * self.hiddenNodules],
            name='sentenceDataInput')
        self.speechDataInput = tensorflow.placeholder(
            dtype=tensorflow.float32,
            shape=[1, 2 * self.hiddenNodules],
            name='speechDataInput')

        #############################################################################
        # Batch Parameters
        #############################################################################

        self.parameters['BatchSize'], self.parameters[
            'TimeStep'], _ = tensorflow.unstack(
                tensorflow.shape(input=self.dataInput, name='DataShape'))
        self.parameters['LabelStep'] = tensorflow.shape(
            input=self.labelInputSR, name='LabelShape')[1]

        ###################################################################################################
        # Encoder
        ###################################################################################################

        with tensorflow.variable_scope('Encoder'):
            self.parameters[
                'Encoder_Cell_Forward'] = tensorflow.nn.rnn_cell.MultiRNNCell(
                    cells=[
                        rnn.LSTMCell(num_units=self.hiddenNodules)
                        for _ in range(self.rnnLayers)
                    ],
                    state_is_tuple=True)
            self.parameters[
                'Encoder_Cell_Backward'] = tensorflow.nn.rnn_cell.MultiRNNCell(
                    cells=[
                        rnn.LSTMCell(num_units=self.hiddenNodules)
                        for _ in range(self.rnnLayers)
                    ],
                    state_is_tuple=True)

            self.parameters['Encoder_Output'], self.parameters['Encoder_FinalState'] = \
                tensorflow.nn.bidirectional_dynamic_rnn(
                    cell_fw=self.parameters['Encoder_Cell_Forward'], cell_bw=self.parameters['Encoder_Cell_Backward'],
                    inputs=self.dataInput, sequence_length=self.dataLenInput, dtype=tensorflow.float32)

        self.attentionList = self.firstAttention(
            dataInput=self.parameters['Encoder_Output'],
            scopeName=self.firstAttentionName,
            hiddenNoduleNumber=2 * self.hiddenNodules,
            attentionScope=self.firstAttentionScope,
            blstmFlag=True)
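        # bridge encoder to decoder: for every layer, c is taken from the
        # attention summary over encoder outputs and h from the concatenated
        # forward/backward final hidden states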
        self.parameters['Decoder_InitialState'] = []
        for index in range(self.rnnLayers):
            self.parameters[
                'Encoder_Cell_Layer%d' % index] = rnn.LSTMStateTuple(
                    c=self.attentionList['FinalResult'],
                    h=tensorflow.concat([
                        self.parameters['Encoder_FinalState'][index][0].h,
                        self.parameters['Encoder_FinalState'][index][1].h
                    ],
                                        axis=1))
            self.parameters['Decoder_InitialState'].append(
                self.parameters['Encoder_Cell_Layer%d' % index])
        self.parameters['Decoder_InitialState'] = tuple(
            self.parameters['Decoder_InitialState'])

        #############################################################################
        # Decoder Label Pretreatment
        #############################################################################

        self.parameters['DecoderEmbedding'] = tensorflow.Variable(
            initial_value=tensorflow.truncated_normal(
                shape=[VOCABULAR, self.hiddenNodules * 2],
                stddev=0.1,
                name='DecoderEmbedding'))

        self.parameters[
            'DecoderEmbeddingResult'] = tensorflow.nn.embedding_lookup(
                params=self.parameters['DecoderEmbedding'],
                ids=self.labelInputSR,
                name='DecoderEmbeddingResult')

        #############################################################################
        # Decoder
        #############################################################################

        self.parameters['Decoder_Helper'] = seq2seq.TrainingHelper(
            inputs=self.parameters['DecoderEmbeddingResult'],
            sequence_length=self.labelLenInputSR,
            name='Decoder_Helper')
        with tensorflow.variable_scope('Decoder'):
            self.parameters['Decoder_FC'] = Dense(VOCABULAR)

            self.parameters[
                'Decoder_Cell'] = tensorflow.nn.rnn_cell.MultiRNNCell(
                    cells=[
                        rnn.LSTMCell(num_units=self.hiddenNodules * 2)
                        for _ in range(self.rnnLayers)
                    ],
                    state_is_tuple=True)

            self.parameters['Decoder'] = seq2seq.BasicDecoder(
                cell=self.parameters['Decoder_Cell'],
                helper=self.parameters['Decoder_Helper'],
                initial_state=self.parameters['Decoder_InitialState'],
                output_layer=self.parameters['Decoder_FC'])

            self.parameters['Decoder_Logits'], self.parameters[
                'Decoder_FinalState'], self.parameters[
                    'Decoder_FinalSeq'] = seq2seq.dynamic_decode(
                        decoder=self.parameters['Decoder'])

        with tensorflow.name_scope('Loss'):
            self.parameters['TargetsReshape'] = tensorflow.reshape(
                tensor=self.labelInputSR, shape=[-1], name='TargetsReshape')
            self.parameters['Decoder_Reshape'] = tensorflow.reshape(
                self.parameters['Decoder_Logits'].rnn_output, [-1, VOCABULAR],
                name='Decoder_Reshape')
            self.parameters[
                'Cost'] = tensorflow.losses.sparse_softmax_cross_entropy(
                    labels=self.parameters['TargetsReshape'],
                    logits=self.parameters['Decoder_Reshape'])
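            # note: no sequence-length mask/weights are applied here, so
            # padded label positions also contribute to this cross-entropy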

            self.trainEncoderDecoder = tensorflow.train.AdamOptimizer(
                learning_rate=learningRate).minimize(self.parameters['Cost'])

        #############################################################################
        self.DBLSTM_Structure(learningRate=learningRate)
Example #27
        def LSTM_Decoder(visual_h,
                         visual_c,
                         gt_tokens,
                         lstm_cell,
                         unroll_type='teacher_forcing',
                         seq_lengths=None,
                         max_sequence_len=10,
                         token_dim=50,
                         embedding_dim=128,
                         init_state=None,
                         scope='LSTM_Decoder',
                         reuse=False):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warning(scope.name)
                # augmented embedding with token_dim + 1 (<s>) token
                s_token = tf.zeros([self.batch_size, 1],
                                   dtype=gt_tokens.dtype) + token_dim + 1
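                # shift right: prepend <s> and drop the last ground-truth
                # token, so step t is conditioned only on tokens < t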
                gt_tokens = tf.concat([s_token, gt_tokens[:, :-1]], axis=1)

                embedding_lookup = Token_Embedding(token_dim,
                                                   embedding_dim,
                                                   reuse=reuse)

                # dynamic_decode implementation
                helper = get_DecoderHelper(embedding_lookup,
                                           seq_lengths,
                                           token_dim,
                                           gt_tokens=gt_tokens,
                                           unroll_type=unroll_type)
                projection_layer = layers_core.Dense(token_dim,
                                                     use_bias=False,
                                                     name="output_projection")
                if init_state is None:
                    init_state = rnn.LSTMStateTuple(visual_c, visual_h)
                decoder = seq2seq.BasicDecoder(lstm_cell,
                                               helper,
                                               init_state,
                                               output_layer=projection_layer)
                # pred_length [batch_size]: length of the predicted sequence
                outputs, final_context_state, pred_length = seq2seq.dynamic_decode(
                    decoder,
                    maximum_iterations=max_sequence_len,
                    scope='dynamic_decoder')
                pred_length = tf.expand_dims(pred_length, axis=1)

                # as dynamic_decode generates variable-length outputs,
                # we pad them dynamically to match the input embedding shape
                rnn_output = outputs.rnn_output
                sz = tf.shape(rnn_output)
                dynamic_pad = tf.zeros(
                    [sz[0], max_sequence_len - sz[1], sz[2]],
                    dtype=rnn_output.dtype)
                pred_seq = tf.concat([rnn_output, dynamic_pad], axis=1)
                seq_shape = pred_seq.get_shape().as_list()
                pred_seq.set_shape(
                    [seq_shape[0], max_sequence_len, seq_shape[2]])

                pred_seq = tf.transpose(
                    tf.reshape(pred_seq,
                               [self.batch_size, max_sequence_len, -1]),
                    [0, 2, 1])  # make_dim: [bs, n, len]
                return pred_seq, pred_length, final_context_state
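Example #28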
    def __init__(self, asset_num, info_num):
        action_size = asset_num + 1
        #BasicACNetwork.__init__(self, action_size, thread_index)
        with tf.variable_scope('Direct_Sharing_LSTM_ACNetwork') as vs0:
            with tf.variable_scope('placeholders') as vs:
                # s holds the information of the first (minutes_of_aday-1) steps; the last step's information is never acted on
                # s in shape [steps, len = asset_num * info_num]
                self.s = tf.placeholder(tf.float32,
                                        [None, asset_num * info_num])
                # first_price is the price of the first minute
                self.first_price = tf.placeholder(tf.float32, [1, asset_num])
                # prices of the remaining (minutes_of_aday-1) minutes of that day
                # in shape [steps, asset_num]
                self.next_price = tf.placeholder(tf.float32, [None, asset_num])

            with tf.variable_scope('LSTM1') as vs:
                # lstm1_in in shape [batch, steps, len], where batch=1
                lstm1_in = tf.expand_dims(self.s, [0])
                # lstm1_in_split[i] in shape [batch, steps, info_num], where batch=1
                self.lstm1_in_split = tf.split(lstm1_in, asset_num, axis=2)
                lstm1_cell = rnn.BasicLSTMCell(num_units=args.lstm1_unit,
                                               state_is_tuple=True)
                # asset_num inputs share the same LSTM variables,
                # but their states are independent
                # initial state: constant zeros
                self.lstm1_c_in = tf.constant(
                    0,
                    dtype=tf.float32,
                    shape=[asset_num, lstm1_cell.state_size.c])
                self.lstm1_h_in = tf.constant(
                    0,
                    dtype=tf.float32,
                    shape=[asset_num, lstm1_cell.state_size.h])
                # a list of tensors
                # correspond to different asset
                lstm1_c_in_split = tf.split(self.lstm1_c_in, asset_num, axis=0)
                lstm1_h_in_split = tf.split(self.lstm1_h_in, asset_num, axis=0)
                self.lstm1_c_split = []  # unused
                self.lstm1_h_split = []  # unused
                self.lstm1_output_split = []

                for iAsset in range(asset_num):
                    # each call to dynamic_rnn would otherwise create new
                    # variables; reuse_variables() shares them across assets
                    if iAsset > 0:
                        tf.get_variable_scope().reuse_variables()
                    c_in_i = lstm1_c_in_split[iAsset]
                    h_in_i = lstm1_h_in_split[iAsset]
                    # concat c and h into a statetuple
                    statei_tuple = rnn.LSTMStateTuple(c_in_i, h_in_i)
                    # lstm1_outputi in shape [1, steps, lstm1_unit]
                    # lstm1_statetuplei in shape [(lstm1_c, lstm1_h)]
                    lstm1_outputi, lstm1_statetuplei = tf.nn.dynamic_rnn(
                        lstm1_cell,
                        inputs=self.lstm1_in_split[iAsset],
                        initial_state=statei_tuple,
                        time_major=False)
                    # list index corresponds to the asset
                    # lstm1_output_split has asset_num entries,
                    # each lstm1_outputi in shape [1, steps, lstm1_unit]
                    self.lstm1_c_split.append(lstm1_statetuplei[0])  # unused
                    self.lstm1_h_split.append(lstm1_statetuplei[1])  # unused
                    self.lstm1_output_split.append(lstm1_outputi)
                # concat states
                # self.lstm1_c and self.lstm1_h are graph ops, not values
                self.lstm1_c = tf.concat(self.lstm1_c_split, 0)  # unused
                self.lstm1_h = tf.concat(self.lstm1_h_split, 0)  # unused

                # lstm1_outputs in shape [1, steps, lstm1_unit * asset_num]
                self.lstm1_outputs = tf.concat(self.lstm1_output_split, 2)
                # lstm1_outputs in shape [steps, lstm1_unit * asset_num]
                self.lstm1_outputs = tf.reshape(
                    self.lstm1_outputs, [-1, args.lstm1_unit * asset_num])

            with tf.variable_scope('Allocation_RNN') as vs:
                allo_rnn = direct_allocation_RNNCell(
                    args.lstm1_unit * asset_num, asset_num)
                '''
                inputs: [this_price, this_LSTM1_info]
                outputs: [this_action's_logreward, this_action(allocation)]
                states: [last_price, last_allocation](for calculate reward)
                init_state: [first_price, init_allocation]
                '''

                # init allocation as [[0,0...0,1]]
                self.allo_init = tf.concat([
                    tf.constant(0, dtype=tf.float32, shape=[1, asset_num]),
                    tf.constant(1, dtype=tf.float32, shape=[1, 1])
                ],
                                           axis=1)
                allo_rnn_state_init = tf.concat(
                    [self.first_price, self.allo_init], axis=1)
                allo_rnn_input = tf.concat(
                    [self.next_price, self.lstm1_outputs], axis=1)
                allo_rnn_input = tf.expand_dims(allo_rnn_input, [0])

                self.allo_rnn_output, self.allo_rnn_state = tf.nn.dynamic_rnn(
                    allo_rnn,
                    inputs=allo_rnn_input,
                    initial_state=allo_rnn_state_init,
                    time_major=False)
                self.logrewards, self.actions = tf.split(self.allo_rnn_output,
                                                         [1, asset_num + 1],
                                                         axis=2)

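            # rewards accumulate in log space (sum of per-step log returns)
            # and are exponentiated once, i.e. the product of returns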
            self.totallogreward = tf.reduce_sum(self.logrewards)
            self.totalreward = tf.exp(self.totallogreward)

            self.vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          vs0.name)
Example #29
    def inference(self):

        with tf.variable_scope('batch_lstm') as vs_batch_lstm:
            # input shape is (step_size, feature_num * station_num)
            self.input = tf.placeholder(tf.float32,
                                        shape=(None, args.feature_num *
                                               args.station_num))
            # batch_input shape is (batch_size, step_size, feature_num * station_num) where batch_size is always 1
            self.batch_input = tf.expand_dims(self.input, axis=[0])
            self.input_split_list = tf.split(self.batch_input,
                                             args.station_num,
                                             axis=2)

            self.lstm_c_size = args.lstm_num_units
            self.lstm_h_size = args.lstm_num_units

            # LSTM state placeholders are shaped (station_num, lstm_num_units) for both c and h
            self.lstm_c_in = tf.placeholder(tf.float32,
                                            shape=(args.station_num,
                                                   self.lstm_c_size))
            lstm_c_in_list = tf.split(self.lstm_c_in, args.station_num, axis=0)
            self.lstm_h_in = tf.placeholder(tf.float32,
                                            shape=(args.station_num,
                                                   self.lstm_h_size))
            lstm_h_in_list = tf.split(self.lstm_h_in, args.station_num, axis=0)

            lstm_output_list = []
            lstm_out_c_list = []
            lstm_out_h_list = []
            for i in range(args.station_num):
                with tf.variable_scope('share_lstm' + str(i)) as vs_share_lstm:
                    # if i > 0:
                    #     tf.get_variable_scope().reuse_variables()
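                    # with reuse left disabled, each station keeps its own
                    # LSTM weights under its 'share_lstm<i>' scope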
                    lstm_cell = rnn.BasicLSTMCell(args.lstm_num_units,
                                                  state_is_tuple=True)
                    lstm_cell = rnn.DropoutWrapper(
                        lstm_cell,
                        input_keep_prob=args.lstm_input_prob,
                        output_keep_prob=args.lstm_output_prob,
                        state_keep_prob=args.lstm_state_prob)
                    lstm_state_in = rnn.LSTMStateTuple(lstm_c_in_list[i],
                                                       lstm_h_in_list[i])
                    # station_i_output shape is (batch_size, step_size, lstm_num_units)
                    # station_i_state is an LSTMStateTuple
                    station_i_output, station_i_state = tf.nn.dynamic_rnn(
                        lstm_cell,
                        self.input_split_list[i],
                        initial_state=lstm_state_in,
                        time_major=False)
                    # each station's private LSTM is followed by local FC layers
                    # station_i_output_reshape shape is (step_size, lstm_num_units)
                    station_i_output_reshape = tf.reshape(
                        station_i_output, (-1, args.lstm_num_units))
                    lstm_out_c_list.append(
                        tf.reshape(station_i_state.c,
                                   (1, args.lstm_num_units)))
                    lstm_out_h_list.append(
                        tf.reshape(station_i_state.h,
                                   (1, args.lstm_num_units)))
                    # local FC_1 layer
                    local_fc_1_w, local_fc_1_b = self._get_fc_variable(
                        (args.lstm_num_units, args.local_fc_1_units_num))
                    tf.summary.histogram('local_fc_1_w', local_fc_1_w)
                    # BN and drop out
                    local_fc_1_output = tf.layers.batch_normalization(
                        tf.matmul(station_i_output_reshape, local_fc_1_w) +
                        local_fc_1_b)
                    local_fc_1_output = tf.nn.relu(local_fc_1_output)
                    local_fc_1_output = tf.nn.dropout(
                        local_fc_1_output, keep_prob=args.local_fc_1_prob)

                    # local FC_2 layer
                    local_fc_2_w, local_fc_2_b = self._get_fc_variable(
                        (args.local_fc_1_units_num, args.local_fc_2_units_num))
                    tf.summary.histogram('local_fc_2_w', local_fc_2_w)
                    local_fc_2_output = tf.layers.batch_normalization(
                        tf.matmul(local_fc_1_output, local_fc_2_w) +
                        local_fc_2_b)
                    local_fc_2_output = tf.nn.relu(local_fc_2_output)
                    local_fc_2_output = tf.nn.dropout(
                        local_fc_2_output, keep_prob=args.local_fc_2_prob)

                    lstm_output_list.append(local_fc_2_output)

            # concat the lstm output
            # lstm_output shape is (batch, station_num * local_fc_2_units_num)
            lstm_output = tf.concat(lstm_output_list, axis=1)
            lstm_out_c = tf.concat(lstm_out_c_list, axis=0)
            lstm_out_h = tf.concat(lstm_out_h_list, axis=0)
            tf.summary.histogram('concat_lstm_fc_output', lstm_output)
            with tf.variable_scope('fc_layer_1') as vs_fc1:
                w1, b1 = self._get_fc_variable(
                    (args.station_num * args.local_fc_2_units_num,
                     args.fc_1_units_num))
                tf.summary.histogram('fc_1_w', w1)
                fc1_output = tf.matmul(lstm_output, w1) + b1
                fc1_output = tf.layers.batch_normalization(fc1_output)
            #     fc1_output = tf.nn.relu(fc1_output)
            #     fc1_output = tf.nn.dropout(fc1_output,keep_prob=args.fc1_prob)

            # with tf.variable_scope('fc_layer_2') as vs_fc2:
            #     w2,b2 = self._get_fc_variable((args.fc_1_units_num,args.station_num))
            #     tf.summary.histogram('fc_2_w',w2)
            #     fc2_output = tf.matmul(fc1_output,w2) + b2
            #     fc2_output = tf.layers.batch_normalization(fc2_output)

        return fc1_output, lstm_out_c, lstm_out_h
Example #30
    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(scope
                               or type(self).__name__):  # "BasicLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            if self._state_is_tuple:
                c, h = state
            else:
                c, h = array_ops.split(state, 2, axis=1)
            concat = _linear([inputs, h], 4 * self._num_units, True, 0.,
                             self.weights_init, self.trainable, self.restore,
                             self.reuse)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = array_ops.split(concat, 4, axis=1)

            # apply batch normalization to inner state and gates
            if self.batch_norm:
                i = batch_normalization(i,
                                        gamma=0.1,
                                        trainable=self.trainable,
                                        restore=self.restore,
                                        reuse=self.reuse)
                j = batch_normalization(j,
                                        gamma=0.1,
                                        trainable=self.trainable,
                                        restore=self.restore,
                                        reuse=self.reuse)
                f = batch_normalization(f,
                                        gamma=0.1,
                                        trainable=self.trainable,
                                        restore=self.restore,
                                        reuse=self.reuse)
                o = batch_normalization(o,
                                        gamma=0.1,
                                        trainable=self.trainable,
                                        restore=self.restore,
                                        reuse=self.reuse)

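            # cell update: new_c = sigma(f + forget_bias) * c + sigma(i) * act(j),
            # where sigma = self._inner_activation (typically sigmoid)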
            new_c = (c * self._inner_activation(f + self._forget_bias) +
                     self._inner_activation(i) * self._activation(j))

            # hidden-to-hidden batch normalization
            if self.batch_norm:
                batch_norm_new_c = batch_normalization(
                    new_c,
                    gamma=0.1,
                    trainable=self.trainable,
                    restore=self.restore,
                    reuse=self.reuse)
                new_h = self._activation(
                    batch_norm_new_c) * self._inner_activation(o)
            else:
                new_h = self._activation(new_c) * self._inner_activation(o)

            if self._state_is_tuple:
                new_state = _rnn_cell.LSTMStateTuple(new_c, new_h)
            else:
                new_state = array_ops.concat([new_c, new_h], axis=1)

            # Retrieve RNN Variables
            with tf.variable_scope('Linear', reuse=True):
                self.W = tf.get_variable('Matrix')
                self.b = tf.get_variable('Bias')

            return new_h, new_state