def encoder_decoder(self, inputdata):
        """ LSTM-based encoder-decoder module. """

        with tf.variable_scope('LSTMLayers'):
            [batch_size, width, _] = inputdata.get_shape().as_list()

            with tf.variable_scope('encoder'):
                forward_cells = []
                backward_cells = []

                for _ in range(2):
                    forward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
                    backward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))

                encoder_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
                    forward_cells, backward_cells, inputdata, dtype=tf.float32)

            with tf.variable_scope('decoder'):
                forward_cells = []
                backward_cells = []

                for _ in range(2):
                    forward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))
                    backward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))

                decoder_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
                    forward_cells, backward_cells, encoder_layer, dtype=tf.float32)

            rnn_reshaped = tf.reshape(decoder_layer, [batch_size * width, -1])

            logits = slim.fully_connected(rnn_reshaped, self.num_classes, activation_fn=None)
            logits = tf.reshape(logits, [batch_size, width, self.num_classes])
            rnn_out = tf.transpose(logits, (1, 0, 2))

        return rnn_out
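
For orientation, here is a minimal, self-contained sketch of the tf.contrib.rnn.stack_bidirectional_dynamic_rnn call that every example on this page builds on (TF 1.x graph mode; the shapes and cell sizes below are illustrative assumptions, not taken from any snippet here).

import tensorflow as tf
from tensorflow.contrib import rnn

# Assumed toy shapes: [batch, time, features] inputs with per-example lengths.
inputs = tf.placeholder(tf.float32, [None, 20, 32])
lengths = tf.placeholder(tf.int32, [None])

cells_fw = [rnn.LSTMCell(64) for _ in range(2)]
cells_bw = [rnn.LSTMCell(64) for _ in range(2)]

# outputs: [batch, time, 2 * 64]; states_* hold one final state per layer.
outputs, states_fw, states_bw = rnn.stack_bidirectional_dynamic_rnn(
    cells_fw, cells_bw, inputs, sequence_length=lengths, dtype=tf.float32)

# Final hidden state of the top layer in each direction, concatenated.
encoding = tf.concat([states_fw[-1].h, states_bw[-1].h], axis=1)
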
def BiRNN_new(x,
              c,
              l,
              num_layers,
              num_hidden,
              meta_data,
              num_classes,
              timesteps,
              keep_prob,
              uncertainty,
              is_train=True,
              use_sequence_lengths=False,
              use_embedding=True,
              sequence_length=66,
              dict_size=32):

    if use_embedding:
        emb = tf.keras.layers.Embedding(dict_size,
                                        num_hidden,
                                        input_length=sequence_length)
        x = emb(x)

    with tf.name_scope("birnn"):
        #lstm_fw_cell = [tf.contrib.rnn.DropoutWrapper(rnn.BasicLSTMCell(num_hidden), input_keep_prob = keep_prob) for _ in range(num_layers)]
        #lstm_bw_cell = [tf.contrib.rnn.DropoutWrapper(rnn.BasicLSTMCell(num_hidden), input_keep_prob = keep_prob) for _ in range(num_layers)]
        #lstm_fw_cell = [rnn.ConvLSTMCell(1,[66,32],128, (5,1)) for _ in range(num_layers)]
        #lstm_bw_cell = [rnn.ConvLSTMCell(1,[66,32],128, (5,1)) for _ in range(num_layers)]
        lstm_fw_cell = [
            rnn.BasicLSTMCell(num_hidden) for _ in range(num_layers)
        ]
        lstm_bw_cell = [
            rnn.BasicLSTMCell(num_hidden) for _ in range(num_layers)
        ]
        #

    if use_sequence_lengths:
        rnn_outputs_all, final_fw, final_bw = rnn.stack_bidirectional_dynamic_rnn(
            lstm_fw_cell, lstm_bw_cell, x, sequence_length=l, dtype=tf.float32)
    else:
        rnn_outputs_all, final_fw, final_bw = rnn.stack_bidirectional_dynamic_rnn(
            lstm_fw_cell, lstm_bw_cell, x, dtype=tf.float32)
    # Take the hidden state (h) of the top layer in each direction.
    final_fw = final_fw[-1].h
    final_bw = final_bw[-1].h

    out = tf.concat([final_fw, final_bw], 1)
    print(out, final_bw, final_fw)
    feat = tf.concat([out, c], axis=1)

    # Add another fully connected layer.
    l1 = tf.contrib.slim.fully_connected(feat, num_hidden)
    # l1 = tf.layers.dropout(l1, rate=keep_prob, training=is_train)

    # l2 = tf.contrib.slim.fully_connected(l1, num_hidden)
    preds = tf.contrib.slim.fully_connected(l1,
                                            num_classes,
                                            activation_fn=None)
    # Maybe we need to return something for the attention score.
    return preds, l1, None, None, None, None
 def bi_gru(self, inputs, hidden_size, res_add=True):
     """build the bi-GRU network. Return the encoder represented vector.
     X_inputs: [batch_size, n_step]
     n_step: 句子的词数量;或者文档的句子数。
     outputs: [batch_size, n_step, hidden_size*2+embedding_size(if res_add)]
     """
     cells_fw = [self.gru_cell(hidden_size) for _ in range(1)]
     cells_bw = [self.gru_cell(hidden_size) for _ in range(1)]
     initial_states_fw = [
         cell_fw.zero_state(self.batch_size, tf.float32)
         for cell_fw in cells_fw
     ]
     initial_states_bw = [
         cell_bw.zero_state(self.batch_size, tf.float32)
         for cell_bw in cells_bw
     ]
     outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn(
         cells_fw,
         cells_bw,
         inputs,
         initial_states_fw=initial_states_fw,
         initial_states_bw=initial_states_bw,
         dtype=tf.float32)
     if res_add:
         outputs = tf.concat([outputs, inputs], axis=2)
     return outputs
Example #4
def BiLSTM(x, is_train):
    # lstm_fw_cell = rnn.BasicLSTMCell(cfg.hidden_units_num)
    # lstm_bw_cell = rnn.BasicLSTMCell(cfg.hidden_units_num)
    stacked_lstm = []
    stacked_bw_lstm = []
    for i in range(1):
        lstm_cell = tf.contrib.rnn.BasicLSTMCell(cfg.hidden_units_num)
        if tensor_util.constant_value(is_train):
            lstm_cell = tf.contrib.rnn.DropoutWrapper(
                lstm_cell, output_keep_prob=cfg.dropout)
        lstm_cell_bw = tf.contrib.rnn.BasicLSTMCell(cfg.hidden_units_num)
        if tensor_util.constant_value(is_train):
            lstm_cell_bw = tf.contrib.rnn.DropoutWrapper(
                lstm_cell_bw, output_keep_prob=cfg.dropout)
        stacked_lstm.append(lstm_cell)
        stacked_bw_lstm.append(lstm_cell_bw)

    # Build the forward and backward multi-layer LSTM cells
    Gmcell = tf.contrib.rnn.MultiRNNCell(stacked_lstm)
    Gmcell_bw = tf.contrib.rnn.MultiRNNCell(stacked_bw_lstm)

    outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn([Gmcell], [Gmcell_bw],
                                                        x,
                                                        dtype=tf.float32)
    return outputs
    def _sequence_lstm(self, inputdata, name):
        """ Implements the sequence label part of the network

        :param inputdata:
        :param name:
        :return:
        """
        with tf.variable_scope(name_or_scope=name):
            # construct stack lstm rcnn layer
            # forward lstm cell
            fw_cell_list = [
                tf.nn.rnn_cell.LSTMCell(nh, forget_bias=1.0)
                for nh in [self._hidden_nums] * self._layers_nums
            ]
            # Backward direction cells
            bw_cell_list = [
                tf.nn.rnn_cell.LSTMCell(nh, forget_bias=1.0)
                for nh in [self._hidden_nums] * self._layers_nums
            ]

            stack_lstm, _, _ = rnn.stack_bidirectional_dynamic_rnn(
                fw_cell_list, bw_cell_list, inputdata, dtype=tf.float32)
            stack_lstm = self.dropout(inputdata=stack_lstm,
                                      keep_prob=0.5,
                                      is_training=self._is_training,
                                      name='sequence_drop_out')

        return stack_lstm
Example #6
    def BidirectionalLSTM(self, inputdata):
        """
        创建双向lstm网络
        :param inputdata:
        :return:
        """
        # 多层网络 论文实现为两层256个隐层单元
        # 前向cell
        fw_cell_list = [rnn.BasicLSTMCell(nh, forget_bias=1.0) for nh in [self.hidden_nums, self.hidden_nums]]
        # 后向 direction cells
        bw_cell_list = [rnn.BasicLSTMCell(nh, forget_bias=1.0) for nh in [self.hidden_nums, self.hidden_nums]]

        stack_lstm_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(fw_cell_list, bw_cell_list, inputdata, dtype=tf.float32)
        # Add a dropout layer
        stack_lstm_layer = tf.nn.dropout(stack_lstm_layer, keep_prob=0.5, noise_shape=None, name=None)
        [batch_s, _, hidden_nums] = inputdata.get_shape().as_list()
        outputs = tf.reshape(stack_lstm_layer, [-1, hidden_nums])
        weights = tf.get_variable(name='W_out',
                                  shape=[hidden_nums, self.num_class],
                                  dtype=tf.float32,
                                  initializer=tf.glorot_uniform_initializer())  # tf.glorot_normal_initializer
        bias = tf.get_variable(name='b_out',
                               shape=[self.num_class],
                               dtype=tf.float32,
                               initializer=tf.constant_initializer())
        # Affine projection onto num_class
        # Logits before the softmax
        logits = tf.matmul(outputs, weights) + bias
        logits = tf.reshape(logits, [batch_s, -1, self.num_class])
        # Transpose to time-major (swap batch and time axes)
        raw_pred = tf.argmax(tf.nn.softmax(logits), axis=2, name='raw_prediction')
        logits = tf.transpose(logits, (1, 0, 2))
        return logits, raw_pred
Example #7
def lstm_layer(inputs,
               batch_size,
               num_units,
               lengths=None,
               stack_size=1,
               use_cudnn=False,
               rnn_dropout_drop_amt=0,
               is_training=True,
               bidirectional=True):
  """Create a LSTM layer using the specified backend."""
  if use_cudnn:
    return cudnn_lstm_layer(inputs, batch_size, num_units, lengths, stack_size,
                            rnn_dropout_drop_amt, is_training, bidirectional)
  else:
    assert rnn_dropout_drop_amt == 0
    cells_fw = [
        contrib_cudnn_rnn.CudnnCompatibleLSTMCell(num_units)
        for _ in range(stack_size)
    ]
    cells_bw = [
        contrib_cudnn_rnn.CudnnCompatibleLSTMCell(num_units)
        for _ in range(stack_size)
    ]
    with tf.variable_scope('cudnn_lstm'):
      (outputs, unused_state_f,
       unused_state_b) = contrib_rnn.stack_bidirectional_dynamic_rnn(
           cells_fw,
           cells_bw,
           inputs,
           dtype=tf.float32,
           sequence_length=lengths,
           parallel_iterations=1)

    return outputs
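
A hypothetical call to the lstm_layer helper above, taking the use_cudnn=False path shown; the placeholder shapes and hyperparameters are assumptions for illustration only.

inputs = tf.placeholder(tf.float32, [None, 625, 229])   # [batch, time, features]
lengths = tf.placeholder(tf.int32, [None])               # per-example lengths

outputs = lstm_layer(inputs,
                     batch_size=8,
                     num_units=128,
                     lengths=lengths,
                     stack_size=2,
                     use_cudnn=False,
                     is_training=True)
# outputs: [batch, time, 2 * num_units] from the stacked bidirectional LSTM.
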
Example #8
def BiGRU(x, hidden_size, dropout_prob=None, n_layer=None):
    x_len = length_seq(x)
    if n_layer:  # multilayer GRU
        GRU_forwards = [rnn.GRUCell(hidden_size) for _ in range(n_layer)]
        GRU_backwards = [rnn.GRUCell(hidden_size) for _ in range(n_layer)]
        if dropout_prob:
            GRU_forwards = [
                rnn.DropoutWrapper(i, input_keep_prob=dropout_prob)
                for i in GRU_forwards
            ]
            GRU_backwards = [
                rnn.DropoutWrapper(i, input_keep_prob=dropout_prob)
                for i in GRU_backwards
            ]
        out, _, _ = rnn.stack_bidirectional_dynamic_rnn(cells_fw=GRU_forwards,
                                                        cells_bw=GRU_backwards,
                                                        inputs=x,
                                                        sequence_length=x_len,
                                                        dtype=tf.float32)
    else:
        GRU_forward = rnn.GRUCell(hidden_size)
        GRU_backward = rnn.GRUCell(hidden_size)
        if dropout_prob:
            GRU_forward = rnn.DropoutWrapper(GRU_forward,
                                             input_keep_prob=dropout_prob)
            GRU_backward = rnn.DropoutWrapper(GRU_backward,
                                              input_keep_prob=dropout_prob)
        (forward_out, backward_out), _ = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=GRU_forward,
            cell_bw=GRU_backward,
            inputs=x,
            sequence_length=x_len,
            dtype=tf.float32)
        out = tf.concat((forward_out, backward_out), axis=2)
    return out
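
A hypothetical use of BiGRU above, exercising both branches; the embedded input shape is an assumption, and length_seq is the helper the function already relies on to infer true sequence lengths. Note that dropout_prob is passed to DropoutWrapper as a keep probability.

embedded = tf.placeholder(tf.float32, [None, 50, 300])   # [batch, max_len, emb_dim]

single = BiGRU(embedded, hidden_size=100)                 # single bi-GRU layer
stacked = BiGRU(embedded, hidden_size=100,
                dropout_prob=0.8, n_layer=2)              # two stacked layers
# Both return [batch, max_len, 2 * hidden_size].
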
Example #9
  def encode(self, sequence, sequence_length):
    cells_fw, cells_bw = self._cells
    if self._use_cudnn:
      # Implements stacked bidirectional LSTM for variable-length sequences,
      # which are not supported by the CudnnLSTM layer.
      inputs_fw = tf.transpose(sequence, [1, 0, 2])
      for lstm_fw, lstm_bw in zip(cells_fw, cells_bw):
        outputs_fw, _ = lstm_fw(inputs_fw, training=self._is_training)
        inputs_bw = tf.reverse_sequence(
            inputs_fw, sequence_length, seq_axis=0, batch_axis=1)
        outputs_bw, _ = lstm_bw(inputs_bw, training=self._is_training)
        outputs_bw = tf.reverse_sequence(
            outputs_bw, sequence_length, seq_axis=0, batch_axis=1)

        inputs_fw = tf.concat([outputs_fw, outputs_bw], axis=2)

      last_h_fw = _get_final(outputs_fw, sequence_length)
      # outputs_bw has already been reversed, so we can take the first element.
      last_h_bw = outputs_bw[0]

    else:
      _, states_fw, states_bw = rnn.stack_bidirectional_dynamic_rnn(
          cells_fw,
          cells_bw,
          sequence,
          sequence_length=sequence_length,
          time_major=False,
          dtype=tf.float32,
          scope=self._name_or_scope)
      # Note we access the outputs (h) from the states since the backward
      # outputs are reversed to the input order in the returned outputs.
      last_h_fw = states_fw[-1][-1].h
      last_h_bw = states_bw[-1][-1].h

    return tf.concat([last_h_fw, last_h_bw], 1)
def add_birnn_layer(model, input):

    # Read hyperparameters, falling back to defaults
    try:
        num_cells = model.hyperparams["num_cells"]
    except KeyError:
        num_cells = 50

    try:
        num_layers = model.hyperparams["num_layers"]
    except KeyError:
        num_layers = 1

    fw_cells = [rnn.BasicLSTMCell(num_cells) for _ in range(num_layers)]
    bw_cells = [rnn.BasicLSTMCell(num_cells) for _ in range(num_layers)]
    fw_cells = [
        rnn.DropoutWrapper(cell, output_keep_prob=model.dropout_keep_prob)
        for cell in fw_cells
    ]
    bw_cells = [
        rnn.DropoutWrapper(cell, output_keep_prob=model.dropout_keep_prob)
        for cell in bw_cells
    ]

    model.rnn_outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn(
        fw_cells,
        bw_cells,
        input,
        sequence_length=model.x_len,
        dtype=tf.float32)

    return model.rnn_outputs
Example #11
def bidi_gru_layers(config,
                    inputs,
                    hidden_sizes,
                    seq_lens=None,
                    bidirectional=False,
                    phase=Phase.Predict):
    fcells = [rnn.GRUCell(size) for size in hidden_sizes]
    if phase == Phase.Train:
        fcells = [
            rnn.DropoutWrapper(cell,
                               output_keep_prob=config.rnn_output_dropout,
                               state_keep_prob=config.rnn_state_dropout)
            for cell in fcells
        ]

    bcells = [rnn.GRUCell(size) for size in hidden_sizes]
    if phase == Phase.Train:
        bcells = [
            rnn.DropoutWrapper(cell,
                               output_keep_prob=config.rnn_output_dropout,
                               state_keep_prob=config.rnn_state_dropout)
            for cell in bcells
        ]

    return rnn.stack_bidirectional_dynamic_rnn(fcells,
                                               bcells,
                                               inputs,
                                               dtype=tf.float32,
                                               sequence_length=seq_lens)
Example #12
    def __sequence_label(self, inputdata: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
        """ Implements the sequence label part of the network
        :param inputdata:
        :return:
        """
        with tf.variable_scope('LSTMLayers'):
            # construct stack lstm rcnn layer
            # forward lstm cell
            fw_cell_list = [tf.nn.rnn_cell.LSTMCell(nh, forget_bias=1.0) for nh in [self.__hidden_nums]*self.__layers_nums]
            # Backward direction cells
            bw_cell_list = [tf.nn.rnn_cell.LSTMCell(nh, forget_bias=1.0) for nh in [self.__hidden_nums]*self.__layers_nums]

            stack_lstm_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(fw_cell_list, bw_cell_list, inputdata,
                                                                         dtype=tf.float32)

            if self.phase.lower() == 'train':
                stack_lstm_layer = self.dropout(inputdata=stack_lstm_layer, keep_prob=0.5)

            [batch_s, _, hidden_nums] = inputdata.get_shape().as_list()  # [batch, width, 2*n_hidden]
            rnn_reshaped = tf.reshape(stack_lstm_layer, [-1, hidden_nums])  # [batch x width, 2*n_hidden]

            w = tf.Variable(tf.truncated_normal([hidden_nums, self.__num_classes], stddev=0.1), name="w")
            # Doing the affine projection

            logits = tf.matmul(rnn_reshaped, w)

            logits = tf.reshape(logits, [batch_s, -1, self.__num_classes])

            raw_pred = tf.argmax(tf.nn.softmax(logits), axis=2, name='raw_prediction')

            # Swap batch and time axes
            rnn_out = tf.transpose(logits, (1, 0, 2), name='transpose_time_major')  # [width, batch, n_classes]

        return rnn_out, raw_pred
Example #13
    def encode(self, sequence, sequence_length):
        if self._use_cudnn:
            with tf.control_dependencies([
                    tf.assert_equal(
                        sequence_length,
                        tf.shape(sequence)[1],
                        message=
                        '`use_cudnn_enc` must be False if sequence lengths vary.'
                    )
            ]):
                _, (states_h, _) = self._cudnn_enc_lstm(
                    tf.transpose(sequence, [1, 0, 2]),
                    training=self._is_training)

            # Note we access the outputs (h) from the states since the backward
            # outputs are reversed to the input order in the returned outputs.
            last_h_fw, last_h_bw = states_h[-2], states_h[-1]
        else:
            _, states_fw, states_bw = rnn.stack_bidirectional_dynamic_rnn(
                self._enc_cells_fw,
                self._enc_cells_bw,
                sequence,
                sequence_length=sequence_length,
                time_major=False,
                dtype=tf.float32,
                scope='encoder')
            # Note we access the outputs (h) from the states since the backward
            # outputs are reversed to the input order in the returned outputs.
            last_h_fw = states_fw[-1][-1].h
            last_h_bw = states_bw[-1][-1].h

        # Concatenate the final outputs for each direction.
        last_h = tf.concat([last_h_fw, last_h_bw], 1)

        return last_h
Example #14
  def encode(self, sequence, sequence_length):
    if self._use_cudnn:
      with tf.control_dependencies(
          [tf.assert_equal(
              sequence_length, tf.shape(sequence)[1],
              message='`use_cudnn_enc` must be False if sequence lengths vary.')
          ]):
        _, (states_h, _) = self._cudnn_enc_lstm(
            tf.transpose(sequence, [1, 0, 2]),
            training=self._is_training)

      # Note we access the outputs (h) from the states since the backward
      # outputs are reversed to the input order in the returned outputs.
      last_h_fw, last_h_bw = states_h[-2], states_h[-1]
    else:
      _, states_fw, states_bw = rnn.stack_bidirectional_dynamic_rnn(
          self._enc_cells_fw,
          self._enc_cells_bw,
          sequence,
          sequence_length=sequence_length,
          time_major=False,
          dtype=tf.float32,
          scope='encoder')
      # Note we access the outputs (h) from the states since the backward
      # outputs are reversed to the input order in the returned outputs.
      last_h_fw = states_fw[-1][-1].h
      last_h_bw = states_bw[-1][-1].h

    # Concatenate the final outputs for each direction.
    last_h = tf.concat([last_h_fw, last_h_bw], 1)

    return last_h
def LSTM(inputs):
    # Define the LSTM network
    _hidden_nums = g_num_hidden
    _layers_nums = g_num_layers
    fw_cell_list = [
        tf.nn.rnn_cell.LSTMCell(nh, forget_bias=1.0)
        for nh in [_hidden_nums] * _layers_nums
    ]
    # Backward direction cells
    bw_cell_list = [
        tf.nn.rnn_cell.LSTMCell(nh, forget_bias=1.0)
        for nh in [_hidden_nums] * _layers_nums
    ]

    stack_lstm_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
        fw_cell_list, bw_cell_list, inputs, dtype=tf.float32)
    stack_lstm_layer = dropout(inputdata=stack_lstm_layer,
                               keep_prob=0.5,
                               is_training=g_is_training,
                               name='sequence_drop_out')

    [batch_s, _,
     hidden_nums] = inputs.get_shape().as_list()  # [batch, width, 2*n_hidden]

    shape = tf.shape(stack_lstm_layer)

    rnn_reshaped = tf.reshape(stack_lstm_layer,
                              [shape[0] * shape[1], shape[2]])

    return shape, hidden_nums, rnn_reshaped
Example #16
    def __init__(self,vocab_size,emb_dim,num_classes=4):
        # Hyper parameters
        self.dropout = 0.4
        self.num_unit=100
        self.num_layer=1
        self.init_lr=0.001
        self.clip_norm=5

        Base.__init__(self,vocab_size,emb_dim)

        def cell():
            cell=rnn.DropoutWrapper(rnn.GRUCell(num_units=self.num_unit))
            return cell
        cell_fw=[cell() for _  in range(self.num_layer)]
        cell_bw =[cell() for _ in range(self.num_layer)]
        outputs,state_fw,state_bw = rnn.stack_bidirectional_dynamic_rnn(
            cells_fw=cell_fw,
            cells_bw=cell_bw,
            inputs=self.emb_inputs,
            sequence_length=self.lengths,
            dtype=tf.float32
        )

        self.merge = tf.concat([state_fw[-1],state_bw[-1]],axis=-1)

        # Train the model
        self.train(num_classes)
Example #17
    def _sequence_label(self, inputdata, name):
        with tf.variable_scope(name_or_scope=name):
            fw_cell_list = [tf.nn.rnn_cell.LSTMCell(nh, forget_bias=1.0) for
                            nh in [self._hidden_nums] * self._layers_nums]
            # Backward direction cells
            bw_cell_list = [tf.nn.rnn_cell.LSTMCell(nh, forget_bias=1.0) for
                            nh in [self._hidden_nums] * self._layers_nums]

            stack_lstm_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(fw_cell_list,
                                                                         bw_cell_list, inputdata,
                                                                         # sequence_length=CFG.ARCH.SEQ_LENGTH * np.ones(CFG.TRAIN.BATCH_SIZE),
                                                                         dtype=tf.float32)
            # stack_lstm_layer = self.dropout(inputdata=stack_lstm_layer, keep_prob=0.5,\
            #    is_training=self._is_training, name='sequence_drop_out')

            [batch_s, _, hidden_nums] = inputdata.get_shape().as_list()  # [batch, width, 2*n_hidden]

            shape = tf.shape(stack_lstm_layer)
            rnn_reshaped = tf.reshape(stack_lstm_layer, [shape[0] * shape[1], shape[2]])

            w = tf.get_variable(name='w', shape=[hidden_nums, self._num_classes], \
                                initializer=tf.truncated_normal_initializer(stddev=0.02), trainable=True)
            # Doing the affine projection
            logits = tf.matmul(rnn_reshaped, w, name='logits')
            logits = tf.reshape(logits, [shape[0], shape[1], self._num_classes], name='logits_reshape')
            raw_pred = tf.argmax(tf.nn.softmax(logits), axis=2, name='raw_prediction')
            # Swap batch and time axes
            rnn_out = tf.transpose(logits, [1, 0, 2], name='transpose_time_major')  # [width, batch, n_classes]

        return rnn_out, raw_pred
Example #18
def StackedBRNN(input_rnn,
                input_size,
                hidden_size,
                num_layers,
                dropout_input=0,
                dropout_output=0,
                scope_stack=None):
    cells_fw = []
    cells_bw = []
    for i in range(num_layers):
        cell_fw = BasicLSTMCell(hidden_size, state_is_tuple=True)
        cell_bw = BasicLSTMCell(hidden_size, state_is_tuple=True)
        d_cell_fw = tf.contrib.rnn.DropoutWrapper(
            cell_fw,
            output_keep_prob=dropout_output,
            input_keep_prob=dropout_input)
        d_cell_bw = tf.contrib.rnn.DropoutWrapper(
            cell_bw,
            output_keep_prob=dropout_output,
            input_keep_prob=dropout_input)
        cells_fw.append(d_cell_fw)
        cells_bw.append(d_cell_bw)
    outputs, _, _ = stack_bidirectional_dynamic_rnn(cells_fw,
                                                    cells_bw,
                                                    input_rnn,
                                                    dtype=tf.float32,
                                                    sequence_length=input_size,
                                                    scope=scope_stack)

    return outputs
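
A hypothetical call to StackedBRNN above (shapes assumed). Note that input_size is actually the per-example sequence-length tensor, and dropout_input/dropout_output are passed straight to DropoutWrapper as keep probabilities, so 1.0 disables dropout.

tokens = tf.placeholder(tf.float32, [None, 40, 300])   # [batch, time, emb_dim]
lengths = tf.placeholder(tf.int32, [None])

encoded = StackedBRNN(tokens,
                      input_size=lengths,    # used as sequence_length
                      hidden_size=128,
                      num_layers=2,
                      dropout_input=1.0,     # keep probabilities
                      dropout_output=1.0,
                      scope_stack='doc_rnn')
# encoded: [batch, time, 2 * hidden_size]
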
Example #19
    def stack_bi_gru_layer(self, embedded_input, sequence_length):
        def gru_cell():
            if not self.is_training:
                self.keep_prob = 1.0
            with tf.name_scope('gru_cell'):
                cell = rnn.GRUCell(self.settings.bi_gru_hidden_dim,
                                   reuse=tf.get_variable_scope().reuse)
            return rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)

        cells_fw = [gru_cell() for _ in range(self.settings.bi_gru_layer_num)]
        cells_bw = [gru_cell() for _ in range(self.settings.bi_gru_layer_num)]
        initial_states_fw = [
            cell_fw.zero_state(self.settings.batch_size, tf.float32)
            for cell_fw in cells_fw
        ]
        initial_states_bw = [
            cell_bw.zero_state(self.settings.batch_size, tf.float32)
            for cell_bw in cells_bw
        ]
        outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn(
            cells_fw,
            cells_bw,
            embedded_input,
            sequence_length=sequence_length,
            initial_states_fw=initial_states_fw,
            initial_states_bw=initial_states_bw,
            dtype=tf.float32)
        return outputs  # [batch_size, max_time, layers_output]
Example #20
def bi_rnn(inputs, extract_size, reuse=None):
    with tf.variable_scope("rnn", reuse=reuse):
        """express output of CNN on time step dimension."""
        fw_cells = list()
        bw_cells = list()
        rnn_layer_num = 2
        keep_prob = 0.5
        for _ in range(rnn_layer_num):
            # Define cells
            fw_cell = rnn.BasicLSTMCell(extract_size, forget_bias=1.0)
            bw_cell = rnn.BasicLSTMCell(extract_size, forget_bias=1.0)
            # Drop out in case of over-fitting.
            fw_cell = rnn.DropoutWrapper(fw_cell,
                                         input_keep_prob=keep_prob,
                                         output_keep_prob=keep_prob)
            bw_cell = rnn.DropoutWrapper(bw_cell,
                                         input_keep_prob=keep_prob,
                                         output_keep_prob=keep_prob)
            # Stack same cells.
            fw_cells.append(fw_cell)
            bw_cells.append(bw_cell)
        output, _, _ = rnn.stack_bidirectional_dynamic_rnn(fw_cells,
                                                           bw_cells,
                                                           inputs,
                                                           dtype=tf.float32)
        output = tf.transpose(output, [1, 0, 2])
        seq_len = tf.shape(output)[0] - 1
        output = tf.gather_nd(output, [seq_len])
        # TODO express output of CNN on time step dimension.
    return output
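
A hypothetical call to bi_rnn above (input shape assumed): only the final time step of the top bidirectional layer is returned.

features = tf.placeholder(tf.float32, [None, 32, 512])   # [batch, time, features]

last_step = bi_rnn(features, extract_size=256)
# last_step: [batch, 2 * extract_size], taken at the last time step.
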
Example #21
 def bi_gru(self, X_inputs):
     """build the bi-GRU network. Return the encoder represented vector.
         X_inputs: [batch_size, n_step]
         n_step: 句子的词数量;或者文档的句子数。
         outputs: [fw_state, embeddings, bw_state], shape=[batch_size, hidden_size+embedding_size+hidden_size]
         """
     inputs = tf.nn.embedding_lookup(
         self.embedding, X_inputs)  # [batch_size, n_step, embedding_size]
     cells_fw = [self.gru_cell() for _ in range(self.n_layer)]
     cells_bw = [self.gru_cell() for _ in range(self.n_layer)]
     initial_states_fw = [
         cell_fw.zero_state(self.batch_size, tf.float32)
         for cell_fw in cells_fw
     ]
     initial_states_bw = [
         cell_bw.zero_state(self.batch_size, tf.float32)
         for cell_bw in cells_bw
     ]
     outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn(
         cells_fw,
         cells_bw,
         inputs,
         initial_states_fw=initial_states_fw,
         initial_states_bw=initial_states_bw,
         dtype=tf.float32)
     hidden_outputs = tf.concat([outputs, inputs], axis=2)
     return hidden_outputs  # shape =[seg_num, n_steps, hidden_size*2+embedding_size]
Example #22
 def bi_gru(self, inputs):
     print('================== bi_gru ==================')
     """build the Bi-GRU network. 返回个所有层的隐含状态。"""
     cells_fw = [self.gru_cell() for _ in range(self.n_layer)]
     cells_bw = [self.gru_cell() for _ in range(self.n_layer)]
     print('inputs: ', inputs.shape)  # (?, 200, 256)
     print('cells_fw: ', type(cells_fw))
     print('cells_bw: ', type(cells_bw))
     initial_states_fw = [
         cell_fw.zero_state(self.batch_size, tf.float32)
         for cell_fw in cells_fw
     ]
     initial_states_bw = [
         cell_bw.zero_state(self.batch_size, tf.float32)
         for cell_bw in cells_bw
     ]
     # print('initial_states_bw: ', np.asarray(initial_states_bw).shape)
     outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn(
         cells_fw,
         cells_bw,
         inputs,
         initial_states_fw=initial_states_fw,
         initial_states_bw=initial_states_bw,
         dtype=tf.float32)
     outputs = tf.concat([outputs, inputs], axis=2)
     print('outputs: ', outputs.shape)  # (?, 200, self.hidden_size * 2)
     return outputs
Example #23
 def DNNModel(self):
     with tf.name_scope('DNNModel'):
         lstm_fw_cells = list()
         lstm_bw_cells = list()
         for _ in range(self.layerNum):
             # Define LSTM cells with tensorflow
             fw_cell = rnn.BasicLSTMCell(self.hiddenSize, forget_bias=1.0)
             bw_cell = rnn.BasicLSTMCell(self.hiddenSize, forget_bias=1.0)
             # Drop out in case of over-fitting.
             fw_cell = rnn.DropoutWrapper(fw_cell, input_keep_prob=self.keep_prob, output_keep_prob=self.keep_prob)
             bw_cell = rnn.DropoutWrapper(bw_cell, input_keep_prob=self.keep_prob, output_keep_prob=self.keep_prob)
             # Stack same LSTM cells.
             lstm_fw_cells.append(fw_cell)
             lstm_bw_cells.append(bw_cell)
             pass
         outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn(
             lstm_fw_cells,
             lstm_bw_cells,
             self.x,
             dtype=tf.float32
         )
         self.logits = tf.contrib.layers.fully_connected(outputs, self.classNum, activation_fn=None)
         self.variableSummaries(self.logits)
         pass
     pass
Example #24
    def __init__(self, training, batch_size, name=''):
        super(FeatureNet, self).__init__()
        self.training = training

        # scalar
        self.batch_size = batch_size
        # [ΣK, 35/45, 7]
        self.feature = tf.placeholder(
            tf.float32, [None, cfg.VOXEL_POINT_COUNT, 7], name='feature')
        # [ΣK]
        self.number = tf.placeholder(tf.int64, [None], name='number')
        # [ΣK, 4], each row stores (batch, d, h, w)
        self.coordinate = tf.placeholder(
            tf.int64, [None, 4], name='coordinate')

        # with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:
        #     self.vfe1 = VFELayer(32, 'VFE-1')
        #     self.vfe2 = VFELayer(128, 'VFE-2')
        #
        # # boolean mask [K, T, 2 * units]
        # mask = tf.not_equal(tf.reduce_max(
        #     self.feature, axis=2, keep_dims=True), 0)
        # x = self.vfe1.apply(self.feature, mask, self.training)
        # x = self.vfe2.apply(x, mask, self.training)
        # print("x.get_shape()", x.get_shape())
        # # [ΣK, 128]
        # voxelwise = tf.reduce_max(x, axis=1)

        self.__hidden_nums=64
        self.__layers_nums=2
        fw_cell_list = [rnn.BasicLSTMCell(nh, forget_bias=1.0) for nh in [self.__hidden_nums] * self.__layers_nums]
        # Backward direction cells
        bw_cell_list = [rnn.BasicLSTMCell(nh, forget_bias=1.0) for nh in [self.__hidden_nums] * self.__layers_nums]

        stack_lstm_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(fw_cell_list, bw_cell_list, self.feature,
                                                                     dtype=tf.float32)
        print("stack_lstm_layer.get_shape()",stack_lstm_layer.get_shape())
        voxelwise = tf.reduce_max(stack_lstm_layer, axis=1)

        print("voxelwise.get_shape()", voxelwise.get_shape())
        # car: [batch_size * 400 * 352 * 128]
        # pedestrian/cyclist: [N * 10 * 200 * 240 * 128]
        self.outputs = tf.scatter_nd(
            self.coordinate, voxelwise, [self.batch_size, 10, cfg.INPUT_HEIGHT, cfg.INPUT_WIDTH, 128])
Example #25
    def _sequence_label(self, inputdata, name):
        """ Implements the sequence label part of the network

        :param inputdata:
        :param name:
        :return:
        """
        with tf.variable_scope(name_or_scope=name):
            # construct stack lstm rcnn layer
            # forward lstm cell
            fw_cell_list = [
                tf.nn.rnn_cell.LSTMCell(nh, forget_bias=1.0)
                for nh in [self._hidden_nums] * self._layers_nums
            ]
            # Backward direction cells
            bw_cell_list = [
                tf.nn.rnn_cell.LSTMCell(nh, forget_bias=1.0)
                for nh in [self._hidden_nums] * self._layers_nums
            ]

            stack_lstm_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(
                fw_cell_list, bw_cell_list, inputdata, dtype=tf.float32)
            stack_lstm_layer = self.dropout(inputdata=stack_lstm_layer,
                                            keep_prob=0.5,
                                            is_training=self._is_training,
                                            name='sequence_drop_out')

            [batch_s, _, hidden_nums
             ] = inputdata.get_shape().as_list()  # [batch, width, 2*n_hidden]

            shape = tf.shape(stack_lstm_layer)
            rnn_reshaped = tf.reshape(stack_lstm_layer,
                                      [shape[0] * shape[1], shape[2]])

            w = tf.get_variable(
                name='w',
                shape=[hidden_nums, self._num_classes],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
                trainable=True)

            # Doing the affine projection
            logits = tf.matmul(rnn_reshaped, w, name='logits')

            logits = tf.reshape(logits,
                                [shape[0], shape[1], self._num_classes],
                                name='logits_reshape')

            raw_pred = tf.argmax(tf.nn.softmax(logits),
                                 axis=2,
                                 name='raw_prediction')

            # Swap batch and time axes
            rnn_out = tf.transpose(
                logits, [1, 0, 2],
                name='transpose_time_major')  # [width, batch, n_classes]

        return rnn_out, raw_pred
Example #26
def feed_forward(inputs, seq_length):

    # multiple RNN layers
    stacked_rnn_fw, stacked_rnn_bw = [], []
    for _ in range(num_layers - 1):
        stacked_rnn_fw.append(rnn.BasicLSTMCell(rnn_size, state_is_tuple=True))
        stacked_rnn_bw.append(rnn.BasicLSTMCell(rnn_size, state_is_tuple=True))
    # adding attention at last LSTM layer
    stacked_rnn_fw.append(
        tf.contrib.rnn.AttentionCellWrapper(rnn.BasicLSTMCell(
            rnn_size, state_is_tuple=True),
                                            attn_length=attn_length,
                                            state_is_tuple=True))
    stacked_rnn_bw.append(
        tf.contrib.rnn.AttentionCellWrapper(rnn.BasicLSTMCell(
            rnn_size, state_is_tuple=True),
                                            attn_length=attn_length,
                                            state_is_tuple=True))
    # stacking LSTM layers
    cell_fw = rnn.MultiRNNCell(cells=stacked_rnn_fw, state_is_tuple=True)
    cell_bw = rnn.MultiRNNCell(cells=stacked_rnn_bw, state_is_tuple=True)
    # applying bidirectional wrapper
    rnn_outputs, final_state_fw, final_state_bw = stack_bidirectional_dynamic_rnn(
        [cell_fw], [cell_bw],
        inputs,
        dtype=tf.float32,
        sequence_length=seq_length)
    print('rnn_outputs shape:', rnn_outputs.shape)

    # applying batch normalization
    if phase:  # while training
        batch_mean, batch_var = tf.nn.moments(rnn_outputs, [0])
        train_mean = tf.assign(pop_mean,
                               pop_mean * decay + batch_mean * (1 - decay))
        train_var = tf.assign(pop_var,
                              pop_var * decay + batch_var * (1 - decay))
        with tf.control_dependencies([train_mean, train_var]):
            rnn_outputs = tf.nn.batch_normalization(rnn_outputs, batch_mean,
                                                    batch_var, beta, scale,
                                                    epsilon)
    else:  # while validation
        rnn_outputs = tf.nn.batch_normalization(rnn_outputs, pop_mean, pop_var,
                                                beta, scale, epsilon)

    rnn_outputs = tf.nn.relu(rnn_outputs)
    rnn_outputs = tf.nn.dropout(rnn_outputs, keep_prob)

    # gather the output at the last valid time frame of each sequence
    last_rnn_output = tf.gather_nd(
        rnn_outputs, tf.stack([tf.range(batch_size), seq_length - 1], axis=1))

    # output layer
    output = tf.add(tf.matmul(last_rnn_output, layer['weight']), layer['bias'])
    print('Output shape', output.get_shape())

    return output
 def bi_gru(self, inputs):
     """build the bi-GRU network. 返回个所有层的隐含状态。"""
     cells_fw = [self.gru_cell() for _ in range(self.n_layer)]
     cells_bw = [self.gru_cell() for _ in range(self.n_layer)]
     initial_states_fw = [cell_fw.zero_state(self.batch_size, tf.float32) for cell_fw in cells_fw]
     initial_states_bw = [cell_bw.zero_state(self.batch_size, tf.float32) for cell_bw in cells_bw]
     outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn(cells_fw, cells_bw, inputs,
                                                         initial_states_fw=initial_states_fw,
                                                         initial_states_bw=initial_states_bw, dtype=tf.float32)
     return outputs
 def bi_lstm(self, inputs):
     """build the bi-LSTM network. 返回个所有层的隐含状态。"""
     cells_fw = [self.lstm_cell() for _ in range(self.n_layer)]
     cells_bw = [self.lstm_cell() for _ in range(self.n_layer)]
     initial_states_fw = [cell_fw.zero_state(self.batch_size, tf.float32) for cell_fw in cells_fw]
     initial_states_bw = [cell_bw.zero_state(self.batch_size, tf.float32) for cell_bw in cells_bw]
     outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn(cells_fw, cells_bw, inputs,
                                                         initial_states_fw=initial_states_fw,
                                                         initial_states_bw=initial_states_bw, dtype=tf.float32)
     return outputs
Example #29
    def __init__(self, learning_rate, num_classes, hidden_units):

        # Initialize data and variables
        self.weights = tf.Variable(
            tf.random_uniform([hidden_units * 2, num_classes],
                              minval=-0.5,
                              maxval=0.5))
        self.biases = tf.Variable(tf.random_uniform([num_classes]))
        self.x = tf.placeholder("float", [None, 700, 44])
        self.y = tf.placeholder("float", [None, 700, num_classes])

        # Do the prediction
        self.fw_rnn_cell1 = rnn.LSTMCell(hidden_units, forget_bias=1.0)
        self.fw_rnn_cell2 = rnn.LSTMCell(hidden_units, forget_bias=1.0)
        self.fw_rnn_cell3 = rnn.LSTMCell(hidden_units, forget_bias=1.0)
        self.bw_rnn_cell1 = rnn.LSTMCell(hidden_units, forget_bias=1.0)
        self.bw_rnn_cell2 = rnn.LSTMCell(hidden_units, forget_bias=1.0)
        self.bw_rnn_cell3 = rnn.LSTMCell(hidden_units, forget_bias=1.0)
        self.fw_rnn_cells = [
            self.fw_rnn_cell1, self.fw_rnn_cell2, self.fw_rnn_cell3
        ]
        self.bw_rnn_cells = [
            self.bw_rnn_cell1, self.bw_rnn_cell2, self.bw_rnn_cell3
        ]
        self.outputs, self.states_fw, self.states_bw = rnn.stack_bidirectional_dynamic_rnn(
            self.fw_rnn_cells, self.bw_rnn_cells, self.x, dtype=tf.float32)
        # self.output.shape is (?, 700, 600)
        self.outputs_reshaped = tf.reshape(self.outputs,
                                           [-1, 2 * hidden_units])
        self.y_reshaped = tf.reshape(self.y, [-1, num_classes])
        # check importantFunctions.py : line-40 to see how it works
        # reference link  is :
        # https://stackoverflow.com/questions/38051143/no-broadcasting-for-tf-matmul-in-tensorflow
        #         self.y_predicted = tf.nn.softmax(tf.matmul(self.outputs_reshaped, self.weights) + self.biases)
        self.y_predicted = tf.matmul(self.outputs_reshaped,
                                     self.weights) + self.biases

        # Define the loss function
        self.loss = tf.nn.softmax_cross_entropy_with_logits(
            logits=self.y_predicted, labels=self.y_reshaped)

        # Define the trainer and optimizer
        self.optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        self.trainer = self.optimizer.minimize(self.loss)

        # creating session and initializing variables
        self.sess = tf.Session()
        self.init = tf.global_variables_initializer()
        self.sess.run(self.init)

        # get accuracy
        self.get_equal = tf.equal(tf.argmax(self.y_reshaped, 1),
                                  tf.argmax(self.y_predicted, 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.get_equal, tf.float32))
Example #30
 def bi_gru(self, inputs, seg_num):
     """build the bi-GRU network. Return the encoder represented vector.
     n_step: 句子的词数量;或者文档的句子数。
     seg_num: 序列的数量,原本应该为 batch_size, 但是这里将 batch_size 个 doc展开成很多个句子。
     """
     cells_fw = [self.gru_cell() for _ in range(self.n_layer)]
     cells_bw = [self.gru_cell() for _ in range(self.n_layer)]
     initial_states_fw = [cell_fw.zero_state(seg_num, tf.float32) for cell_fw in cells_fw]
     initial_states_bw = [cell_bw.zero_state(seg_num, tf.float32) for cell_bw in cells_bw]
     outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn(cells_fw, cells_bw, inputs,
                     initial_states_fw = initial_states_fw, initial_states_bw = initial_states_bw, dtype=tf.float32)
     # outputs: tensor shaped [seg_num, max_time, layers_output], where layers_output = hidden_size * 2 here.
     return outputs
Example #31
def discriminator_rnn(x,
                      labels,
                      df_dim,
                      number_classes,
                      kernel=(3, 3),
                      strides=(2, 2),
                      dilations=(1, 1),
                      pooling='avg',
                      update_collection=None,
                      act=tf.nn.relu,
                      scope_name='Discriminator',
                      reuse=False):
    num_layers = 3
    num_nodes = [int(8 / 2), df_dim, df_dim]
    x = tf.transpose(tf.squeeze(x), perm=[0, 2, 1])

    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()
        # Define LSTM cells
        enc_fw_cells = [
            LSTMCell(num_nodes[layer], name="fw_" + str(layer))
            for layer in range(num_layers)
        ]
        enc_bw_cells = [
            LSTMCell(num_nodes[layer], name="bw_" + str(layer))
            for layer in range(num_layers)
        ]

        # Connect LSTM cells bidirectionally and stack
        (all_states, fw_state,
         bw_state) = stack_bidirectional_dynamic_rnn(cells_fw=enc_fw_cells,
                                                     cells_bw=enc_bw_cells,
                                                     inputs=x,
                                                     dtype=tf.float32)

        # Concatenate results
        for k in range(num_layers):
            if k == 0:
                con_c = tf.concat((fw_state[k].c, bw_state[k].c), 1)
                con_h = tf.concat((fw_state[k].h, bw_state[k].h), 1)
            else:
                con_c = tf.concat((con_c, fw_state[k].c, bw_state[k].c), 1)
                con_h = tf.concat((con_h, fw_state[k].h, bw_state[k].h), 1)

        output = all_states[:, x.get_shape().as_list()[2]]
        output = ops.snlinear(output,
                              1,
                              update_collection=update_collection,
                              name='d_sn_linear')
    return output, tf.concat((fw_state[2].c, bw_state[2].c), 1)
Example #32
def lstm(seq, seq_len, num_class, is_training):
    # bid-lstm
    with tf.variable_scope('bid-lstm'):
        # Use a separate cell per layer; reusing one cell object across the
        # two stacked layers would make them share (and mis-shape) weights.
        cell_fw = [
            tf.nn.rnn_cell.LSTMCell(256, forget_bias=1.0, state_is_tuple=True)
            for _ in range(2)
        ]
        cell_bw = [
            tf.nn.rnn_cell.LSTMCell(256, forget_bias=1.0, state_is_tuple=True)
            for _ in range(2)
        ]
        outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn(cell_fw,
                                                            cell_bw,
                                                            seq,
                                                            dtype=tf.float32)

        #     cell = tf.contrib.rnn.LSTMCell(512, state_is_tuple=True)
        #     stack = tf.contrib.rnn.MultiRNNCell([cell] * 1, state_is_tuple=True)
        #     outputs, _ = tf.nn.dynamic_rnn(cell, seq,  dtype=tf.float32)
        outputs = tf.cond(
            pred=is_training,
            true_fn=lambda: tf.nn.dropout(outputs, keep_prob=0.4),
            false_fn=lambda: outputs,
            name='lstm_dropout')

        batch_size = 100
        output = tf.reshape(outputs, [-1, 256 * 2])
        W = tf.get_variable(name='weights',
                            shape=[256 * 2, num_class],
                            initializer=tf.truncated_normal_initializer(
                                stddev=5e-2, dtype=tf.float16))
        b = tf.get_variable(name='biases',
                            shape=[num_class],
                            initializer=tf.constant_initializer(0.0))
        logits = tf.matmul(output, W) + b

        #     logits = tf.cond(pred=is_training,
        #                 true_fn=lambda: tf.nn.dropout(logits, keep_prob=0.8),
        #                 false_fn=lambda: logits,
        #                 name='lstm_dropout_logits')

        #[batch_size,max_timesteps,num_classes]
        logits = tf.reshape(logits, [batch_size, -1, num_class])

        raw_pred = tf.argmax(tf.nn.softmax(logits),
                             axis=2,
                             name='raw_prediction')

        # Transpose: swap axes 0 and 1 => [max_timesteps, batch_size, num_classes]
        logits = tf.transpose(logits, (1, 0, 2))
        return logits, raw_pred, seq_len
 def bi_gru(self, X_inputs):
     """build the bi-GRU network. Return the encoder represented vector.
     X_inputs: [batch_size, n_step]
     n_step: 句子的词数量;或者文档的句子数。
     outputs: [fw_state, embeddings, bw_state], shape=[batch_size, hidden_size+embedding_size+hidden_size]
     """
     inputs = tf.nn.embedding_lookup(self.embedding, X_inputs)   # [batch_size, n_step, embedding_size]
     cells_fw = [self.gru_cell() for _ in range(self.n_layer)]
     cells_bw = [self.gru_cell() for _ in range(self.n_layer)]
     initial_states_fw = [cell_fw.zero_state(self.batch_size, tf.float32) for cell_fw in cells_fw]
     initial_states_bw = [cell_bw.zero_state(self.batch_size, tf.float32) for cell_bw in cells_bw]
     outputs, _, _ = rnn.stack_bidirectional_dynamic_rnn(cells_fw, cells_bw, inputs,
                     initial_states_fw = initial_states_fw, initial_states_bw = initial_states_bw, dtype=tf.float32)
     hidden_outputs = tf.concat([outputs, inputs], axis=2)
     return hidden_outputs  # shape =[seg_num, n_steps, hidden_size*2+embedding_size]