Example #1
def build_LSTMCellwRNN_model(mdlstm_units=32, dense_units=200):
    dense_act = 'tanh'

    input_img = layers.Input(shape=(max_img_width, max_img_height, 1),
                             name='image',
                             dtype='float32')
    labels = layers.Input(name='label', shape=(None, ), dtype='float32')

    input_reshaped = layers.Reshape(target_shape=(max_img_width,
                                                  max_img_height))(input_img)

    x = layers.RNN(layers.LSTMCell(mdlstm_units),
                   return_sequences=True)(input_reshaped)
    x = layers.Dense(100, activation=dense_act, name='x_out')(x)

    y = layers.Permute((2, 1))(input_reshaped)
    y = layers.RNN(layers.LSTMCell(mdlstm_units), return_sequences=True)(y)
    y = layers.Dense(200, activation=dense_act, name='y_out')(y)
    y = layers.Permute((2, 1))(y)
    print(x)
    print(y)

    added = layers.Add()([x, y])
    out = layers.Dense(len(alphabet) + 1,
                       activation='softmax',
                       name='dense_out')(added)
    classified = CTCLayer(name='ctc_loss')(labels, out)

    model = keras.models.Model(inputs=[input_img, labels],
                               outputs=classified,
                               name='LSTMlayerModel')

    model.compile(optimizer=keras.optimizers.Adam())

    return model
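This builder assumes module-level globals (max_img_width, max_img_height, alphabet) and a custom CTCLayer defined elsewhere in the file. A minimal usage sketch under assumed placeholder values; note that Add() only type-checks when max_img_width == 200 and max_img_height == 100, matching the hard-coded Dense units of the two branches:

max_img_width, max_img_height = 200, 100  # must match the Dense(200)/Dense(100) branch widths
alphabet = 'abcdefghijklmnopqrstuvwxyz'   # placeholder character set

model = build_LSTMCellwRNN_model(mdlstm_units=32)
model.summary()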
Example #2
def LSTMCell4D(inp,
               mdlstm_units,
               dense_units,
               return_sequences=False,
               dense_act='tanh'):
    w = layers.RNN(layers.LSTMCell(mdlstm_units), return_sequences=True)(inp)
    w = layers.Dense(dense_units, activation=dense_act)(w)

    x = lambda_reverse_layer_A()(inp)
    x = layers.RNN(layers.LSTMCell(mdlstm_units), return_sequences=True)(x)
    x = layers.Dense(dense_units, activation=dense_act)(x)
    x = lambda_reverse_layer_A()(x)

    y = lambda_reverse_layer_B()(inp)
    y = layers.RNN(layers.LSTMCell(mdlstm_units), return_sequences=True)(y)
    y = layers.Dense(dense_units, activation=dense_act)(y)
    y = lambda_reverse_layer_B()(y)

    z = lambda_reverse_layer_A()(inp)
    z = lambda_reverse_layer_B()(z)
    z = layers.RNN(layers.LSTMCell(mdlstm_units), return_sequences=True)(z)
    z = layers.Dense(dense_units, activation=dense_act)(z)
    z = lambda_reverse_layer_B()(z)
    z = lambda_reverse_layer_A()(z)

    added = layers.Add()([w, x, y, z])

    return added
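lambda_reverse_layer_A and lambda_reverse_layer_B are not defined in this snippet. A minimal sketch of what they plausibly do, assuming A reverses the input along axis 1 and B along axis 2, giving the four scan directions of an MDLSTM-style layer:

def lambda_reverse_layer_A():
    # Reverse along axis 1 so the RNN scans that dimension back-to-front.
    return layers.Lambda(lambda t: tf.reverse(t, axis=[1]))

def lambda_reverse_layer_B():
    # Reverse along axis 2 for the opposite direction on the other dimension.
    return layers.Lambda(lambda t: tf.reverse(t, axis=[2]))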
Example #3
    def __init__(self, lstm_hidden_num):
        super(LstmDecoder, self).__init__()
        self.lstm_hidden_num = lstm_hidden_num
        k_initializer = tf.keras.initializers.truncated_normal()
        b_initializer = tf.keras.initializers.zeros()
        forward_layer = layers.LSTMCell(self.lstm_hidden_num,
                                        dropout=0.8,
                                        recurrent_dropout=0.8,
                                        kernel_initializer=k_initializer,
                                        bias_initializer=b_initializer)
        backward_layer = layers.LSTMCell(
            self.lstm_hidden_num,
            dropout=0.8,
            recurrent_dropout=0.8,
            kernel_initializer=k_initializer,
            bias_initializer=b_initializer,
        )
        forward_layer = layers.RNN(forward_layer, return_sequences=True)
        backward_layer = layers.RNN(backward_layer,
                                    return_sequences=True,
                                    go_backwards=True)

        self.bilstm = layers.Bidirectional(forward_layer,
                                           backward_layer=backward_layer)
        self.W = tf.Variable(initial_value=lambda: tf.random.truncated_normal(
            shape=[self.lstm_hidden_num * 2, config.NUM_CLASSES], stddev=0.1),
                             trainable=True)
        self.b = tf.Variable(
            initial_value=lambda: tf.constant(0., shape=[config.NUM_CLASSES]),
            trainable=True)
Example #4
def test_GRUClipCell():
    from indl.rnn.gru_clip import GRUClipCell

    K.clear_session()

    n_times, n_sensors = 246, 36
    batch_size = 16
    f_units = 128

    f_enc_inputs = tf.keras.Input(shape=(n_times, n_sensors))
    cell = GRUClipCell(f_units)
    assert isinstance(cell, tfkl.GRUCell)
    assert cell.units == f_units
    init_state = cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)
    assert init_state.shape.as_list() == [batch_size, f_units]
    rnn = tfkl.RNN(cell)
    assert rnn.cell == cell
    bidir = tfkl.Bidirectional(rnn)
    final_state = bidir(f_enc_inputs)
    assert final_state.shape.as_list()[-1] == (f_units * 2)

    model = tf.keras.Model(inputs=f_enc_inputs, outputs=final_state, name="GRUClip")
    dummy_state = model(tf.random.uniform((batch_size, n_times, n_sensors)))
    assert dummy_state.shape.as_list() == [batch_size, f_units * 2]
    assert (dummy_state.numpy() != 0).sum() > 0
Example #5
File: __init__.py Project: SachsLab/indl
def create_generator_lfads(params):
    """
    units_gen,
    units_con,
    factors_dim,
    co_dim,
    ext_input_dim,
    inject_ext_input_to_gen,
    """
    from indl.model.lfads.complex import ComplexCell

    # TODO: Sample/Mean from $q(f)$. This will replace the first element in generator init_states
    #  TODO: need a custom function for sample-during-train-mean-during-test. See nn.dropout for inspiration.
    # TODO: Sample from $q(z_t)$, and optionally concat with ext_input, to build generator inputs.

    # TODO: continue generator from lfads-cd/lfadslite.py start at 495
    custom_cell = ComplexCell(
        params['gen_dim'],  # Units in generator GRU
        con_hidden_state_dim,  # Units in controller GRU
        params['factors_dim'],
        params['co_dim'],
        params['ext_input_dim'],
        True,
    )
    generator = tfkl.RNN(
        custom_cell,
        return_sequences=True,
        # recurrent_regularizer=tf.keras.regularizers.l2(l=gen_l2_reg),
        name='gen_rnn')
    init_states = generator.get_initial_state(gen_input)

    gen_output = generator(gen_input, initial_state=init_states)
    factors = gen_output[-1]
    return factors
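con_hidden_state_dim and gen_input are not defined in this snippet and presumably come from the enclosing module. Hypothetical stand-ins, assuming the controller width is carried in params and gen_input is the (batch, time, features) tensor feeding the generator:

# Hypothetical definitions for the names the snippet assumes:
con_hidden_state_dim = params['con_dim']  # controller GRU units
gen_input = tfkl.Input(shape=(None, params['co_dim'] + params['ext_input_dim']))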
Example #6
File: func.py Project: xlnwel/d2rl
def dnc_rnn(output_size, 
            access_config=dict(memory_size=128, word_size=16, num_reads=4, num_writes=1), 
            controller_config=dict(hidden_size=128),
            clip_value=20,
            name='dnc',
            rnn_config={}):
    """Return an RNN that encapsulates DNC
    
    Args:
        output_size: Output dimension size of dnc
        access_config: A dictionary of access module configuration. 
            memory_size: The number of memory slots
            word_size: The size of each memory slot
            num_reads: The number of read heads
            num_writes: The number of write heads
            name: name of the access module, optionally
        controller_config: A dictionary of controller (LSTM) module configuration
        clip_value: Clips controller and core output values to within
            `[-clip_value, clip_value]` if specified
        name: module name
        rnn_config: specifies extra arguments for keras.layers.RNN
    """
    dnc_cell = DNC(access_config, 
                controller_config, 
                output_size, 
                clip_value, 
                name)
    return layers.RNN(dnc_cell, **rnn_config)
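A usage sketch, assuming the DNC cell class imported by the enclosing module implements the standard Keras cell interface and that tensorflow is imported as tf:

# Hypothetical dimensions: sequences of 32-dim inputs, 10-dim DNC output.
rnn = dnc_rnn(output_size=10, rnn_config=dict(return_sequences=True))
inputs = tf.keras.Input(shape=(None, 32))
outputs = rnn(inputs)  # (batch, time, 10)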
Example #7
 def __init__(self, config):
     self.stack_rnn_size  = config.stack_rnn_size
     # xy encoder: [N,T1,h_dim]
     super(TrajectoryEncoder, self).__init__(name="trajectory_encoder")
     # Linear embedding of the observed positions (for each x,y)
     self.traj_xy_emb_enc = layers.Dense(config.emb_size,
         activation=config.activation_func,
         use_bias=True,
         name='position_embedding')
     # LSTM cell, including dropout, with a stacked configuration.
     # Output is composed of:
     # - the sequence of h's along time, from the highest level only: h1,h2,...
     # - last pair of states (h,c) for the first layer
     # - last pair of states (h,c) for the second layer
     # - ... and so on
     self.lstm_cells= [layers.LSTMCell(config.enc_hidden_size,
             name   = 'trajectory_encoder_cell',
             dropout= config.dropout_rate,
             recurrent_dropout=config.dropout_rate) for _ in range(self.stack_rnn_size)]
     self.lstm_cell = layers.StackedRNNCells(self.lstm_cells)
     # Recurrent neural network using the previous cell
     # Initial state is zero; We return the full sequence of h's and the pair of last states
     self.lstm      = layers.RNN(self.lstm_cell,
             name   = 'trajectory_encoder_rnn',
             return_sequences= True,
             return_state    = True)
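With StackedRNNCells and return_state=True, the layer returns the top-layer h sequence followed by one [h, c] pair per stacked cell. A hypothetical call() sketch (not part of the original source) showing how the pieces compose:

 def call(self, traj_inputs):
     # Embed the observed positions, then run the stacked-cell RNN.
     emb = self.traj_xy_emb_enc(traj_inputs)
     outputs = self.lstm(emb)
     seq_h = outputs[0]          # (batch, T1, enc_hidden_size), top layer only
     layer_states = outputs[1:]  # one [h, c] pair per stacked cell
     return seq_h, layer_states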
Example #8
 def _stacked_lstm_impl2(self, dim):
     rnn_cells = [layers.LSTMCell(dim) for _ in range(self.num_layers)]
     stacked_lstm = layers.StackedRNNCells(rnn_cells)
     lstm_layer = layers.RNN(stacked_lstm, return_sequences=True)
     if self.bidirectional:
         lstm_layer = layers.Bidirectional(lstm_layer)
     return [lstm_layer]
Example #9
def rnn_model(dictionary):
    n_hidden = 512
    model = tf.keras.Sequential()
    # Add an Embedding layer expecting an input vocab of size len(dictionary),
    # and an output embedding dimension of size 64.
    model.add(layers.Embedding(len(dictionary), 64, input_length=3))
    # model.add(layers.Dense(64, input_shape=(3,10)))
    # Add a LSTM layer with 128 internal units.
    # model.add(layers.LSTM(128))
    new_shape = (3, 1)
    # model.add(layers.Dense(64, input_shape=new_shape))

    rnn_cell = tf.keras.layers.StackedRNNCells([
        tf.keras.layers.LSTMCell(n_hidden),
        tf.keras.layers.LSTMCell(n_hidden)
    ])
    # model.add(layers.RNN(rnn_cell, input_length=n_input))
    model.add(layers.RNN(rnn_cell, input_shape=new_shape))
    # Add a Dense layer with 10 units.
    model.add(layers.Dense(len(dictionary), activation="softmax"))

    model.compile(
        optimizer=tf.keras.optimizers.RMSprop(),  # Optimizer
        # Loss function to minimize
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),  # final layer already applies softmax
        # List of metrics to monitor
        metrics=['sparse_categorical_accuracy'])

    return lambda: model
Example #10
    def __init__(self, target_ensembles, nh_lstm, nh_bottleneck,
                 dropoutrates_bottleneck, bottleneck_weight_decay,
                 bottleneck_has_bias, init_weight_disp, **kwargs):
        super(GridCellNetwork, self).__init__(**kwargs)

        self._target_ensembles = target_ensembles
        self._nh_lstm = nh_lstm
        self._nh_bottleneck = nh_bottleneck
        self._dropoutrates_bottleneck = dropoutrates_bottleneck
        self._bottleneck_weight_decay = bottleneck_weight_decay
        self._bottleneck_has_bias = bottleneck_has_bias
        self._init_weight_disp = init_weight_disp

        self.init_lstm_state = layers.Dense(self._nh_lstm, name="state_init")
        self.init_lstm_cell = layers.Dense(self._nh_lstm, name="cell_init")
        self.rnn_core = MinimalRNNCell(
            target_ensembles=target_ensembles,
            nh_lstm=nh_lstm,
            nh_bottleneck=nh_bottleneck,
            dropoutrates_bottleneck=dropoutrates_bottleneck,
            bottleneck_weight_decay=bottleneck_weight_decay,
            bottleneck_has_bias=bottleneck_has_bias,
            init_weight_disp=init_weight_disp)
        self.RNN = layers.RNN(return_state=True,
                              return_sequences=True,
                              cell=self.rnn_core)
Example #11
 def __init__(self):
     super().__init__()
     self.embed_layer = layers.Embedding(10,
                                         32,
                                         batch_input_shape=[None, None])
     self.rnncell = layers.SimpleRNNCell(64)
     self.rnn_layer = layers.RNN(self.rnncell, return_sequences=True)
     self.dense = layers.Dense(10)
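A hypothetical call() sketch (assumed, not in the original snippet) showing the intended data flow:

 def call(self, x):
     x = self.embed_layer(x)  # (batch, time) -> (batch, time, 32)
     x = self.rnn_layer(x)    # (batch, time, 64): full hidden-state sequence
     return self.dense(x)     # (batch, time, 10) logits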
Example #12
    def __init__(self, config):
        super(DecoderAtt, self).__init__(name="trajectory_decoder")
        self.add_social     = config.add_social
        self.stack_rnn_size = config.stack_rnn_size
        self.rnn_type       = config.rnn_type
        # Linear embedding of the encoding resulting observed trajectories
        self.traj_xy_emb_dec = layers.Dense(config.emb_size,
            activation=config.activation_func,
            name='trajectory_position_embedding')
        # RNN cell
        # Condition for cell type
        if self.rnn_type == 'gru':
            # GRU cell
            self.dec_cell_traj = layers.GRUCell(config.dec_hidden_size,
                                                recurrent_initializer='glorot_uniform',
                                                dropout=config.dropout_rate,
                                                recurrent_dropout=config.dropout_rate,
                                                name='trajectory_decoder_GRU_cell')
        else:
            # LSTM cell
            self.dec_cell_traj = layers.LSTMCell(config.dec_hidden_size,
                                                recurrent_initializer='glorot_uniform',
                                                name='trajectory_decoder_LSTM_cell',
                                                dropout=config.dropout_rate,
                                                recurrent_dropout=config.dropout_rate)
        # RNN layer
        self.recurrentLayer = layers.RNN(self.dec_cell_traj,return_sequences=True,return_state=True)
        self.M = 1
        if (self.add_social):
            self.M=self.M+1

        # Attention layer
        self.focal_attention = FocalAttention(config,self.M)
        # Dropout layer
        self.dropout = layers.Dropout(config.dropout_rate,name="dropout_decoder_h")
        # Mapping from h to positions
        self.h_to_xy = layers.Dense(config.P,
            activation=tf.identity,
            name='h_to_xy')

        # Input layers
        # Position input
        dec_input_shape      = (1,config.P)
        self.input_layer_pos = layers.Input(dec_input_shape,name="position")
        enc_last_state_shape = (config.dec_hidden_size)
        # Proposals for initial states
        self.input_layer_hid1= layers.Input(enc_last_state_shape,name="initial_state_h")
        self.input_layer_hid2= layers.Input(enc_last_state_shape,name="initial_state_c")
        # Context shape: [N,M,T1,h_dim]
        ctxt_shape = (self.M,config.obs_len,config.enc_hidden_size)
        # Context input
        self.input_layer_ctxt = layers.Input(ctxt_shape,name="context")
        self.out = self.call((self.input_layer_pos,(self.input_layer_hid1,self.input_layer_hid2),self.input_layer_ctxt))
        # Call init again. This is a workaround for being able to use summary
        super(DecoderAtt, self).__init__(
                    inputs= [self.input_layer_pos,self.input_layer_hid1,self.input_layer_hid2,self.input_layer_ctxt],
                    outputs=self.out)
Example #13
    def __init__(self,
                 audio_features=195,
                 audio_window_size=8,
                 stage2_window_size=64,
                 num_face_ids=76,
                 num_landmarks=76,
                 num_phonemes=21,
                 num_visemes=20,
                 dropout_rate=0.5,
                 data_format="channels_last",
                 **kwargs):
        super(VisemeNet, self).__init__(**kwargs)
        stage1_rnn_hidden_size = 256
        stage1_fc_mid_channels = 256
        stage2_rnn_in_features = (audio_features + num_landmarks + stage1_fc_mid_channels) * \
                                 stage2_window_size // audio_window_size
        self.audio_window_size = audio_window_size
        self.stage2_window_size = stage2_window_size

        self.stage1_rnn = nn.RNN([
            nn.LSTMCell(units=stage1_rnn_hidden_size,
                        dropout=dropout_rate,
                        name="stage1_rnn{}".format(i + 1)) for i in range(3)
        ])

        self.lm_branch = VisemeDenseBranch(
            in_channels=(stage1_rnn_hidden_size + num_face_ids),
            out_channels_list=[stage1_fc_mid_channels, num_landmarks],
            data_format=data_format,
            name="lm_branch")
        self.ph_branch = VisemeDenseBranch(
            in_channels=(stage1_rnn_hidden_size + num_face_ids),
            out_channels_list=[stage1_fc_mid_channels, num_phonemes],
            data_format=data_format,
            name="ph_branch")

        self.cls_branch = VisemeRnnBranch(
            in_channels=stage2_rnn_in_features,
            out_channels_list=[256, 200, num_visemes],
            rnn_num_layers=1,
            dropout_rate=dropout_rate,
            data_format=data_format,
            name="cls_branch")
        self.reg_branch = VisemeRnnBranch(
            in_channels=stage2_rnn_in_features,
            out_channels_list=[256, 200, 100, num_visemes],
            rnn_num_layers=3,
            dropout_rate=dropout_rate,
            data_format=data_format,
            name="reg_branch")
        self.jali_branch = VisemeRnnBranch(in_channels=stage2_rnn_in_features,
                                           out_channels_list=[128, 200, 2],
                                           rnn_num_layers=3,
                                           dropout_rate=dropout_rate,
                                           data_format=data_format,
                                           name="jali_branch")
Example #14
 def _build_encoder(self, encoder, layer_size, num_layers):
     if encoder is None:
         enc_rnn_cells = [
             layers.LSTMCell(layer_size, name=("enc_lstm_%d" % i))
             for (i, layer_size) in enumerate([layer_size] * num_layers)
         ]
         encoder = layers.RNN(enc_rnn_cells,
                              return_state=True,
                              return_sequences=True)
     return encoder
Example #15
 def __init__(self):
     super().__init__()
     self.embed_layer = layers.Embedding(10,
                                         32,
                                         batch_input_shape=[None, None])
     self.rnncell = layers.LSTMCell(64)
     self.rnn_layer = layers.RNN(self.rnncell, return_sequences=True)
     self.dense1 = layers.Dense(64, activation='relu')
     self.dense2 = layers.Dense(32, activation='relu')
     self.dense3 = layers.Dense(10)
Example #16
 def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0):
     super(Seq2SeqAttentionDecoder, self).__init__()
     self.attention = AdditiveAttention(num_hiddens=8, dropout=0.1)
     self.embed = layers.Embedding(input_dim=vocab_size, output_dim=embed_size)
     self.rnn = layers.RNN(
         layers.StackedRNNCells([layers.GRUCell(units=num_hiddens, dropout=dropout) for _ in range(num_layers)])
         , return_state=True
         , return_sequences=True
     )
     self.dense = layers.Dense(units=vocab_size)
Example #17
    def __init__(self, config):
        super(DecoderOf, self).__init__(name="trajectory_decoder")
        self.rnn_type = config.rnn_type
        # Linear embedding of the encoding resulting observed trajectories
        self.traj_xy_emb_dec = layers.Dense(
            config.emb_size,
            activation=config.activation_func,
            name='trajectory_position_embedding')
        # RNN cell
        # Condition for cell type
        if self.rnn_type == 'gru':
            # GRU cell
            self.dec_cell_traj = layers.GRUCell(
                config.dec_hidden_size,
                recurrent_initializer='glorot_uniform',
                dropout=config.dropout_rate,
                recurrent_dropout=config.dropout_rate,
                name='trajectory_decoder_cell_with_GRU')
        else:
            # LSTM cell
            self.dec_cell_traj = layers.LSTMCell(
                config.dec_hidden_size,
                recurrent_initializer='glorot_uniform',
                name='trajectory_decoder_cell_with_LSTM',
                dropout=config.dropout_rate,
                recurrent_dropout=config.dropout_rate)
        # RNN layer
        self.recurrentLayer = layers.RNN(self.dec_cell_traj,
                                         return_sequences=True,
                                         return_state=True)
        # Dropout layer
        self.dropout = layers.Dropout(config.dropout_rate,
                                      name="dropout_decoder_h")
        # Mapping from h to positions
        self.h_to_xy = layers.Dense(config.P,
                                    activation=tf.identity,
                                    name='h_to_xy')

        # Input layers
        # Position input
        dec_input_shape = (1, config.P)
        self.input_layer_pos = layers.Input(dec_input_shape, name="position")
        enc_last_state_shape = (config.dec_hidden_size)
        # Proposals for initial states
        self.input_layer_hid1 = layers.Input(enc_last_state_shape,
                                             name="initial_state_h")
        self.input_layer_hid2 = layers.Input(enc_last_state_shape,
                                             name="initial_state_c")
        self.out = self.call((self.input_layer_pos, (self.input_layer_hid1,
                                                     self.input_layer_hid2)))
        # Call init again. This is a workaround for being able to use summary
        super(DecoderOf, self).__init__(inputs=[
            self.input_layer_pos, self.input_layer_hid1, self.input_layer_hid2
        ],
                                        outputs=self.out)
Example #18
def create_decoder_complex(params: dict,
                           zs_sample: tf.Tensor,  # a sample from q(f)
                           z1: tf.Tensor,  # == z1 output
                           ext_input,  # Not implemented. Must be tensor (n_times, 0)
                           kernel_initializer: str = 'lecun_normal',
                           bias_initializer: str = 'zeros',
                           recurrent_regularizer: str = 'l2')\
        -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:

    if not params['dec_rnn_type'].lower().startswith('complex'):
        raise ValueError("Please use `create_generator` for non-complex cell")

    # LFADS' ComplexCell includes the z2 RNN, the generator RNN, and the to-factors Dense layer.
    # As the ComplexCell is run through a recurrent loop, it has a state similar to any RNN cell. However, the
    #  "complex state" includes the individual z2 RNN state and the generator RNN state. This can be confusing.
    # The initial "complex state" is zeros except the first portion which is a sample of q(f) provided in zs_sample.
    #  On this and subsequent steps, this part of the "complex state" containing zs_sample goes through a dropout
    #  layer and the to-factors Dense layer giving us prev_factors. prev_factors are then concatenated with z1 (z1
    #  is not a dist, obtained from z_enc input), run through dropout, and finally used as inputs to z2 RNN. The z2
    #  initial state is simply zeros. The z2 output is used to parameterize q(z_t).
    #  TODO: LFADS' z2 initial state is stored in a tf.Variable!
    # The generator inputs are a concatenation of a sample from q(z) and any external inputs if present. The
    #  generator initial state is the same sample from q(f) in f_enc used to calculate the prev_factors.
    # As an overly-simplified comparison with the other VAE formulations, we can say that the generator RNN
    #  gets its inputs from q(z) and its initial state from q(f).
    from indl.model.lfads.complex import ComplexCell
    custom_cell = ComplexCell(
        params['dec_rnn_units'],
        params['encd_rnn2_units'],
        params['n_factors'],
        params['zd_size'],
        params['ext_input_dim'],  # External input dimension.
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        recurrent_regularizer=recurrent_regularizer,
        dropout=params['dropout_rate'],
        clip_value=params['gru_clip_value'])
    complex_rnn = tfkl.RNN(
        custom_cell,
        return_sequences=True,
        # recurrent_regularizer=tf.keras.regularizers.l2(l=gen_l2_reg),
        name='complex_rnn')
    # Get RNN inputs
    ext_input_do = tfkl.Dropout(params['dropout_rate'])(ext_input)
    complex_input = tfkl.Concatenate()([z1, ext_input_do])
    # Get the RNN init states
    complex_init_states = complex_rnn.get_initial_state(complex_input)
    complex_init_states[0] = tfkl.Dense(params['dec_rnn_units'])(zs_sample)

    complex_output = complex_rnn(complex_input,
                                 initial_state=complex_init_states)
    gen_outputs, z2_state, z_latent_mean, z_latent_logvar, q_z_sample, factors = complex_output

    # We change the order on the output to match the vanilla `create_generator` output first 2 elements.
    return gen_outputs, factors, z2_state, z_latent_mean, z_latent_logvar, q_z_sample
Example #19
def RNNModel():
    cell = RNNCell([64, 32, 10])
    rnn = layers.RNN(cell)

    inputs = keras.Input((28, 28))
    x = layers.Flatten()(inputs)
    # x = inputs
    print(x)
    outputs = rnn(x)

    model = keras.models.Model(inputs, outputs)
    model.summary()
    return model
Example #20
 def __init__(self, vocab_size, hidden_dim=10):
     super(WordRNN, self).__init__()
     # Hyperparameters
     self.hidden_dim = hidden_dim
     self.vocab_size = vocab_size
     self.embedding = layers.Embedding(vocab_size,
                                       EMBEDDING_SIZE,
                                       input_length=MAX_DOCUMENT_LENGTH)
     # Weight variables and RNN cell
     self.rnn = layers.RNN(tf.keras.layers.GRUCell(self.hidden_dim),
                           unroll=True)
     self.dense = layers.Dense(MAX_LABEL, activation=None)
Example #21
def build_model_RNN(units,input_dim,output_size):

    RNN_layer = layers.RNN(
        layers.SimpleRNNCell(units), input_shape=(None, input_dim)
    )
    model = keras.models.Sequential(
        [
            RNN_layer,
            layers.BatchNormalization(),
            layers.Dense(output_size),
        ]
    )
    return model
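A usage sketch with hypothetical dimensions (28 timesteps of 28 features, 10 output classes); from_logits=True matches the activation-free final Dense layer:

model = build_model_RNN(units=64, input_dim=28, output_size=10)
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              optimizer='adam',
              metrics=['accuracy'])
model.summary()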
Example #22
def get_encoder(hidden_size,vocab_size,
                 num_tokens=7,nlayers=1,dropout=0.2,
                 bsize=57, msize=552, ssize=3190, dsize=404):
    len_input=keras.Input(shape=(),name='len',dtype=tf.int64)
    pieces_input=[keras.Input(shape=(num_pieces,),name='piece{}'.format(i+1)) for i in range(num_tokens)]

    embedding=layers.Embedding(vocab_size,200,mask_zero=True)
    pieces=[embedding(piece) for piece in pieces_input]
    cells=[layers.LSTMCell(hidden_size,dropout=dropout) for _ in range(nlayers-1)]
    cells.append(layers.LSTMCell(hidden_size))
    lstm=layers.RNN(cells,return_sequences=False,return_state=True,name='multi-lstm')



    state=lstm(pieces[0])[-1][-1]
    states=[state]

    pieces.remove(pieces[0])

    zero_state=tf.zeros_like(state)

    sent_len=tf.reshape(len_input,(-1,1))

    for i,piece in enumerate(pieces):
    # for piece in pieces:
        state=tf.where(i+1<sent_len,lstm(piece)[-1][-1],zero_state)
        # state= lstm(piece)[-1][-1]
        states.append(state)

    result=tf.math.add_n(states)
    
    #sent_len=tf.tile(len_input,[1,hidden_size])
    sent_len=tf.cast(sent_len,tf.float32)
    result=tf.divide(result,sent_len)

    feature=Sequential([
        layers.Dense(hidden_size,activations.relu),
        layers.Dropout(dropout),
        layers.Dense(hidden_size,activations.relu,name='final_feature')
    ],name='feature_seq')(result)

    bcate = layers.Dense(bsize,name='bcateid')(feature)
    mcate = layers.Dense(msize,name='mcateid')(feature)
    scate = layers.Dense(ssize,name='scateid')(feature)
    dcate = layers.Dense(dsize,name='dcateid')(feature)

    inputs=[len_input]+pieces_input

    model=Model(inputs=inputs,outputs=[bcate,mcate,scate,dcate])
    return model
Example #23
    def build(self, input_shape):
        # We expect to receive (X, A)
        # A - Attention (may be 2 matrices if we use reverse
        #                diffusion) (, N, N) or (, 2, N, N)
        # X - graph signal (, N, F)

        # getting number of nodes N, again.
        x_shape = input_shape[0]
        self.N = x_shape[-2]

        # Custom graph-convolutional GRU cell over the N nodes.
        self.cell = GRUCell(self.N, self.F_h, self.K)

        self.RNN = layers.RNN(self.cell, **self.kwargs)
Example #24
def get_encoder(hidden_size,vocab_size,
                 num_tokens=7,nlayers=2,dropout=0.2,
                 bsize=57, msize=552, ssize=3190, dsize=404):
    len_input=keras.Input(shape=(),name='len',dtype=tf.int64)
    pieces_input=[keras.Input(shape=(num_pieces,),name='piece{}'.format(i+1)) for i in range(num_tokens)]
    img_input=keras.Input(shape=(2048,),name='img')

    embedding=layers.Embedding(vocab_size,hidden_size,mask_zero=True)
    pieces=[embedding(piece) for piece in pieces_input]
    cells=[layers.GRUCell(hidden_size,dropout=dropout) for _ in range(nlayers-1)]
    cells.append(layers.LSTMCell(hidden_size))
    lstm=layers.RNN(cells,return_sequences=False,return_state=True,name='multi-gru')



    state=lstm(pieces[0])
    states=[state[-1][-1]]

    pieces.remove(pieces[0])

    for piece in pieces:
        state=lstm(piece)
        states.append(state[-1][-1])

    result=tf.math.add_n(states)
    sent_len=tf.reshape(len_input,(-1,1))
    #sent_len=tf.tile(len_input,[1,hidden_size])
    sent_len=tf.cast(sent_len,tf.float32)
    text_feat=tf.divide(result,sent_len,name='text_feature')

    img_feat=layers.Dense(hidden_size,activations.relu,name='img_feature')(img_input)

    text_plus_img=layers.concatenate([text_feat,img_feat],axis=1)

    feature=Sequential([
        layers.Dense(hidden_size,activations.relu),
        layers.Dropout(dropout),
        layers.Dense(hidden_size,activations.relu,name='final_feature')
    ],name='feature_seq')(text_plus_img)

    bcate = layers.Dense(bsize,name='bcateid')(feature)
    mcate = layers.Dense(msize,name='mcateid')(feature)
    scate = layers.Dense(ssize,name='scateid')(feature)
    dcate = layers.Dense(dsize,name='dcateid')(feature)

    inputs=[len_input,img_input]+pieces_input

    model=Model(inputs=inputs,outputs=[bcate,mcate,scate,dcate])
    return model
Example #25
 def __init__(self,
              emb_dim,
              lstm_dim,
              vocab_size,
              NEG,
              maxlen,
              dropout=0.0):
     super(LSTM_DSSM, self).__init__()
     self.word2emb = layers.Embedding(vocab_size, emb_dim, mask_zero=True)
     self.lstm = layers.Bidirectional(layers.RNN(
         (tf.keras.experimental.PeepholeLSTMCell(
             lstm_dim, dropout=dropout, recurrent_dropout=dropout)),
         time_major=False),
                                      merge_mode='ave')
     self.NEG = NEG
     self.maxlen = maxlen
Example #26
 def __init__(self, config):
     super(SocialEncoder, self).__init__(name="social_encoder")
     # Linear embedding of the social part
     self.traj_social_emb_enc = layers.Dense(config.emb_size,
         activation=config.activation_func,
         name='social_feature_embedding')
     # LSTM cell, including dropout
     self.lstm_cell = layers.LSTMCell(config.enc_hidden_size,
         name   = 'social_encoder_cell',
         dropout= config.dropout_rate,
         recurrent_dropout= config.dropout_rate)
     # Recurrent neural network using the previous cell
     self.lstm      = layers.RNN(self.lstm_cell,
         name   = 'social_encoder_rnn',
         return_sequences= True,
         return_state    = True)
Example #27
    def _neighbor_model(self, neigh_vecs):
        dims = tf.shape(neigh_vecs)
        batch_size = dims[0]
        initial_state = self.cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)
        used = tf.sign(tf.reduce_max(tf.abs(neigh_vecs), axis=2))
        length = tf.reduce_sum(used, axis=1)
        length = tf.maximum(length, tf.constant(1.))
        length = tf.cast(length, tf.int32)

        rnn_outputs = layers.RNN(self.cell, return_sequences=True, time_major=False)(neigh_vecs, initial_state=initial_state)

        batch_size = tf.shape(rnn_outputs)[0]
        max_len = tf.shape(rnn_outputs)[1]
        out_size = int(rnn_outputs.get_shape()[2])
        index = tf.range(0, batch_size) * max_len + (length - 1)
        flat = tf.reshape(rnn_outputs, [-1, out_size])
        return tf.gather(flat, index)
Example #28
 def __init__(self, inter_neurons, command_neurons, motor_neurons,
              sensory_fanout, inter_fanout, recurrent_command_synapses,
              motor_fanin):
     self._name = 'ncp_layer'
     ncp_arch = kncp.wirings.NCP(
         inter_neurons=inter_neurons,  # Number of inter neurons
         command_neurons=command_neurons,  # Number of command neurons
         motor_neurons=motor_neurons,  # Number of motor neurons
         sensory_fanout=sensory_fanout,  # Outgoing synapses per sensory neuron
         inter_fanout=inter_fanout,  # Outgoing synapses per inter neuron
         recurrent_command_synapses=recurrent_command_synapses,  # Recurrent synapses in the command neuron layer
         motor_fanin=motor_fanin,  # Incoming synapses per motor neuron
     )
     self._ncp_cell = tfkl.RNN(kncp.LTCCell(ncp_arch),
                               return_sequences=True)
Example #29
    def __init__(self, hp, name='taco2_encoder'):
        super(Tacotron2Encoder, self).__init__(name=name)
        # embedding layer
        self.embed_layer = layers.Embedding(hp.num_symbols,
                                            hp.embedding_dim,
                                            mask_zero=True)

        # 3-layer conv1d
        cnns_num, ksize, channels = hp.encoder_cnns
        self.cnns = [
            cs.ConvBlock('cabd', '1D', channels, ksize, hp.dropout_rate)
            for i in range(cnns_num)
        ]

        # 1-layer bi-lstm
        units, zo_rate = hp.encoder_rnns_units, hp.zoneout_rate
        single_layer = layers.RNN(cs.ZoneoutLSTMCell(units, zo_rate),
                                  return_sequences=True)
        # with masking, outputs are zero at time steps where the mask is 0
        self.rnn = layers.Bidirectional(single_layer, name='bilstm')
Example #30
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 rnn_num_layers,
                 dropout_rate,
                 data_format="channels_last",
                 **kwargs):
        super(VisemeRnnBranch, self).__init__(**kwargs)
        assert (in_channels is not None)

        self.rnn = nn.RNN([
            nn.LSTMCell(units=out_channels_list[0],
                        dropout=dropout_rate,
                        name="rnn{}".format(i + 1))
            for i in range(rnn_num_layers)
        ])
        self.fc_branch = VisemeDenseBranch(
            in_channels=out_channels_list[0],
            out_channels_list=out_channels_list[1:],
            data_format=data_format,
            name="fc_branch")