Example #1
def generate_model(width, height):
    model = Sequential()

    model.add(
        Dense(width * height, input_shape=(width, height),
              activation='linear'))

    model.add(
        Bidirectional(SimpleRNN((height + 1) * (width + 1),
                                input_shape=(width, height),
                                activation='relu',
                                return_sequences=True,
                                recurrent_initializer='random_uniform',
                                unroll=True),
                      merge_mode='sum'))
    model.add(
        SimpleRNN(height * width,
                  input_shape=(width, height),
                  activation='relu',
                  return_sequences=True,
                  recurrent_initializer='random_uniform',
                  unroll=True))
    model.add(Dense((width + 1) * (height + 1), activation='relu'))
    #model.add(Dropout(0.1))
    #model.add(BatchNormalization(momentum=0.995))
    model.add(Dense(height, activation='sigmoid'))

    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model
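A quick smoke test for the builder above; this is only an illustrative sketch (the 8x8 size and random data are made up, and it assumes numpy plus the Keras imports used by the snippet are in scope):

import numpy as np

model = generate_model(width=8, height=8)
x = np.random.rand(4, 8, 8)                    # 4 samples of shape (width, height)
y = np.random.randint(0, 2, size=(4, 8, 8)).astype('float32')  # binary targets
model.fit(x, y, epochs=1, verbose=0)
print(model.predict(x).shape)                  # -> (4, 8, 8)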
Example #2
def construct_model(maxlen, input_dimension, output_dimension,
                    lstm_vector_output_dim):
    """
        Склеены три слова
    """
    input = Input(shape=(maxlen, input_dimension), name='input')

    # lstm_encode = LSTM(lstm_vector_output_dim)(input)
    lstm_encode = SimpleRNN(lstm_vector_output_dim,
                            activation='sigmoid')(input)

    encoded_copied = RepeatVector(n=maxlen)(lstm_encode)

    # lstm_decode = LSTM(output_dim=output_dimension, return_sequences=True, activation='softmax')(encoded_copied)
    lstm_decode = SimpleRNN(output_dimension,
                            return_sequences=True,
                            activation='softmax')(encoded_copied)

    decoded = TimeDistributed(Dense(output_dimension,
                                    activation='softmax'))(lstm_decode)

    encoder_decoder = Model(input, decoded)

    adam = Adam()
    encoder_decoder.compile(loss='categorical_crossentropy', optimizer=adam)

    return encoder_decoder
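A hedged usage sketch for construct_model: it wires a sequence autoencoder, so one illustrative way to exercise it is to reconstruct random one-hot sequences (all sizes below are made up):

import numpy as np

maxlen, dim = 10, 30                                  # illustrative sizes
model = construct_model(maxlen, dim, dim, lstm_vector_output_dim=64)

idx = np.random.randint(0, dim, size=(100, maxlen))   # fake token ids
X = np.eye(dim)[idx].astype('float32')                # one-hot, shape (100, 10, 30)
model.fit(X, X, epochs=1, batch_size=16, verbose=0)   # input == target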
Example #3
def create_model():
    model = Sequential()
    # Stacked RNNs: every layer except the last must return the full
    # sequence, otherwise the next SimpleRNN receives 2D input and fails.
    model.add(SimpleRNN(X_train.shape[1], input_dim=X_train.shape[1],
                        return_sequences=True))
    model.add(Activation('relu'))
    model.add(SimpleRNN(20000, return_sequences=True))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(SimpleRNN(nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss=loss, optimizer=optim, metrics=['accuracy'])
    return model
Example #4
def build_stacked_rnn(dx, dh, do, length, weights=None):
    model = Sequential()
    model.add(SimpleRNN(dh, input_dim=dx, return_sequences=True))
    model.add(SimpleRNN(
        do,
        input_dim=dh,
        return_sequences=True,
    ))
    if weights is not None:
        model.set_weights(weights)
    return model
Example #5
def get_simplernn(units):
    model = Sequential()
    model.add(
        SimpleRNN(units[1], input_shape=(units[0], 1), return_sequences=True))
    model.add(SimpleRNN(units[2], return_sequences=True))
    model.add(SimpleRNN(units[2], return_sequences=True))
    model.add(SimpleRNN(units[2]))
    model.add(Dropout(0.2))
    model.add(Dense(units[3], activation='sigmoid'))

    return model
Example #6
    def BiRNN_model(self, conf, arm_shape):
        road_num = arm_shape[0]
        input_x = Input((road_num, conf.observe_length, 1))
        output = MyReshape(conf.batch_size)(input_x)
        # output = SimpleRNN(32, return_sequences=True)(output)
        output1 = SimpleRNN(conf.observe_length)(output)
        output2 = SimpleRNN(conf.observe_length, go_backwards=True)(output)
        output = Add()([output1, output2])
        # output = Dropout(0.1)(output)
        output = Dense(conf.predict_length, activation="tanh")(output)
        output = MyInverseReshape(conf.batch_size)(output)
        model = Model(inputs=input_x, outputs=output)
        return model
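The output1/output2/Add() trio above hand-rolls a bidirectional sum: one SimpleRNN reads the sequence forward, a second reads it with go_backwards=True, and the two final states are added. Assuming the same Keras version, the built-in wrapper should be an equivalent drop-in for those three lines:

        # equivalent to the manual forward + go_backwards + Add() construction
        output = Bidirectional(SimpleRNN(conf.observe_length),
                               merge_mode='sum')(output)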
Example #7
def build_simple_model(dropout=0.05):
    model = Sequential()
    # input layer and first recurrent layer
    model.add(SimpleRNN(units=200, return_sequences=True, input_shape=(200, len(TILES))))
    model.add(Dropout(dropout))
    model.add(SimpleRNN(units=200, return_sequences=True))
    model.add(Dropout(dropout))
    model.add(SimpleRNN(units=200, return_sequences=False))
    model.add(Dropout(dropout))
    # output layers
    model.add(Dense(units=len(TILES)))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    return model
Example #8
def model_stateless_RNN(model, dim_in, dim_out):
    model_stateless = Sequential()
    model_stateless.add(
        SimpleRNN(input_shape=(None, dim_in), return_sequences=True,
                  units=100))
    model_stateless.add(Dropout(0.3))
    model_stateless.add(
        SimpleRNN(return_sequences=True, units=50))  # shape inferred from previous layer
    model_stateless.add(Dropout(0.3))
    model_stateless.add(
        TimeDistributed(Dense(activation='linear', units=dim_out)))
    model_stateless.compile(loss='mse', optimizer='adam')
    model_stateless.set_weights(model.get_weights())
    return model_stateless
Example #9
def rnn(plot=False, classes=4):
    model_RNN = Sequential()
    model_RNN.add(
        Embedding(output_dim=128,
                  # +1 because Tokenizer indices start at 1 and input_dim
                  # must exceed the largest index
                  input_dim=len(tokenizer.word_index) + 1,
                  input_length=max_length))
    model_RNN.add(SimpleRNN(60, return_sequences=True))
    model_RNN.add(Dropout(0.7))
    model_RNN.add(SimpleRNN(30))
    model_RNN.add(Dense(units=32, activation='relu'))
    model_RNN.add(Dense(units=classes, activation='softmax'))
    print(model_RNN.summary())
    if plot:
        plot_model(model_RNN, to_file='model_RNN.png', show_shapes=True)
    return model_RNN
Example #10
    def __init__(self, *args, **kwargs):
        super(TestRecursive, self).__init__(*args, **kwargs)
        self.input_dim = 2
        self.state_dim = 2
        self.model = Recursive(return_sequences=True)
        self.model.add_input('input', ndim=3)  # Input is 3D tensor
        self.model.add_state('h', dim=self.state_dim)
        self.model.add_node(Dense(self.input_dim + self.state_dim,
                                  self.state_dim,
                                  init='one'),
                            name='rec',
                            inputs=['input', 'h'],
                            return_state='h')
        self.model.add_node(Activation('linear'),
                            name='out',
                            input='rec',
                            create_output=True)

        self.model2 = Sequential()
        self.model2.add(
            SimpleRNN(input_dim=self.input_dim,
                      activation='linear',
                      inner_init='one',
                      output_dim=self.state_dim,
                      init='one',
                      return_sequences=True))
Example #11
    def build_train_on_batch(self):

        ## first layer for the input (x_t)
        x = Input(batch_shape=(None, None, self.input_dim), name='x')
        # shapes are inferred from `x` in the functional API, so the
        # malformed input_shape arguments (they included the batch axis)
        # are dropped
        masked = Masking(mask_value=-1)(x)
        rnn_out = SimpleRNN(self.hidden_layer_size,
                            return_sequences=True)(masked)
        dense_out = Dense(self.input_dim_order,
                          activation='sigmoid')(rnn_out)
        y_order = Input(batch_shape=(None, None, self.input_dim_order),
                        name='y_order')
        merged = multiply([dense_out, y_order])

        def reduce_dim(x):
            x = K.max(x, axis=2, keepdims=True)
            return x

        def reduce_dim_shape(input_shape):
            shape = list(input_shape)
            shape[-1] = 1
            print("reduced_shape", shape)
            return tuple(shape)

        earlyStopping = EarlyStopping(monitor='val_loss',
                                      patience=2,
                                      verbose=0,
                                      mode='auto')
        reduced = Lambda(reduce_dim, output_shape=reduce_dim_shape)(merged)
        self.model = Model(inputs=[x, y_order], outputs=reduced)
        self.model.compile(optimizer='rmsprop',
                           loss='binary_crossentropy',
                           metrics=['accuracy'])
Example #12
def trainer3(X, Y, N, maxlen):
    X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=0.1)
    print(X_train.shape, X_validation.shape, Y_train.shape, Y_validation.shape)

    n_in = 2
    n_hidden = 100
    n_out = 1

    epochs = 200
    batch_size = 100

    model = Sequential([
        SimpleRNN(n_hidden, input_shape=(maxlen, n_in)),
        Dense(n_out),
        Activation('linear')
    ])
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999))
    execute_train(model, X_train, Y_train, batch_size, epochs, X_validation, Y_validation, 'loss_rnn.png')

    print('predict')
    for i in range(10, 20):
        expected = i
        predicted = model.predict(sin_predict(shift=0, T=expected))
        print('expected:', expected, 'predict:', predicted)

N = 10000
maxlen = 100
X, Y = row_data(N, maxlen)
Example #13
  def build_model(self, params):
    hidden_layers = params['hidden_layers']
    input_dim = params['feat_size']
    output_dim = params['phone_vocab_size']
    drop_prob = params['drop_prob_encoder']
    self.nLayers = len(hidden_layers)

    # First Layer is an encoder layer
    
    self.model.add(TimeDistributedDense(hidden_layers[0], init='glorot_uniform', input_dim=input_dim))
    self.model.add(Dropout(drop_prob))
    
    # Second Layer is the Recurrent Layer 
    if params.get('recurrent_type','simple') == 'simple':
        self.model.add(SimpleRNN(hidden_layers[1], init='glorot_uniform', inner_init='orthogonal',
            activation='sigmoid', weights=None, truncate_gradient=-1, return_sequences=False, 
            input_dim=hidden_layers[0], input_length=None))
    elif params.get('recurrent_type','simple') == 'lstm':
        self.model.add(LSTM(hidden_layers[1], init='glorot_uniform', inner_init='orthogonal',
            input_dim=hidden_layers[0], input_length=None))

    # Then we add dense projection layer to map the RNN outputs to Vocab size 
    self.model.add(Dropout(drop_prob))
    self.model.add(Dense(output_dim, input_dim=hidden_layers[1], init='uniform'))
    self.model.add(Activation('softmax'))
  
    self.solver = getSolver(params)
    self.model.compile(loss='categorical_crossentropy', optimizer=self.solver)
    #score = model.evaluate(test_x)
    self.f_train = self.model.train_on_batch

    return self.f_train
Example #14
def build_test_rnn_mse(dx, dh, do, weights=None):
    model = Sequential()
    model.add(SimpleRNN(dh, input_dim=dx, return_sequences=True))
    model.add(TimeDistributed(Dense(do)))
    if weights is not None:
        model.set_weights(weights)
    return model
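The optional weights argument here (and in Examples #4, #26, #28) hints at a train-one, inspect-another workflow: train a builder that emits only the final step, then clone its parameters into the return_sequences=True twin. A sketch under that assumption, pairing this builder with build_train_rnn_mse from Example #28 (their layer shapes line up, since TimeDistributed(Dense) reuses plain Dense weights):

import numpy as np

train_model = build_train_rnn_mse(dx=8, dh=16, do=4)       # Example #28
train_model.compile(loss='mse', optimizer='adam')
X = np.random.rand(32, 5, 8)                               # (batch, time, dx)
train_model.fit(X, np.random.rand(32, 4), epochs=1, verbose=0)

# same parameters, but every timestep's output is now exposed
test_model = build_test_rnn_mse(dx=8, dh=16, do=4,
                                weights=train_model.get_weights())
print(test_model.predict(X).shape)                         # -> (32, 5, 4)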
Example #15
def RNN():
    model = Sequential()
    result = []
    (x_train, y_train), (x_test, y_test) = get_data()
    model.add(
        Embedding(output_dim=32,
                  # NB: input_dim should be the vocabulary size (max token
                  # index + 1), not the number of training samples
                  input_dim=x_train.shape[0],
                  input_length=x_train.shape[1]))
    model.add(Dropout(0.25))
    model.add(SimpleRNN(units=32))
    model.add(Dense(units=256, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(units=1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.fit(x_train,
              y_train,
              batch_size=30,
              epochs=1,
              verbose=2,
              validation_split=0.2)
    y_pred = model.predict_classes(x_test)
    result.append(accuracy_score(y_test, y_pred))

    print('RNN done')
    return result
Example #16
    def create_model(self):
        if self.saved_model is not None:
            self.model = load_model(self.saved_model)
        else:
            input_cnn = Input(shape=(90, 160, 3))
            input_seq = Input(shape=(self.seq_len, 90, 160, 3))

            x = Convolution2D(self.nb_filters[0], 3, 3, border_mode="same")(input_cnn)
            #x = Activation(self.activation)(x)
            x = MaxPooling2D((2, 2))(x)
            #if self.dropout > 0:
            #    x = Dropout(self.dropout)(x)
            for nb_filter in self.nb_filters[1:]:
                x = Convolution2D(nb_filter, 3, 3, border_mode="same")(x)
                #x = Activation(self.activation)(x)
                x = MaxPooling2D((2, 2))(x)
                #if self.dropout > 0:
                #    x = Dropout(self.dropout)(x)
            x = Flatten()(x)

            cnn_model = Model(input=input_cnn,output=x)

            self.model = Sequential()

            self.model.add(TimeDistributed(cnn_model,input_shape=(self.seq_len, 90, 160, 3)))
            self.model.add(SimpleRNN(self.n_classes, return_sequences=True, activation='softmax'))
            #x = Dense(self.n_classes, activation='softmax')(x)


            # compile() takes no learning_rate argument; the rate belongs on
            # the optimizer object itself
            self.model.compile(loss='categorical_crossentropy',
                               optimizer=self.optimizer,
                               metrics=['accuracy', 'fmeasure', 'categorical_accuracy'])

            self.model.summary()
Example #17
def make_model(factors, answers, input_dim):
    model = Sequential()
    model.add(
        SimpleRNN(n_hidden_unit,
                  batch_input_shape=(None, affect_length, input_dim),
                  return_sequences=False))
    # model.add(LSTM(n_hidden_unit, batch_input_shape=(None, affect_length, input_dim), return_sequences=False))

    model.add(Dense(10))
    model.add(Activation("relu"))
    model.add(Dense(1))
    model.add(Activation("linear"))
    optimizer = Adam(lr=lr)
    model.compile(loss="mean_absolute_error", optimizer=optimizer)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   mode='auto',
                                   patience=30)
    model.fit(factors,
              answers,
              batch_size=1,
              epochs=1,
              validation_split=0.2,
              callbacks=[early_stopping])
    pred = model.predict(factors)
    # NB: despite its name, this function returns predictions, not the model
    return pred
Example #18
def create_rnn_model(X, Y, embedding_size, num_nodes, act, dropout=0.5):
    Y = to_categorical(Y)
    X = numpy.asarray(X)
    print "train X shape: " + str(X.shape)

    print "RNN: nodes: " + str(num_nodes) + " embedding: " + str(
        embedding_size)
    #print "vocab: " + str(vocab_size)
    print "max_seq_len: " + str(max_seq_len)
    # TEMP for no embedding
    embedding_size = 200
    nn = Sequential(
        [  #Embedding(vocab_size, embedding_size, input_length=max_seq_len, mask_zero=False),
            SimpleRNN(num_nodes,
                      activation=act,
                      return_sequences=False,
                      input_shape=(
                          max_seq_len,
                          embedding_size)),  # Dense(200, activation='tanh'),
            #LSTM(256, input_dim=200, activation='sigmoid', inner_activation='hard_sigmoid'),
            Dropout(dropout),
            Dense(Y.shape[1], activation='softmax')
        ])
    nn.compile(optimizer='rmsprop',
               loss='categorical_crossentropy',
               metrics=['accuracy'])

    nn.fit(X, Y)
    nn.summary()
    return nn, X, Y
Example #19
def model_RNN_FFNN(maxLendesc, vocab_size):
    # Image feature extraction
    in1 = Input(shape=(2048, ))
    featureExtracted1 = Dropout(0.5)(in1)
    featureExtracted2 = Dense(256, activation='relu')(featureExtracted1)

    # Captions
    in2 = Input(shape=(maxLendesc, ))
    sentenceExtraction1 = Embedding(vocab_size, 256, mask_zero=True)(
        in2)  # input_dim = vocab_size, output_dim = 256
    sentenceExtraction2 = Dropout(0.5)(sentenceExtraction1)
    sentenceExtraction3 = SimpleRNN(256)(sentenceExtraction2)

    # Input to the feed forward NN
    ff1 = add([featureExtracted2, sentenceExtraction3])
    ff2 = Dense(256, activation="relu")(ff1)
    ff_out = Dense(vocab_size, activation="softmax")(ff2)

    model = Model(
        inputs=[in1, in2],
        outputs=ff_out)  # All the inputs required to compute the output
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])  #Configures the model for training

    print(model.summary())
    return model
Example #20
def baseline_model(nb_units=50):
    img_rows = 28
    img_cols = 28

    # create model
    model = Sequential()

    # Recurrent layers supported: SimpleRNN, LSTM, GRU:
    model.add(SimpleRNN(nb_units, input_shape=(img_rows, img_cols)))

    # To stack multiple RNN layers, all RNN layers except the last one need
    # to have "return_sequences=True".  An example of using two RNN layers:
    #model.add(SimpleRNN(16,
    #                    input_shape=(img_rows, img_cols),
    #                    return_sequences=True))
    #model.add(SimpleRNN(32))

    model.add(Dense(units=num_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print(model.summary())
    return model
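To make the stacking comment in Example #20 concrete, here is the two-layer variant written out as a standalone sketch (it assumes the same imports and that num_classes is defined as in the snippet):

# all RNN layers except the last need return_sequences=True
model = Sequential()
model.add(SimpleRNN(16, input_shape=(28, 28), return_sequences=True))
model.add(SimpleRNN(32))                  # last layer: final step only
model.add(Dense(units=num_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])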
Example #21
def build_RNN_model(vocab_size, embedding_dims, rnn_layer_dim, num_classes):
    """Build the RNN model"""
    model = Sequential()  # Sequential model
    # Embedding layer
    model.add(Embedding(vocab_size, embedding_dims))
    # Recurrent layer
    model.add(
        SimpleRNN(int(rnn_layer_dim),
                  init='glorot_uniform',
                  inner_init='orthogonal',
                  activation='tanh',
                  W_regularizer=None,
                  U_regularizer=None,
                  b_regularizer=None,
                  dropout_W=0.0,
                  dropout_U=0.0,
                  return_sequences=True,
                  stateful=False))
    # Time distributed dense layer (activation is softmax, since it is a classification problem)
    model.add(
        TimeDistributedDense(num_classes,
                             init='glorot_uniform',
                             activation='softmax'))

    return model
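Example #21 is written against the Keras 1 argument names. For reference, a rough Keras 2 translation of the same stack, assuming the same function signature (init became kernel_initializer, inner_init became recurrent_initializer, dropout_W/dropout_U became dropout/recurrent_dropout, and TimeDistributedDense became TimeDistributed(Dense(...))):

    model = Sequential()
    model.add(Embedding(vocab_size, embedding_dims))
    model.add(
        SimpleRNN(int(rnn_layer_dim),
                  kernel_initializer='glorot_uniform',
                  recurrent_initializer='orthogonal',
                  activation='tanh',
                  dropout=0.0,
                  recurrent_dropout=0.0,
                  return_sequences=True,
                  stateful=False))
    model.add(
        TimeDistributed(Dense(num_classes,
                              kernel_initializer='glorot_uniform',
                              activation='softmax')))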
Example #22
    def construct_rnn(self):
        # define the model
        print('Build model SimpleRNN')
        inputs = Input(shape=(self.input_dim, ))
        if self.embedding == "keras":
            l1 = Embedding(self.max_words,
                           self.embedding_dims,
                           input_length=self.p.input_dim)(inputs)
            l1 = SpatialDropout1D(self.dropout)(l1)  # takes a rate, not a Dropout layer
        else:
            l1 = inputs
        outputs = SimpleRNN(self.input_dim * 20,
                            init='glorot_uniform',
                            inner_init='orthogonal',
                            activation='tanh',
                            W_regularizer=None,
                            U_regularizer=None,
                            b_regularizer=None,
                            dropout_W=0.0,
                            dropout_U=0.0)(l1)

        outputs = Dense(self.outputdim,
                        activation=self.activation_sortie,
                        kernel_initializer='normal')(outputs)
        self.model = Model(inputs=inputs, outputs=outputs)
Example #23
def build_model(input_shape, hidden_layer_count):
    model = Sequential()
    model.add(SimpleRNN(hidden_layer_count, input_shape=input_shape))
    model.add(Dense(input_shape[1]))
    model.add(Activation('linear'))
    model.compile(loss='mse', optimizer=Adam())
    return model
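Usage sketch with illustrative numbers: input_shape is (timesteps, features), and the Dense(input_shape[1]) head predicts the next vector with the same width as the input features:

import numpy as np

model = build_model(input_shape=(20, 3), hidden_layer_count=32)
X = np.random.rand(64, 20, 3)      # 64 windows of 20 steps, 3 features
y = np.random.rand(64, 3)          # next-step target per window
model.fit(X, y, epochs=1, verbose=0)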
Example #24
    def get_model(self, normalize=False):
        model = Sequential()
        nlayers = 1

        if 'layers' in self.parameters.keys():
            nlayers = self.parameters['layers']

        dropout = [0.2] * nlayers
        if 'dropout' in self.parameters.keys():
            dropout = self.parameters['dropout']

        if self.nn_type == 'RNN':
            model.add(SimpleRNN(512, input_shape=self.x_train[0].shape, activation='relu'))
            model.add(Dropout(dropout[0]))
            if 'normalization' in self.parameters.keys():
                if self.parameters['normalization'] == True:
                    model.add(BatchNormalization())
        
        if self.nn_type == 'LSTM':
            for i in range(nlayers-1):
                model.add(LSTM(128, input_shape=self.x_train[0].shape, activation='relu', return_sequences=True))
                model.add(Dropout(dropout[i]))

            model.add(LSTM(128, input_shape=self.x_train[0].shape, activation='relu'))
            model.add(Dropout(dropout[nlayers-1]))
            # no early return: the LSTM branch also needs the softmax head below

        model.add(Dense(self.num_classes, activation='softmax'))
        model.summary()

        return model
Example #25
def rnn(nb_chars, length, input_chars, nums_interation, char2index,
        index2char):
    # build the model
    model = Sequential()
    model.add(
        SimpleRNN(units=128,
                  return_sequences=False,
                  input_shape=(length, nb_chars),
                  unroll=True))
    model.add(Dense(nb_chars, activation="softmax"))
    # compile the model
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    # train the model
    for iteration in range(nums_interation):
        print("=" * 50)
        print("Iteration :", iteration)
        model.fit(x, y, batch_size=128, epochs=1)

        test_idx = np.random.randint(len(input_chars))
        test_chars = input_chars[test_idx]
        print("Generating from seed :", test_chars)
        print(test_chars, end="")
        for t in range(100):
            xtest = np.zeros((1, length, nb_chars), dtype=bool)  # np.bool was removed from NumPy
            for i, ch in enumerate(test_chars):
                xtest[0, i, char2index[ch]] = 1
            pred = model.predict(xtest, verbose=0)
            ypred = index2char[np.argmax(pred)]
            print(ypred, end="")
            # slide the window: test_chars + ypred becomes the next input
            test_chars = test_chars[1:] + ypred
        # newline
        print()
Example #26
def build_softmax_rnn(dx, dh, do, length, weights=None):
    model = Sequential()
    model.add(SimpleRNN(dh, input_dim=dx, return_sequences=True))
    model.add(TimeDistributed(Dense(do, activation='softmax')))  # activation belongs to Dense

    if weights is not None:
        model.set_weights(weights)
    return model
Example #27
    def rnn(self):
        start_cr_a_fit_net = time.time()
        self.split_dataset_rnn()

        rnn_model = Sequential()

        # RNN layer design
        rnn_model.add(
            SimpleRNN(15,
                      input_shape=(None, self.look_back),
                      return_sequences=True))
        rnn_model.add(
            SimpleRNN(10,
                      input_shape=(None, self.look_back),
                      return_sequences=True))
        # SwitchNormalization (SN) layer
        if self.isdropout:
            rnn_model.add(SwitchNormalization(axis=-1))
        rnn_model.add(
            SimpleRNN(15,
                      input_shape=(None, self.look_back),
                      return_sequences=True))
        rnn_model.add(SimpleRNN(10, input_shape=(None, self.look_back)))
        rnn_model.add(Dense(1))
        # dropout layer
        if self.isdropout:
            rnn_model.add(GaussianDropout(0.2))

        rnn_model.summary()
        rnn_model.compile(loss='mean_squared_error', optimizer='adam')
        rnn_model.fit(self.x_train,
                      self.y_train,
                      epochs=self.epochs,
                      batch_size=self.batch_size,
                      verbose=1)
        end_cr_a_fit_net = time.time() - start_cr_a_fit_net
        print(
            'Running time of creating and fitting the RNN network: %.2f Seconds'
            % (end_cr_a_fit_net))

        # RNN prediction
        trainPredict = rnn_model.predict(
            self.x_train)  # predict on the training set
        testPredict = rnn_model.predict(
            self.x_test)  # predict on the test set
        return trainPredict, testPredict, self.y_train, self.y_test
Example #28
def build_train_rnn_mse(dx, dh, do, span=1, weights=None, batch_size=2):
    model = Sequential()
    model.add(SimpleRNN(dh, input_dim=dx, return_sequences=False))
    model.add(Dense(do))

    if weights is not None:
        model.set_weights(weights)
    return model
Example #29
def buildRNN(units, steps, out_dim, act, opt, los):
    print('Build RNN...')
    model = Sequential()
    model.add(SimpleRNN(out_dim, input_shape=(steps, out_dim)))

    model.compile(loss='mean_absolute_error', optimizer='rmsprop')
    model.summary()
    return model
Example #30
def buildRNN(timesteps, data_dim, out_dim):
    print('Build RNN...')
    model = Sequential()
    model.add(SimpleRNN(out_dim, input_shape=(timesteps, data_dim)))

    model.compile(loss='mean_absolute_error', optimizer='rmsprop')

    return model
Example #31
    def test_basic(self):
        """Just check that the Bidirectional layer can compile and run"""
        nb_samples, timesteps, input_dim, output_dim = 3, 3, 10, 5

        for ret_seq in [True, False]:
            rnn1 = SimpleRNN(output_dim, return_sequences=ret_seq,
                             input_shape=(None, input_dim))
            rnn2 = SimpleRNN(output_dim, return_sequences=ret_seq,
                             input_shape=(None, input_dim))
            layer = Bidirectional(rnn1, rnn2, return_sequences=ret_seq)
            layer.input = theano.shared(value=np.ones((nb_samples, timesteps, input_dim)))
            rnn1.input = layer.input
            rnn2.input = layer.input
            _ = layer.get_config()

            for train in [True, False]:
                out = layer.get_output(train).eval()
                # Make sure the output has the desired shape
                if ret_seq:
                    assert(out.shape == (nb_samples, timesteps, output_dim*2))
                else:
                    assert(out.shape == (nb_samples, output_dim*2))
                _ = layer.get_output_mask(train)