Example #1
 def build_critic(self):
     """Build critic network
     
     Receives converted tensors: raw_data, smoothed_data, and downsampled_data
     """
     # lower layer
     lower_model = [
         self.build_network(self.model_config['critic_lower'],
                            input_shape=(self.history_length, self.n_stock,
                                         1))
         for _ in range(1 + self.n_smooth + self.n_down)
     ]
     merged = Merge(lower_model, mode='concat')
     # upper layer
     upper_model = self.build_network(self.model_config['critic_upper'],
                                      model=merged)
     # action layer
     action = self.build_network(self.model_config['critic_action'],
                                 input_shape=(self.n_stock, ),
                                 is_conv=False)
     # output layer
     merged = Merge([upper_model, action], mode='mul')
     model = Sequential()
     model.add(merged)
     model.add(Dense(1))
     return model
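Note: Merge as used throughout these examples is the Keras 1.x layer that was removed in Keras 2. A minimal functional-API sketch of the same concat-then-multiply critic pattern; all shapes and layer sizes here are illustrative assumptions, not taken from the original:

# Hypothetical Keras 2 rewrite of the critic pattern above; all sizes are assumptions.
from keras.layers import Input, Conv2D, Flatten, Dense, Concatenate, Multiply
from keras.models import Model

history_length, n_stock, n_branches = 10, 5, 3  # assumed values

# one input per lower branch (raw, smoothed, downsampled)
branch_inputs = [Input(shape=(history_length, n_stock, 1))
                 for _ in range(n_branches)]
branch_outputs = [Flatten()(Conv2D(8, (3, 1), padding='same')(x))
                  for x in branch_inputs]
merged = Concatenate()(branch_outputs)        # replaces Merge(..., mode='concat')
upper = Dense(64, activation='relu')(merged)

action_input = Input(shape=(n_stock,))
action = Dense(64, activation='relu')(action_input)

# elementwise product of state and action features, as in Merge(..., mode='mul')
q_value = Dense(1)(Multiply()([upper, action]))
model = Model(inputs=branch_inputs + [action_input], outputs=q_value)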
Example #2
    def build(self):
        print("building the multiplication model")
        enc_size = self.size_of_env_observation()
        argument_size = IntegerArguments.size_of_arguments
        input_enc = InputLayer(batch_input_shape=(self.batch_size, enc_size), name='input_enc')
        input_arg = InputLayer(batch_input_shape=(self.batch_size, argument_size), name='input_arg')
        input_prg = Embedding(input_dim=PROGRAM_VEC_SIZE, output_dim=PROGRAM_KEY_VEC_SIZE, input_length=1,
                              batch_input_shape=(self.batch_size, 1))

        f_enc = Sequential(name='f_enc')
        f_enc.add(Merge([input_enc, input_arg], mode='concat'))
        f_enc.add(MaxoutDense(128, nb_feature=FIELD_ROW))
        self.f_enc = f_enc

        program_embedding = Sequential(name='program_embedding')
        program_embedding.add(input_prg)

        f_enc_convert = Sequential(name='f_enc_convert')
        f_enc_convert.add(f_enc)
        f_enc_convert.add(RepeatVector(1))

        f_lstm = Sequential(name='f_lstm')
        f_lstm.add(Merge([f_enc_convert, program_embedding], mode='concat'))
        f_lstm.add(LSTM(256, return_sequences=False, stateful=True, W_regularizer=l2(0.0000001)))
        f_lstm.add(Activation('relu', name='relu_lstm_1'))
        f_lstm.add(RepeatVector(1))
        f_lstm.add(LSTM(256, return_sequences=False, stateful=True, W_regularizer=l2(0.0000001)))
        f_lstm.add(Activation('relu', name='relu_lstm_2'))
        plot(f_lstm, to_file='f_lstm.png', show_shapes=True)

        f_end = Sequential(name='f_end')
        f_end.add(f_lstm)
        f_end.add(Dense(1, W_regularizer=l2(0.001)))
        f_end.add(Activation('sigmoid', name='sigmoid_end'))

        f_prog = Sequential(name='f_prog')
        f_prog.add(f_lstm)
        f_prog.add(Dense(PROGRAM_KEY_VEC_SIZE, activation="relu"))
        f_prog.add(Dense(PROGRAM_VEC_SIZE, W_regularizer=l2(0.0001)))
        f_prog.add(Activation('softmax', name='softmax_prog'))
        plot(f_prog, to_file='f_prog.png', show_shapes=True)

        f_args = []
        for ai in range(1, IntegerArguments.max_arg_num+1):
            f_arg = Sequential(name='f_arg%s' % ai)
            f_arg.add(f_lstm)
            f_arg.add(Dense(IntegerArguments.depth, W_regularizer=l2(0.0001)))
            f_arg.add(Activation('softmax', name='softmax_arg%s' % ai))
            f_args.append(f_arg)
        plot(f_arg, to_file='f_arg.png', show_shapes=True)

        self.model = Model([input_enc.input, input_arg.input, input_prg.input],
                           [f_end.output, f_prog.output] + [fa.output for fa in f_args],
                           name="npi")
        self.compile_model()
        plot(self.model, to_file='model.png', show_shapes=True)
Example #3
def create_base_network(input_dim):
    '''
    Base network for feature extraction.
    '''
    input = Input(shape=(input_dim, ))
    dense1 = Dense(128)(input)
    bn1 = BatchNormalization(mode=2)(dense1)
    relu1 = Activation('relu')(bn1)

    dense2 = Dense(128)(relu1)
    bn2 = BatchNormalization(mode=2)(dense2)
    res2 = merge([relu1, bn2], mode='sum')
    relu2 = Activation('relu')(res2)

    dense3 = Dense(128)(relu2)
    bn3 = BatchNormalization(mode=2)(dense3)
    res3 = Merge(mode='sum')([relu2, bn3])
    relu3 = Activation('relu')(res3)

    feats = merge([relu3, relu2, relu1], mode='concat')
    bn4 = BatchNormalization(mode=2)(feats)

    model = Model(input=input, output=bn4)

    return model
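The residual blocks above mix the functional merge() helper and the Merge layer, both Keras 1.x APIs. A minimal sketch of one block in Keras 2, assuming Add and Concatenate as the replacements and an illustrative input size:

# Hypothetical Keras 2 version of one residual block from create_base_network.
from keras.layers import Input, Dense, BatchNormalization, Activation, Add, Concatenate
from keras.models import Model

inp = Input(shape=(32,))  # input_dim assumed to be 32
relu1 = Activation('relu')(BatchNormalization()(Dense(128)(inp)))
# shortcut connection: merge([...], mode='sum') becomes Add()
res2 = Add()([relu1, BatchNormalization()(Dense(128)(relu1))])
relu2 = Activation('relu')(res2)
# merge([...], mode='concat') becomes Concatenate()
feats = Concatenate()([relu2, relu1])
model = Model(inputs=inp, outputs=BatchNormalization()(feats))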
Example #4
 def create_emb_layer(self):
     iw = Input(shape=(self.max_ngram_num, ),
                dtype='int32',
                name="inputword")
     emb_in = embeddings.Embedding(output_dim=self.vector_size,
                                   input_dim=self.ngram_size,
                                   init="uniform",
                                   mask_zero=True,
                                   name="input_layer")
     vv_iw = emb_in(iw)
     zm = ZeroMaskedEntries()
     zm.build((None, self.max_ngram_num, self.vector_size))
     zero_masked_emd = zm(vv_iw)
     conv_l1 = convolutional.Convolution1D(self.vector_size,
                                           self.max_ngram_num,
                                           border_mode='valid')
     conv_l2 = convolutional.Convolution1D(self.vector_size,
                                           self.max_ngram_num,
                                           border_mode='valid')
     conv1 = conv_l1(zero_masked_emd)
     conv2 = conv_l2(zero_masked_emd)
     sigm_conv = Activation("sigmoid")(conv2)
     mult_l = Merge(mode='mul')
     mult = mult_l([conv1, sigm_conv])
     return ([iw], emb_in, mult)
Example #5
def _maybe_merge_inputs(inputs):
    if isinstance(inputs, list) and len(inputs) > 1:
        return Merge(mode='concat')(inputs)
    elif isinstance(inputs, list) and len(inputs) == 1:
        return inputs[0]
    else:
        return inputs
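The same helper ports directly to Keras 2, where Merge(mode='concat') becomes the Concatenate layer; a minimal sketch (the _v2 name is hypothetical):

# Hypothetical Keras 2 equivalent of _maybe_merge_inputs.
from keras.layers import Concatenate

def _maybe_merge_inputs_v2(inputs):
    # concatenate only when there is more than one tensor to merge
    if isinstance(inputs, list) and len(inputs) > 1:
        return Concatenate()(inputs)
    if isinstance(inputs, list) and len(inputs) == 1:
        return inputs[0]
    return inputs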
Example #6
 def __build_network(self):
     embedding_layer = Embedding(
         self.corpus_size,
         EMBEDDING_DIM,
         weights=[self.embedding_matrix],
         input_length=MAX_SEQUENCE_LENGTH,
         trainable=False)
     # train a 1D convnet with global maxpooling
     sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
     embedded_sequences = embedding_layer(sequence_input)
     conv_blocks = []
     for sz in self.filter_sizes:
         conv = Convolution1D(
             self.num_filters, sz, activation="relu")(embedded_sequences)
         conv = MaxPooling1D(2)(conv)
         conv = Flatten()(conv)
         conv_blocks.append(conv)
     z = (Merge(mode='concat', concat_axis=1)(conv_blocks)
          if len(conv_blocks) > 1 else conv_blocks[0])
     z = Dropout(0.5)(z)
     z = Dense(self.hidden_dims, activation="relu")(z)
     preds = Dense(self.class_num, activation="softmax")(z)
     rmsprop = RMSprop(lr=0.001)
     self.model = Model(sequence_input, preds)
     self.model.compile(
         loss='categorical_crossentropy',
         optimizer=rmsprop,
         metrics=['acc'])
Example #7
    def build(self, input_dims):
        # sentence features
        model = Sequential()
        model.add(Convolution2D(100, 1, input_dims[0],
                                input_shape=(self.num_channel, 100, input_dims[0]),
                                activation='relu'))
        model.add(Dropout(0.5))
        model.add(MaxPooling2D(pool_size=(50, 1)))
        model.add(Flatten())
        model.add(Dropout(0.5))
        model.add(Dense(100, activation='tanh'))
        model.add(Dropout(0.5))

        # overall user features
        model2 = Sequential()
        model2.add(Dense(100, input_dim=input_dims[1], activation='tanh'))
        model2.add(Dropout(0.5))

        # time and region features
        model3 = Sequential()
        model3.add(Dense(output_dim=800, input_dim=input_dims[2], activation='tanh'))
        model3.add(Dropout(0.5))
        model3.add(Dense(output_dim=300, activation='tanh'))
        model3.add(Dropout(0.5))

        merged_model = Sequential()
        merged_model.add(Merge([model, model2, model3], mode='concat', concat_axis=1))
        merged_model.add(Dense(self.num_class))
        merged_model.add(Activation('softmax'))

        merged_model.compile(loss='categorical_crossentropy',
                             optimizer='adadelta',
                             metrics=['accuracy'])

        self.model = merged_model
        self.earlyStopping = EarlyStopping(monitor='val_loss', patience=25,
                                           verbose=0, mode='auto')
        self.checkpoint = ModelCheckpointPlus(filepath='weights.hdf5',
                                              monitor='val_loss',
                                              verbose_show=20)
Example #8
def test_works():
    x = Input(shape=(30, 1), name="input")
    e = GRU(128, return_sequences=True)(x)
    s = Slice("[-1,:]")(e)
    # s = Slice('[-1,:]')(e)
    # s = theano.printing.Print("s")(s)
    r = RepeatVector(30)(s)
    m = Merge(mode='concat', concat_axis=2)([r, x])
    d = GRU(128, return_sequences=True)(m)
    p = Ptr_Layer(30)([x, e, d])

    model = Model(input=x, output=p, name='test')

    # print(Sort(nb_out=5).get_output_shape_for((1,2,3)))

    inp = np.random.randint(size=(10000, 30, 1), low=0, high=100)
    indices = np.argsort(inp[:, :, 0])
    # print(indices)
    target = np.array(
        [np.take(inp[i], indices[i], axis=-2) for i in range(inp.shape[0])])
    # print("Input")
    # print(inp)
    # print("Target")
    # print(target)
    model.compile(optimizer=optimizers.Adam(), loss='mse')
    model.fit(inp, target, nb_epoch=500, batch_size=100)
Example #9
def buildModel(self):
        '''
        Define the exact structure of your model here. We create a
        description generation model by merging a convolutional encoding
        of the input (code) sequence with a word embedding model, with an
        LSTM over the merged sequences.
        '''
        logger.info('Building Keras model...')

        # maxlen, nb_filter, filter_length, vocab_size and max_caption_len
        # are assumed to be module-level constants defined elsewhere
        code_model = Sequential()
        code_model.add(Embedding(self.max_features, self.embed_size,
                                 input_length=maxlen, dropout=0.2))

        code_model.add(Convolution1D(nb_filter=nb_filter,
                                filter_length=filter_length,
                                border_mode='valid',
                                subsample_length=1))
        #model.add(BatchNormalization())
        code_model.add(Activation('relu'))
        
        code_model.add(Convolution1D(nb_filter=nb_filter,
                                filter_length=filter_length,
                                border_mode='valid',
                                subsample_length=1))
        # model.add(BatchNormalization())
        code_model.add(Activation('relu'))
        
        code_model.add(MaxPooling1D(pool_length=code_model.output_shape[1]))
        code_model.add(Flatten())
        # We add a vanilla hidden layer:
        code_model.add(Dense(128))
        
        
        # next, let's define a RNN model that encodes sequences of words
        # into sequences of 128-dimensional word vectors.
        language_model = Sequential()
        language_model.add(Embedding(vocab_size, 256, input_length=max_caption_len))
        language_model.add(LSTM(output_dim=128, return_sequences=True))
        language_model.add(TimeDistributed(Dense(128)))
        
        # let's repeat the encoded vector to turn it into a sequence.
        code_model.add(RepeatVector(max_caption_len))
        
        # the output of both models will be tensors of shape (samples, max_caption_len, 128).
        # let's concatenate these 2 vector sequences.
        model = Sequential()
        model.add(Merge([code_model, language_model], mode='concat', concat_axis=-1))
        # let's encode this vector sequence into a single vector
        model.add(LSTM(256, return_sequences=False))
        # which will be used to compute a probability
        # distribution over what the next word in the caption should be!
        model.add(Dense(self.vocab_size))
        model.add(Activation('softmax'))
        
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"])
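A compact functional-API sketch of the same merge-and-decode idea, assuming Keras 2 and illustrative values for the module-level constants the method relies on:

# Hypothetical Keras 2 sketch of the encoder/decoder merge in buildModel.
from keras.layers import (Input, Embedding, Dense, LSTM, RepeatVector,
                          TimeDistributed, Concatenate)
from keras.models import Model

vocab_size, max_caption_len, feat_dim = 10000, 16, 128  # assumed values

feat_in = Input(shape=(feat_dim,))
feat_seq = RepeatVector(max_caption_len)(Dense(128)(feat_in))

words_in = Input(shape=(max_caption_len,))
word_seq = TimeDistributed(Dense(128))(Embedding(vocab_size, 256)(words_in))

# Merge(mode='concat', concat_axis=-1) becomes Concatenate(axis=-1)
merged = Concatenate(axis=-1)([feat_seq, word_seq])
probs = Dense(vocab_size, activation='softmax')(LSTM(256)(merged))
model = Model(inputs=[feat_in, words_in], outputs=probs)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')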
Example #10
    def build_cnn_model(self):
        if self.is_building_mlp:
            Sn = Input(shape=self.nn_input_size)
            for i, layer in enumerate(self.nn_layers):
                if not len(layer) == 2:
                    raise ValueError(
                        "NN layers must be lists of 2, containing the number of perceptrons and the activation"
                    )
                if i == 0:
                    h = Dense(layer[0], activation=layer[1])(Sn)
                else:
                    h = Dense(layer[0], activation=layer[1])(h)
            nn_model_ = h

        S = Input(shape=self.cnn_input_size)
        for i, layer in enumerate(self.cnn_layers):
            if len(layer) == 4:
                if i == 0:
                    h = Convolution2D(layer[0],
                                      layer[1],
                                      layer[1],
                                      subsample=(layer[2], layer[2]),
                                      border_mode='same',
                                      activation=layer[3])(S)
                else:
                    h = Convolution2D(layer[0],
                                      layer[1],
                                      layer[1],
                                      subsample=(layer[2], layer[2]),
                                      border_mode='same',
                                      activation=layer[3])(h)
                h = MaxPooling2D(pool_size=(2, 2))(h)
            elif len(layer) == 2:
                h = Flatten()(h)
                if self.is_building_mlp:
                    h = Merge(mode='concat')([h, nn_model_])
                h = Dense(layer[0], activation=layer[1])(h)
            else:
                raise ValueError(
                    "cnn_layers entries must have 4 parameters for a Convolutional layer or 2 for a Dense layer; given %d"
                    % len(layer))
        V = Dense(self.number_of_actions)(h)

        if self.is_building_mlp:
            self.model = Model([S, Sn], V)
        else:
            self.model = Model(S, V)
        plot(self.model, to_file='{}.png'.format(self.cnn_save_name))
        try:
            self.model.load_weights('{}.h5'.format(self.cnn_save_name))
            print "loading from {}.h5".format(self.cnn_save_name)
        except IOError:
            print "Training a new model"
Example #11
 def build_actor(self):
     """Build actor network
     
     Receives converted tensors: raw_data, smoothed_data, and downsampled_data
     """
     # lower layer
     lower_model = [
         self.build_network(self.model_config['actor_lower'],
                            input_shape=(self.history_length, self.n_stock,
                                         1))
         for _ in range(1 + self.n_smooth + self.n_down)
     ]
     merged = Merge(lower_model, mode='concat')
     # upper layer
     model = self.build_network(self.model_config['actor_upper'], model=merged)
     return model
Example #12
def get_multi_conv_model():
    model_16_to_1 = get_16_to_1_model()
    model_32_to_1 = get_32_to_1_model()
    model_64_to_1 = get_64_to_1_model()
    print(model_16_to_1.output_shape)
    print(model_32_to_1.output_shape)
    print(model_64_to_1.output_shape)
    merged = Merge([model_16_to_1, model_32_to_1, model_64_to_1], mode='ave')
    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Reshape((config['win_len'], 64)))
    final_model.add(LSTM(64, return_sequences=True))
    final_model.add(LSTM(64))
    final_model.add(Dense(config['num_classify'], W_regularizer=l2(0.01)))
    return final_model
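Merge(..., mode='ave') takes the elementwise mean of equal-shaped branch outputs; in Keras 2 that is the Average layer. A minimal sketch with stand-in branches and assumed shapes:

# Hypothetical Keras 2 replacement for Merge(mode='ave'); shapes are assumptions.
from keras.layers import Input, Dense, Average
from keras.models import Model

inp = Input(shape=(32,))                       # assumed shared input shape
branches = [Dense(64)(inp) for _ in range(3)]  # stand-ins for the three sub-models
avg = Average()(branches)                      # elementwise mean of the branches
model = Model(inputs=inp, outputs=avg)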
Example #13
    def create_emb_layer(self):
        iw3 = Input(shape=(self.max_ngram_one_class, ),
                    dtype='int32',
                    name="inputword3")
        iw4 = Input(shape=(self.max_ngram_one_class, ),
                    dtype='int32',
                    name="inputword4")
        iw5 = Input(shape=(self.max_ngram_one_class, ),
                    dtype='int32',
                    name="inputword5")
        iw6 = Input(shape=(self.max_ngram_one_class, ),
                    dtype='int32',
                    name="inputword6")
        emb_in = embeddings.Embedding(output_dim=self.vector_size,
                                      input_dim=self.ngram_size,
                                      init="uniform",
                                      mask_zero=True,
                                      name="input_layer")

        vv_iw3 = emb_in(iw3)
        vv_iw4 = emb_in(iw4)
        vv_iw5 = emb_in(iw5)
        vv_iw6 = emb_in(iw6)

        zm = ZeroMaskedEntries()
        zm.build((None, self.max_ngram_num, self.vector_size))

        zero_masked_emd3 = zm(vv_iw3)
        zero_masked_emd4 = zm(vv_iw4)
        zero_masked_emd5 = zm(vv_iw5)
        zero_masked_emd6 = zm(vv_iw6)

        lstm_l3 = recurrent.GRU(self.vector_size, return_sequences=False)
        lstm_l4 = recurrent.GRU(self.vector_size, return_sequences=False)
        lstm_l5 = recurrent.GRU(self.vector_size, return_sequences=False)
        lstm_l6 = recurrent.GRU(self.vector_size, return_sequences=False)

        lstm3 = lstm_l3(zero_masked_emd3)
        lstm4 = lstm_l4(zero_masked_emd4)
        lstm5 = lstm_l5(zero_masked_emd5)
        lstm6 = lstm_l6(zero_masked_emd6)

        merge_conv = Merge(mode='ave', concat_axis=1)
        merged = merge_conv([lstm3, lstm4, lstm5, lstm6])

        reshaped = Reshape((1, self.vector_size))(merged)
        return ([iw3, iw4, iw5, iw6], emb_in, reshaped)
Example #14
def concatFeatModel(numFeatures_bsif, numFeatures_lpq, numFeatures_wld):
    # Model for bsif
    model_bsif = Sequential()
    model_bsif.add(
        Dense(numFeatures_bsif,
              input_dim=numFeatures_bsif,
              init='glorot_uniform',
              activation='relu'))
    model_bsif.add(GaussianNoise(3))
    model_bsif.add(Dropout(0.3))

    # Model for lpq
    model_lpq = Sequential()
    model_lpq.add(
        Dense(numFeatures_lpq,
              input_dim=numFeatures_lpq,
              init='glorot_uniform',
              activation='relu'))
    model_lpq.add(GaussianNoise(3))
    model_lpq.add(Dropout(0.3))

    # Model for wld
    model_wld = Sequential()
    model_wld.add(
        Dense(numFeatures_wld,
              input_dim=numFeatures_wld,
              init='glorot_uniform',
              activation='relu'))
    model_wld.add(GaussianNoise(3))
    model_wld.add(Dropout(0.3))
    model_wld.add(Dense(numFeatures_wld, activation='relu'))
    model_wld.add(Dropout(0.3))

    #Merge all models
    model_concat = Sequential()
    model_concat.add(
        Merge([model_bsif, model_lpq, model_wld], mode='concat',
              concat_axis=1))
    model_concat.add(Dense(1, activation='sigmoid'))

    #compile model
    opt = Adadelta()
    model_concat.compile(loss='binary_crossentropy',
                         optimizer=opt,
                         metrics=['accuracy'])

    return model_concat
Example #15
def get_questions_combined(INPUT_DIM, weights_path=None):
    question_1 = Sequential()
    question_1.add(Embedding(INPUT_DIM, 100, input_length=40, dropout=0.2))
    question_1.add(LSTM(100, dropout_W=0.2, dropout_U=0.2))

    question_2 = Sequential()
    question_2.add(Embedding(INPUT_DIM, 100, input_length=40, dropout=0.2))
    question_2.add(LSTM(100, dropout_W=0.2, dropout_U=0.2))

    questions_combined = Sequential()
    questions_combined.add(Merge([question_1, question_2], mode='concat'))
    questions_combined.add(BatchNormalization())

    questions_combined.add(Dense(100))
    questions_combined.add(PReLU())
    questions_combined.add(Dropout(0.2))
    questions_combined.add(BatchNormalization())

    questions_combined.add(Dense(100))
    questions_combined.add(PReLU())
    questions_combined.add(Dropout(0.2))
    questions_combined.add(BatchNormalization())

    questions_combined.add(Dense(100))
    questions_combined.add(PReLU())
    questions_combined.add(Dropout(0.2))
    questions_combined.add(BatchNormalization())

    questions_combined.add(Dense(100))
    questions_combined.add(PReLU())
    questions_combined.add(Dropout(0.2))
    questions_combined.add(BatchNormalization())

    questions_combined.add(Dense(100))
    questions_combined.add(PReLU())
    questions_combined.add(Dropout(0.2))
    questions_combined.add(BatchNormalization())

    questions_combined.add(Dense(1))
    questions_combined.add(Activation('sigmoid'))

    if weights_path:
        questions_combined.load_weights(weights_path)

    return questions_combined
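A functional-API sketch of the same two-branch question encoder in Keras 2, keeping the original's separate (unshared) encoders; INPUT_DIM here is an assumed value:

# Hypothetical Keras 2 sketch of get_questions_combined's merge step.
from keras.layers import (Input, Embedding, LSTM, Dense, Concatenate,
                          BatchNormalization)
from keras.models import Model

INPUT_DIM, seq_len = 20000, 40  # INPUT_DIM assumed; input_length=40 as above

q1_in = Input(shape=(seq_len,))
q2_in = Input(shape=(seq_len,))

def encode(x):  # builds a fresh (unshared) encoder per call, as in the original
    return LSTM(100)(Embedding(INPUT_DIM, 100)(x))

merged = Concatenate()([encode(q1_in), encode(q2_in)])  # Merge(mode='concat')
out = Dense(1, activation='sigmoid')(BatchNormalization()(merged))
model = Model(inputs=[q1_in, q2_in], outputs=out)

To share encoder weights between the two questions, create the Embedding and LSTM layers once and apply them to both inputs instead.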
Example #16
    def __build_keras_model(self):
        models = []

        model_artist_id = Sequential()
        model_artist_id.add(Embedding(100, 10, input_length=1))
        model_artist_id.add(Reshape(target_shape=(10, )))
        models.append(model_artist_id)

        model_week = Sequential()
        model_week.add(Embedding(7, 2, input_length=1))
        model_week.add(Reshape(target_shape=(2, )))  # 2 = the embedding's output_dim
        models.append(model_week)

        #         model_gender = Sequential()
        #         model_gender.add(Embedding(1, 3, input_length=1))
        #         model_gender.add(Reshape(target_shape=(3,)))
        #         models.append(model_gender)

        model_day = Sequential()
        model_day.add(Embedding(1, 10, input_length=1))
        model_day.add(Reshape(target_shape=(10, )))
        models.append(model_day)

        #         model_language = Sequential()
        #         model_language.add(Embedding(1, 3, input_length=1))
        #         model_language.add(Reshape(target_shape=(3,)))
        #         models.append(model_language)

        model_others = Sequential()
        model_others.add(
            Reshape((self.others_dim, ), input_shape=(self.others_dim, )))
        models.append(model_others)

        self.model = Sequential()
        self.model.add(Merge(models, mode='concat'))
        self.model.add(Dense(100, init='uniform'))
        self.model.add(Activation('relu'))
        self.model.add(Dense(200, init='uniform'))
        self.model.add(Activation('relu'))
        self.model.add(Dense(1))

        self.model.compile(loss='mean_absolute_error', optimizer='adam')
Example #17
def createModel(input_shape, tf_ordering=True, second_phase=False):
    print("Creating new model with input shape", input_shape)

    axis = -1
    if not(tf_ordering):
        axis = 1
    alpha = 0.1
    w_reg = 0.0001
    
    print("Hyperparameters: alpha=%f, w_reg=%f"%(alpha, w_reg))
    
    path1 = Sequential()
    path1.add(Convolution2D(64, 7, 7, border_mode='valid', input_shape = input_shape, W_regularizer=l1l2(l1 = w_reg, l2 = w_reg), trainable=not(second_phase)))
    path1.add(Dropout(alpha))
    path1.add(Activation('relu'))
    path1.add(MaxPooling2D(pool_size=(4,4), strides=(1,1), border_mode='valid'))

    path1.add(Convolution2D(64, 3, 3, border_mode='valid', W_regularizer=l1l2(l1 = w_reg, l2 = w_reg), trainable=not(second_phase)))
    path1.add(Dropout(alpha))
    path1.add(Activation('relu'))
    path1.add(MaxPooling2D(pool_size=(2,2), strides=(1,1), border_mode='valid'))
   
    path2 = Sequential()
    path2.add(Convolution2D(160, 13, 13, border_mode='valid', input_shape = input_shape, W_regularizer=l1l2(l1 = w_reg, l2 = w_reg), trainable=not(second_phase)))
    path2.add(Dropout(alpha))
    path2.add(Activation('relu'))
    
    classification_layer = Sequential()
    classification_layer.add(Merge([path1, path2], mode='concat', concat_axis=axis))
    classification_layer.add(Convolution2D(5, 21, 21, border_mode='valid', W_regularizer=l1l2(l1 = w_reg, l2 = w_reg)))
    classification_layer.add(Dropout(alpha))
    classification_layer.add(Flatten())
    classification_layer.add(Activation('softmax'))
    
    sgd = SGD(lr=0.005, decay = 1e-1, momentum=0.5, nesterov=True)
    adam = Adam(lr=0.0005)
    classification_layer.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy', dice])
    
    classification_layer.summary()
    
    return classification_layer
Example #18
 def create_emb_layer(self):
     iw = Input(shape=(self.max_ngram_num, ),
                dtype='int32',
                name="inputword")
     emb_in = embeddings.Embedding(output_dim=self.vector_size,
                                   input_dim=self.ngram_size,
                                   init="uniform",
                                   mask_zero=True,
                                   name="input_layer")
     vv_iw = emb_in(iw)
     zm = ZeroMaskedEntries()
     zm.build((None, self.max_ngram_num, self.vector_size))
     zero_masked_emd = zm(vv_iw)
     conv_l = convolutional.Convolution1D(self.vector_size,
                                          30,
                                          border_mode='same')
     conv = conv_l(zero_masked_emd)
     sigm_conv = Activation("sigmoid")(conv)
     mult_l = Merge(mode='mul')
     mult = mult_l([conv, sigm_conv])
     pool = pooling.AveragePooling1D(self.max_ngram_num, border_mode="same")
     pool_res = pool(mult)
     return ([iw], emb_in, pool_res)
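The conv output multiplied by its own sigmoid here is a gating pattern; Merge(mode='mul') maps to Multiply in Keras 2. A minimal sketch with assumed shapes:

# Hypothetical Keras 2 sketch of the gated-convolution merge above.
from keras.layers import Input, Conv1D, Activation, Multiply
from keras.models import Model

seq = Input(shape=(50, 64))                # (timesteps, channels) assumed
conv = Conv1D(64, 30, padding='same')(seq)
gate = Activation('sigmoid')(conv)         # same conv output squashed to [0, 1]
gated = Multiply()([conv, gate])           # replaces Merge(mode='mul')
model = Model(inputs=seq, outputs=gated)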
Example #19
def joint():

    input_bone = Input(shape=(None, 1))
    bone = Convolution1D(1, 255, border_mode='same')(input_bone)
    input_air = Input(shape=(None, 1))
    air = Convolution1D(1, 55, border_mode='same')(input_air)
    # average the two paths; mode='ave' is assumed from the variable name
    # (the original Merge() call, with no mode, would default to 'sum')
    avg = Merge(mode='ave')([bone, air])

    conv1 = Convolution1D(30, 55, border_mode='same')(avg)
    batch1 = BatchNormalization(mode=2, axis=-1)(conv1)
    relu1 = LeakyReLU()(batch1)

    conv2 = Convolution1D(15, 55, border_mode='same')(relu1)
    batch2 = BatchNormalization(mode=2, axis=-1)(conv2)
    relu2 = LeakyReLU()(batch2)

    conv3 = Convolution1D(1, 55, border_mode='same')(relu2)
    pred = Activation('tanh')(conv3)

    joint = keras.models.Model(input=[input_bone, input_air], output=pred)

    return joint
Example #20
q2s_tok = sequence.pad_sequences(q2s_tok, maxlen=205)

model1 = Sequential()
model1.add(Embedding(6000, 128, dropout=0.1))
model1.add(LSTM(128, dropout_W=0.3, dropout_U=0.3, return_sequences=True))
model1.add(LSTM(128, dropout_W=0.3, dropout_U=0.3, return_sequences=True))
model1.add(LSTM(128, dropout_W=0.3, dropout_U=0.3))

model2 = Sequential()
model2.add(Embedding(6000, 128, dropout=0.1))
model2.add(LSTM(128, dropout_W=0.3, dropout_U=0.3, return_sequences=True))
model2.add(LSTM(128, dropout_W=0.3, dropout_U=0.3, return_sequences=True))
model2.add(LSTM(128, dropout_W=0.3, dropout_U=0.3))

merged_model = Sequential()
merged_model.add(Merge([model1, model2], mode='concat'))
merged_model.add(Dense(1))

merged_model.add(Activation('sigmoid'))

merged_model.compile(loss=log_loss, optimizer='adam', metrics=['accuracy'])

merged_model.fit([q1s_tok, q2s_tok],
                 y=y_train,
                 batch_size=64,
                 nb_epoch=20,
                 verbose=1,
                 shuffle=True,
                 validation_split=0.1)

merged_model.save('quora.h5')
Example #21
 def build_critic(self):
     """Build critic network
     
     Receives transformed tensors: raw_data, smoothed_data, and downsampled_data
     """
     nf = self.n_feature
     # layer1
     # smoothed input
     sm_model = [Sequential() for _ in range(self.n_smooth)]
     for m in sm_model:
         m.add(
             Lambda(lambda x: x,
                    input_shape=(self.history_length, self.n_stock, 1)))
         m.add(
             Convolution2D(nb_filter=nf,
                           nb_row=self.k_w,
                           nb_col=1,
                           border_mode='same'))
         m.add(BatchNormalization(mode=2, axis=-1))
         m.add(PReLU())
     # down sampled input
     dw_model = [Sequential() for _ in range(self.n_down)]
     for m in dw_model:
         m.add(
             Lambda(lambda x: x,
                    input_shape=(self.history_length, self.n_stock, 1)))
         m.add(
             Convolution2D(nb_filter=nf,
                           nb_row=self.k_w,
                           nb_col=1,
                           border_mode='same'))
         m.add(BatchNormalization(mode=2, axis=-1))
         m.add(PReLU())
     # raw input
     state = Sequential()
     nf = self.n_feature
     state.add(
         Lambda(lambda x: x,
                input_shape=(self.history_length, self.n_stock, 1)))
     state.add(
         Convolution2D(nb_filter=nf,
                       nb_row=self.k_w,
                       nb_col=1,
                       border_mode='same'))
     state.add(BatchNormalization(mode=2, axis=-1))
     state.add(PReLU())
     merged = Merge([
         state,
     ] + sm_model + dw_model,
                    mode='concat',
                    concat_axis=-1)
     # layer2
     nf = nf * 2
     model = Sequential()
     model.add(merged)
     model.add(
         Convolution2D(nb_filter=nf,
                       nb_row=self.k_w,
                       nb_col=1,
                       border_mode='same'))
     model.add(BatchNormalization(mode=2, axis=-1))
     model.add(PReLU())
     model.add(Flatten())
     # layer3
     model.add(Dense(self.n_hidden))
     model.add(BatchNormalization(mode=1, axis=-1))
     model.add(PReLU())
     # layer4
     model.add(Dense(int(np.sqrt(self.n_hidden))))
     model.add(PReLU())
     # output
     model.add(Dense(2 * self.n_stock))
     model.add(Reshape((self.n_stock, 2)))
     return model
Example #22
def main():
	train, val = get_data()
	index_to_word = np.load('data/ixtoword.npy').tolist()
	word_index = dict([(w,i) for i,w in enumerate(index_to_word)])
	X_train = np.asarray(train[0])
	feats = np.asarray(train[1])
	y_train = train[2]
	X_train = pad_sequences(X_train, maxlen=83, padding='post')
	y_train = pad_sequences(y_train, maxlen=84, padding='post')
	dim_embed = 256
	dim_hidden = 256
	learning_rate = 0.01
	EMBEDDING_DIM =100
	maxlen = 84	
	#X = np.zeros((len(X_train), maxlen,  len(index_to_word)), dtype=np.bool)
	y = np.zeros((len(y_train), maxlen,  len(index_to_word)), dtype=np.bool)
	#for i, line in enumerate(X_train):
	#	for t, word in enumerate(line):
	#		X[i, t, word] = 1
	for i, line in enumerate(y_train):
		for t, word in enumerate(line):
			y[i, t, word] = 1
			
			
	GLOVE_DIR = './glove.6B/'
	embeddings_index = {}
	f = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))
	for line in f:
	    values = line.split()
	    word = values[0]
	    coefs = np.asarray(values[1:], dtype='float32')
	    embeddings_index[word] = coefs
	f.close()

	print('Found %s word vectors.' % len(embeddings_index))
	ctr= 0
	embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
	for word, i in word_index.items():
	    embedding_vector = embeddings_index.get(word)
	    if embedding_vector is not None:
	        # words not found in the embedding index stay all-zeros
	        ctr += 1
	        embedding_matrix[i] = embedding_vector
	print(ctr)
	print(len(index_to_word))
	image_model = Sequential()
	image_model.add(Dense(256,input_dim=4096))
	image_model.add(Reshape((1,256)))
	print('Build model...')
	lang_model = Sequential()
	lang_model.add(Embedding(len(word_index) + 1,EMBEDDING_DIM,weights=[embedding_matrix],input_length=83,mask_zero=True,trainable=True))
	lang_model.add(Masking(mask_value=0.))
	#lang_model.add(LSTM(256, dropout_W=0.2, return_sequences=True, activation='tanh'))
	lang_model.add(TimeDistributed(Dense(256)))
	model = Sequential()
	model.add(Merge([image_model, lang_model], mode='concat', concat_axis=1)) 
	model.add(LSTM(256, dropout_W=0.2, return_sequences=True, activation='tanh'))
	model.add(TimeDistributed(Dense(len(index_to_word))))
	model.add(Activation('softmax'))
	model.compile(loss='categorical_crossentropy', optimizer='rmsprop',metrics=['acc'])
	chkpointer = ModelCheckpoint(filepath='wt_epochend.hdf5',monitor='val_acc',verbose = 1, save_best_only=True, mode='max')
	print('Train...')
#	model.fit([feats, X_train], y, batch_size=10, nb_epoch=100,validation_split=0.1,shuffle='True',callbacks=[chkpointer])
#	model.save_weights('wt_epochend.hdf5')
	model.load_weights('wt_epochend.hdf5')
	dataX=[]
	test_feat='./guitar_player.npy'
	fts = [np.load(test_feat)][0]
	start = np.random.randint(0, len(X_train)-1)
	pattern = X_train[start]
	print "Seed:"
	print "\"", ''.join([index_to_word[value] for value in pattern]), "\""
	# generate words
	for i in range(1000):
		x = np.reshape(pattern, (1, len(pattern)))
		x = x / float(3010)
		prediction = model.predict([np.asarray(fts),x], verbose=0)
		index = np.argmax(prediction)
		result = index_to_word[index]
		seq_in = [index_to_word[value] for value in pattern]
		sys.stdout.write(result)
		pattern = np.append(pattern, index)
		pattern = pattern[1:len(pattern)]
	print "\nDone."
Example #23
from keras.layers.core import Dense, Reshape
from keras.layers.embeddings import Embedding
from keras.models import Sequential
import keras.backend as K

vocab_size = 5000
embed_size = 300

word_model = Sequential()
word_model.add(Embedding(vocab_size, embed_size,
                         init="glorot_uniform",
                         input_length=1))
word_model.add(Reshape((embed_size,)))

context_model = Sequential()
context_model.add(Embedding(vocab_size, embed_size,
                            init="glorot_uniform",
                            input_length=1))
context_model.add(Reshape((embed_size,)))

model = Sequential()
model.add(Merge([word_model, context_model], mode="dot"))
model.add(Dense(1, init="glorot_uniform", activation="sigmoid"))

model.compile(loss="mean_squared_error", optimizer="adam")

merge_layer = model.layers[0]
word_model = merge_layer.layers[0]
word_embed_layer = word_model.layers[0]
weights = word_embed_layer.get_weights()[0]
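Merge(mode='dot') scores the two embeddings by dot product; Keras 2 exposes this as the Dot layer. A minimal functional sketch of the same word/context scorer:

# Hypothetical Keras 2 sketch of the dot-product merge above.
from keras.layers import Input, Embedding, Reshape, Dot, Dense
from keras.models import Model

vocab_size, embed_size = 5000, 300

word_in = Input(shape=(1,))
ctx_in = Input(shape=(1,))

def embed(x):  # a fresh embedding per branch, as in the original
    return Reshape((embed_size,))(Embedding(vocab_size, embed_size)(x))

score = Dot(axes=-1)([embed(word_in), embed(ctx_in)])  # Merge(mode='dot')
out = Dense(1, activation='sigmoid')(score)
model = Model(inputs=[word_in, ctx_in], outputs=out)
model.compile(loss='mean_squared_error', optimizer='adam')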
Example #24
def get_model(time_len=1):

    time, ch, row, col = time_len, 3, 160, 320  # camera format

    model1 = Sequential()
    model1.add(
        Lambda(lambda x: x / 127.5 - 1.,
               input_shape=(ch, row, col),
               output_shape=(ch, row, col)))
    model1.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same"))
    model1.add(ELU())
    model1.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
    model1.add(ELU())
    model1.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
    model1.add(Flatten())
    model1.add(Dropout(.2))
    model1.add(ELU())
    model1.add(Dense(512))
    model1.add(Dropout(.5))
    model1.add(ELU())
    model1.add(Dense(1))
    model1.load_weights(
        "./outputs/steering_model_trained/steering_angle.keras")
    #model.compile(optimizer="adam", loss="mse")
    #rnn.add(TimeDistributed(vgg_model, input_shape=(10, 3, 224, 224)))
    #rnn.add(LSTM(10, activation='tanh'))
    #rnn.add(Dense(1, activation='sigmoid'))

    #Loading Trained Weights :: Model2
    model2 = Sequential()
    model2.add(
        Lambda(lambda x: x / 127.5 - 1.,
               input_shape=(time, ch, row, col),
               output_shape=(time, ch, row, col)))
    model2.add(
        TimeDistributed(
            Convolution2D(16,
                          8,
                          8,
                          subsample=(4, 4),
                          border_mode="same",
                          weights=model1.layers[1].get_weights(),
                          trainable=False)))
    model2.add(TimeDistributed(ELU()))
    model2.add(
        TimeDistributed(
            Convolution2D(32,
                          5,
                          5,
                          subsample=(2, 2),
                          border_mode="same",
                          weights=model1.layers[3].get_weights(),
                          trainable=False)))
    model2.add(TimeDistributed(ELU()))
    model2.add(
        TimeDistributed(
            Convolution2D(64,
                          5,
                          5,
                          subsample=(2, 2),
                          border_mode="same",
                          weights=model1.layers[5].get_weights(),
                          trainable=False)))
    model2.add(TimeDistributed(Flatten()))
    model2.add(TimeDistributed(Dropout(.2)))
    model2.add(TimeDistributed(ELU()))
    model2.add(
        TimeDistributed(
            Dense(512, weights=model1.layers[9].get_weights(),
                  trainable=False)))
    #model2.add(TimeDistributed(RepeatVector(time_len)))

    #Merge Model:: Model3
    model3 = Sequential()
    #model3.add(Dense(1, input_dim=(time_len,1))) --> need to check this layer
    model3.add(
        Lambda(lambda x: x,
               input_shape=(time_len, 1),
               output_shape=(time_len, 1)))
    merge = Sequential()
    merge.add(Merge([model2, model3], mode='concat', concat_axis=2))
    merge.add(LSTM(output_dim=1, unroll=True, return_sequences=True))

    merge.compile(optimizer="adam", loss="mse")
    return merge
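Merge(mode='concat', concat_axis=2) here joins two (batch, time, features) tensors along the feature axis; Keras 2 spells this Concatenate(axis=2). A minimal sketch with assumed shapes:

# Hypothetical Keras 2 sketch of the per-timestep feature merge above.
from keras.layers import Input, LSTM, Concatenate
from keras.models import Model

time_len = 8                               # assumed
cnn_feats = Input(shape=(time_len, 512))   # per-frame CNN features
extra = Input(shape=(time_len, 1))         # extra per-frame scalar input
merged = Concatenate(axis=2)([cnn_feats, extra])
out = LSTM(1, return_sequences=True)(merged)
model = Model(inputs=[cnn_feats, extra], outputs=out)
model.compile(optimizer='adam', loss='mse')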
Example #25
    def create_emb_layer(self):
        iw3 = Input(shape=(self.max_ngram_one_class, ),
                    dtype='int32',
                    name="inputword3")
        iw4 = Input(shape=(self.max_ngram_one_class, ),
                    dtype='int32',
                    name="inputword4")
        iw5 = Input(shape=(self.max_ngram_one_class, ),
                    dtype='int32',
                    name="inputword5")
        iw6 = Input(shape=(self.max_ngram_one_class, ),
                    dtype='int32',
                    name="inputword6")
        emb_in = embeddings.Embedding(output_dim=self.vector_size,
                                      input_dim=self.ngram_size,
                                      init="uniform",
                                      mask_zero=True,
                                      name="input_layer")

        vv_iw3 = emb_in(iw3)
        vv_iw4 = emb_in(iw4)
        vv_iw5 = emb_in(iw5)
        vv_iw6 = emb_in(iw6)

        zm = ZeroMaskedEntries()
        zm.build((None, self.max_ngram_num, self.vector_size))

        zero_masked_emd3 = zm(vv_iw3)
        zero_masked_emd4 = zm(vv_iw4)
        zero_masked_emd5 = zm(vv_iw5)
        zero_masked_emd6 = zm(vv_iw6)

        conv_l3 = convolutional.Convolution1D(self.vector_size,
                                              10,
                                              border_mode='same')
        conv3 = conv_l3(zero_masked_emd3)
        conv_l4 = convolutional.Convolution1D(self.vector_size,
                                              10,
                                              border_mode='same')
        conv4 = conv_l4(zero_masked_emd4)
        conv_l5 = convolutional.Convolution1D(self.vector_size,
                                              10,
                                              border_mode='same')
        conv5 = conv_l5(zero_masked_emd5)
        conv_l6 = convolutional.Convolution1D(self.vector_size,
                                              10,
                                              border_mode='same')
        conv6 = conv_l6(zero_masked_emd6)
        pool3 = pooling.AveragePooling1D(self.max_ngram_one_class,
                                         border_mode="same")
        pool_res3 = pool3(conv3)
        pool4 = pooling.AveragePooling1D(self.max_ngram_one_class,
                                         border_mode="same")
        pool_res4 = pool4(conv4)
        pool5 = pooling.AveragePooling1D(self.max_ngram_one_class,
                                         border_mode="same")
        pool_res5 = pool5(conv5)
        pool6 = pooling.AveragePooling1D(self.max_ngram_one_class,
                                         border_mode="same")
        pool_res6 = pool6(conv6)

        merge_conv = Merge(mode='ave', concat_axis=1)
        merged = merge_conv([pool_res3, pool_res4, pool_res5, pool_res6])
        return ([iw3, iw4, iw5, iw6], emb_in, merged)
Example #26
model4.add(Dense(200))
model4.add(Dropout(0.2))
model4.add(BatchNormalization())
model5 = Sequential()
model5.add(
    Embedding(len(word_index) + 1, 300, input_length=max_len, dropout=0.2))
model5.add(LSTM(300, dropout_W=0.2, dropout_U=0.2))

model6 = Sequential()
model6.add(
    Embedding(len(word_index) + 1, 300, input_length=max_len, dropout=0.2))
model6.add(LSTM(300, dropout_W=0.2, dropout_U=0.2))

merged_model = Sequential()
merged_model.add(Merge([model5, model6],
                       mode='concat'))  # model1, model2, model5, model6
merged_model.add(BatchNormalization())

merged_model.add(Dense(200))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())

merged_model.add(Dense(200))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())

#merged_model.add(Dense(200))
#merged_model.add(PReLU())
#merged_model.add(Dropout(0.2))
Example #27
    netSize * numChan, dropout, bias, initStyle, l1Reg, l2Reg
)  #Create the base network that is to be used multiple times (numSampleContext denotes the number of usages)

if numSampleContext == 1:
    inputTensor = Input(shape=(netSize * numChan, ))
    processed = baseNet(inputTensor)
    out = processed
    modelA = Model(input=inputTensor, output=out)
else:
    inputTensor = [
        Input(shape=(netSize * numChan, )) for i in range(numSampleContext)
    ]  #Generate list of input tensors
    processedTensor = [
        baseNet(inputTensor[i]) for i in range(numSampleContext)
    ]  #Generate list of baseNet applications to their respective input tensor
    lay = Merge(mode='concat')(
        processedTensor)  #Merge multiple baseNet instances
    modelA = Model(input=inputTensor, output=lay)  #create Model
if loss == 1:  # Compilation
    modelA.compile(optimizer='Nadam', loss='mse')  #mse Model
elif loss == 2:
    modelA.compile(optimizer='Nadam', loss=corr_loss)  #CorrLoss Model

checkLow = ModelCheckpoint(
    filepath=workingDir + 'weights_lowAcc' + str(GPU) + '.hdf5',
    verbose=0,
    save_best_only=True,
    mode='min',
    monitor='val_loss')  #Checkpoints for the lowest achieved evaluation loss
early = EarlyStopping(
    monitor='val_loss', patience=earlyPatience, mode='min'
)  #Daemon to stop before the maximum number of epochs is reached. It checks whether the validation loss has not decreased for the last 'earlyPatience' epochs
Example #28
input_tensor_45 = Input(batch_shape=(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1]), dtype='float32', name='input_tensor_45')

input_0 = Lambda(lambda x: x,
                 output_shape=(NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1]),
                 batch_input_shape=(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1]),
                 name='lambda_input_0')
input_45 = Lambda(lambda x: x,
                  output_shape=(NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1]),
                  batch_input_shape=(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1]),
                  name='lambda_input_45')

model1 = Sequential()
model1.add(input_0)

model2 = Sequential()
model2.add(input_45)

if debug: print model1.output_shape

model = Sequential()

model.add(Merge([model1, model2], mode=kaggle_input,
                output_shape=lambda x: (BATCH_SIZE * 4 * N_INPUT_VARIATION,
                                        NUM_INPUT_FEATURES, PART_SIZE, PART_SIZE),
                arguments={'part_size': PART_SIZE,
                           'n_input_var': N_INPUT_VARIATION,
                           'include_flip': False,
                           'random_flip': True}))

if debug: print model.output_shape

model.add(Convolution2D(nb_filter=32, nb_row=6, nb_col=6, activation='relu'))
model.add(MaxPooling2D())

model.add(Convolution2D(nb_filter=64, nb_row=5, nb_col=5,activation='relu'))
model.add(MaxPooling2D())

model.add(Convolution2D(nb_filter=128, nb_row=3, nb_col=3,activation='relu'))
model.add(Convolution2D(nb_filter=128, nb_row=3, nb_col=3,activation='relu'))

if debug: print model.output_shape

model.add(MaxPooling2D())
Example #29
def what_d(runtimes=1, renew=True, maxlen=100, file_id=3):

    [glove, [char_X_100, char_X_010, char_X_001, char_Y_10, char_Y_01],
     [word_lens]] = readfile(file_id=file_id)

    char_X_010 = min(char_X_010, maxlen)

    vocab = []
    X = np.zeros((char_X_100, char_X_010, char_X_001), dtype=np.bool)
    y = np.zeros((char_Y_10, char_Y_01 ), dtype=np.float64)

    ii = 0
    for i in range(0, word_lens):
        ttt = glove[i].split()
        ttt_lens = len(ttt)
        lists = ["".join(ttt[0:ttt_lens - char_Y_01])] + ttt[ttt_lens - char_Y_01:]
        lists[0] = re.sub("[^0-9a-zA-Z]", "", lists[0].lower())
        if 0 < len(lists[0]) <= maxlen:
            #print(ii, i)
            vocab.append(lists[0])
            text = lists[0].ljust(char_X_010)
            for j in range(0, char_X_010):
                X[ii, j, char_indices[text[j]]] = 1
            for k in range(1, char_Y_01 + 1):
                y[ii, k - 1] = lists[k]
            ii = ii + 1
            if i % 40000 == 0:
                print(i)

    # Find par.
    lens = []
    for word in vocab:
        lens.append(len(word))
    print(max(lens))   # min(maxlen, char_X_010)
    print(len(vocab))  # 399488
    char_X_100 = len(vocab)
    char_Y_10 = len(vocab)
    X = X[0:len(vocab)]
    y = y[0:len(vocab)]


    # First time: build the model: a bidirectional LSTM
    if renew:
        print('Build model...')
        left = Sequential()
        left.add(LSTM(char_Y_01, input_shape=(char_X_010, char_X_001),
                      activation='tanh', inner_activation='sigmoid',
                      dropout_W=0.5, dropout_U=0.5))
        right = Sequential()
        right.add(LSTM(char_Y_01, input_shape=(char_X_010, char_X_001),
                       activation='tanh', inner_activation='sigmoid',
                       dropout_W=0.5, dropout_U=0.5, go_backwards=True))
        model = Sequential()
        model.add(Merge([left, right], mode='sum'))
        model.add(Dense((char_Y_01), activation='sigmoid'))
        model.compile('Adadelta', 'MSE', metrics=['accuracy'])
        model.fit([X, X], y, batch_size=512, nb_epoch=1)
        model.save(path + "layer_2/bi_LSTM_merge__dense" + str(file_id) + ".pk")


    # Not the first time: load the saved bidirectional LSTM and keep training

    print('Load model...')
    model = load_model(path+"layer_2/bi_LSTM_merge__dense" + str(file_id) + ".pk")
    for j in range(0,runtimes-1):
        print('Build model...')
        model.fit([X,X], y,
                  batch_size=512,
                  nb_epoch=1)
        model.save(path + "layer_2/bi_LSTM_merge__dense" + str(file_id) + ".pk")


    # Test cosine similarity, train set

    print('Test cosine similarity, train set')
    cos = []
    for i in range(0, len(vocab)):
        text = vocab[i].ljust(char_X_010)
        x = np.zeros((1, char_X_010, char_X_001), dtype=np.bool)
        for j in range(0, len(text)):
            x[0, j, char_indices[text[j]]] = 1
        map_LSTM = model.predict([x, x], verbose=0)

        map_GloVe = y[i]

        cos.append(1 - spatial.distance.cosine(map_LSTM, map_GloVe))
    f = open(path+"layer_2/cosine.txt", 'a')
    f.write("20 times bi_LSTM_merge__dense" + str(file_id) + " cosine similarity: "+str(sum(cos)/len(cos))+"\n")
    f.close()


    # Test cosine similarity, misspelling

    print('Test cosine similarity, misspelling')
    cos = []
    change_engs = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
                   'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
    for i in range(0, len(vocab)):
        misspelling = vocab[i]
        if len(misspelling)>4:
            loc = int(np.random.uniform(0,1,1)*len(misspelling))
            cha = int(np.random.uniform(0,1,1)*26)

            tem = list(misspelling)
            tem[loc] = change_engs[cha]

            misspelling = "".join(tem)
            text = misspelling.ljust(char_X_010)
            x = np.zeros((1, char_X_010, char_X_001), dtype=np.bool)
            for j in range(0, len(text)):
                x[0, j, char_indices[text[j]]] = 1
            map_LSTM = model.predict([x, x], verbose=0)
            map_GloVe = y[i]

            cos.append(1 - spatial.distance.cosine(map_LSTM, map_GloVe))
    f = open(path+"layer_2/cosine.txt", 'a')
    f.write("20 times bi_LSTM_merge_dense" + str(file_id) + " misspelling cosine similarity : "+str(sum(cos)/len(cos))+", len: "+str(len(cos))+"\n")
    f.close()
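The forward and go_backwards LSTMs summed with Merge(mode='sum') build a bidirectional layer by hand; Keras 2 provides the Bidirectional wrapper with merge_mode='sum' for exactly this. A minimal sketch with assumed sizes:

# Hypothetical Keras 2 sketch of the hand-built bidirectional sum above.
from keras.layers import Bidirectional, Dense, LSTM
from keras.models import Sequential

timesteps, n_chars, out_dim = 100, 60, 50  # assumed sizes

model = Sequential()
model.add(Bidirectional(LSTM(out_dim), merge_mode='sum',
                        input_shape=(timesteps, n_chars)))
model.add(Dense(out_dim, activation='sigmoid'))
model.compile('adadelta', 'mse', metrics=['accuracy'])

Note the wrapper takes a single input tensor, so the duplicated [X, X] feed in fit() becomes just X.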
Example #30
model4.add(Dropout(0.2))

model4.add(Dense(300))
model4.add(Dropout(0.2))
model4.add(BatchNormalization())
model5 = Sequential()
model5.add(Embedding(len(word_index) + 1, 300, input_length=40, dropout=0.2))
model5.add(LSTM(300, dropout_W=0.2, dropout_U=0.2))

model6 = Sequential()
model6.add(Embedding(len(word_index) + 1, 300, input_length=40, dropout=0.2))
model6.add(LSTM(300, dropout_W=0.2, dropout_U=0.2))

merged_model = Sequential()
merged_model.add(
    Merge([model1, model2, model3, model4, model5, model6], mode='concat'))
merged_model.add(BatchNormalization())

merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())

merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())

merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))