Example #1
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Convolution2D, Dense, Flatten, MaxPooling2D


def MNIST_CNY19(classes, input_shape, weights=None):
    model = Sequential()

    model.add(
        Convolution2D(40, (5, 5),
                      strides=(1, 1),
                      input_shape=input_shape,
                      activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(20, (5, 5), strides=(1, 1), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(320, activation='relu'))
    model.add(Dense(160, activation='relu'))
    model.add(Dense(80, activation='relu'))
    model.add(Dense(40, activation='relu'))
    model.add(Dense(classes, activation='softmax'))

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model
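
# Usage sketch (assumption, not from the original snippet): builds the model
# above for 28x28 grayscale MNIST digits and fits one epoch on a dummy batch;
# integer labels match the sparse_categorical_crossentropy loss.
import numpy as np

mnist_model = MNIST_CNY19(classes=10, input_shape=(28, 28, 1))
x_dummy = np.random.rand(32, 28, 28, 1).astype("float32")  # fake image batch
y_dummy = np.random.randint(0, 10, size=(32,))  # sparse integer labels
mnist_model.fit(x_dummy, y_dummy, epochs=1, batch_size=32, verbose=0)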
Example #2
    def __init__(self, latent_dim=49):
        config = ConfigProto()
        config.gpu_options.allow_growth = True
        session = InteractiveSession(config=config)

        # ENCODER
        inp = Input((896, 896, 1))
        e = Conv2D(32, (10, 10), activation='relu')(inp)
        e = MaxPooling2D((10, 10))(e)
        e = Conv2D(64, (6, 6), activation='relu')(e)
        e = MaxPooling2D((10, 10))(e)
        e = Conv2D(64, (3, 3), activation='relu')(e)
        l = Flatten()(e)
        l = Dense(latent_dim, activation='softmax')(l)
        # DECODER
        d = Reshape((7, 7, 1))(l)  # assumes latent_dim == 49 (reshaped to 7x7)
        d = Conv2DTranspose(64, (3, 3),
                            strides=8,
                            activation='relu',
                            padding='same')(d)
        d = BatchNormalization()(d)
        d = Conv2DTranspose(64, (3, 3),
                            strides=8,
                            activation='relu',
                            padding='same')(d)
        d = BatchNormalization()(d)
        d = Conv2DTranspose(64, (3, 3),
                            strides=2,
                            activation='relu',
                            padding='same')(d)
        d = BatchNormalization()(d)
        d = Conv2DTranspose(32, (3, 3), activation='relu', padding='same')(d)
        decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(d)

        self.CAD = tf.keras.Model(inp, decoded)
        opt = tf.keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)

        self.CAD.compile(loss="binary_crossentropy",
                         optimizer=opt,
                         metrics=["accuracy"])

        self.Flow = tf.keras.Sequential([
            tf.keras.layers.LSTM(32, input_shape=(3, 2),
                                 return_sequences=True),
            tf.keras.layers.Dropout(0.4),
            tf.keras.layers.Bidirectional(
                tf.keras.layers.LSTM(32, return_sequences=True)),
            tf.keras.layers.Dropout(0.4),
            tf.keras.layers.TimeDistributed(
                tf.keras.layers.Dense(10, activation='relu')),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(2, activation='relu')
        ])
        opt = tf.keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)
        self.Flow.compile(loss="binary_crossentropy",
                          optimizer=opt,
                          metrics=["accuracy"])

        print(self.Flow.summary())
        print(self.CAD.summary())
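
# Usage sketch (assumption, not from the original snippet): the enclosing
# class name is not shown, so `CADFlow` below is a placeholder. The CAD
# branch is an autoencoder, so a batch of 896x896x1 frames serves as both
# input and target.
#
#   net = CADFlow(latent_dim=49)
#   frames = np.random.rand(4, 896, 896, 1).astype("float32")
#   net.CAD.fit(frames, frames, epochs=1, batch_size=2, verbose=0)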
Example #3
def CIFAR_CNY19(classes, input_shape, weights=None):
    model = Sequential()

    model.add(
        Convolution2D(40, (5, 5), strides=(1, 1), input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))

    model.add(Convolution2D(20, (5, 5), strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(240, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(84, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(classes, activation='softmax'))

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    return model
Example #4
from typing import Any, List

from tensorflow import keras
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Dense, Flatten


def build_mnist_model(layer_data: List[int],
                      num_classes: int,
                      input_shape: Any,
                      learning_rate: float,
                      regularized: bool = False) -> Model:
    model: Model = Sequential()
    model.add(Flatten(input_shape=input_shape))
    if regularized:
        for nodes in layer_data:
            model.add(
                Dense(nodes,
                      activation="relu",
                      kernel_regularizer=keras.regularizers.l1(0.001)))
        model.add(
            Dense(num_classes,
                  activation="softmax",
                  kernel_regularizer=keras.regularizers.l1(0.001)))
    else:
        for nodes in layer_data:
            model.add(Dense(nodes, activation="relu"))
        model.add(Dense(num_classes, activation="softmax"))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(learning_rate),
                  metrics=["accuracy"])
    return model
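
# Usage sketch (assumption, not from the original snippet): a two-hidden-layer
# MLP over flattened 28x28 inputs; labels must be one-hot encoded because the
# model compiles with categorical_crossentropy.
mlp = build_mnist_model(layer_data=[128, 64],
                        num_classes=10,
                        input_shape=(28, 28),
                        learning_rate=1e-3,
                        regularized=True)
mlp.summary()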
Example #5
    def __init__(self):
        super(Z2Model, self).__init__()
        # self.gcnn1 = tf.keras.layers.Conv2D(filters=20, kernel_size=(3, 3), activation='relu')
        # self.gcnn2 = tf.keras.layers.Conv2D(filters=20, kernel_size=(3, 3), activation='relu')
        # self.gcnn3 = tf.keras.layers.Conv2D(filters=20, kernel_size=(3, 3), activation='relu')
        # self.gcnn4 = tf.keras.layers.Conv2D(filters=20, kernel_size=(3, 3), activation='relu')
        # self.gcnn5 = tf.keras.layers.Conv2D(filters=20, kernel_size=(3, 3), activation='relu')
        # self.gcnn6 = tf.keras.layers.Conv2D(filters=20, kernel_size=(3, 3), activation='relu')
        # self.gcnn7 = tf.keras.layers.Conv2D(filters=20, kernel_size=(4, 4), activation='relu')

        self.gcnn1 = ConvBatchLayer(
            conv=Conv2D(filters=20, kernel_size=(3, 3), activation='relu'))
        self.gcnn2 = ConvBatchLayer(
            conv=Conv2D(filters=20, kernel_size=(3, 3), activation='relu'))
        self.gcnn3 = ConvBatchLayer(
            conv=Conv2D(filters=20, kernel_size=(3, 3), activation='relu'))
        self.gcnn4 = ConvBatchLayer(
            conv=Conv2D(filters=20, kernel_size=(3, 3), activation='relu'))
        self.gcnn5 = ConvBatchLayer(
            conv=Conv2D(filters=20, kernel_size=(3, 3), activation='relu'))
        self.gcnn6 = ConvBatchLayer(
            conv=Conv2D(filters=20, kernel_size=(3, 3), activation='relu'))
        self.gcnn7 = ConvBatchLayer(conv=Conv2D(filters=9, kernel_size=(3, 3)))
        self.flatten = Flatten()
        self.dense = Dense(9)
Example #6
from tensorflow.keras import Model
from tensorflow.keras.layers import (BatchNormalization, Conv2D, Dense,
                                     Flatten, Input, LeakyReLU)


def build_policy_network(shape, action_size, regularizer):
    policy_input = Input(shape)
    c1 = Conv2D(filters=1, kernel_size=1, padding='same', activation='linear',
                kernel_regularizer=regularizer)(policy_input)
    b1 = BatchNormalization(axis=-1)(c1)
    l1 = LeakyReLU()(b1)
    f1 = Flatten()(l1)
    d1 = Dense(action_size, use_bias=False, activation='sigmoid', kernel_regularizer=regularizer)(f1)
    policy_model = Model(inputs=policy_input, outputs=d1)
    return policy_model
Example #7
def build_value_network(shape, value_support_size):
    value_input = Input(shape)
    c1 = Conv2D(filters=1, kernel_size=1, padding='same', activation='linear')(value_input)
    b1 = BatchNormalization(axis=-1)(c1)
    l1 = LeakyReLU()(b1)
    f1 = Flatten()(l1)
    d1 = Dense(20, use_bias=False, activation='linear')(f1)
    l2 = LeakyReLU()(d1)
    d2 = Dense(value_support_size, use_bias=False, activation='tanh')(l2)
    value_model = Model(inputs=value_input, outputs=d2)
    return value_model
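
# Usage sketch (assumption, not from the original snippets): AlphaZero-style
# policy and value heads built over the same board-shaped state; the 8x8x17
# shape, action count, and support size are placeholder values.
from tensorflow.keras import regularizers

policy_net = build_policy_network((8, 8, 17), action_size=4672,
                                  regularizer=regularizers.l2(1e-4))
value_net = build_value_network((8, 8, 17), value_support_size=21)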
Example #8
def build_model():
    model = keras.Sequential()
    model.add(
        Conv2D(64, kernel_size=3, activation='relu', input_shape=(28, 28, 1)))
    model.add(Conv2D(32, kernel_size=3, activation='relu'))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #9
def build_reward_network(shape, filter_size1=3, filter_size2=1):
    reward_input = Input(shape)
    c1 = Conv2D(filters=filter_size1, kernel_size=(3, 3), strides=2,
                padding='same', activation='relu')(reward_input)
    a1 = AveragePooling2D(strides=2)(c1)
    c2 = Conv2D(filters=filter_size2, kernel_size=(3, 3), strides=1,
                padding='same', activation='relu')(a1)
    a2 = AveragePooling2D(strides=2)(c2)
    f1 = Flatten()(a2)

    model = Model(inputs=reward_input, outputs=f1)
    return model
Example #10
import inspect
import types
from typing import cast

from tensorflow.keras import Model, regularizers
from tensorflow.keras.layers import (BatchNormalization, Conv2D, Dense,
                                     Dropout, Flatten, GlobalMaxPooling2D,
                                     Input, MaxPooling2D)


def SingleOutputCNN(
    input_shape,
    output_shape,
    cnns_per_maxpool=1,
    maxpool_layers=1,
    dense_layers=1,
    dense_units=64,
    dropout=0.25,
    regularization=False,
    global_maxpool=False,
    name='',
) -> Model:
    function_name = cast(types.FrameType,
                         inspect.currentframe()).f_code.co_name
    model_name = f"{function_name}-{name}" if name else function_name
    # model_name  = seq([ function_name, name ]).filter(lambda x: x).make_string("-")  # remove dependency on pyfunctional - not in Kaggle repo without internet

    inputs = Input(shape=input_shape)
    x = inputs

    for cnn1 in range(0, maxpool_layers):
        for cnn2 in range(1, cnns_per_maxpool + 1):
            x = Conv2D(32 * cnn2,
                       kernel_size=(3, 3),
                       padding='same',
                       activation='relu')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)

    if global_maxpool:
        x = GlobalMaxPooling2D()(x)

    x = Flatten()(x)

    for nn1 in range(0, dense_layers):
        if regularization:
            x = Dense(dense_units,
                      activation='relu',
                      kernel_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l1(0.01))(x)
        else:
            x = Dense(dense_units, activation='relu')(x)

        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)

    x = Dense(output_shape, activation='softmax')(x)

    model = Model(inputs, x, name=model_name)
    # plot_model(model, to_file=os.path.join(os.path.dirname(__file__), f"{name}.png"))
    return model
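
# Usage sketch (assumption, not from the original snippet): a two-block CNN
# for 28x28 grayscale digits; compilation is left to the caller because the
# function above only builds the graph.
cnn = SingleOutputCNN(input_shape=(28, 28, 1),
                      output_shape=10,
                      cnns_per_maxpool=2,
                      maxpool_layers=2,
                      name='digits')
cnn.compile(optimizer='adam',
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])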
Example #11
from tensorflow import keras
from tensorflow.keras import losses, metrics, optimizers
from tensorflow.keras.layers import (Conv1D, Dense, Embedding, Flatten,
                                     GlobalAveragePooling1D,
                                     GlobalMaxPooling1D, MaxPooling1D)
from tensorflow.keras.regularizers import l2


def construct_keras_model(model_type, embedding_weights):
    keras_model = keras.Sequential()
    keras_model.add(
        Embedding(creative_id_window,
                  embedding_size,
                  input_length=max_len,
                  weights=[embedding_weights],
                  trainable=False))
    if model_type == 'MLP':
        keras_model.add(Flatten())
    elif model_type == 'GM':
        keras_model.add(GlobalMaxPooling1D())
    elif model_type == 'GA':
        keras_model.add(GlobalAveragePooling1D())
    elif model_type == 'Conv1D':
        keras_model.add(Conv1D(64, 2))
        keras_model.add(MaxPooling1D())
        keras_model.add(Conv1D(64, 2))
        keras_model.add(MaxPooling1D())
        keras_model.add(Flatten())
    else:
        raise Exception("Invalid network model type")

    # keras_model.add(Dropout(0.5))
    # keras_model.add(BatchNormalization())
    # keras_model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.001)))
    keras_model.add(Dense(32, activation='relu', kernel_regularizer=l2(0.001)))
    # keras_model.add(Dropout(0.5))
    # keras_model.add(BatchNormalization())
    keras_model.add(
        Dense(1, activation='sigmoid', kernel_regularizer=l2(0.001)))
    keras_model.summary()
    # print("保存模型的原始结构:", keras_model.save('model/word2vec/{0}_m0_{1}.h5'.format(model_type, label_name)))
    keras_model.compile(optimizer=optimizers.RMSprop(learning_rate=RMSProp_lr),
                        loss=losses.binary_crossentropy,
                        metrics=[metrics.binary_accuracy])
    return keras_model
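
# Usage sketch (assumption, not from the original snippet): the function above
# reads module-level globals, so placeholder values are defined here; a real
# run would load `embedding_weights` from a trained word2vec model.
import numpy as np

creative_id_window, embedding_size, max_len = 50000, 32, 128
RMSProp_lr = 6e-04
embedding_weights = np.zeros((creative_id_window, embedding_size))
gm_model = construct_keras_model('GM', embedding_weights)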
Example #12
    def Train(self, inputs, target):
        X_train, X_test, Y_train, Y_test = train_test_split(inputs, target, train_size=0.75)
        Y_train = np.asarray(Y_train)
        Y_test = np.array(Y_test)
        X_train = np.reshape(X_train, [-1, X_train[0].shape[0], X_train[0].shape[1]])
        X_test = np.reshape(X_test, [-1, X_train[0].shape[0], X_train[0].shape[1]])

        model = Sequential()
        model.add(Conv1D(16, 3, padding='same', input_shape=inputs[0].shape))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization())
        model.add(GRU(16, return_sequences=True))
        # model.add(Activation("sigmoid"))
        # model.add(LSTM(lstm_out))

        model.add(Flatten())
        model.add(Dense(8, activity_regularizer=l2(0.001)))
        # model.add(GRU(lstm_out, return_sequences=True))
        # model.add(LSTM(lstm_out))
        # model.add(Dense(20, activity_regularizer=l2(0.001)))
        model.add(Activation("relu"))
        model.add(Dense(2))

        model.compile(loss=mean_absolute_error, optimizer='nadam',
                      metrics=[RootMeanSquaredError(), MAE])
        print(model.summary())

        batch_size = 12
        epochs = 100
        reduce_lr_acc = ReduceLROnPlateau(monitor='val_loss', factor=0.9, patience=epochs // 10, verbose=1, min_delta=1e-4, mode='min')
        model.fit(X_train, Y_train,
                  epochs=epochs,
                  batch_size=batch_size, validation_data=(X_test, Y_test), callbacks=[reduce_lr_acc])
        model.save("PositionEstimation.h5", overwrite=True)
        # acc = model.evaluate(X_test,
        #                      Y_test,
        #                      batch_size=batch_size,
        #                      verbose=0)

        predicted = model.predict(X_test, batch_size=batch_size)
        # predicted = out.ravel()

        res = pd.DataFrame({"predicted_x": predicted[:, 0],
                            "predicted_y": predicted[:, 1],
                            "original_x": Y_test[:, 0],
                            "original_y": Y_test[:, 1]})
        res.to_excel("res.xlsx")
Example #13
    def get_model(self):
        model = Sequential()
        model.add(Conv2D(32, kernel_size=(2, 2), activation='relu',
                         input_shape=(self.feature_dim_1, self.feature_dim_2, self.channel)))
        model.add(Conv2D(64, kernel_size=(2, 2), activation='relu'))
        model.add(Conv2D(128, kernel_size=(2, 2), activation='relu'))
        model.add(MaxPool2D(pool_size=(1, 1)))
        model.add(Dropout(0.5))
        model.add(Conv2D(128, kernel_size=(2, 2), activation='relu'))
        model.add(Conv2D(256, kernel_size=(2, 2), activation='relu'))
        model.add(MaxPool2D(pool_size=(1, 1)))
        model.add(Dropout(0.5))
        model.add(Conv2D(128, kernel_size=(2, 2), activation='relu'))
        model.add(Conv2D(256, kernel_size=(4, 4), activation='relu'))
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dropout(0.5))
        model.add(Dense(256, kernel_regularizer=regularizers.l2(0.2), activation='relu'))
        model.add(Dense(32, kernel_regularizer=regularizers.l2(0.2), activation='relu'))
        model.add(Dense(self.num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
        return model
Example #14
    def _build_model(self, hl1_dims, hl2_dims, hl3_dims, input_layer_size,
                     output_layer_size, optimizer, loss):

        input_v = Input(shape=(1, input_layer_size))

        branch_v = Flatten()(input_v)
        branch_v = Dense(hl1_dims, activation='relu')(branch_v)
        branch_v = BatchNormalization()(branch_v)
        branch_v = Dense(hl2_dims, activation='relu')(branch_v)
        branch_v = BatchNormalization()(branch_v)
        out_v = Dense(hl3_dims, activation='relu')(branch_v)
        out_v = BatchNormalization()(out_v)
        out_v = Dense(output_layer_size, activation='linear')(out_v)
        model = Model(inputs=input_v, outputs=out_v)

        #model = Model(inputs=m_v.inputs, outputs=out_v)
        # print(model.summary())

        model.compile(
            optimizer=optimizer, loss=loss
        )  # Use Huber Loss Function for DQN based on TensorBoard analysis

        return model
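
# Usage sketch (assumption, not from the original snippet): called from
# inside the owning agent class with DQN-style placeholder dimensions; the
# Huber loss matches the comment above.
#
#   model = self._build_model(hl1_dims=128, hl2_dims=64, hl3_dims=32,
#                             input_layer_size=24, output_layer_size=4,
#                             optimizer='adam',
#                             loss=tf.keras.losses.Huber())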
Example #15
        def __init__(self,
                     hidden_layer_sizes=(100, ),
                     activation="relu",
                     solver='adam',
                     alpha=0.0001,
                     batch_size='auto',
                     learning_rate="constant",
                     learning_rate_init=0.001,
                     power_t=0.5,
                     max_iter=200,
                     shuffle=True,
                     random_state=None,
                     tol=1e-4,
                     verbose=False,
                     warm_start=False,
                     momentum=0.9,
                     nesterovs_momentum=True,
                     early_stopping=False,
                     validation_fraction=0.1,
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=1e-8,
                     n_iter_no_change=10,
                     max_fun=15000,
                     conf=None):
            super().__init__(hidden_layer_sizes=hidden_layer_sizes,
                             activation=activation,
                             solver=solver,
                             alpha=alpha,
                             batch_size=batch_size,
                             learning_rate=learning_rate,
                             learning_rate_init=learning_rate_init,
                             power_t=power_t,
                             max_iter=max_iter,
                             loss='log_loss',
                             shuffle=shuffle,
                             random_state=random_state,
                             tol=tol,
                             verbose=verbose,
                             warm_start=warm_start,
                             momentum=momentum,
                             nesterovs_momentum=nesterovs_momentum,
                             early_stopping=early_stopping,
                             validation_fraction=validation_fraction,
                             beta_1=beta_1,
                             beta_2=beta_2,
                             epsilon=epsilon,
                             n_iter_no_change=n_iter_no_change,
                             max_fun=max_fun)
            # Load model
            self.conf = conf
            self.logger = loggerElk(__name__, True)

            # Building the model
            self.classifier = Sequential()

            # Building the network layer by layer
            # Step 1- Convolution
            self.classifier.add(
                Convolution2D(128, (5, 5),
                              input_shape=(self.conf.nn_image_size,
                                           self.conf.nn_image_size, 1),
                              activation='relu'))
            # adding another layer
            self.classifier.add(Convolution2D(64, (4, 4), activation='relu'))
            # Pooling it
            self.classifier.add(MaxPooling2D(pool_size=(2, 2)))
            # Adding another layer
            self.classifier.add(Convolution2D(32, (3, 3), activation='relu'))
            # Pooling
            self.classifier.add(MaxPooling2D(pool_size=(2, 2)))
            # Adding another layer
            self.classifier.add(Convolution2D(32, (3, 3), activation='relu'))
            # Pooling
            self.classifier.add(MaxPooling2D(pool_size=(2, 2)))
            # Step 2- Flattening
            self.classifier.add(Flatten())
            # Step 3- Full connection
            self.classifier.add(Dense(units=128, activation='relu'))
            self.classifier.add(Dropout(0.02))
            # For the output step
            self.classifier.add(
                Dense(units=self.conf.nn_class_size, activation='softmax'))
            # Add regularizers
            # classifier.add(Dense(128,
            #                input_dim = 128,
            #                kernel_regularizer = regularizers.l1(0.001),
            #                activity_regularizer = regularizers.l1(0.001),
            #                activation = 'relu'))

            self.classifier.compile(optimizer='adam',
                                    loss='categorical_crossentropy',
                                    metrics=['accuracy'])
Example #16
#  one-hot encoding: dogs are represented by [1, 0] and cats by [0, 1]
#plots(imgs, titles=labels)
#plt.show()

model = Sequential([
    #  2-dimensional convolutional layer because of 2D image
    # You can experiment by choosing different values for these parameters.
    # 32 output filters
    # Kernel of 3*3
    # input_shape declares the expected image size; resizing images smaller (e.g. to 64*64) speeds up training
    Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(224,224,3)),
    # We’ll then Flatten the output from the convolutional layer and pass it to a Dense layer
    # Take previous layer to 1d tensor
    Flatten(),
    # Output for 2 options: cats or dogs
    Dense(2, activation='softmax'),
    ])

model.summary()

model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])

model.fit(train_batches, steps_per_epoch=4,
          validation_data=valid_batches, validation_steps=4, epochs=5, verbose=2)

test_imgs, test_labels = next(test_batches)
# show images with encoding
#plots(test_imgs, titles=test_labels)
#plt.show()
Example #17
# dataset reshape
# array -> category
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# scaling
X_train = X_train / 255.0
X_test = X_test / 255.0

# modeling
model = Sequential()
model.add(Dense(784, input_shape=(28, 28), activation='relu'))  # Dense maps each 28-value row to 784 units
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.summary()

# compiling the model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# input dataset in model
model.fit(X_train, y_train, batch_size=200, epochs=1, validation_split=0.2)
'''
48000/48000 [==============================] - 22s 462us/sample - loss: 0.2445 - accuracy: 0.9248 - val_loss: 0.1263 - val_accuracy: 0.9633
'''
Example #18
    def build_model(self, model_name, query_dim, terms_dim, output_dim,
                    word_embedding):

        self.model_name = model_name

        query_input = Input(shape=(query_dim, ), name='query_input')
        terms_input = Input(shape=(terms_dim, ), name='terms_input')

        if model_name == 'lstm':
            embedding_feature_block = Sequential(layers=[
                Embedding(word_embedding.vocabulary_size,
                          word_embedding.dimensions,
                          weights=[word_embedding.embedding_matrix],
                          trainable=True,
                          mask_zero=False),
                BatchNormalization(),
                LSTM(64, return_sequences=True)
            ])

        elif model_name == 'bilstm':
            embedding_feature_block = Sequential(layers=[
                Embedding(word_embedding.vocabulary_size,
                          word_embedding.dimensions,
                          weights=[word_embedding.embedding_matrix],
                          trainable=True,
                          mask_zero=False),
                BatchNormalization(),
                Bidirectional(LSTM(64, return_sequences=True))
            ])

        else:  # default cnn
            embedding_feature_block = Sequential(layers=[
                Embedding(word_embedding.vocabulary_size,
                          word_embedding.dimensions,
                          weights=[word_embedding.embedding_matrix],
                          trainable=True,
                          mask_zero=False),
                BatchNormalization(),
                Conv1D(filters=64, kernel_size=3, strides=1),
                MaxPooling1D(pool_size=3)
            ])

        # Features
        query_feature = embedding_feature_block(query_input)
        terms_feature = embedding_feature_block(terms_input)

        # Query-Terms alignment
        attention = Dot(axes=-1)([query_feature, terms_feature])
        softmax_attention = Lambda(lambda x: softmax(x, axis=1),
                                   output_shape=unchanged_shape)(attention)
        terms_aligned = Dot(axes=1)([softmax_attention, terms_feature])

        # Aligned features
        if model_name == 'lstm':
            flatten_layer = LSTM(128, return_sequences=False)(terms_aligned)

        elif model_name == 'bilstm':
            flatten_layer = Bidirectional(LSTM(
                128, return_sequences=False))(terms_aligned)

        else:  # default cnn
            merged_cnn = Conv1D(filters=128, kernel_size=3,
                                strides=1)(terms_aligned)
            merged_cnn = MaxPooling1D(pool_size=3)(merged_cnn)
            flatten_layer = Flatten()(merged_cnn)

        # Output
        dense = BatchNormalization()(flatten_layer)
        dense = Dense(64, activation='sigmoid')(dense)
        out = Dense(output_dim, activation='linear')(dense)

        self.model = Model(inputs=[query_input, terms_input], outputs=out)
        self.model.compile(optimizer='adam', loss=losses.mean_squared_error)
        self.model.summary()
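
# Usage sketch (assumption, not from the original snippet): `word_embedding`
# only needs the three attributes read above; `ranker` stands in for an
# instance of the (unshown) enclosing class. query_dim and terms_dim are kept
# equal so the Dot(axes=1) alignment contracts matching sequence lengths.
#
#   import numpy as np
#   from types import SimpleNamespace
#   word_embedding = SimpleNamespace(vocabulary_size=10000,
#                                    dimensions=100,
#                                    embedding_matrix=np.random.rand(10000, 100))
#   ranker.build_model('bilstm', query_dim=30, terms_dim=30,
#                      output_dim=1, word_embedding=word_embedding)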