Example #1
def create_malstm_model(max_seq_length, embedding_dims, embeddings):
    # Parameters
    dropout_lstm = 0.23
    dropout_dense = 0.23
    regularizing = 0.002

    n_hidden = 300
    # Input layers
    left_input = layers.Input(shape=(max_seq_length, ), dtype='int32')
    right_input = layers.Input(shape=(max_seq_length, ), dtype='int32')

    embedding_layer = layers.Embedding(len(embeddings),
                                       embedding_dims,
                                       weights=[embeddings],
                                       input_length=max_seq_length,
                                       trainable=False)

    # Embedded version of the inputs
    encoded_left = embedding_layer(left_input)
    encoded_right = embedding_layer(right_input)

    # Since this is a siamese network, both sides share the same LSTM
    shared_lstm = layers.LSTM(n_hidden,
                              dropout=dropout_lstm,
                              kernel_regularizer=regularizers.l2(regularizing),
                              recurrent_dropout=dropout_lstm)

    left_output = shared_lstm(encoded_left)
    right_output = shared_lstm(encoded_right)

    # Concatenate the two question representations
    concatenated = layers.Concatenate()([left_output, right_output])
    concatenated = layers.Dropout(dropout_dense)(concatenated)
    concatenated = layers.BatchNormalization()(concatenated)

    concatenated = layers.Dense(
        150,
        kernel_regularizer=regularizers.l2(regularizing),
        activation='relu')(concatenated)
    concatenated = layers.Dropout(dropout_dense)(concatenated)
    concatenated = layers.BatchNormalization()(concatenated)

    concatenated = layers.Dense(
        70,
        kernel_regularizer=regularizers.l2(regularizing),
        activation='relu')(concatenated)
    concatenated = layers.Dropout(dropout_dense)(concatenated)
    concatenated = layers.BatchNormalization()(concatenated)

    concatenated = layers.Dense(
        35,
        kernel_regularizer=regularizers.l2(regularizing),
        activation='relu')(concatenated)
    concatenated = layers.Dropout(dropout_dense)(concatenated)
    concatenated = layers.BatchNormalization()(concatenated)

    output = layers.Dense(1, activation='sigmoid')(concatenated)

    return Model([left_input, right_input], output)
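A minimal usage sketch (the vocabulary size, random embedding matrix, and training data below are placeholders, not from the original project):

import numpy as np

vocab_size, embedding_dims, max_seq_length = 10000, 50, 20
embeddings = np.random.rand(vocab_size, embedding_dims)  # one row per token id

model = create_malstm_model(max_seq_length, embedding_dims, embeddings)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

left = np.random.randint(0, vocab_size, size=(32, max_seq_length))
right = np.random.randint(0, vocab_size, size=(32, max_seq_length))
labels = np.random.randint(0, 2, size=(32, 1))
model.fit([left, right], labels, epochs=1)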
Example #2
    def __init__(self, _lambda, layer_dims, num_classes=10, kind='res'):
        super(ResNet, self).__init__()
        self.stem = Sequential([
            layers.Conv2D(64, (3, 3), strides=(1, 1)),
            layers.BatchNormalization(),
            layers.ReLU(),
            layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
        ])
        self.layer1 = self.build_block(64, layer_dims[0], kind=kind)
        self.layer2 = self.build_block(128, layer_dims[1], stride=2, kind=kind)
        self.layer3 = self.build_block(256, layer_dims[2], stride=2, kind=kind)
        self.layer4 = self.build_block(512, layer_dims[3], stride=2, kind=kind)
        self.avg_pool = layers.GlobalAveragePooling2D()
        self.fcm = layers.Dense(1000, activation='relu',
                                kernel_regularizer=regularizers.l2(_lambda))
        self.fc = layers.Dense(num_classes)
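The constructor calls a build_block helper that is not part of this snippet. A hedged sketch of what it might look like, assuming a standard ResNet stage built from a BasicBlock residual unit defined elsewhere in the project:

    def build_block(self, filter_num, blocks, stride=1, kind='res'):
        # Assumption: BasicBlock is the project's residual unit; only the
        # first unit of a stage downsamples, the rest keep stride 1. The
        # 'kind' flag presumably selects the unit variant and is ignored here.
        res_blocks = Sequential()
        res_blocks.add(BasicBlock(filter_num, stride=stride))
        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, stride=1))
        return res_blocks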
Example #3
def modify_model(model: Model, class_index: int,
                 importance_type: ImportanceType) -> Model:
    gamma_initializer: str = "zeros"
    if importance_type & ImportanceType.GAMMA:
        gamma_initializer = "ones"

    gamma_regularizer = None
    if importance_type & ImportanceType.L1 and not importance_type & ImportanceType.L2:
        gamma_regularizer = l1()
    if not importance_type & ImportanceType.L1 and importance_type & ImportanceType.L2:
        gamma_regularizer = l2()
    if importance_type & ImportanceType.L1 and importance_type & ImportanceType.L2:
        gamma_regularizer = l1_l2()

    max_layer: int = len(model.layers)
    last_output = None     # running output tensor of the rebuilt graph
    network_input = None   # the original model's input tensor
    for i, layer in enumerate(model.layers):
        if i == 0:
            last_output = layer.output
            network_input = layer.input
        if 0 < i < max_layer:
            new_layer: BatchNormalization = BatchNormalization(
                center=bool(importance_type & ImportanceType.CENTERING),
                gamma_initializer=gamma_initializer,
                gamma_regularizer=gamma_regularizer)
            last_output = new_layer(last_output)
        if i == max_layer - 1:
            new_end_layer: Dense = Dense(2,
                                         activation="softmax",
                                         name="binary_output_layer")
            last_output = new_end_layer(last_output)

            old_weights = layer.get_weights()
            old_weights[0] = np.transpose(old_weights[0], (1, 0))
            new_weights: List[np.ndarray] = [
                np.append(old_weights[0][class_index:class_index + 1],
                          np.subtract(
                              np.sum(old_weights[0], axis=0, keepdims=True),
                              old_weights[0][class_index:class_index + 1]),
                          axis=0),
                np.append(old_weights[1][class_index:class_index + 1],
                          np.subtract(
                              np.sum(old_weights[1], axis=0, keepdims=True),
                              old_weights[1][class_index:class_index + 1]),
                          axis=0)
            ]
            new_weights[0] = np.transpose(new_weights[0], (1, 0))
            new_end_layer.set_weights(new_weights)
        elif i > 0:
            last_output = layer(last_output)

    return Model(inputs=network_input, outputs=last_output)
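A hypothetical invocation (the model path and flag combination are placeholders; ImportanceType is assumed to be a Flag enum from the same project):

base = tf.keras.models.load_model("classifier.h5")  # placeholder path
binary = modify_model(base, class_index=3,
                      importance_type=ImportanceType.GAMMA | ImportanceType.L1)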
Example #4
def SingleOutputCNN(
    input_shape,
    output_shape,
    cnns_per_maxpool=1,
    maxpool_layers=1,
    dense_layers=1,
    dense_units=64,
    dropout=0.25,
    regularization=False,
    global_maxpool=False,
    name='',
) -> Model:
    function_name = cast(types.FrameType,
                         inspect.currentframe()).f_code.co_name
    model_name = f"{function_name}-{name}" if name else function_name
    # model_name  = seq([ function_name, name ]).filter(lambda x: x).make_string("-")  # remove dependency on pyfunctional - not in Kaggle repo without internet

    inputs = Input(shape=input_shape)
    x = inputs

    for cnn1 in range(0, maxpool_layers):
        for cnn2 in range(1, cnns_per_maxpool + 1):
            x = Conv2D(32 * cnn2,
                       kernel_size=(3, 3),
                       padding='same',
                       activation='relu')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)

    if global_maxpool:
        x = GlobalMaxPooling2D()(x)

    x = Flatten()(x)

    for nn1 in range(0, dense_layers):
        if regularization:
            x = Dense(dense_units,
                      activation='relu',
                      kernel_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l1(0.01))(x)
        else:
            x = Dense(dense_units, activation='relu')(x)

        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)

    x = Dense(output_shape, activation='softmax')(x)

    model = Model(inputs, x, name=model_name)
    # plot_model(model, to_file=os.path.join(os.path.dirname(__file__), f"{name}.png"))
    return model
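A usage sketch with placeholder shapes (MNIST-sized grayscale input is an assumption):

model = SingleOutputCNN(
    input_shape=(28, 28, 1),
    output_shape=10,
    cnns_per_maxpool=2,
    maxpool_layers=2,
    dense_layers=2,
    regularization=True,
    name='demo',
)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])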
Example #5
    def get_model(self):
        model = Sequential()
        model.add(Conv2D(32, kernel_size=(2, 2), activation='relu',
                         input_shape=(self.feature_dim_1, self.feature_dim_2, self.channel)))
        model.add(Conv2D(64, kernel_size=(2, 2), activation='relu'))
        model.add(Conv2D(128, kernel_size=(2, 2), activation='relu'))
        model.add(MaxPool2D(pool_size=(1, 1)))
        model.add(Dropout(0.5))
        model.add(Conv2D(128, kernel_size=(2, 2), activation='relu'))
        model.add(Conv2D(256, kernel_size=(2, 2), activation='relu'))
        model.add(MaxPool2D(pool_size=(1, 1)))
        model.add(Dropout(0.5))
        model.add(Conv2D(128, kernel_size=(2, 2), activation='relu'))
        model.add(Conv2D(256, kernel_size=(4, 4), activation='relu'))
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dropout(0.5))
        model.add(Dense(256, kernel_regularizer=regularizers.l2(0.2), activation='relu'))
        model.add(Dense(32, kernel_regularizer=regularizers.l2(0.2), activation='relu'))
        model.add(Dense(self.num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='RMSProp', metrics=['accuracy'])
        return model
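The method reads its input geometry and class count from the owning instance; a hypothetical host class (attribute values are placeholders) could provide them like this:

class AudioClassifier:
    def __init__(self, feature_dim_1=20, feature_dim_2=11, channel=1,
                 num_classes=10):
        self.feature_dim_1 = feature_dim_1
        self.feature_dim_2 = feature_dim_2
        self.channel = channel
        self.num_classes = num_classes

    # get_model from the example above would be defined here.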
Example #6
def construct_keras_model(model_type, embedding_weights):
    keras_model = keras.Sequential()
    keras_model.add(
        Embedding(creative_id_window,
                  embedding_size,
                  input_length=max_len,
                  weights=[embedding_weights],
                  trainable=False))
    if model_type == 'MLP':
        keras_model.add(Flatten())
    elif model_type == 'GM':
        keras_model.add(GlobalMaxPooling1D())
    elif model_type == 'GA':
        keras_model.add(GlobalAveragePooling1D())
    elif model_type == 'Conv1D':
        keras_model.add(Conv1D(64, 2))
        keras_model.add(MaxPooling1D())
        keras_model.add(Conv1D(64, 2))
        keras_model.add(MaxPooling1D())
        keras_model.add(Flatten())
    else:
        raise Exception("Invalid network model type")

    # keras_model.add(Dropout(0.5))
    # keras_model.add(BatchNormalization())
    # keras_model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.001)))
    keras_model.add(Dense(32, activation='relu', kernel_regularizer=l2(0.001)))
    # keras_model.add(Dropout(0.5))
    # keras_model.add(BatchNormalization())
    keras_model.add(
        Dense(1, activation='sigmoid', kernel_regularizer=l2(0.001)))
    keras_model.summary()
    # print("保存模型的原始结构:", keras_model.save('model/word2vec/{0}_m0_{1}.h5'.format(model_type, label_name)))
    keras_model.compile(optimizer=optimizers.RMSprop(lr=RMSProp_lr),
                        loss=losses.binary_crossentropy,
                        metrics=[metrics.binary_accuracy])
    return keras_model
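The function also reads several module-level globals; a hypothetical setup (all values are placeholders):

import numpy as np

creative_id_window, embedding_size, max_len = 50000, 128, 100
RMSProp_lr = 3e-4
embedding_weights = np.random.rand(creative_id_window, embedding_size)

mlp_model = construct_keras_model('MLP', embedding_weights)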
Example #7
def get_compiled_model():
    model = keras.Sequential([
        keras.layers.Dense(9,
                           activation='relu',
                           kernel_regularizer=regularizers.l2(0.0001)),
        keras.layers.Dense(500,
                           activation='relu',
                           kernel_regularizer=regularizers.l2(0.0001)),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(500,
                           activation='relu',
                           kernel_regularizer=regularizers.l2(0.0001)),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(500,
                           activation='relu',
                           kernel_regularizer=regularizers.l2(0.0001)),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(1, activation='sigmoid')
    ])

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
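Since no Input layer is declared, the first Dense layer infers its input size on the first call; a smoke test with synthetic data (the feature count is a placeholder):

import numpy as np

model = get_compiled_model()
X = np.random.rand(256, 30).astype('float32')
y = np.random.randint(0, 2, size=(256, 1))
model.fit(X, y, epochs=2, batch_size=32)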
Example #8
File: helper.py  Project: jcoady/notebook
    def __init__(self,
                 state_size: int,
                 action_size: int,
                 representation_size: int,
                 max_value: int,
                 hidden_neurons: int = 64,
                 weight_decay: float = 1e-4,
                 representation_activation: str = 'tanh'):
        self.state_size = state_size
        self.action_size = action_size
        self.value_support_size = math.ceil(math.sqrt(max_value)) + 1

        regularizer = regularizers.l2(weight_decay)
        representation_network = Sequential([
            Dense(hidden_neurons,
                  activation='relu',
                  kernel_regularizer=regularizer),
            Dense(representation_size,
                  activation=representation_activation,
                  kernel_regularizer=regularizer)
        ])
        value_network = Sequential([
            Dense(hidden_neurons,
                  activation='relu',
                  kernel_regularizer=regularizer),
            Dense(self.value_support_size, kernel_regularizer=regularizer)
        ])
        policy_network = Sequential([
            Dense(hidden_neurons,
                  activation='relu',
                  kernel_regularizer=regularizer),
            Dense(action_size, kernel_regularizer=regularizer)
        ])
        dynamic_network = Sequential([
            Dense(hidden_neurons,
                  activation='relu',
                  kernel_regularizer=regularizer),
            Dense(representation_size,
                  activation=representation_activation,
                  kernel_regularizer=regularizer)
        ])
        reward_network = Sequential([
            Dense(16, activation='relu', kernel_regularizer=regularizer),
            Dense(1, kernel_regularizer=regularizer)
        ])

        super().__init__(representation_network, value_network, policy_network,
                         dynamic_network, reward_network)
Example #9
    def Train(self, input, target):
        X_train, X_test, Y_train, Y_test = train_test_split(input, target, train_size=0.75)
        Y_train = np.asarray(Y_train)
        Y_test = np.array(Y_test)
        X_train = np.reshape(X_train, [-1, X_train[0].shape[0], X_train[0].shape[1]])
        X_test = np.reshape(X_test, [-1, X_train[0].shape[0], X_train[0].shape[1]])

        model = Sequential()
        model.add(Conv1D(16, 3, padding='same', input_shape=input[0].shape))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization())
        model.add(GRU(16, return_sequences=True))
        # model.add(Activation("sigmoid"))
        # model.add(LSTM(lstm_out))

        model.add(Flatten())
        model.add(Dense(8, activity_regularizer=l2(0.001)))
        # model.add(GRU(lstm_out, return_sequences=True))
        # model.add(LSTM(lstm_out))
        # model.add(Dense(20, activity_regularizer=l2(0.001)))
        model.add(Activation("relu"))
        model.add(Dense(2))

        model.compile(loss=mean_absolute_error, optimizer='nadam',
                      metrics=[RootMeanSquaredError(), MAE])
        model.summary()

        batch_size = 12
        epochs = 100
        # val_loss should be minimized, so use mode='min'; the original
        # mode='max' would reduce the LR whenever the loss stopped increasing.
        reduce_lr_acc = ReduceLROnPlateau(monitor='val_loss', factor=0.9,
                                          patience=epochs // 10, verbose=1,
                                          min_delta=1e-4, mode='min')
        model.fit(X_train, Y_train,
                  epochs=epochs,
                  batch_size=batch_size, validation_data=(X_test, Y_test), callbacks=[reduce_lr_acc])
        model.save("PositionEstimation.h5", overwrite=True)
        # acc = model.evaluate(X_test,
        #                      Y_test,
        #                      batch_size=batch_size,
        #                      verbose=0)

        predicted = model.predict(X_test, batch_size=batch_size)
        # predicted = out.ravel()

        res = pd.DataFrame({"predicted_x": predicted[:, 0],
                            "predicted_y": predicted[:, 1],
                            "original_x": Y_test[:, 0],
                            "original_y": Y_test[:, 1]})
        res.to_excel("res.xlsx")
Example #10
    def __init__(self,
                 state_size: int,
                 action_size: int,
                 representation_size: Tuple[int, int],
                 max_value: int,
                 hidden_neurons: int = 64,
                 weight_decay: float = 1e-4,
                 representation_activation: str = 'tanh',
                 directory: str = None):
        self.state_size = state_size
        self.representation_size = representation_size
        self.action_size = action_size
        self.value_support_size = math.ceil(math.sqrt(max_value)) + 1

        if directory is not None:
            print('loading network from ' + directory)
            representation_network = self.load_model(directory +
                                                     "/representation")
            value_network = self.load_model(directory + "/value")
            policy_network = self.load_model(directory + "/policy")
            dynamic_network = self.load_model(directory + "/dynamic")
            reward_network = self.load_model(directory + "/reward")
        else:
            regularizer = regularizers.l2(weight_decay)

            representation_network = build_representation_network(
                representation_size)

            # Ignore batch size when setting network inputs
            hidden_rep_shape = representation_network.output_shape[1:]
            value_network = build_value_network(hidden_rep_shape,
                                                self.value_support_size)
            policy_network = build_policy_network(hidden_rep_shape,
                                                  self.action_size,
                                                  regularizer)

            # Shape when actions are stacked on top of hidden rep
            stacked_hidden_rep_shape = (hidden_rep_shape[0],
                                        hidden_rep_shape[1],
                                        hidden_rep_shape[2] + 1)
            dynamic_network = build_dynamic_network(stacked_hidden_rep_shape)
            reward_network = build_reward_network(stacked_hidden_rep_shape)

        super().__init__(representation_network, value_network, policy_network,
                         dynamic_network, reward_network)
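The build_* helpers are defined elsewhere in the project; a hedged sketch of one of them, with its signature inferred from the call site above (layer sizes are placeholders):

def build_value_network(hidden_rep_shape, value_support_size):
    # Flattens the (H, W, C) hidden representation and outputs logits over
    # value_support_size support bins.
    return Sequential([
        Flatten(input_shape=hidden_rep_shape),
        Dense(64, activation='relu'),
        Dense(value_support_size)
    ])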
Example #11
numeric_layer = tf.keras.layers.DenseFeatures(numeric_columns)
numeric_layer(train_batch).numpy()

categorical_columns = []
for feature, vocab in CATEGORIES.items():
  cat_col = tf.feature_column.categorical_column_with_vocabulary_list(
        key=feature, vocabulary_list=vocab)
  categorical_columns.append(tf.feature_column.indicator_column(cat_col))

categorical_layer = tf.keras.layers.DenseFeatures(categorical_columns)
preprocessing_layer = tf.keras.layers.DenseFeatures(categorical_columns+numeric_columns)

model = tf.keras.Sequential([
  preprocessing_layer,
  layers.Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.005)),
  layers.Dropout(0.5),
  layers.Dense(128, activation='relu', kernel_regularizer=regularizers.l2(0.005)),
  layers.Dropout(0.5),
  layers.Dense(128, activation='selu', kernel_regularizer=regularizers.l2(0.005)),
  layers.Dropout(0.4),
  layers.Dense(1, activation='sigmoid')
])

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

train_data = packed_train_data.shuffle(500)
test_data = packed_test_data

print("--Train--")
model.fit(train_data, epochs=500)
Example #12
    cat_col = tf.feature_column.categorical_column_with_vocabulary_list(
        key=feature, vocabulary_list=vocab)
    categorical_columns.append(tf.feature_column.indicator_column(cat_col))

preprocessing_layer = tf.keras.layers.DenseFeatures(categorical_columns +
                                                    numeric_columns)
# print(preprocessing_layer(example_batch).numpy()[0])

print('\n~~~~~~~~ Building Model ~~~~~~~~')
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
    0.001, decay_steps=(int(1e4) / 5) * 10000, decay_rate=1, staircase=False)

model = tf.keras.Sequential([
    preprocessing_layer,
    tf.keras.layers.Dense(128,
                          kernel_regularizer=regularizers.l2(0.0001),
                          activation='elu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(128,
                          kernel_regularizer=regularizers.l2(0.0001),
                          activation='elu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(128,
                          kernel_regularizer=regularizers.l2(0.0001),
                          activation='elu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

model.compile(
    optimizer=tf.keras.optimizers.Adam(lr_schedule),
    # The source snippet is truncated here; the loss and metrics below are
    # assumed to match the sigmoid output above.
    loss='binary_crossentropy',
    metrics=['accuracy'])
Example #13
def construct_keras_api_model(embedding_weights):
    # input_no_time_no_repeat = Input(shape=max_len, dtype='int32')
    # embedded_no_time_no_repeat = Embedding(
    #     creative_id_window,embedding_size,weights=[embedding_weights],trainable=False
    # )(input_no_time_no_repeat)
    # ==================================================================================
    Input_fix_creative_id = Input(
        shape=(math.ceil(time_id_max / period_days) * period_length, ),
        dtype='int32',
        name='input_fix_creative_id')
    Embedded_fix_creative_id = Embedding(
        creative_id_window,
        embedding_size,
        weights=[embedding_weights],
        trainable=False)(Input_fix_creative_id)
    # ==================================================================================
    # input_no_time_with_repeat = Input(shape=max_len, dtype='int32')
    # embedded_no_time_with_repeat = Embedding(creative_id_window,embedding_size,weights=[embedding_weights],trainable=False)(input_no_time_with_repeat)

    # ----------------------------------------------------------------------
    GM_x = keras.layers.GlobalMaxPooling1D()(Embedded_fix_creative_id)
    GM_x = Dropout(0.5)(GM_x)
    GM_x = Dense(embedding_size // 2, kernel_regularizer=l2(0.001))(GM_x)
    GM_x = BatchNormalization()(GM_x)
    GM_x = Activation('relu')(GM_x)
    GM_x = Dropout(0.5)(GM_x)
    GM_x = Dense(embedding_size // 4, kernel_regularizer=l2(0.001))(GM_x)
    GM_x = BatchNormalization()(GM_x)
    GM_x = Activation('relu')(GM_x)
    GM_x = Dense(1, 'sigmoid')(GM_x)

    # ----------------------------------------------------------------------
    GA_x = GlobalAveragePooling1D()(Embedded_fix_creative_id)
    GA_x = Dropout(0.5)(GA_x)
    GA_x = Dense(embedding_size // 2, kernel_regularizer=l2(0.001))(GA_x)
    GA_x = BatchNormalization()(GA_x)
    GA_x = Activation('relu')(GA_x)
    GA_x = Dropout(0.5)(GA_x)
    GA_x = Dense(embedding_size // 4, kernel_regularizer=l2(0.001))(GA_x)
    GA_x = BatchNormalization()(GA_x)
    GA_x = Activation('relu')(GA_x)
    GA_x = Dense(1, 'sigmoid')(GA_x)

    # ==================================================================================
    Conv_creative_id = Conv1D(embedding_size, 15, 5,
                              activation='relu')(Embedded_fix_creative_id)
    # ----------------------------------------------------------------------
    Conv_GM_x = MaxPooling1D(7)(Conv_creative_id)
    Conv_GM_x = Conv1D(embedding_size, 2, 1, activation='relu')(Conv_GM_x)
    Conv_GM_x = GlobalMaxPooling1D()(Conv_GM_x)
    Conv_GM_x = Dropout(0.5)(Conv_GM_x)
    Conv_GM_x = Dense(embedding_size // 2,
                      kernel_regularizer=l2(0.001))(Conv_GM_x)
    Conv_GM_x = BatchNormalization()(Conv_GM_x)
    Conv_GM_x = Activation('relu')(Conv_GM_x)
    Conv_GM_x = Dropout(0.5)(Conv_GM_x)
    Conv_GM_x = Dense(embedding_size // 4,
                      kernel_regularizer=l2(0.001))(Conv_GM_x)
    Conv_GM_x = BatchNormalization()(Conv_GM_x)
    Conv_GM_x = Activation('relu')(Conv_GM_x)
    Conv_GM_x = Dense(1, 'sigmoid')(Conv_GM_x)

    # ----------------------------------------------------------------------
    Conv_GA_x = AveragePooling1D(7)(Conv_creative_id)
    Conv_GA_x = Conv1D(embedding_size, 2, 1, activation='relu')(Conv_GA_x)
    Conv_GA_x = GlobalAveragePooling1D()(Conv_GA_x)
    Conv_GA_x = Dropout(0.5)(Conv_GA_x)
    Conv_GA_x = Dense(embedding_size // 2,
                      kernel_regularizer=l2(0.001))(Conv_GA_x)
    Conv_GA_x = BatchNormalization()(Conv_GA_x)
    Conv_GA_x = Activation('relu')(Conv_GA_x)
    Conv_GA_x = Dropout(0.5)(Conv_GA_x)
    Conv_GA_x = Dense(embedding_size // 4,
                      kernel_regularizer=l2(0.001))(Conv_GA_x)
    Conv_GA_x = BatchNormalization()(Conv_GA_x)
    Conv_GA_x = Activation('relu')(Conv_GA_x)
    Conv_GA_x = Dense(1, 'sigmoid')(Conv_GA_x)

    # ----------------------------------------------------------------------
    LSTM_x = Conv1D(embedding_size, 14, 7, activation='relu')(Conv_creative_id)
    LSTM_x = LSTM(embedding_size, return_sequences=True)(LSTM_x)
    LSTM_x = LSTM(embedding_size, return_sequences=True)(LSTM_x)
    LSTM_x = LSTM(embedding_size)(LSTM_x)
    LSTM_x = Dropout(0.5)(LSTM_x)
    LSTM_x = Dense(embedding_size // 2, kernel_regularizer=l2(0.001))(LSTM_x)
    LSTM_x = BatchNormalization()(LSTM_x)
    LSTM_x = Activation('relu')(LSTM_x)
    LSTM_x = Dropout(0.5)(LSTM_x)
    LSTM_x = Dense(embedding_size // 4, kernel_regularizer=l2(0.001))(LSTM_x)
    LSTM_x = BatchNormalization()(LSTM_x)
    LSTM_x = Activation('relu')(LSTM_x)
    LSTM_x = Dense(1, 'sigmoid')(LSTM_x)

    # ----------------------------------------------------------------------
    concatenated = concatenate(
        [GM_x, GA_x, Conv_GM_x, Conv_GA_x, LSTM_x], axis=-1)
    output_tensor = Dense(1, 'sigmoid')(concatenated)

    keras_api_model = Model(
        [
            # input_no_time_no_repeat,
            Input_fix_creative_id,
            # input_no_time_with_repeat,
        ],
        output_tensor)
    keras_api_model.summary()
    plot_model(keras_api_model, to_file='model/keras_api_word2vec_model.png')
    print('-' * 5 + ' ' * 3 + "Compiling model" + ' ' * 3 + '-' * 5)
    keras_api_model.compile(optimizer=optimizers.RMSprop(lr=RMSProp_lr),
                            loss=losses.binary_crossentropy,
                            metrics=[metrics.binary_accuracy])
    return keras_api_model
Example #14
def create_malstm_features_model(max_seq_length, embedding_dims, embeddings,
                                 numb_engineered_features):
    # Parameters
    dropout_lstm = 0.23
    dropout_dense = 0.23
    regularizing = 0.002

    n_hidden = 300
    # Input layers
    left_input = layers.Input(shape=(max_seq_length, ), dtype='int32')
    right_input = layers.Input(shape=(max_seq_length, ), dtype='int32')
    engineered_features_input = layers.Input(
        shape=(numb_engineered_features, ))

    # Embedding layer
    embedding_layer = layers.Embedding(len(embeddings),
                                       embedding_dims,
                                       weights=[embeddings],
                                       input_length=max_seq_length,
                                       trainable=False)
    encoded_left = embedding_layer(left_input)
    encoded_right = embedding_layer(right_input)

    # Since this is a siamese network, both sides share the same LSTM
    shared_lstm = layers.LSTM(n_hidden,
                              kernel_regularizer=regularizers.l2(regularizing),
                              dropout=dropout_lstm,
                              recurrent_dropout=dropout_lstm,
                              name="Siamese_LSTM")
    left_output = shared_lstm(encoded_left)
    right_output = shared_lstm(encoded_right)

    # One fully connected layer to transform the engineered features
    encoded_engineered = layers.Dense(
        70, activation='relu', name="FeatureDense")(engineered_features_input)

    # Concatenate the two question representations and the engineered features if they exist
    concatenated = layers.Concatenate()(
        [left_output, right_output, encoded_engineered])
    concatenated = layers.Dropout(dropout_dense)(concatenated)
    concatenated = layers.BatchNormalization()(concatenated)

    concatenated = layers.Dense(
        150,
        kernel_regularizer=regularizers.l2(regularizing),
        activation='relu',
        name="ConcatenatedDense_1")(concatenated)
    concatenated = layers.Dropout(dropout_dense)(concatenated)
    concatenated = layers.BatchNormalization(name="BatchNorm1")(concatenated)

    concatenated = layers.Dense(
        70,
        kernel_regularizer=regularizers.l2(regularizing),
        activation='relu',
        name="ConcatenatedDense_2")(concatenated)
    concatenated = layers.Dropout(dropout_dense)(concatenated)
    concatenated = layers.BatchNormalization(name="BatchNorm2")(concatenated)

    concatenated = layers.Dense(
        35,
        kernel_regularizer=regularizers.l2(regularizing),
        activation='relu',
        name="ConcatenatedDense_3")(concatenated)
    concatenated = layers.Dropout(dropout_dense)(concatenated)
    concatenated = layers.BatchNormalization(name="BatchNorm3")(concatenated)

    output = layers.Dense(1, activation='sigmoid',
                          name="Sigmoid")(concatenated)

    return Model([left_input, right_input, engineered_features_input], output)
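Usage mirrors the plain MaLSTM model in Example #1, with the engineered-feature matrix as a third input (all data below are placeholders):

import numpy as np

vocab_size, embedding_dims, max_seq_length, n_feats = 10000, 50, 20, 5
embeddings = np.random.rand(vocab_size, embedding_dims)

model = create_malstm_features_model(max_seq_length, embedding_dims,
                                     embeddings, n_feats)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

left = np.random.randint(0, vocab_size, size=(32, max_seq_length))
right = np.random.randint(0, vocab_size, size=(32, max_seq_length))
feats = np.random.rand(32, n_feats).astype('float32')
labels = np.random.randint(0, 2, size=(32, 1))
model.fit([left, right, feats], labels, epochs=1)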
Example #15
categorical_columns = []
for feature, vocab in CATEGORIES.items():
    cat_col = tf.feature_column.categorical_column_with_vocabulary_list(
        key=feature, vocabulary_list=vocab)
    categorical_columns.append(tf.feature_column.indicator_column(cat_col))

preprocessing_layer = tf.keras.layers.DenseFeatures(categorical_columns+numeric_columns)

def create_ds(dataframe, batch_size=1):
    dataframe = dataframe.copy()
    labels = dataframe.pop('chd')
    return (tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
            .shuffle(buffer_size=len(dataframe))
            .batch(batch_size))

model = tf.keras.Sequential([
    preprocessing_layer,
    tf.keras.layers.Dense(24, kernel_regularizer=regularizers.l2(0.01), activation='elu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(24, kernel_regularizer=regularizers.l2(0.01), activation='elu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

model.compile(optimizer='adamax',
              loss='binary_crossentropy',
              metrics=['accuracy'])

print("--Fit model--")
model.fit(packed_train_data, epochs=20, steps_per_epoch=128)

print("--Evaluate model--")