コード例 #1
0
def cnn():
    """Build and compile a binary-classification CNN for (200, 4) inputs.

    The input is reshaped to a single-channel 2D map, passed through
    stacked Conv2D blocks with dropout, flattened, and reduced to a
    single sigmoid unit. Compiled with nadam, binary cross-entropy, and
    accuracy / AUROC / AUPRC metrics.

    Returns:
        The compiled ``Sequential`` model named "CNN".
    """
    layer_stack = [
        Input(shape=(200, 4)),
        Reshape((200, 4, 1)),
        Conv2D(128, kernel_size=(10, 2), activation="relu"),
        Conv2D(128, kernel_size=(10, 2), activation="relu"),
        Dropout(0.4),
        Conv2D(64, kernel_size=(10, 2), strides=(2, 1), activation="relu"),
        Conv2D(64, kernel_size=(10, 1), activation="relu"),
        Conv2D(64, kernel_size=(10, 1), activation="relu"),
        Dropout(0.4),
        Flatten(),
        Dense(64, activation="relu"),
        Dense(32, activation="relu"),
        Dense(1, activation="sigmoid"),
    ]
    model = Sequential(layer_stack, "CNN")

    model.compile(
        optimizer="nadam",
        loss="binary_crossentropy",
        metrics=["accuracy",
                 AUC(curve="ROC", name="auroc"),
                 AUC(curve="PR", name="auprc")],
    )
    return model
コード例 #2
0
    def get_metrics(y, y_pred):
        """Compute accuracy, AUC, and per-class sensitivity/specificity.

        Args:
            y: integer class labels (5 classes assumed by the one-hot step).
            y_pred: integer predicted class labels.

        Returns:
            Dict with scalar 'accuracy' and 'auc' plus per-class numpy
            arrays 'sensitivity' and 'specificity' derived from the
            confusion matrix.
        """
        cnf_matrix = confusion_matrix(y, y_pred)

        # Hoisted: the diagonal was recomputed three times before.
        diag = np.diag(cnf_matrix).astype(float)
        false_positive = cnf_matrix.sum(axis=0) - diag
        false_negative = cnf_matrix.sum(axis=1) - diag
        true_positive = diag
        true_negative = (
            cnf_matrix.sum() -
            (false_positive + false_negative + true_positive)).astype(float)

        # One-hot encode for the Keras streaming metrics.
        y = to_categorical(y, num_classes=5)
        y_pred = to_categorical(y_pred, num_classes=5)

        # NOTE(review): feeding hard 0/1 predictions to AUC degenerates it
        # to a single operating point — confirm probabilistic scores are
        # not available upstream.
        auc = AUC()
        auc.update_state(y, y_pred)
        acc = CategoricalAccuracy()
        acc.update_state(y, y_pred)

        return {
            'accuracy': acc.result().numpy(),
            'auc': auc.result().numpy(),
            'sensitivity': true_positive / (true_positive + false_negative),
            'specificity': true_negative / (true_negative + false_positive)
        }
コード例 #3
0
def get_cnn(shape_value):
    """Build and compile a binary CNN for inputs of shape (shape_value, 4).

    Args:
        shape_value: length of the first input axis.

    Returns:
        The compiled ``Sequential`` model named "CNN" (nadam optimizer,
        binary cross-entropy, accuracy / AUROC / AUPRC metrics).
    """
    input_shape = (shape_value, 4)

    model = Sequential([
        Input(shape=input_shape),
        Reshape((*input_shape, 1)),
        Conv2D(64, kernel_size=(10, 2), activation="relu"),
        Dropout(0.3),
        Conv2D(32, kernel_size=(10, 2), strides=(2, 1), activation="relu"),
        Conv2D(32, kernel_size=(10, 1), activation="relu"),
        Dropout(0.3),
        Flatten(),
        Dense(32, activation="relu"),
        Dense(16, activation="relu"),
        Dense(1, activation="sigmoid"),
    ], "CNN")

    metrics = [
        "accuracy",
        AUC(curve="ROC", name="auroc"),
        AUC(curve="PR", name="auprc"),
    ]
    model.compile(optimizer="nadam",
                  loss="binary_crossentropy",
                  metrics=metrics)
    return model
コード例 #4
0
def FFNN():
    """Build, compile, and summarize a dense feed-forward binary classifier.

    Takes (200, 4) inputs, flattens them, and funnels through dense layers
    (1024 -> ... -> 32) with batch-norm and dropout, ending in one sigmoid
    unit. Prints the model summary as a side effect.

    Returns:
        The compiled ``Sequential`` model named "FFNN".
    """
    model = Sequential(name="FFNN")
    model.add(Input(shape=(200, 4)))
    model.add(Flatten())
    model.add(Dense(1024, activation="relu"))
    model.add(Dense(512, activation="relu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    model.add(Dense(256, activation="relu"))
    model.add(Dense(128, activation="relu"))
    model.add(BatchNormalization())
    model.add(Dense(64, activation="relu"))
    model.add(Dropout(0.4))
    model.add(Dense(32, activation="relu"))
    model.add(Dense(1, activation="sigmoid"))

    model.compile(optimizer="nadam",
                  loss="binary_crossentropy",
                  metrics=["accuracy",
                           AUC(curve="ROC", name="auroc"),
                           AUC(curve="PR", name="auprc")])

    model.summary()
    return model
コード例 #5
0
        def get_model(
                input_shape: Tuple[int],
                name: str = None,
                optimizer: str = get_default('optimizer'),
                loss: str = get_default('loss'),
                metrics: List = None,
                epochs: int = get_default('epochs'),
                batch_size: int = get_default('batch_size'),
                validation_split: float = get_default('validation_split'),
                shuffle: bool = get_default('shuffle'),
                verbose: bool = get_default('verbose'),
                **kwargs):
            """Assemble and compile a sigmoid-output Sequential model and
            wrap it (with its fit settings) in a ``Model``.

            Falsy ``name``/``metrics`` fall back to ``default_name`` and the
            accuracy/AUROC/AUPRC default, matching the previous `or` logic.
            """
            if not name:
                name = default_name
            if not metrics:
                metrics = [
                    "accuracy",
                    AUC(curve="ROC", name="auroc"),
                    AUC(curve="PR", name="auprc")
                ]

            # Input -> shared hidden stack -> single sigmoid unit.
            model = Sequential(
                (Input(shape=input_shape),
                 *hidden_layers,
                 Dense(1, activation="sigmoid")),
                name)
            model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

            # Fold the fit-time settings into kwargs for the wrapper.
            kwargs.update(
                epochs=epochs,
                batch_size=batch_size,
                validation_split=validation_split,
                shuffle=shuffle,
                verbose=verbose)
            model.summary()
            return Model(name, model, **kwargs)
コード例 #6
0
def evaluate_legacy(model, weights, test_gen, loss, optimiser, loss_weights):
    """Compile *model*, evaluate it on the test generator, and print results.

    Args:
        model: Keras model to (re)compile and evaluate.
        weights: label for the weights file, used only for reporting.
        test_gen: object exposing a ``dataset`` attribute for evaluation.
        loss, optimiser, loss_weights: forwarded to ``model.compile``.
    """
    model.compile(loss=loss,
                  optimizer=optimiser,
                  loss_weights=loss_weights,
                  metrics=['accuracy',
                           AUC(curve='ROC', name='auroc'),
                           AUC(curve='PR', name='aupr')])

    print(f'Evaluating weights file: {weights}')
    output = model.evaluate(test_gen.dataset)

    # Pull the relevant entries out of evaluate()'s flat output list.
    # NOTE(review): indices assume a fixed multi-output metric layout —
    # confirm against the model's compiled metric order.
    loss, audio_acc, video_acc, av_acc, auroc, aupr = (
        output[i] for i in (0, 4, 7, 10, 11, 12))
    print_results(weights, loss, audio_acc, video_acc, av_acc, auroc, aupr)
コード例 #7
0
 def _compile_model(self) -> Model:
     """Compile the wrapped model for binary classification and return it.

     Fix: the signature promises a ``Model`` but the original body never
     returned anything (callers received None); the compiled model is now
     returned, matching the annotation.
     """
     self._model.compile(optimizer="nadam",
                         loss="binary_crossentropy",
                         metrics=[
                             "accuracy",
                             AUC(curve="ROC", name="auroc"),
                             AUC(curve="PR", name="auprc")
                         ])
     return self._model
コード例 #8
0
def get_minimal_multiclass_metrics() -> List[Union[AUC, str, BinaryMetric]]:
    """Return minimal list of multiclass metrics.

    The set of metrics includes accuracy, recall, precision, AUROC and
    AUPRC (the docstring previously omitted recall and precision, which
    the list below clearly contains).
    """
    return [
        "accuracy",
        Recall(name="recall"),
        Precision(name="precision"),
        AUC(curve="roc", name="AUROC"),
        AUC(curve="pr", name="AUPRC")
    ]
コード例 #9
0
def train():
    """Train the SincNet model in four staged phases with decaying LRs.

    Each stage recompiles the model with a new optimizer and epoch budget
    (Adam warm-up, then progressively smaller SGD steps), evaluates on the
    validation split, and checkpoints to ../models/. Finishes with a test
    evaluation.
    """
    train_data, valid_data, test_data = load_data()

    strategy = tf.distribute.MirroredStrategy()

    with strategy.scope():
        model = music_sincnet()
        csv_logger = CSVLogger("./checkpoint/log.csv", append=True)
        checkpointer = make_checkpoint()

        # (optimizer, epochs) per training stage — same sequence as the
        # original if/elif ladder.
        schedule = [
            (Adam(learning_rate=1e-4), 60),
            (SGD(learning_rate=0.001, momentum=0.9, nesterov=True), 20),
            (SGD(learning_rate=0.0001, momentum=0.9, nesterov=True), 20),
            (SGD(learning_rate=0.00001, momentum=0.9, nesterov=True), 100),
        ]

        for stage, (optimizer, epoch) in enumerate(schedule):
            model.compile(
                loss="binary_crossentropy",
                optimizer=optimizer,
                metrics=[AUC(name='roc_auc'),
                         AUC(name='pr_auc', curve='PR')])
            model.fit(
                train_data,
                batch_size=16,
                epochs=epoch,
                callbacks=[checkpointer, csv_logger],
            )
            loss, roc_auc, pr_auc = model.evaluate(valid_data)
            print('roc_auc : {}, pr_auc : {}, loss : {}'.format(
                roc_auc, pr_auc, loss))
            model.save("../models/model" + str(stage) + ".h5")

        loss, roc_auc, pr_auc = model.evaluate(test_data)
        print('@TEST RESULT\n roc_auc : {}, pr_auc : {}, loss : {}'.format(
            roc_auc, pr_auc, loss))
コード例 #10
0
def train(epochs):
    """Train a DeepFM model with a manual batch loop and report metrics.

    Args:
        epochs: number of passes over the training dataset.

    Side effects: prints per-epoch and test metrics, and saves the final
    weights under weights/.
    """
    train_ds, test_ds, field_dict, field_index = get_data()

    model = DeepFM(embedding_size=config.EMBEDDING_SIZE, num_feature=len(field_index),
                   num_field=len(field_dict), field_index=field_index)

    optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)

    print("Start Training: Batch Size: {}, Embedding Size: {}".format(config.BATCH_SIZE, config.EMBEDDING_SIZE))
    start = perf_counter()

    for epoch in range(epochs):
        # Fresh streaming metrics per epoch.
        epoch_acc = BinaryAccuracy(threshold=0.5)
        epoch_auc = AUC()
        losses = []

        for batch_x, batch_y in train_ds:
            losses.append(
                train_on_batch(model, optimizer, epoch_acc, epoch_auc,
                               batch_x, batch_y))

        print("Epoch {:03d}: 누적 Loss: {:.4f}, Acc: {:.4f}, AUC: {:.4f}".format(
            epoch, np.mean(losses), epoch_acc.result().numpy(), epoch_auc.result().numpy()))

    # Final evaluation over the held-out set.
    test_acc = BinaryAccuracy(threshold=0.5)
    test_auc = AUC()
    for batch_x, batch_y in test_ds:
        preds = model(batch_x)
        test_acc.update_state(batch_y, preds)
        test_auc.update_state(batch_y, preds)

    print("테스트 ACC: {:.4f}, AUC: {:.4f}".format(test_acc.result().numpy(), test_auc.result().numpy()))
    print("Batch Size: {}, Embedding Size: {}".format(config.BATCH_SIZE, config.EMBEDDING_SIZE))
    print("걸린 시간: {:.3f}".format(perf_counter() - start))
    model.save_weights('weights/weights-epoch({})-batch({})-embedding({}).h5'.format(
        epochs, config.BATCH_SIZE, config.EMBEDDING_SIZE))
コード例 #11
0
def SegNet3D(shape, weights=None):
    """Build and compile a 3D SegNet-style encoder/decoder for binary
    voxel segmentation.

    Args:
        shape: input tensor shape passed to ``Input``.
        weights: accepted for interface compatibility; currently unused.

    Returns:
        The compiled model (summary printed as a side effect).

    Fix: the final decoder block previously took ``upsam`` (the raw
    upsampled tensor) as input, silently discarding the last decoder
    block's output; it now chains from ``conv``.
    """
    inputs = Input(shape)
    conv, pool = inputs, inputs

    # encoder: conv block then 2x downsample, filters doubling each stage
    for numOfFilters in [4, 8, 16, 32]:
        conv = SegNet3DBlock(pool, layers=2, filters=numOfFilters)
        pool = MaxPooling3D((2, 2, 2))(conv)

    # bottleneck
    conv = SegNet3DBlock(pool, layers=3, filters=128)

    # decoder: 2x upsample then conv block, filters halving each stage
    for numOfFilters in [64, 32, 16, 8]:
        upsam = UpSampling3D((2, 2, 2))(conv)
        conv = SegNet3DBlock(upsam, layers=2, filters=numOfFilters)

    # BUG FIX: was SegNet3DBlock(upsam, ...), which dropped the last
    # decoder block computed in the loop above.
    conv = SegNet3DBlock(conv, layers=2, filters=4)

    outputs = Conv3D(1, 1, activation='sigmoid')(conv)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(learning_rate=1e-4), loss='binary_crossentropy',
                  metrics=[Precision(), Recall(), AUC(), Accuracy()])
    model.summary()

    return model
コード例 #12
0
def buildModel():
    """Build and compile a BiLSTM + Conv1D multi-label text classifier.

    Uses frozen pre-trained embeddings, a bidirectional LSTM, a Conv1D
    over the hidden states, and concatenated max/average pooling, ending
    in 6 sigmoid units.

    Returns:
        The compiled model.

    Fix: the original compiled the model but never returned it, so every
    caller received None.
    """
    inp = Input((MAXIMUM_SEQ_LEN, ))
    # Frozen pre-trained embeddings.
    emb = Embedding(VOCAB_LENGTH,
                    EMBEDDING_DIM,
                    weights=[embedding_matrix],
                    trainable=False)(inp)
    # Drop whole embedding channels instead of individual cells.
    emb = SpatialDropout1D(0.2)(emb)
    # 100 forward + 100 backward hidden states per time step.
    hidden_states = Bidirectional(
        LSTM(100, return_sequences=True, dropout=0.1,
             recurrent_dropout=0.1))(emb)
    # Convolve the hidden-state sequence with kernels of size 3.
    conv = Conv1D(64,
                  kernel_size=3,
                  padding="valid",
                  kernel_initializer="glorot_uniform")(hidden_states)
    # Combine max and average pooling over time.
    x1 = GlobalMaxPool1D()(conv)
    x2 = GlobalAvgPool1D()(conv)
    x = Concatenate()([x1, x2])
    x = Dropout(0.2)(x)
    x = Dense(50, activation='relu')(x)
    x = Dropout(0.1)(x)
    out = Dense(6, activation='sigmoid')(x)
    model = Model(inp, out)

    model.compile(loss="binary_crossentropy",
                  optimizer="adam",
                  metrics=[AUC(name="auc")])
    return model
コード例 #13
0
def prepare_model(img_rows, img_cols, img_channels, nb_classes):
    """Build and compile a small three-block CNN with sigmoid outputs.

    Args:
        img_rows, img_cols, img_channels: input image dimensions.
        nb_classes: number of output units (one independent sigmoid each).

    Returns:
        The compiled ``Sequential`` model (summary printed as a side
        effect).
    """
    cnn = Sequential()
    cnn.add(
        Conv2D(32, (3, 3),
               padding='same',
               activation='relu',
               input_shape=(img_rows, img_cols, img_channels)))
    cnn.add(MaxPooling2D(pool_size=(2, 2)))
    cnn.add(Conv2D(
        64,
        (3, 3),
        padding='same',
        activation='relu',
    ))
    cnn.add(MaxPooling2D(pool_size=(2, 2)))
    cnn.add(Conv2D(
        128,
        (3, 3),
        padding='same',
        activation='relu',
    ))
    cnn.add(MaxPooling2D(pool_size=(2, 2)))
    cnn.add(Flatten())
    # NOTE(review): these two Dense(512) layers have no activation, so they
    # are purely linear and collapse into a single linear map — confirm
    # whether an activation (e.g. 'relu') was intended here.
    cnn.add(Dense(512))
    cnn.add(Dense(512))
    cnn.add(Dense(nb_classes, activation='sigmoid'))
    cnn.compile(optimizer='sgd',
                loss='binary_crossentropy',
                metrics=['accuracy', AUC(name='auc')])
    print(cnn.summary())
    return cnn
コード例 #14
0
def get_conv_model():
    """
    Create convolutional neural network with keras. Layers already set and only adjustable through source file.
    """
    # Full layer stack, in order; module-level num_rows/num_columns/
    # num_channels/num_labels fix the input and output sizes.
    layer_stack = [
        Conv2D(filters=16, kernel_size=(2, 2), kernel_regularizer=l2(0.0001),
               input_shape=(num_rows, num_columns, num_channels), activation='relu'),
        MaxPooling2D(pool_size=2),
        Conv2D(filters=32, kernel_size=(2, 2), kernel_regularizer=l2(0.0001), activation='relu'),
        Dropout(0.2),
        Conv2D(filters=64, kernel_size=(3, 3), kernel_regularizer=l2(0.0001), activation='relu'),
        MaxPooling2D(pool_size=2),
        Dropout(0.2),
        Flatten(),
        Dense(64, activation='relu'),
        Dropout(0.5),
        Dense(num_labels, activation='softmax'),
    ]

    model = Sequential()
    for layer in layer_stack:
        model.add(layer)

    model.compile(loss='categorical_crossentropy', metrics=['accuracy', AUC()], optimizer='adam')
    return model
コード例 #15
0
def build_model(hp, input_dim1, input_dim2, output_dim, first_layer=False):
    """Build a two-input Keras model for keras-tuner hyperparameter search.

    Args:
        hp: keras-tuner HyperParameters handle defining the search space.
        input_dim1: width of the first (direct) input branch.
        input_dim2: width of the second (tunable dense) input branch.
        output_dim: number of softmax output units.
        first_layer: accepted for interface compatibility; currently unused.

    Returns:
        The compiled model.

    Fix: removed the dead local ``params = hp.values.copy()`` — it was
    never read.
    """
    ci = Input((input_dim1, ))
    si = Input((input_dim2, ))
    s = si

    # Optional stack (0-2) of tunable dense+dropout layers on branch two;
    # `units` is a module-level list of candidate widths.
    for i in range(hp.Int('num_layers', 0, 2)):
        s = Dense(hp.Choice('branching_units' + str(i + 1),
                            units,
                            default=units[0]),
                  activation='relu')(s)
        s = Dropout(0.2)(s)

    x = Concatenate(axis=-1)([ci, s])

    x1 = Dense(hp.Choice('units_' + str(1), [16, 8], default=16),
               activation='relu')(x)

    x = Dense(output_dim, activation='softmax', name='output_1')(x1)

    model = Model(inputs=[ci, si], outputs=[x])

    model.compile(optimizer=Adam(learning_rate=0.001),
                  loss={'output_1': 'categorical_crossentropy'},
                  metrics=['acc', f1, f2,
                           Precision(),
                           Recall(),
                           AUC()])

    return model
コード例 #16
0
ファイル: train_bak.py プロジェクト: xiaoduli/notekeras
def train_nfm(mode=1):
    """Train and evaluate an NFM model on the Criteo dataset.

    Args:
        mode: dataset-building mode forwarded to ``criteo.build_dataset``.

    Side effects: fits the model with early stopping and prints test AUC.
    """
    # ---- hyper parameters ----
    dnn_dropout = 0.5
    hidden_units = [256, 128, 64]
    learning_rate = 0.001
    batch_size = 4096
    epochs = 10

    # ---- dataset ----
    feature_columns, (train_X, train_y), (test_X, test_y) = \
        criteo.build_dataset(mode=mode)

    # ---- model ----
    model = NFM(feature_columns, hidden_units, dnn_dropout=dnn_dropout)
    # model.summary()

    model.compile(loss=binary_crossentropy,
                  optimizer=Adam(learning_rate=learning_rate),
                  metrics=[AUC()])

    # ---- fit (early stopping restores the best weights) ----
    early_stop = EarlyStopping(monitor='val_loss', patience=2,
                               restore_best_weights=True)
    model.fit(train_X,
              train_y,
              epochs=epochs,
              callbacks=[early_stop],
              batch_size=batch_size,
              validation_split=0.1)

    # ---- test ----
    print('test AUC: %f' % model.evaluate(test_X, test_y)[1])
コード例 #17
0
def compiled_model(INPUT_SHAPE: list, QNT_CLASS: int) -> tf.keras.Model:
    """Return a compiled transfer-learning classifier.

    Builds a MobileNetV2 backbone with frozen ImageNet weights, topped by
    average pooling, flatten, dropout, and a softmax head of QNT_CLASS
    units.

    Args:
        INPUT_SHAPE: input image shape as a list (converted to a tuple).
        QNT_CLASS: number of output classes.
    """
    INPUT_SHAPE = tuple(INPUT_SHAPE)

    base_model = MobileNetV2(include_top=False,
                             weights='imagenet',
                             input_tensor=Input(shape=INPUT_SHAPE,
                                                name='inputs'))

    # Freeze the backbone: only the new head below is trained.
    for layer in base_model.layers:
        layer.trainable = False

    mod = base_model.output
    mod = AveragePooling2D()(mod)
    mod = Flatten()(mod)
    mod = Dropout(0.5)(mod)
    mod = Dense(QNT_CLASS, activation='softmax')(mod)

    mod_retorno = Model(inputs=base_model.input, outputs=mod)

    # NOTE(review): keras.metrics.Accuracy compares raw values for exact
    # equality; with softmax probabilities CategoricalAccuracy is usually
    # intended — confirm this metric reports what is expected.
    mod_retorno.compile(
        loss=CategoricalCrossentropy(),
        optimizer=Adagrad(),
        metrics=[Accuracy(), Precision(),
                 AUC(), FalseNegatives()])
    return mod_retorno
コード例 #18
0
def UNet3DPatch(shape, weights=None):
    """Build and compile a 3D U-Net for patch-wise binary segmentation.

    Args:
        shape: input tensor shape passed to ``Input``.
        weights: accepted for interface compatibility; currently unused.

    Returns:
        The compiled model (summary printed as a side effect).
    """
    inputs = Input(shape)

    # Contracting path: remember each block output for skip connections.
    skips = []
    x = inputs
    for n_filters in (8, 16, 32, 32):
        block = UNet3DBlock(x, layers=2, filters=n_filters)
        skips.append(block)
        x = MaxPooling3D(pool_size=(2, 2, 2))(block)

    # Bottleneck.
    x = UNet3DBlock(x, layers=1, filters=32)

    # Expanding path: conv block, 2x upsample, merge matching skip.
    for n_filters in (64, 32, 16, 8):
        x = UNet3DBlock(x, layers=2, filters=n_filters)
        x = UpSampling3D(size=(2, 2, 2))(x)
        x = concatenate([skips.pop(), x])

    x = UNet3DBlock(x, layers=2, filters=4)
    outputs = Conv3D(1, 1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss='binary_crossentropy',
                  metrics=[AUC(), dice_coef])
    model.summary()

    return model
コード例 #19
0
ファイル: basic_mlp.py プロジェクト: Djack1010/basicDL4Man
    def build(self):
        """Assemble and compile the dense softmax classifier.

        The topology is fixed: 500 -> 700 -> 1000 -> 500 -> 250 -> 100 ->
        50 hidden relu units with interleaved Dropout(0.5), ending in a
        softmax over ``self.num_classes``.

        Returns:
            The compiled ``Sequential`` model.
        """
        model = models.Sequential()
        model.add(
            layers.Dense(500,
                         input_shape=(self.vector_size, ),
                         activation='relu'))
        # (layer width, whether a Dropout(0.5) follows it)
        for width, dropped in ((700, True), (1000, False), (500, True),
                               (250, False), (100, True), (50, False)):
            model.add(layers.Dense(width, activation='relu'))
            if dropped:
                model.add(layers.Dropout(0.5))
        model.add(layers.Dense(self.num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['acc',
                               Precision(name="prec"),
                               Recall(name="rec"),
                               AUC(name='auc')])

        return model
コード例 #20
0
ファイル: experiment_gene_exp.py プロジェクト: mahdi-b/GWASDS
def load_model(arch, LR, X, y, L1_coef, encoder=None):
    """Create a 5-class softmax network, or fine-tune a given encoder.

    Args:
        arch: sequence of at least five hidden-layer widths.
        LR: Adam learning rate.
        X: feature matrix; only ``X.shape[1]`` (input width) is used.
        y: accepted for interface compatibility; unused in this function.
        L1_coef: L1 penalty applied to the first dense layer.
        encoder: optional pre-trained encoder; when given, it is topped
            with a softmax head instead of building the MLP from scratch.

    Returns:
        The compiled Keras model.

    Fix: ``encoder == None`` replaced with the idiomatic ``is None``;
    dead ``model = None`` initializer removed (both branches assign it).
    """
    print('CREATING NEURAL NET')
    opt = Adam(learning_rate=LR)

    if encoder is None:
        inputs = Input(shape=(X.shape[1], ))
        # First layer carries the L1 regularizer.
        h = layers.Dense(arch[0],
                         kernel_regularizer=l1_l2(l1=L1_coef, l2=0.0),
                         activation='relu')(inputs)
        h = layers.BatchNormalization()(h)
        # Three batch-normalized hidden layers, then one plain layer.
        for width in arch[1:4]:
            h = layers.Dense(width, activation='relu')(h)
            h = layers.BatchNormalization()(h)
        h = layers.Dense(arch[4], activation='relu')(h)
        output = layers.Dense(5, activation='softmax')(h)
        model = Model(inputs=inputs, outputs=output)
    else:
        print('Transfer encoder to classifier and fine-tuning...')
        model = Sequential([encoder, layers.Dense(5, activation='softmax')])

    model.compile(
        optimizer=opt,
        loss=tf.keras.losses.CategoricalCrossentropy(),
        metrics=['acc', AUC(), Recall(), Precision()],
    )

    return model
コード例 #21
0
def define_stacked_model(neural_nets, features, trainable=True):
    """Stack several trained networks into a single ensemble classifier.

    Each sub-model's layer-14 output (its final dense layer of size 16)
    is concatenated and passed through a small dense head ending in one
    sigmoid unit.

    Args:
        neural_nets: list of Keras models with a 16-unit layer at index 14.
        features: accepted for interface compatibility; unused here.
        trainable: when False, freezes every layer of every sub-model.

    Returns:
        The compiled ensemble model.

    Fix: ``trainable == False`` replaced with the idiomatic
    ``not trainable`` (same truth table for the boolean flag).
    """
    if not trainable:
        for net in neural_nets:
            for layer in net.layers:
                layer.trainable = False

    ensemble_visible = [net.input for net in neural_nets]
    # Index 14 is each sub-model's final dense layer of size 16.
    ensemble_outputs = [net.layers[14].output for net in neural_nets]

    merge = layers.concatenate(ensemble_outputs)
    hidden = layers.Dense(32, activation='relu')(merge)
    hidden_drop = layers.Dropout(0.2)(hidden)
    hidden2 = layers.Dense(16, activation='relu')(hidden_drop)
    hidden3 = layers.Dense(4, activation='relu')(hidden2)
    output = layers.Dense(1, activation='sigmoid')(hidden3)
    model = Model(inputs=ensemble_visible, outputs=output)

    model.compile(
        loss='binary_crossentropy',
        optimizer=Adam(learning_rate=0.001),
        metrics=['accuracy',
                 AUC(), Specificity, Sensitivity, F1_metric])

    return model
コード例 #22
0
def classifier(X, Y, clusters):
    """Train a dense softmax classifier with balanced class weights.

    Args:
        X: pandas feature frame.
        Y: pandas label frame (width = number of classes).
        clusters: cluster labels used to derive balanced class weights.

    Returns:
        The fitted Keras model.

    Fix: ``compute_class_weight`` is now called with keyword arguments —
    modern scikit-learn requires ``classes``/``y`` to be keyword-only.
    """
    # 80/20 split keyed on the sampled index of X.
    X_train = X.sample(frac=0.8)
    Y_train = Y.loc[X_train.index]
    X_val = X.drop(X_train.index)
    Y_val = Y.drop(X_train.index)

    c_w = compute_class_weight(class_weight='balanced',
                               classes=np.unique(clusters),
                               y=clusters)
    c_w = dict(enumerate(c_w))

    METRICS = [Recall(name='recall'), AUC(name='auc', multi_label=False)]

    # NOTE(review): 'weighted_recall' must match a name Keras actually
    # logs for the weighted metrics — confirm it appears in the history,
    # otherwise early stopping never triggers.
    es = EarlyStopping(monitor='weighted_recall',
                       mode='max',
                       verbose=0,
                       patience=6)
    model = Sequential()
    model.add(Dense(32, input_dim=X_train.shape[1], activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(Y_train.shape[1], activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=METRICS,
                  weighted_metrics=METRICS)
    model.fit(X_train,
              Y_train,
              epochs=500,
              batch_size=128,
              validation_data=(X_val, Y_val),
              shuffle=False,
              verbose=1,
              callbacks=[es],
              class_weight=c_w)
    return model
コード例 #23
0
def main():
    """ Main function. """

    # datasets = Datasets('../project_data/classificationProcessed')
    datasets = Datasets(ARGS.basepath)

    checkpoint_path = "./classificationWeights5/"
    model = ChestClassModel()
    # Build the model by calling it once on a symbolic input.
    model(tf.keras.Input(shape=(512, 512, 3)))

    print(model.summary())

    if ARGS.load_checkpoint is not None:
        model.load_weights(ARGS.load_checkpoint)

    # Compile model graph
    # optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
    #changed from sparse_categorical
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=ARGS.learningrate),
        loss="categorical_crossentropy",
        metrics=[AUC(multi_label=True), "acc", "binary_accuracy"])

    if ARGS.evaluate:
        test(model, datasets.test_data)
    else:
        train(model, datasets, checkpoint_path)
    model.save_weights('./classificationWeights5/allWeights.h5')
コード例 #24
0
 def calculate(self, *args):
     """Build, train, and evaluate a DeepFM model under MirroredStrategy.

     Reads train/test splits from ``self.trainset`` / ``self.testset``,
     stores the compiled model on ``self.model``, prints the test AUC,
     and returns 'done'.

     Fix: removed a leftover ``import pdb; pdb.set_trace()`` that halted
     every run at an interactive breakpoint before fitting.
     """
     # prepare data
     train_X, train_Y = self.trainset
     test_X, test_Y = self.testset
     # build model inside the strategy scope so variables are mirrored
     mirrored_strategy = tf.distribute.MirroredStrategy()
     with mirrored_strategy.scope():
         self.model = DeepFM_model(self.feature_columns)
         self.model.summary()
         # ======== model checkpoint ===
         # check_path = '../save/fm_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt'
         # checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True,
         #                                                 verbose=1, period=5)
         # ======== compile
         self.model.compile(loss=binary_crossentropy,
                            optimizer=Adam(learning_rate=global_args.learning_rate),
                            metrics=[AUC()])
     # ======== model fit
     self.model.fit(train_X,
                    train_Y,
                    epochs=global_args.epochs,
                    callbacks=[
                        EarlyStopping(monitor='val_loss',
                                      patience=1,
                                      restore_best_weights=True)
                    ],
                    batch_size=global_args.batch_size,
                    validation_split=0.1)
     # ======== evaluate
     print('test AUC: %f' % self.model.evaluate(
         test_X, test_Y, batch_size=global_args.batch_size)[1])
     return 'done'
コード例 #25
0
    def build(self):
        """Assemble and compile the CNN image classifier.

        Three conv/pool stages (32, 64, 128 filters) feed a dropout-heavy
        dense head (512 -> 256) ending in a softmax over
        ``self.num_classes``.

        Returns:
            The compiled ``Sequential`` model.
        """
        model = models.Sequential()
        model.add(
            layers.Conv2D(32, (3, 3),
                          activation='relu',
                          input_shape=(self.input_width_height,
                                       self.input_width_height,
                                       self.channels)))
        model.add(layers.MaxPooling2D((2, 2)))
        # Two further conv/pool stages with doubling filter counts.
        for n_filters in (64, 128):
            model.add(layers.Conv2D(n_filters, (3, 3), activation='relu'))
            model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Flatten())
        model.add(layers.Dropout(0.5))  # Dropout for regularization
        for width in (512, 256):
            model.add(layers.Dense(width, activation='relu'))
            model.add(layers.Dropout(0.5))
        model.add(layers.Dense(self.num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['acc',
                               Precision(name="prec"),
                               Recall(name="rec"),
                               AUC(name='auc')])

        return model
コード例 #26
0
def main(learning_rate, epochs, hidden_units):
    """
    feature_columns is a list and contains two dict:
    - dense_features: {feat: dense_feature_name}
    - sparse_features: {feat: sparse_feature_name, feat_num: the number of this feature}
    train_X: [dense_train_X, sparse_train_X]
    test_X: [dense_test_X, sparse_test_X]
    """
    feature_columns, train_X, test_X, train_y, test_y = create_dataset()

    # Build the model and show its structure.
    model = Deep_Crossing(feature_columns, hidden_units)
    model.summary()

    # Checkpoint weights every 5 epochs.
    ckpt_path = 'save/deep_crossing_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt'
    ckpt_callback = tf.keras.callbacks.ModelCheckpoint(ckpt_path,
                                                       save_weights_only=True,
                                                       verbose=1,
                                                       period=5)

    model.compile(loss=binary_crossentropy,
                  optimizer=Adam(learning_rate=learning_rate),
                  metrics=[AUC()])

    model.fit(train_X,
              train_y,
              epochs=epochs,
              callbacks=[ckpt_callback],
              batch_size=128,
              validation_split=0.2)

    # Report AUC (second entry of evaluate()'s output) on the test split.
    print('test AUC: %f' % model.evaluate(test_X, test_y)[1])
コード例 #27
0
def create_cnn(num_classes: int = 2) -> tf.keras.Model:
    """Build a multi-kernel text CNN over 256-token integer sequences.

    Four parallel Conv1D branches (kernel sizes 10/7/5/3, tanh) are
    global-max-pooled and concatenated, then passed through two SELU
    dense layers with AlphaDropout before a softmax head.

    Args:
        num_classes: number of softmax output units.

    Returns:
        The compiled model.
    """
    x = Input(shape=(256, ), dtype="int64")
    h = Embedding(en2vec.corpus_size + 1, 128, input_length=256)(x)

    # One pooled feature vector per kernel size.
    pooled = [
        GlobalMaxPooling1D()(
            Convolution1D(filters=256, kernel_size=k, activation="tanh")(h))
        for k in (10, 7, 5, 3)
    ]
    h = Concatenate()(pooled)

    # Two self-normalizing dense blocks.
    for _ in range(2):
        h = Dense(1024, activation="selu", kernel_initializer="lecun_normal")(h)
        h = AlphaDropout(0.1)(h)

    y = Dense(num_classes, activation="softmax")(h)

    model = Model(x, y)
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy", AUC()])
    return model
コード例 #28
0
    def train_model(self, themes_weight: ThemeWeights,
                    dataset: TrainValidationDataset, voc_size: int,
                    keras_callback: LambdaCallback):
        """Build, compile, and fit a Conv1D theme classifier.

        Args:
            themes_weight: supplies the per-theme weight array fed to the
                weighted binary cross-entropy loss.
            dataset: provides article/theme sizes, train/validation data,
                and batch counts.
            voc_size: vocabulary size for the embedding layer.
            keras_callback: extra callback appended to fit()'s callbacks.

        Side effects: stores the fitted model on ``self.__model__`` and
        writes model plots when a plot directory is configured.
        """

        article_length = dataset.article_length
        theme_count = dataset.theme_count

        # Embedding -> Conv1D -> global average pooling -> one sigmoid
        # per theme (multi-label output).
        model = tf.keras.Sequential([
            keras.layers.Embedding(input_dim=voc_size,
                                   input_length=article_length,
                                   output_dim=self.embedding_output_dim,
                                   mask_zero=True),
            Dropout(0.3),
            keras.layers.Conv1D(filters=64,
                                kernel_size=3,
                                input_shape=(voc_size,
                                             self.embedding_output_dim),
                                activation=tf.nn.relu),
            #keras.layers.MaxPooling1D(3),
            #keras.layers.Bidirectional(keras.layers.LSTM(64)),
            keras.layers.GlobalAveragePooling1D(),
            Dropout(0.3),
            keras.layers.Dense(theme_count, activation=tf.nn.sigmoid)
        ])

        # clipnorm=1 enables gradient-norm clipping.
        # NOTE(review): run_eagerly=True disables graph compilation and is
        # slow — confirm it is still required with the custom loss.
        model.compile(optimizer=tf.keras.optimizers.Adam(clipnorm=1),
                      loss=WeightedBinaryCrossEntropy(
                          themes_weight.weight_array()),
                      metrics=[
                          AUC(multi_label=True),
                          BinaryAccuracy(),
                          TruePositives(),
                          TrueNegatives(),
                          FalseNegatives(),
                          FalsePositives(),
                          Recall(),
                          Precision()
                      ],
                      run_eagerly=True)

        model.summary()
        self.__model__ = model

        if self.__plot_directory is not None:
            self.plot_model(self.__plot_directory)

        # Fix for https://github.com/tensorflow/tensorflow/issues/38988
        # (drops dict entries that break private-layer bookkeeping).
        model._layers = [
            layer for layer in model._layers if not isinstance(layer, dict)
        ]

        callbacks = [ManualInterrupter(), keras_callback]

        model.fit(dataset.trainData,
                  epochs=self.epochs,
                  steps_per_epoch=dataset.train_batch_count,
                  validation_data=dataset.validationData,
                  validation_steps=dataset.validation_batch_count,
                  callbacks=callbacks)
    def __init__(self, model, data, config):
        """Store the train/eval models, dataset, and config, and set up
        the loss, optimizer, and metric list used during training.

        Args:
            model: dict with 'train' and 'eval' model entries.
            data: dataset handle stored as-is.
            config: object exposing at least ``learning_rate``.
        """
        self.model_train = model['train']  # model used for training steps
        self.model_eval = model['eval']    # model used for evaluation
        self.data = data
        self.config = config

        self.loss_fn = BinaryCrossentropy()
        self.optimizer = Adam(learning_rate=self.config.learning_rate)
        self.metrics = ['accuracy', AUC()]
コード例 #30
0
def get_mlp_epi2(shape_value):
    """Build and compile a small MLP ("MLP2") for binary classification.

    Args:
        shape_value: width of the flat input vector.

    Returns:
        The compiled ``Sequential`` model (nadam, binary cross-entropy,
        accuracy / AUROC / AUPRC metrics).
    """
    model = Sequential(name="MLP2")
    model.add(Input(shape=(shape_value, )))
    for width in (256, 128, 32):
        model.add(Dense(width, activation="relu"))
    model.add(Dense(1, activation="sigmoid"))

    model.compile(optimizer="nadam",
                  loss="binary_crossentropy",
                  metrics=["accuracy",
                           AUC(curve="ROC", name="auroc"),
                           AUC(curve="PR", name="auprc")])

    return model