def train_model(max_length=350, vocab_size=10000):
    model = build_model(max_length=max_length, vocab_size=vocab_size)
    X_ws_train, X_ws_test, trainX, trainY, testX, testY = load_data(
        max_length, vocab_size)
    checkpoint = ModelCheckpoint('best_model.h5',
                                 monitor='val_precision',
                                 mode='max',
                                 verbose=1,
                                 save_best_only=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=[
                      Precision(name="precision", thresholds=0.7),
                      Recall(name="recall")
                  ])
    history = model.fit([trainX, trainX, trainX],
                        array(trainY),
                        validation_data=([testX, testX, testX], testY),
                        epochs=50,
                        verbose=2,
                        callbacks=[
                            EarlyStopping("val_precision",
                                          patience=10,
                                          restore_best_weights=True),
                            checkpoint
                        ])
    return model, history
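Since the ModelCheckpoint above writes the best weights to best_model.h5, a natural follow-up is reloading that file for evaluation. A minimal sketch, reusing the test split returned by load_data:

from tensorflow.keras.models import load_model

# Reload the checkpointed model and score it on the held-out split.
best = load_model('best_model.h5')
best.evaluate([testX, testX, testX], testY, verbose=0)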
Example #2
def build_classifier(optimizer = "adam", loss = 'binary_crossentropy', activation = 'relu'):
    # Initialize the ANN
    classifier = Sequential()

    # Add the input layer and the first hidden layer
    classifier.add(Dense(units = 8, kernel_regularizer = None, kernel_initializer = 'uniform', activation = activation, input_dim = 11))

    #classifier.add(Dropout(rate = 0.05))

    # Add the second hidden layer
    classifier.add(Dense(units = 8, kernel_regularizer = None, kernel_initializer = 'uniform', activation = activation))

    #classifier.add(Dropout(rate = 0.05))

    #classifier.add(Dense(units = 24, kernel_regularizer = None, kernel_initializer = 'uniform', activation = activation))

    # It helps to place a Dropout layer after all the hidden layers to
    # mitigate overfitting

    classifier.add(Dropout(rate = 0.10)) # Go no higher than 0.5

    # Add the output layer
    classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))

    # Compile the ANN
    classifier.compile(optimizer = optimizer, loss = loss, metrics = [Recall()])

    # Return the classifier
    return classifier
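Because build_classifier exposes optimizer, loss and activation as arguments, it slots straight into a scikit-learn grid search. A minimal sketch, assuming the legacy keras.wrappers.scikit_learn wrapper and an 11-feature training matrix X_train/y_train:

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

# Wrap the builder; grid keys matching build_classifier's arguments are routed to it.
clf = KerasClassifier(build_fn=build_classifier, epochs=100, batch_size=32)
grid = GridSearchCV(clf,
                    param_grid={'optimizer': ['adam', 'rmsprop'],
                                'activation': ['relu', 'tanh']},
                    scoring='recall', cv=5)
# grid.fit(X_train, y_train)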
Example #3
    def __build_model(self):
        input_shape = (self.__patch_size, self.__patch_size, 1)
        filters = 8

        model = Sequential()

        model.add(Conv2D(filters, 3, activation='relu', padding='same', input_shape=input_shape))
        model.add(Conv2D(filters, 3, activation='relu', padding='same', input_shape=input_shape))
        model.add(MaxPooling2D())

        model.add(Conv2D(2 * filters, 3, activation='relu', padding='same'))
        model.add(Conv2D(2 * filters, 3, activation='relu', padding='same'))
        model.add(MaxPooling2D())

        model.add(Conv2D(4 * filters, 3, activation='relu', padding='same'))
        model.add(Conv2D(4 * filters, 3, activation='relu', padding='same'))
        model.add(MaxPooling2D())

        model.add(Flatten())
        model.add(Dense(4 * filters * (self.__patch_size // 8) ** 2, activation='relu'))

        model.add(Dense(1, activation='sigmoid'))

        model.summary()

        model.compile(optimizer=Adam(learning_rate=1e-3), loss='binary_crossentropy',
                      metrics=[Precision(name='precision'), Recall(name='recall'), 'accuracy'])
        return model
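The width of the first Dense layer is not arbitrary: three MaxPooling2D layers each halve the spatial dimensions, so the flattened feature map holds 4 * filters channels over a (patch_size // 8) x (patch_size // 8) grid, and the Dense layer simply matches that count. A quick check with an assumed patch size of 64:

# Assumed example value; __patch_size is set elsewhere in the class.
patch_size, filters = 64, 8
print(4 * filters * (patch_size // 8) ** 2)  # 32 channels * 8 * 8 positions = 2048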
    def config(self, layers):
        # Input layer
        input_layer = Input(shape=(layers[0], 1))
        # Dropout

        # Hidden Layer 1
        encoded = Dense(layers[1], activation='relu')(input_layer)
        # Dropout
        # Hidden Layer 2
        encoded = Conv1D(layers[2], 5, activation='relu')(encoded)
        # Dropout
        encoded = Dropout(0.5)(encoded)
        encoded = Flatten()(encoded)
        # Hidden Layer 3
        encoded = Dense(layers[3], activation='relu')(encoded)
        # Dropout
        # encoded = Dropout(0.5)(encoded)
        # Softmax
        softmax = Dense(self.nb_classes, activation='softmax')(encoded)
        # Config the model
        self.model = Model(inputs=input_layer, outputs=softmax)
        # model compilation
        self.model.compile(optimizer='nadam',
                           loss="categorical_crossentropy",
                           metrics=[Recall(), AUC()])
Example #5
 def __init__(self,
              maxlen=128,
              batch_size=32,
              w_embed_size=200,
              padding="post",
              h_embed_size=200,
              dropout=0.1,
              patience=1,
              plot=True,
              max_epochs=100):
     self.maxlen = maxlen
     self.METRICS = [
         BinaryAccuracy(name='accuracy'),
         Precision(name='precision'),
         Recall(name='recall'),
         AUC(name='auc')
     ]
     self.w_embed_size = w_embed_size
     self.h_embed_size = h_embed_size
     self.dropout = dropout
     self.vocab_size = -1
     self.padding = padding
     self.patience = patience
     self.model = None
     self.w2i = {}
     self.epochs = max_epochs
     self.i2w = {}
     self.vocab = []
     self.batch_size = batch_size
     self.show_the_model = plot
     self.threshold = 0.2
     self.toxic_label = 2
     self.not_toxic_label = 1
     self.unk_token = "[unk]"
     self.pad_token = "[pad]"
Example #6
def create_model(input_len: int, num_classes: int) -> Model:
    input1 = Input(shape=(input_len, ))

    layer1 = Dense(units=128, activation='relu')(input1)

    dropout_1 = Dropout(0.25)(layer1)

    layer2 = Dense(units=256, activation='relu')(dropout_1)

    dropout_2 = Dropout(0.25)(layer2)

    layer3 = Dense(units=num_classes, activation='softmax')(dropout_2)

    model = Model(inputs=[input1], outputs=[layer3])

    metrics = [
        Precision(name='precision'),
        Recall(name='recall'),
        AUC(name='auc')
    ]

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=metrics)
    model.summary()

    return model
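With loss='categorical_crossentropy', create_model expects one-hot targets. A minimal usage sketch with made-up shapes:

import numpy as np
from tensorflow.keras.utils import to_categorical

model = create_model(input_len=20, num_classes=4)
X = np.random.rand(8, 20)                            # 8 samples, 20 features
y = to_categorical(np.random.randint(4, size=8), 4)  # one-hot labels, shape (8, 4)
model.fit(X, y, epochs=1, verbose=0)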
Example #7
def create_model():
    sequence_input = Input(shape=(max_length, ), dtype='int32')
    embedded_sequences = embedding_layer(sequence_input)
    x = Conv1D(filters=128,
               kernel_size=3,
               input_shape=(max_length, len(word_index) + 1),
               data_format='channels_first')(embedded_sequences)
    x = MaxPooling1D()(x)
    x = Conv1D(filters=128,
               kernel_size=3,
               input_shape=(max_length, len(word_index) + 1),
               data_format='channels_first')(x)
    x = MaxPooling1D()(x)
    x = Conv1D(filters=128,
               kernel_size=3,
               input_shape=(max_length, len(word_index) + 1),
               data_format='channels_first')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(3, activation='softmax')(x)

    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc', Precision(), Recall()])
    return model
Example #8
def nn_sm(df, df_t):

    X_train = df.iloc[:, :16]
    y_train = df.iloc[:, 16:]

    # SMOTE (oversampling), applied after splitting and cross-validation
    sm = SMOTE(sampling_strategy='minority', random_state=42)

    # This is the oversampled data we will train on
    Xsm_train, ysm_train = sm.fit_resample(X_train, y_train)

    n_inputs = df.iloc[:, :16].shape[1]

    nn = Sequential([
        Dense(n_inputs, input_shape=(n_inputs, ), activation='relu'),
        Dense(32, activation='relu'),
        Dense(1, activation='sigmoid')
    ])

    nn.compile(optimizer='sgd',
               loss='binary_crossentropy',
               metrics=[BinaryAccuracy(), Recall()])  # Accuracy() would compare raw probabilities to labels

    nn.fit(Xsm_train,
           ysm_train,
           validation_split=0.2,
           batch_size=100,
           epochs=50,
           shuffle=True,
           verbose=2)

    df_t.iloc[:, 6] = np.random.permutation(df_t.iloc[:, 6].values)

    probas = nn.predict(df_t.iloc[:, :16], verbose=0)

    predictions = (probas > 0.5).astype("int32")  # predict_classes was removed from Keras

    y_test = df_t.iloc[:, 16:]['y_t+2'].values

    #Metrics
    f1 = f1_score(y_test, predictions)
    acc = accuracy_score(y_test, predictions)
    recall = recall_score(y_test, predictions)
    precision = precision_score(y_test, predictions)
    #roc_auc = roc_auc_score(y_test, predictions)

    return predictions, y_test, f1, acc, recall, precision, probas
Example #9
def driverModel():
    ##### ----------------
    parent = "Blood-Cancer_Data"
    ALL_IDB1 = f"{parent}/All_IDB1/im"
    annotate1 = f"{parent}/ALL_IDB1/xyc"
    AML_ALL_img = f"{parent}/SN-AM-BALL-MM"
    classes_AML_ALL = next(os.walk(AML_ALL_img))[1]  # subfolder names act as class labels
    multiple_myeloma = f"{parent}/multiple_myeloma"
    myeloma_annotate = f"{parent}/multiple_myeloma/Annotated_PPT_MM_Data.pdf"
    ##### ----------------

    c3bo = C3BO(main_file=parent, annotate_file=annotate1, classes=None)
    c3bo.annotate_files(ALL_IDB1, AML_ALL_img)
    data = c3bo.classes
    img_files, centroid_files = data.keys(), data.values()

    c3bo.to_df(img_files, centroid_files)
    c3bo.label_diagnosis()
    c3bo.strong_neural_net((128, 128, 3), 3)

    metrics = [
        "accuracy",
        Precision(),
        Recall(),
        AUC(),
        SensitivityAtSpecificity(0.5),
        SpecificityAtSensitivity(0.5)
    ]
    c3bo.compile_model('rmsprop', 'categorical_crossentropy', metrics)

    df = c3bo.df

    img_pixels = df["image_pixels"].copy()
    labels = df["diagnosis"].copy()
    files = df["image_files"].copy()
    c3bo.preprocess(labels)

    X_full_data = c3bo.images_array(img_pixels, (128, 128, 3))
    X_full_data = c3bo.shuffle_data(X_full_data)

    X_train, y_train, X_val, y_val, X_test, y_test = c3bo.partitioned_data(
        X_full_data)
    c3bo.train(X_train, y_train, X_val, y_val)

    return c3bo.model
def modelBuilder(hp):
    """
    Assign input and output tensors, build neural net and compile model
    :param hp: hyperparameters, argument needed to call this function from evaluateBestParams function
               (see also https://keras.io/api/keras_tuner/hyperparameters/)
    :return: model, compiled neural net model
    """
    ## keras model
    u = Input(shape=(1, ))
    s = Input(shape=(1, ))
    u_embedding = Embedding(N, K)(u)  ## (N, 1, K)
    s_embedding = Embedding(M, K)(s)  ## (N, 1, K)
    u_embedding = Flatten()(u_embedding)  ## (N, K)
    s_embedding = Flatten()(s_embedding)  ## (N, K)
    x = Concatenate()([u_embedding, s_embedding])  ## (N, 2K)

    ## Tune the number of units in the first Dense layer
    ## Choose an optimal value between 32-512
    hp_units = hp.Int('units', min_value=32, max_value=512, step=32)
    x = Dense(units=hp_units)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(100)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dense(1, activation="sigmoid")(x)

    ## define model and compile. Use BinaryCrossEntropy for binary classification approach
    model = Model(inputs=[u, s], outputs=x)
    model.compile(
        loss=BinaryCrossentropy(),  # the output layer is already sigmoid, so from_logits must be False
        optimizer=SGD(learning_rate=0.08, momentum=0.9),
        metrics=[
            AUC(thresholds=[0.0, 0.5, 1.0]),
            BinaryAccuracy(threshold=0.5),
            Precision(),
            Recall()
        ],
    )

    ## print model summary
    # model.summary()

    return model
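modelBuilder follows the KerasTuner model-builder signature, so it can be handed to a tuner directly. A minimal sketch; the objective, data, and directory names are assumptions, and the evaluateBestParams function from the docstring is not shown here:

import keras_tuner as kt

tuner = kt.Hyperband(modelBuilder, objective='val_loss', max_epochs=10,
                     directory='tuning', project_name='recommender')
# tuner.search([u_train, s_train], y_train, validation_split=0.2)
# best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]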
Example #11
def assignModel(N, M, K):
    """
    Assign input and output tensors, build neural net and compile model
    :param N: integer, number of users
    :param M: integer, number of songs
    :param K: integer, latent dimensionality
    :return: model, compiled neural net model
    """
    ## keras model
    u = Input(shape=(1,))
    s = Input(shape=(1,))
    u_embedding = Embedding(N, K)(u)   ## (N, 1, K)
    s_embedding = Embedding(M, K)(s)   ## (N, 1, K)
    u_embedding = Flatten()(u_embedding)  ## (N, K)
    s_embedding = Flatten()(s_embedding)  ## (N, K)
    x = Concatenate()([u_embedding, s_embedding])  ## (N, 2K)

    ## the neural network (use sigmoid activation function in output layer for binary classification)
    x = Dense(400)(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(100)(x)
    x = BatchNormalization()(x)
    # x = Activation('sigmoid')(x)
    x = Dense(1, activation="sigmoid")(x)

    ## define model and compile. Use BinaryCrossEntropy for binary classification approach
    model = Model(inputs=[u, s], outputs=x)
    model.compile(
      loss=BinaryCrossentropy(),  # sigmoid output layer, so from_logits must be False
      optimizer=SGD(learning_rate=0.08, momentum=0.9),
      metrics=[AUC(thresholds=[0.0, 0.5, 1.0]),
               BinaryAccuracy(threshold=0.5),
               Precision(),
               Recall()],
    )

    return model
Example #12
def char_model(x_train, y_train, x_test, y_test, params=None, fit_model=True):
    ''' params is a dictionary containing hyperparameter values. See main.py
    for current definition.
    '''
    # input and embeddings for characters
    char_in = Input(shape=(params['max_num_words'], params['max_chars_in_word']),
                    name='input')
    emb_char = TimeDistributed(Embedding(input_dim=params['num_of_unique_chars']+2,
                                        output_dim=params['lstm_units_char_emb'],
                                        input_length=params['max_chars_in_word'],
                                        mask_zero=True,
                                        trainable=True),
                               name='embed_dense_char')(char_in)
    emb_char = TimeDistributed(LSTM(units=params['lstm_units_char_emb'],
                                    return_sequences=False),
                                    # dropout=params['dropout_rate_char_emb']),
                                    name='learn_embed_char')(emb_char)
    bilstm = Bidirectional(LSTM(units=params['bilstm_units'],
                                # recurrent_dropout=params['bilstm_dropout_rate'],
                                return_sequences=False),
                            merge_mode='sum')(emb_char)


    dense = Dense(params['bilstm_units'], activation='relu',
                    name='linear_decode2')(bilstm)
    out = Dense(3, activation='softmax', name='output_softmax1')(dense)

    model = Model(char_in, out)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy', Precision(), Recall()])

    if fit_model:
        history = model.fit(x_train, y_train,
                          batch_size=params['batch_size'], epochs=params['epochs'],
                          validation_data=(x_test, y_test),
                          verbose=2)
        return history, model
    else:
        return model
Example #13
    def train_model(self):

        sgd = SGD(learning_rate=0.0001, decay=1e-6,
                  momentum=0.9, nesterov=True)
        early_stopping = EarlyStopping(monitor='val_loss', patience=12)
        for i, layer in enumerate(self.model.layers):
            print(i, layer.name)
        trainable_layer = 48
        for i in range(trainable_layer):
            self.model.layers[i].trainable = False
        reducelr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=8, verbose=1, mode='min')
        datagen = ImageDataGenerator(
            rotation_range=45,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.15,
            zoom_range=0.15,
            horizontal_flip=True,
            # rescale=1./255,
            # vertical_flip=True,
            fill_mode='nearest',
            )
        # datagen.fit(self.dataset.X_train)
        self.model.compile(
            # optimizer='adam',
            optimizer=sgd,
            loss='categorical_crossentropy',
            metrics=[categorical_accuracy,Precision(),Recall()])
        # epochs and batch_size are tunable: epochs is the number of training passes,
        # batch_size the number of samples per update
        self.history = self.model.fit(
            datagen.flow(self.dataset.X_train,self.dataset.Y_train,batch_size=6,shuffle=True),
            epochs=1000,
            verbose = 1,
            # validation_data=datagen.flow(self.dataset.X_train,self.dataset.Y_train,batch_size=1,subset="validation"),
            validation_data=(self.dataset.X_test, self.dataset.Y_test,),
            shuffle=True,
            callbacks=[early_stopping,reducelr])
Example #14
    def createModel(self):
        model = Sequential()
        model.add(
            Conv2D(
                32,
                (4, 4),
                padding="valid",
                strides=1,
                input_shape=self.inputShape,
                activation="relu",
            ))

        model.add(Conv2D(32, (4, 4), activation="relu"))

        model.add(Conv2D(32, (4, 4), activation="relu"))

        model.add(MaxPooling2D(pool_size=(8, 8)))

        model.add(Flatten())

        model.add(Dense(2048, activation="relu"))
        model.add(Dropout(0.25))

        model.add(Dense(2048, activation="relu"))
        model.add(Dropout(0.25))

        model.add(Dense(self.numOfClasses, activation="softmax"))

        opt = Adam(learning_rate=INIT_LR, decay=INIT_LR / EPOCHS)
        model.compile(
            loss="categorical_crossentropy",
            optimizer=opt,
            metrics=["accuracy", Precision(),
                     Recall()],
        )
        return model
def train_and_validate(train_x, train_y, test_x, test_y, hparams):
    unique_items = len(train_y[0])
    model = LSTMRec(
        vocabulary_size=unique_items,
        emb_output_dim=hparams['emb_dim'],
        lstm_units=hparams['lstm_units'],
        lstm_activation=hparams['lstm_activation'],
        lstm_recurrent_activation=hparams['lstm_recurrent_activation'],
        lstm_dropout=hparams['lstm_dropout'],
        lstm_recurrent_dropout=hparams['lstm_recurrent_dropout'],
        dense_activation=hparams['dense_activation'])
    model.compile(optimizer=Adam(learning_rate=hparams['learning_rate'],
                                 beta_1=hparams['adam_beta_1'],
                                 beta_2=hparams['adam_beta_2'],
                                 epsilon=hparams['adam_epsilon']),
                  loss='binary_crossentropy',
                  metrics=[
                      Precision(top_k=1, name='P_at_1'),
                      Precision(top_k=3, name='P_at_3'),
                      Precision(top_k=5, name='P_at_5'),
                      Precision(top_k=10, name='P_at_10'),
                      Recall(top_k=10, name='R_at_10'),
                      Recall(top_k=50, name='R_at_50'),
                      Recall(top_k=100, name='R_at_100')
                  ])
    hst = model.fit(
        x=train_x,
        y=train_y,
        batch_size=hparams['batch_size'],
        epochs=250,
        callbacks=[
            EarlyStopping(monitor='val_R_at_10',
                          patience=10,
                          mode='max',
                          restore_best_weights=True,
                          verbose=True),
            ModelCheckpoint(filepath=os.path.join(os.pardir, os.pardir,
                                                  'models',
                                                  hparams['run_id'] + '.ckpt'),
                            monitor='val_R_at_10',
                            mode='max',
                            save_best_only=True,
                            save_weights_only=True,
                            verbose=True),
            TensorBoard(log_dir=os.path.join(os.pardir, os.pardir, 'logs',
                                             hparams['run_id']),
                        histogram_freq=1)
        ],
        validation_split=0.2)
    val_best_epoch = np.argmax(hst.history['val_R_at_10'])
    test_results = model.evaluate(test_x, test_y)
    with tf.summary.create_file_writer(
            os.path.join(os.pardir, os.pardir, 'logs', hparams['run_id'],
                         'hparams')).as_default():
        hp.hparams(hparams)
        tf.summary.scalar('train.final_loss',
                          hst.history["val_loss"][val_best_epoch],
                          step=val_best_epoch)
        tf.summary.scalar('train.final_P_at_1',
                          hst.history["val_P_at_1"][val_best_epoch],
                          step=val_best_epoch)
        tf.summary.scalar('train.final_P_at_3',
                          hst.history["val_P_at_3"][val_best_epoch],
                          step=val_best_epoch)
        tf.summary.scalar('train.final_P_at_5',
                          hst.history["val_P_at_5"][val_best_epoch],
                          step=val_best_epoch)
        tf.summary.scalar('train.final_P_at_10',
                          hst.history["val_P_at_10"][val_best_epoch],
                          step=val_best_epoch)
        tf.summary.scalar('train.final_R_at_10',
                          hst.history["val_R_at_10"][val_best_epoch],
                          step=val_best_epoch)
        tf.summary.scalar('train.final_R_at_50',
                          hst.history["val_R_at_50"][val_best_epoch],
                          step=val_best_epoch)
        tf.summary.scalar('train.final_R_at_100',
                          hst.history["val_R_at_100"][val_best_epoch],
                          step=val_best_epoch)

        tf.summary.scalar('test.final_loss',
                          test_results[0],
                          step=val_best_epoch)
        tf.summary.scalar('test.final_P_at_1',
                          test_results[1],
                          step=val_best_epoch)
        tf.summary.scalar('test.final_P_at_3',
                          test_results[2],
                          step=val_best_epoch)
        tf.summary.scalar('test.final_P_at_5',
                          test_results[3],
                          step=val_best_epoch)
        tf.summary.scalar('test.final_P_at_10',
                          test_results[4],
                          step=val_best_epoch)
        tf.summary.scalar('test.final_R_at_10',
                          test_results[5],
                          step=val_best_epoch)
        tf.summary.scalar('test.final_R_at_50',
                          test_results[6],
                          step=val_best_epoch)
        tf.summary.scalar('test.final_R_at_100',
                          test_results[7],
                          step=val_best_epoch)

    return val_best_epoch, test_results
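Note how Precision(top_k=k) changes the metric's meaning: only the k highest-scoring entries per sample count as positive predictions. A toy illustration:

from tensorflow.keras.metrics import Precision

m = Precision(top_k=1)
m.update_state([[0, 0, 1, 0]], [[0.1, 0.2, 0.6, 0.1]])
print(m.result().numpy())  # 1.0: the single top-scoring entry is a true positive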
Example #16
RevInput = Input(shape=(MAX_REVSEQUENCE_LENGTH, ), dtype='int32')
x = rev_embedding_layer(RevInput)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = GlobalMaxPooling1D()(x)
z = Dense(128, activation='relu')(x)
z = Dropout(0.5)(z)
Preds = Dense(len(labels_index), activation='sigmoid')(z)

# Compiling the Model
model = Model(inputs=RevInput, outputs=Preds)

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['acc', Recall()])

# Model Checkpoint
checkpoint = ModelCheckpoint('weights.{epoch:03d}-{val_acc:.4f}.h5',
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             mode='auto')
params = {
    'batch_size': 128,
    'n_classes': len(labels_index),
    'shuffle': True,
    'max_rev_seq_length': MAX_REVSEQUENCE_LENGTH
}

training_generator = TrainingDataGenerator(trainingdf, **params)
# - FOLD------------------------------------------------------------------------------------------------------------
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=7)

brojac = 1
for train, test in kfold.split(X_fit,y):
    model = keras.Sequential()
    model.add(keras.layers.Flatten(input_shape=(brojInputa,), name='PrviSloj'))
    model.add(keras.layers.Dense(16, activation=tf.nn.relu, name='DrugiSloj'))
    model.add(keras.layers.Dense(8, activation=tf.nn.relu, name='TreciSloj'))
    model.add(keras.layers.Dense(4, activation=tf.nn.relu, name='CetvrtiSloj'))
    model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid, name='Izlaz'))

    #opt_adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=10e-08, decay=0.0)
    model.compile(optimizer='adam',
                    loss='binary_crossentropy',
                    metrics=[Recall(), Precision(), BinaryAccuracy(threshold=0.5), SpecificityAtSensitivity(0.5)])

    # stop training if the metric shows no improvement for n epochs
    callback_early_stopping = EarlyStopping(monitor='val_precision',
                                        mode='max',
                                        patience=20, verbose=1)

    # write to the log during training
    callback_tensorboard = TensorBoard(log_dir='./Logovi/',
                                   histogram_freq=0,
                                   write_graph=False)
    # record every checkpoint
    path_checkpoint = 'Checkpoint.keras'
    callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint,
                                          monitor='val_precision',
                                          verbose=1,
                                          save_weights_only=True,
Example #18
    return inputs, predictions


#%%

# Model Architecture
conv_layers = [[256, 7, 3], [256, 7, 3], [256, 3, -1], [256, 3, -1],
               [256, 3, -1], [256, 3, 3]]
fc_layers = [1024, 1024]
loss = "categorical_crossentropy"

# Parameters
optimizer = "adam"
batch_size = 128
epochs = 7
metrics = [Precision(), Recall()]

# Build and compile model
inputs, outputs = create_model(input_size, len(alphabet), conv_layers,
                               fc_layers, num_of_classes)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
model.summary()

#%%

# Training
model.fit(training_inputs,
          training_labels,
          validation_data=(validation_inputs, validation_labels),
          epochs=epochs,
Example #19
def model_recall(y_true, y_pred):
    return Recall()(y_true[..., 0], y_pred[..., 0])
Example #20
    max_length = 50

    X_train, y_train = parse_sentences(word2idx, label2idx, labels_count,
                                       train_sentences, max_length, True)
    X_dev, y_dev = parse_sentences(word2idx, label2idx, labels_count,
                                   dev_sentences, max_length, True)
    X_test, y_test = parse_sentences(word2idx, label2idx, labels_count,
                                     test_sentences, max_length, True)

    model = BiLSTM(words_count, labels_count, max_length)

    callback = EarlyStopping(monitor="val_accuracy", patience=20, verbose=1)
    model.compile(optimizer="rmsprop",
                  loss="categorical_crossentropy",
                  metrics=["accuracy", Precision(),
                           Recall()])
    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_dev, y_dev),
                        callbacks=[callback],
                        batch_size=32,
                        epochs=2,
                        verbose=1)
    history = pd.DataFrame(history.history)

    plt.clf()
    plt.plot(history["accuracy"])
    plt.plot(history["val_accuracy"])
    plt.title("Accuracy")
    plt.savefig("accuracy.png")
Example #21
                 activation='tanh'),
        L.GlobalMaxPool1D(),
        L.Dense(256, activation='relu'),
        L.Dense(1, activation='sigmoid')
    ])
    return cnn


class EvalCallback(Callback):
    def __init__(self):
        super(EvalCallback, self).__init__()
        self.acc = 0

    def on_epoch_end(self, epoch, logs=None):
        if logs['val_accuracy'] > self.acc:
            self.acc = logs['val_accuracy']
            self.model.save_weights('best-weights.weights')


if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=NUM_WORDS)
    cnn = build_cnn_model()
    cnn.summary()
    cnn.compile(loss=BinaryCrossentropy(),
                optimizer=Adam(learning_rate),
                metrics=['accuracy', Recall()])
    cnn.fit(IMDBDataLoader(x_train, y_train, max_len=MAX_LEN),
            validation_data=IMDBDataLoader(x_test, y_test, max_len=MAX_LEN),
            epochs=EPOCH,
            callbacks=[EvalCallback(), TensorBoard()])
Example #22
PATH_EXP = 'CadLung/EXP1/'
checkpoint_filepath = PATH_EXP+'CHKPNT/checkpoint.hdf5'
bestModel_filepath = PATH_EXP+'MODEL/bestModel.h5'
performancePng_filepath = PATH_EXP+'OUTPUT/bestModelPerformance.png'

#Data
POSTNORMALIZE = 'y' #Options: {'y', 'n'}
NORMALIZE = 'z-score' #Options: {Default: 'none', 'range', 'z-score'}

#Model
VAL_SPLIT = 0.15
regRate = 0.001
regFn = rg.l2(regRate)
EPOCHS = 10
BATCH_SIZE = 32
METRICS = ['accuracy', AUC(), Recall(), Precision(), FalsePositives(), TrueNegatives()]
LOSS_F = 'binary_crossentropy'
INITZR = 'random_normal'
OPT_M = 'adam'
modelChkPnt_cBk = cB.ModelCheckpoint(filepath=checkpoint_filepath,
                                     save_weights_only=True,
                                     monitor='val_accuracy',
                                     mode='max',
                                     save_best_only=True)
clBacks = [modelChkPnt_cBk]

#Network Architecture
MAX_NUM_NODES = (None, 6, 6, 6, 1) #first layer, hidden layers, and output layer. #hidden nodes > 1.
lenMaxNumHidenLayer = len(MAX_NUM_NODES) - 2    #3
ACT_FUN = (None, 'relu', 'relu', 'relu', 'sigmoid') #best activation functions for binary classification
Example #23
X_train = dataset[0:train_size, 0:6]
Y_train = dataset[0:train_size, 6]
X_val = dataset[train_size:, 0:6]
Y_val = dataset[train_size:, 6]

model = Sequential()
model.add(Dense(30, input_dim=6, activation='relu'))
model.add(Dense(30, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(
    loss='binary_crossentropy',
    optimizer='adam',
    metrics=['accuracy',
             Precision(name='precision'),
             Recall(name='recall')])
model.fit(X_train, Y_train, epochs=100, batch_size=10)

classes = (model.predict(X_val) > 0.5).astype("int32")  # predict_classes was removed from Keras
classes = classes[:, 0]

print("Performance on Validation Set:")

acc = accuracy_score(Y_val, classes)
print('Accuracy : ', acc)

pre = precision_score(Y_val, classes)
print("Precision : ", pre)

rec = recall_score(Y_val, classes)
print("Recall : ", rec)
model = Sequential()
model.add(
    Embedding(input_dim=input_dim,
              output_dim=embed_dim,
              input_length=X_train.shape[1]))
model.add(SpatialDropout1D(0.4))
model.add(LSTM(30, return_sequences=True, recurrent_dropout=0.5))
model.add(LSTM(30, dropout=0.5, recurrent_dropout=0.5))
model.add(
    Dense(30, activation='sigmoid')
)  #sigmoid for binary classification, softmax for multiclass classification
model.add(Dense(1, activation='sigmoid'))
model.compile(
    loss='binary_crossentropy',
    optimizer='rmsprop',
    metrics=['accuracy', Precision(), Recall()]
)  #binary_crossentropy for binary classification, categorical_crossentropy for multiclass
print(model.summary())
#----------------------------------

# Fit model
#----------------------------------
batch_size = 128
checkpoint1 = ModelCheckpoint("weights/LSTM_best_model1.hdf5",
                              monitor='val_accuracy',
                              verbose=1,
                              save_best_only=True,
                              mode='auto',
                              save_freq='epoch',
                              save_weights_only=False)
history = model.fit(X_train,
Example #25
def main(config=None):
    trial_name = os.path.splitext(__file__)[0]
    model_filename = os.path.sep.join(["output", trial_name,"model.h5"])
    checkpoint_folder = os.path.sep.join(["output", trial_name])
    from pathlib import Path
    Path(checkpoint_folder).mkdir(parents=True, exist_ok=True)

    #import numpy as np # linear algebra
    import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
    #import matplotlib.pyplot as plt
    import matplotlib
    matplotlib.use("Agg")


    from keras.models import Sequential,load_model
    #from keras.layers import Dense, , Flatten, , Conv2DTranspose, BatchNormalization, UpSampling2D, Reshape
    from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D
    from keras.metrics import Precision,Recall
    from helpers.overallperformance import OverallPerformance
    #from keras import backend as K
    #from keras.utils import to_categorical
    from keras.preprocessing.image import ImageDataGenerator
    from keras.optimizers import Adam
    import tensorflow as tf

    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        _ = tf.config.experimental.set_memory_growth(physical_devices[0], True)

    import wandb
    from wandb.keras import WandbCallback
    if(config is None):
        wandb.init(project="minibar")
        config = wandb.config
    else:
        wandb.init(project="minibar",config=config)

    df_train =  pd.read_csv('data/train_labels.csv')
    #df_test =  pd.read_csv('data/test_labels.csv')

    from helpers.decouple import decouple
    matrix_train,_ = decouple(df_train)
    from helpers.matrix_to_df import matrix_to_df
    df_train_agg = matrix_to_df(matrix_train)

    train_datagen = ImageDataGenerator(
            validation_split=0.2,horizontal_flip=True)

    train_generator = train_datagen.flow_from_dataframe(
            dataframe=df_train_agg,
            directory='data/train',
            x_col='filename',
            y_col='class',
            target_size=(config['input_shape_height'], config['input_shape_width']),
            batch_size=config['batch_size'],
            class_mode='categorical',
            subset="training",)

    validation_generator = train_datagen.flow_from_dataframe(
            dataframe=df_train_agg,
            directory='data/train',
            x_col='filename',
            y_col='class',
            target_size=(config['input_shape_height'], config['input_shape_width']),
            batch_size=config['batch_size'],
            class_mode='categorical',
            subset="validation",)


    if os.path.isfile(model_filename) and config['continue_training']:
        model = load_model(model_filename)
    else:
        model = Sequential()

        # Step 1 - Convolution
        model.add(Conv2D(32, (3, 3), padding='same', input_shape = (config['input_shape_height'], config['input_shape_width'], 3), activation = 'relu'))
        model.add(Conv2D(32, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5)) # was 0.25 before
        # Adding a second convolutional layer
        model.add(Conv2D(64, (3, 3), padding='same', activation = 'relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5)) # was 0.25 before
        # Adding a third convolutional layer
        model.add(Conv2D(64, (3, 3), padding='same', activation = 'relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5)) # was 0.25 before
        # Step 3 - Flattening
        model.add(Flatten())
        # Step 4 - Full connection
        model.add(Dense(units = 512, activation = 'relu'))
        model.add(Dropout(0.5)) 
        model.add(Dense(units = 40, activation = 'sigmoid'))

        model.compile(optimizer=Adam(learning_rate=config['learning_rate']), loss='binary_crossentropy', metrics=['accuracy',Precision(),Recall(),OverallPerformance()])
        model.save(model_filename)

    # construct the set of callbacks
    from helpers.epochcheckpoint import EpochCheckpoint
    callbacks = [
        EpochCheckpoint(checkpoint_folder, every=1,startAt=0),
        WandbCallback(save_model=False)
    ]

    model.fit(
        train_generator,
        #steps_per_epoch=100,
        epochs=config['epoch'],
        #steps_per_epoch=100,
        validation_data=validation_generator,
        #validation_steps=100
        callbacks=callbacks,
        verbose=1,
        initial_epoch=config['initial_epoch']
        )
    model.save(model_filename)
Example #26
def model(x_train,
          y_train,
          X_test,
          y_test,
          embed_matrix,
          params=None,
          fit_model=True):
    ''' params is a dictionary containing hyperparameter values. See main.py
    for current definition.
    '''
    # input and embeddings for characters
    word_in = Input(shape=(params['max_num_words'], ))

    emb_word = Embedding(
        input_dim=params['vocab_size'],
        output_dim=300,
        input_length=params['max_num_words'],
        mask_zero=True,
        embeddings_initializer=Constant(embed_matrix))(word_in)

    char_in = Input(shape=(params['max_num_words'],
                           params['max_chars_in_word']),
                    name='input')
    emb_char = TimeDistributed(Embedding(
        input_dim=params['num_of_unique_chars'] + 2,
        output_dim=params['lstm_units_char_emb'],
        input_length=params['max_chars_in_word'],
        mask_zero=True,
        trainable=True),
                               name='embed_dense_char')(char_in)
    emb_char = TimeDistributed(LSTM(units=params['lstm_units_char_emb'],
                                    return_sequences=False,
                                    recurrent_dropout=0.5),
                               name='learn_embed_char')(emb_char)

    x = concatenate([emb_word, emb_char])
    bilstm = Bidirectional(LSTM(units=params['bilstm_units'],
                                recurrent_dropout=0.5,
                                return_sequences=False),
                           merge_mode='sum')(x)

    bilstm = Dense(params['bilstm_units'],
                   activation='relu',
                   name='linear_decode1')(bilstm)

    out = Dense(3, activation='softmax', name='output_softmax1')(bilstm)

    model = Model([word_in, char_in], out)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', Precision(),
                           Recall()])
    print(model.summary())
    if fit_model:
        history = model.fit(x_train,
                            y_train,
                            batch_size=params['batch_size'],
                            epochs=params['epochs'],
                            validation_data=(X_test, y_test),
                            verbose=2)
        return history, model
    else:
        return model
Example #27
def get_recall(y_true, y_predict):
    m = Recall()
    m.update_state(y_true, y_predict)

    return m.result().numpy()
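get_recall wraps the stateful Recall metric in a one-shot helper (eager execution makes .numpy() available on the result). A toy call with binary labels and the default 0.5 threshold:

import numpy as np

y_true = np.array([1, 0, 1, 1])
y_pred = np.array([0.9, 0.3, 0.2, 0.8])
print(get_recall(y_true, y_pred))  # ~0.667: two of the three positives score above 0.5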
Example #28

model = model_architecture()
model.summary()



#data_gen = ImageDataGenerator()
data_gen = ImageDataGenerator(preprocessing_function=preprocess_input)
train_it = data_gen.flow_from_directory('2_way_classification/train', target_size = (image_shape,image_shape), batch_size = 64)
val_it = data_gen.flow_from_directory('2_way_classification/val', target_size = (image_shape,image_shape), batch_size = 20)
test_it = data_gen.flow_from_directory('2_way_classification/test', target_size = (image_shape,image_shape), batch_size = 80)



model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy', Recall()])



early_stopping = EarlyStopping(patience = 2, restore_best_weights = True)
filepath="logs/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, save_freq='epoch')  # the deprecated period=2 argument saved every other epoch



model.fit(train_it, steps_per_epoch=6, epochs=20, callbacks=[checkpoint], validation_data=val_it, validation_steps=3)

#model.fit_generator(train_it, steps_per_epoch=8, epochs=10, callbacks=[checkpoint], validation_data=val_it, validation_steps=4)


Example #29
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.vgg19 import preprocess_input
from keras.optimizers import RMSprop
from sklearn.metrics import classification_report
test_dir = '../test/'
# load json and create model
json_file = open('../Xray_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("../Xray_best_model")

rmsprop = RMSprop(learning_rate=0.0001)
model.compile(loss='categorical_crossentropy',
              metrics=[Recall(class_id=2)],
              optimizer=rmsprop)
#print(loaded_model.summary())

#Test Generator
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

test_generator = test_datagen.flow_from_directory(test_dir,
                                                  target_size=(200, 150),
                                                  color_mode='rgb',
                                                  batch_size=32,
                                                  class_mode='categorical',
                                                  shuffle=False)

STEP_SIZE_TEST = test_generator.n // test_generator.batch_size
#loss, acc=model.evaluate_generator(test_generator,steps=STEP_SIZE_TEST,verbose=1)
Example #30
    if args.class_weights is None:
        class_weights = dict(
            enumerate(
                class_weight.compute_class_weight('balanced',
                                                  classes=np.unique(y_train),
                                                  y=y_train.reshape(-1))))
    else:
        class_weights = dict(enumerate(args.class_weights))

    print(class_weights)

    if args.metric == "f1score":
        model.compile(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy', Recall(), get_f1])

        callbacks = []
        callbacks.append(
            ModelCheckpoint(model_path,
                            monitor='val_get_f1',
                            verbose=1,
                            save_best_only=True,
                            save_weights_only=True,
                            mode='max'))

        csv_logger = CSVLogger(os.path.join(
            args.save_dir,
            'RDNN_log_s{}_{}.csv'.format(args.seed, current_time)),
                               separator=',',
                               append=False)
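The compile call above references a custom get_f1 metric that this snippet does not define. A minimal sketch of the usual batch-wise implementation (an assumption, not the author's code):

from tensorflow.keras import backend as K

def get_f1(y_true, y_pred):
    # Batch-wise F1 from thresholded predictions; K.epsilon() guards division by zero.
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_p = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_p = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = tp / (predicted_p + K.epsilon())
    recall = tp / (possible_p + K.epsilon())
    return 2 * (precision * recall) / (precision + recall + K.epsilon())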