def train_model(max_length=350, vocab_size=10000):
    model = build_model(max_length=max_length, vocab_size=vocab_size)
    X_ws_train, X_ws_test, trainX, trainY, testX, testY = load_data(
        max_length, vocab_size)
    checkpoint = ModelCheckpoint('best_model.h5',
                                 monitor='val_precision',
                                 mode='max',
                                 verbose=1,
                                 save_best_only=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=[
                      Precision(name="precision", thresholds=0.7),
                      Recall(name="recall")
                  ])
    history = model.fit([trainX, trainX, trainX],
                        array(trainY),
                        validation_data=([testX, testX, testX], testY),
                        epochs=50,
                        verbose=2,
                        callbacks=[
                            EarlyStopping("val_precision",
                                          patience=10,
                                          restore_best_weights=True),
                            checkpoint
                        ])
    return model, history
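A minimal usage sketch, assuming build_model and load_data from the same module are importable; the checkpoint callback also leaves the best-scoring weights on disk:

model, history = train_model(max_length=350, vocab_size=10000)
# Reload the weights that scored best on val_precision:
# best = tf.keras.models.load_model('best_model.h5')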
Example #2
def aa_blstm(num_classes, num_letters, sequence_length, embed_size=5000):

    model = Sequential()
    # model.add(Conv1D(input_shape=(sequence_length, num_letters), filters=100, kernel_size=26, padding="valid", activation="relu"))
    # model.add(MaxPooling1D(pool_size=13, strides=13))
    # model.add(Masking(mask_value=0))
    # model.add(Dropout(0.2))
    # model.add(Embedding(num_letters,10000))
    # model.add(SpatialDropout1D(0.2))
    model.add(
        Bidirectional(
            LSTM(5000,
                 dropout=0.2,
                 recurrent_dropout=0.2,
                 activation="tanh",
                 return_sequences=True)))
    model.add(Dropout(0.2))
    model.add(LSTM(embed_size, activation="tanh"))
    model.add(Dense(num_classes, activation=None, name="AV"))
    model.add(Activation("softmax"))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(learning_rate=0.001),  # 'lr' is deprecated in TF2
                  metrics=['categorical_accuracy',
                           Precision()])
    return model
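A hypothetical smoke test (all dimensions invented); the model must be built explicitly since the first active layer declares no input shape, and the hard-coded 5000-unit Bidirectional LSTM makes the build memory-hungry regardless of embed_size:

m = aa_blstm(num_classes=5, num_letters=26, sequence_length=100, embed_size=64)
m.build(input_shape=(None, 100, 26))  # (batch, timesteps, features)
m.summary()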
Example #3
    def __init__(self,
                 maxlen=128,
                 batch_size=32,
                 w_embed_size=200,
                 padding="post",
                 h_embed_size=200,
                 dropout=0.1,
                 patience=1,
                 plot=True,
                 max_epochs=100):
        self.maxlen = maxlen
        self.METRICS = [
            BinaryAccuracy(name='accuracy'),
            Precision(name='precision'),
            Recall(name='recall'),
            AUC(name='auc')
        ]
        self.w_embed_size = w_embed_size
        self.h_embed_size = h_embed_size
        self.dropout = dropout
        self.vocab_size = -1
        self.padding = padding
        self.patience = patience
        self.model = None
        self.w2i = {}
        self.epochs = max_epochs
        self.i2w = {}
        self.vocab = []
        self.batch_size = batch_size
        self.show_the_model = plot
        self.threshold = 0.2
        self.toxic_label = 2
        self.not_toxic_label = 1
        self.unk_token = "[unk]"
        self.pad_token = "[pad]"
Example #4
    def __build_model(self):
        input_shape = (self.__patch_size, self.__patch_size, 1)
        filters = 8

        model = Sequential()

        model.add(Conv2D(filters, 3, activation='relu', padding='same', input_shape=input_shape))
        model.add(Conv2D(filters, 3, activation='relu', padding='same'))  # input_shape is only needed on the first layer
        model.add(MaxPooling2D())

        model.add(Conv2D(2 * filters, 3, activation='relu', padding='same'))
        model.add(Conv2D(2 * filters, 3, activation='relu', padding='same'))
        model.add(MaxPooling2D())

        model.add(Conv2D(4 * filters, 3, activation='relu', padding='same'))
        model.add(Conv2D(4 * filters, 3, activation='relu', padding='same'))
        model.add(MaxPooling2D())

        model.add(Flatten())
        model.add(Dense(4 * filters * (self.__patch_size // 8) ** 2, activation='relu'))

        model.add(Dense(1, activation='sigmoid'))

        model.summary()

        model.compile(optimizer=Adam(learning_rate=1e-3), loss='binary_crossentropy',
                      metrics=[Precision(name='precision'), Recall(name='recall'), 'accuracy'])
        return model
Example #5
def create_model():
    sequence_input = Input(shape=(max_length, ), dtype='int32')
    embedded_sequences = embedding_layer(sequence_input)
    # Embedding output is (batch, max_length, embed_dim), i.e. channels_last, so
    # the channels_first override would convolve over the wrong axis; input_shape
    # is likewise ignored on non-input layers, so both arguments are dropped.
    x = Conv1D(filters=128, kernel_size=3)(embedded_sequences)
    x = MaxPooling1D()(x)
    x = Conv1D(filters=128, kernel_size=3)(x)
    x = MaxPooling1D()(x)
    x = Conv1D(filters=128, kernel_size=3)(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(3, activation='softmax')(x)

    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc', Precision(), Recall()])
    return model
Example #6
def create_model(input_len: int, num_classes: int) -> Model:
    input1 = Input(shape=(input_len, ))

    layer1 = Dense(units=128, activation='relu')(input1)

    dropout_1 = Dropout(0.25)(layer1)

    layer2 = Dense(units=256, activation='relu')(dropout_1)

    dropout_2 = Dropout(0.25)(layer2)

    layer3 = Dense(units=num_classes, activation='softmax')(dropout_2)

    model = Model(inputs=[input1], outputs=[layer3])

    metrics = [
        Precision(name='precision'),
        Recall(name='recall'),
        AUC(name='auc')
    ]

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=metrics)
    model.summary()

    return model
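A quick sanity check with random data; the shapes below are assumptions for illustration, not from the original source:

import numpy as np

m = create_model(input_len=10, num_classes=3)
X = np.random.rand(32, 10).astype("float32")
y = np.eye(3, dtype="float32")[np.random.randint(0, 3, size=32)]  # one-hot labels
m.fit(X, y, epochs=1, verbose=0)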
def compile_model(model, lr):
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(learning_rate=lr),  # 'lr' is deprecated in TF2
                  metrics=[
                      'acc',
                      CategoricalAccuracy(name="categorical_accuracy"),
                      Precision()
                  ])
    return model
def modelBuilder(hp):
    """
    Assign input and output tensors, build neural net and compile model
    :param hp: hyperparameters, argument needed to call this function from evaluateBestParams function
               (see also https://keras.io/api/keras_tuner/hyperparameters/)
    :return: model, compiled neural net model
    """
    ## keras model
    u = Input(shape=(1, ))
    s = Input(shape=(1, ))
    u_embedding = Embedding(N, K)(u)  ## (N, 1, K)
    s_embedding = Embedding(M, K)(s)  ## (N, 1, K)
    u_embedding = Flatten()(u_embedding)  ## (N, K)
    s_embedding = Flatten()(s_embedding)  ## (N, K)
    x = Concatenate()([u_embedding, s_embedding])  ## (N, 2K)

    ## Tune the number of units in the first Dense layer
    ## Choose an optimal value between 32-512
    hp_units = hp.Int('units', min_value=32, max_value=512, step=32)
    x = Dense(units=hp_units)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(100)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dense(1, activation="sigmoid")(x)

    ## define model and compile. Use BinaryCrossEntropy for binary classification approach
    model = Model(inputs=[u, s], outputs=x)
    model.compile(
        # the sigmoid output layer already yields probabilities, so the loss
        # must not treat them as logits
        loss=BinaryCrossentropy(from_logits=False),
        optimizer=SGD(learning_rate=0.08, momentum=0.9),  # 'lr' is deprecated
        metrics=[
            AUC(thresholds=[0.0, 0.5, 1.0]),
            BinaryAccuracy(threshold=0.5),
            Precision(),
            Recall()
        ],
    )

    ## print model summary
    # model.summary()

    return model
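A sketch of how modelBuilder might be driven by KerasTuner; u_train, s_train, and y_train are hypothetical arrays, and N, M, K must already exist at module level since modelBuilder reads them from the enclosing scope:

import keras_tuner as kt

tuner = kt.Hyperband(modelBuilder, objective='val_loss', max_epochs=10)
tuner.search([u_train, s_train], y_train, validation_split=0.2)
best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
print('best units:', best_hp.get('units'))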
Example #9
def driverModel():
    ##### ----------------
    parent = "Blood-Cancer_Data"
    ALL_IDB1 = f"{parent}/All_IDB1/im"
    annotate1 = f"{parent}/ALL_IDB1/xyc"
    AML_ALL_img = f"{parent}/SN-AM-BALL-MM"
    # os.walk yields (dirpath, dirnames, filenames) tuples; take the first tuple,
    # presumably to get the class subdirectories of the image folder.
    _, classes_AML_ALL, _ = next(os.walk(AML_ALL_img))
    multiple_myeloma = f"{parent}/multiple_myeloma"
    myeloma_annotate = f"{parent}/multiple_myeloma/Annotated_PPT_MM_Data.pdf"
    ##### ----------------

    c3bo = C3BO(main_file=parent, annotate_file=annotate1, classes=None)
    c3bo.annotate_files(ALL_IDB1, AML_ALL_img)
    data = c3bo.classes
    img_files, centroid_files = data.keys(), data.values()

    c3bo.to_df(img_files, centroid_files)
    c3bo.label_diagnosis()
    c3bo.strong_neural_net((128, 128, 3), 3)

    metrics = [
        "accuracy",
        Precision(),
        Recall(),
        AUC(),
        SensitivityAtSpecificity(0.5),
        SpecificityAtSensitivity(0.5)
    ]
    c3bo.compile_model('rmsprop', 'categorical_crossentropy', metrics)

    df = c3bo.df

    img_pixels = df["image_pixels"].copy()
    labels = df["diagnosis"].copy()
    files = df["image_files"].copy()
    c3bo.preprocess(labels)

    X_full_data = c3bo.images_array(img_pixels, (128, 128, 3))
    X_full_data = c3bo.shuffle_data(X_full_data)

    X_train, y_train, X_val, y_val, X_test, y_test = c3bo.partitioned_data(
        X_full_data)
    c3bo.train(X_train, y_train, X_val, y_val)

    return c3bo.model
def assignModel(N, M, K):
    """
    Assign input and output tensors, build neural net and compile model
    :param N: integer, number of users
    :param M: integer, number of songs
    :param K: integer, latent dimensionality
    :return: model, compiled neural net model
    """
    ## keras model
    u = Input(shape=(1,))
    s = Input(shape=(1,))
    u_embedding = Embedding(N, K)(u)   ## (N, 1, K)
    s_embedding = Embedding(M, K)(s)   ## (N, 1, K)
    u_embedding = Flatten()(u_embedding)  ## (N, K)
    s_embedding = Flatten()(s_embedding)  ## (N, K)
    x = Concatenate()([u_embedding, s_embedding])  ## (N, 2K)

    ## the neural network (use sigmoid activation function in output layer for binary classification)
    x = Dense(400)(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(100)(x)
    x = BatchNormalization()(x)
    # x = Activation('sigmoid')(x)
    x = Dense(1, activation="sigmoid")(x)

    ## define model and compile. Use BinaryCrossEntropy for binary classification approach
    model = Model(inputs=[u, s], outputs=x)
    model.compile(
        # the sigmoid output layer already yields probabilities, so the loss
        # must not treat them as logits
        loss=BinaryCrossentropy(from_logits=False),
        optimizer=SGD(learning_rate=0.08, momentum=0.9),  # 'lr' is deprecated
        metrics=[AUC(thresholds=[0.0, 0.5, 1.0]),
                 BinaryAccuracy(threshold=0.5),
                 Precision(),
                 Recall()],
    )

    return model
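A minimal smoke test with synthetic user/song ids; the sizes here are illustrative only:

import numpy as np

m = assignModel(N=100, M=50, K=8)
u_ids = np.random.randint(0, 100, size=(256, 1))
s_ids = np.random.randint(0, 50, size=(256, 1))
y = np.random.randint(0, 2, size=(256, 1)).astype("float32")
m.fit([u_ids, s_ids], y, epochs=1, verbose=0)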
Example #11
def char_model(x_train, y_train, x_test, y_test, params=None, fit_model=True):
    ''' params is a dictionary containing hyperparameter values. See main.py
    for current definition.
    '''
    # input and embeddings for characters
    char_in = Input(shape=(params['max_num_words'], params['max_chars_in_word']),
                    name='input')
    emb_char = TimeDistributed(Embedding(input_dim=params['num_of_unique_chars']+2,
                                        output_dim=params['lstm_units_char_emb'],
                                        input_length=params['max_chars_in_word'],
                                        mask_zero=True,
                                        trainable=True),
                               name='embed_dense_char')(char_in)
    emb_char = TimeDistributed(LSTM(units=params['lstm_units_char_emb'],
                                    return_sequences=False),
                               # dropout=params['dropout_rate_char_emb'],
                               name='learn_embed_char')(emb_char)
    bilstm = Bidirectional(LSTM(units=params['bilstm_units'],
                                # recurrent_dropout=params['bilstm_dropout_rate'],
                                return_sequences=False),
                           merge_mode='sum')(emb_char)

    dense = Dense(params['bilstm_units'], activation='relu',
                  name='linear_decode2')(bilstm)
    out = Dense(3, activation='softmax', name='output_softmax1')(dense)

    model = Model(char_in, out)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy', Precision(), Recall()])

    if fit_model:
        history = model.fit(x_train, y_train,
                          batch_size=params['batch_size'], epochs=params['epochs'],
                          validation_data=(x_test, y_test),
                          verbose=2)
        return history, model
    else:
        return model
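The params dictionary is defined in main.py, which is not shown; a hypothetical dict covering every key char_model reads might look like this:

params = {
    'max_num_words': 50,        # words per sequence
    'max_chars_in_word': 15,    # characters per word
    'num_of_unique_chars': 80,  # character vocabulary size
    'lstm_units_char_emb': 32,
    'bilstm_units': 64,
    'batch_size': 32,
    'epochs': 3,
}
m = char_model(None, None, None, None, params=params, fit_model=False)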
Example #12
    def train_model(self):

        sgd = SGD(learning_rate=0.0001, decay=1e-6,  # 'lr' is deprecated in TF2
                  momentum=0.9, nesterov=True)
        early_stopping = EarlyStopping(monitor='val_loss', patience=12)
        for i, layer in enumerate(self.model.layers):
            print(i, layer.name)
        trainable_layer = 48
        for i in range(trainable_layer):
            self.model.layers[i].trainable = False
        reducelr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=8, verbose=1, mode='min')
        datagen = ImageDataGenerator(
            rotation_range=45,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.15,
            zoom_range=0.15,
            horizontal_flip=True,
            # rescale=1./255,
            # vertical_flip=True,
            fill_mode='nearest',
            )
        # datagen.fit(self.dataset.X_train)
        self.model.compile(
            # optimizer='adam',
            optimizer=sgd,
            loss='categorical_crossentropy',
            metrics=[categorical_accuracy, Precision(), Recall()])
        # epochs and batch_size are tunable: epochs is the number of passes over
        # the training data, batch_size the number of samples per update.
        self.history = self.model.fit(
            datagen.flow(self.dataset.X_train, self.dataset.Y_train,
                         batch_size=6, shuffle=True),
            epochs=1000,
            verbose=1,
            # validation_data=datagen.flow(self.dataset.X_train, self.dataset.Y_train, batch_size=1, subset="validation"),
            validation_data=(self.dataset.X_test, self.dataset.Y_test),
            shuffle=True,
            callbacks=[early_stopping, reducelr])
Example #13
    def createModel(self):
        model = Sequential()
        model.add(
            Conv2D(
                32,
                (4, 4),
                padding="valid",
                strides=1,
                input_shape=self.inputShape,
                activation="relu",
            ))

        model.add(Conv2D(32, (4, 4), activation="relu"))

        model.add(Conv2D(32, (4, 4), activation="relu"))

        model.add(MaxPooling2D(pool_size=(8, 8)))

        model.add(Flatten())

        model.add(Dense(2048, activation="relu"))
        model.add(Dropout(0.25))

        model.add(Dense(2048, activation="relu"))
        model.add(Dropout(0.25))

        model.add(Dense(self.numOfClasses, activation="softmax"))

        # 'lr' is deprecated in TF2; newer Keras drops the decay argument in
        # favor of a LearningRateSchedule.
        opt = Adam(learning_rate=INIT_LR, decay=INIT_LR / EPOCHS)
        model.compile(
            loss="categorical_crossentropy",
            optimizer=opt,
            metrics=["accuracy", Precision(), Recall()],
        )
        return model
Example #14
train_size = int(7200 * 0.7)
X_train = dataset[0:train_size, 0:6]
Y_train = dataset[0:train_size, 6]
X_val = dataset[train_size:, 0:6]
Y_val = dataset[train_size:, 6]

model = Sequential()
model.add(Dense(30, input_dim=6, activation='relu'))
model.add(Dense(30, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(
    loss='binary_crossentropy',
    optimizer='adam',
    metrics=['accuracy',
             Precision(name='precision'),
             Recall(name='recall')])
model.fit(X_train, Y_train, epochs=100, batch_size=10)

# Sequential.predict_classes was removed in TF 2.6; threshold the sigmoid output instead.
classes = (model.predict(X_val) > 0.5).astype("int32")
classes = classes[:, 0]

print("Performance on Validation Set:")

acc = accuracy_score(Y_val, classes)
print('Accuracy : ', acc)

pre = precision_score(Y_val, classes)
print("Precision : ", pre)

rec = recall_score(Y_val, classes)
print("Recall : ", rec)
Example #15
input_dim = len(
    tokenizer.word_index) + 1  #+ 1 because of reserving padding (index zero)

model = Sequential()
model.add(
    Embedding(input_dim=input_dim,
              output_dim=40,
              input_length=X_train.shape[1]))
model.add(SpatialDropout1D(0.4))
model.add(Bidirectional(LSTM(30, dropout=0.2, recurrent_dropout=0.2)))
model.add(Dense(30, activation='sigmoid'))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy', Precision(),
                       Recall()])
print(model.summary())
#----------------------------------

# Fit model
#----------------------------------
batch_size = 128
checkpoint1 = ModelCheckpoint("weights/BiLSTM_best_model1.hdf5",
                              monitor='val_accuracy',
                              verbose=1,
                              save_best_only=True,
                              mode='auto',
                              save_freq='epoch',  # 'period' is deprecated
                              save_weights_only=False)
history = model.fit(X_train,
Example #16
# standardize features to zero mean and unit variance (StandardScaler does not scale to [0;1])
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)  # fit and transform
X_test = scaler.transform(X_test)  # transform

# define the keras model
model = Sequential()
model.add(Dense(64, input_dim=X_train.shape[1], activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# compile the keras model
# optimizer = SGD(learning_rate=0.001, decay=1e-6, momentum=0.9)
model.compile(loss='binary_crossentropy',
              optimizer='Adam',
              metrics=[Precision(name='score')])
# get a png of the model layers
Path(KERAS_MODEL_DIRECTORY).mkdir(parents=True, exist_ok=True)
plot_model(model,
           to_file=KERAS_MODEL_DIRECTORY + '/model.png',
           show_shapes=True)
# fit the keras model on the dataset
history = model.fit(X_train,
                    y_train,
                    epochs=300,
                    validation_data=(X_test, y_test))

model.summary()
# evaluate the keras model
_, score = model.evaluate(X_test, y_test)
# print(model.metrics_names)
Example #17
    return inputs, predictions


#%%

# Model Architecture
conv_layers = [[256, 7, 3], [256, 7, 3], [256, 3, -1], [256, 3, -1],
               [256, 3, -1], [256, 3, 3]]
fc_layers = [1024, 1024]
loss = "categorical_crossentropy"

# Parameters
optimizer = "adam"
batch_size = 128
epochs = 7
metrics = [Precision(), Recall()]

# Build and compile model
inputs, outputs = create_model(input_size, len(alphabet), conv_layers,
                               fc_layers, num_of_classes)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
model.summary()

#%%

# Training
model.fit(training_inputs,
          training_labels,
          validation_data=(validation_inputs, validation_labels),
          epochs=epochs,
Example #18
model = Sequential()
model.add(
    Embedding(input_dim=input_dim,
              output_dim=embed_dim,
              input_length=X_train.shape[1]))
model.add(SpatialDropout1D(0.4))
model.add(LSTM(30, return_sequences=True, recurrent_dropout=0.5))
model.add(LSTM(30, dropout=0.5, recurrent_dropout=0.5))
model.add(
    Dense(30, activation='sigmoid')
)  # sigmoid for binary classification, softmax for multiclass classification
model.add(Dense(1, activation='sigmoid'))
model.compile(
    loss='binary_crossentropy',
    optimizer='rmsprop',
    metrics=['accuracy', Precision(), Recall()]
)  #binary_crossentropy for binary classification, categorical_crossentropy for multiclass
print(model.summary())
#----------------------------------

# Fit model
#----------------------------------
batch_size = 128
checkpoint1 = ModelCheckpoint("weights/LSTM_best_model1.hdf5",
                              monitor='val_accuracy',
                              verbose=1,
                              save_best_only=True,
                              mode='auto',
                              save_freq='epoch',  # 'period' is deprecated
                              save_weights_only=False)
history = model.fit(X_train,
Example #19
    max_length = 50

    X_train, y_train = parse_sentences(word2idx, label2idx, labels_count,
                                       train_sentences, max_length, True)
    X_dev, y_dev = parse_sentences(word2idx, label2idx, labels_count,
                                   dev_sentences, max_length, True)
    X_test, y_test = parse_sentences(word2idx, label2idx, labels_count,
                                     test_sentences, max_length, True)

    model = BiLSTM(words_count, labels_count, max_length)

    callback = EarlyStopping(monitor="val_accuracy", patience=20, verbose=1)
    model.compile(optimizer="rmsprop",
                  loss="categorical_crossentropy",
                  metrics=["accuracy", Precision(),
                           Recall()])
    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_dev, y_dev),
                        callbacks=[callback],
                        batch_size=32,
                        epochs=2,
                        verbose=1)
    history = pd.DataFrame(history.history)

    plt.clf()
    plt.plot(history["accuracy"])
    plt.plot(history["val_accuracy"])
    plt.title("Accuracy")
    plt.savefig("accuracy.png")
Example #20
PATH_EXP = 'CadLung/EXP1/'
checkpoint_filepath = PATH_EXP+'CHKPNT/checkpoint.hdf5'
bestModel_filepath = PATH_EXP+'MODEL/bestModel.h5'
performancePng_filepath = PATH_EXP+'OUTPUT/bestModelPerformance.png'

#Data
POSTNORMALIZE = 'y' #Options: {'y', 'n'}
NORMALIZE = 'z-score' #Options: {Default: 'none', 'range', 'z-score'}

#Model
VAL_SPLIT = 0.15
regRate = 0.001
regFn = rg.l2(regRate)
EPOCHS = 10
BATCH_SIZE = 32
METRICS = ['accuracy', AUC(), Recall(), Precision(), FalsePositives(), TrueNegatives()]
LOSS_F = 'binary_crossentropy'
INITZR = 'random_normal'
OPT_M = 'adam'
modelChkPnt_cBk = cB.ModelCheckpoint(filepath=checkpoint_filepath,
                                     save_weights_only=True,
                                     monitor='val_accuracy',
                                     mode='max',
                                     save_best_only=True)
clBacks = [modelChkPnt_cBk]

#Network Architecture
MAX_NUM_NODES = (None, 6, 6, 6, 1)  # first layer, hidden layers, and output layer; hidden layers need > 1 node
lenMaxNumHidenLayer = len(MAX_NUM_NODES) - 2  # 3 hidden layers
ACT_FUN = (None, 'relu', 'relu', 'relu', 'sigmoid')  # sigmoid output for binary classification
# - FOLD------------------------------------------------------------------------------------------------------------
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=7)

brojac = 1
for train, test in kfold.split(X_fit,y):
    model = keras.Sequential()
    model.add(keras.layers.Flatten(input_shape=(brojInputa,), name='PrviSloj'))
    model.add(keras.layers.Dense(16, activation=tf.nn.relu, name='DrugiSloj'))
    model.add(keras.layers.Dense(8, activation=tf.nn.relu, name='TreciSloj'))
    model.add(keras.layers.Dense(4, activation=tf.nn.relu, name='CetvrtiSloj'))
    model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid, name='Izlaz'))

    #opt_adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=10e-08, decay=0.0)
    model.compile(optimizer='adam',
                    loss='binary_crossentropy',
                    metrics=[Recall(), Precision(), BinaryAccuracy(threshold=0.5), SpecificityAtSensitivity(0.5)])

    # stop training if the monitored metric shows no improvement for `patience` epochs
    callback_early_stopping = EarlyStopping(monitor='val_precision',
                                            mode='max',  # 'auto' would minimize val_precision
                                            patience=20, verbose=1)

    # write to the log during training
    callback_tensorboard = TensorBoard(log_dir='./Logovi/',
                                       histogram_freq=0,
                                       write_graph=False)
    # record every checkpoint
    path_checkpoint = 'Checkpoint.keras'
    callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint,
                                          monitor='val_precision',
                                          verbose=1,
                                          save_weights_only=True,
def train_and_validate(train_x, train_y, test_x, test_y, hparams):
    unique_items = len(train_y[0])
    model = LSTMRec(
        vocabulary_size=unique_items,
        emb_output_dim=hparams['emb_dim'],
        lstm_units=hparams['lstm_units'],
        lstm_activation=hparams['lstm_activation'],
        lstm_recurrent_activation=hparams['lstm_recurrent_activation'],
        lstm_dropout=hparams['lstm_dropout'],
        lstm_recurrent_dropout=hparams['lstm_recurrent_dropout'],
        dense_activation=hparams['dense_activation'])
    model.compile(optimizer=Adam(learning_rate=hparams['learning_rate'],
                                 beta_1=hparams['adam_beta_1'],
                                 beta_2=hparams['adam_beta_2'],
                                 epsilon=hparams['adam_epsilon']),
                  loss='binary_crossentropy',
                  metrics=[
                      Precision(top_k=1, name='P_at_1'),
                      Precision(top_k=3, name='P_at_3'),
                      Precision(top_k=5, name='P_at_5'),
                      Precision(top_k=10, name='P_at_10'),
                      Recall(top_k=10, name='R_at_10'),
                      Recall(top_k=50, name='R_at_50'),
                      Recall(top_k=100, name='R_at_100')
                  ])
    hst = model.fit(
        x=train_x,
        y=train_y,
        batch_size=hparams['batch_size'],
        epochs=250,
        callbacks=[
            EarlyStopping(monitor='val_R_at_10',
                          patience=10,
                          mode='max',
                          restore_best_weights=True,
                          verbose=True),
            ModelCheckpoint(filepath=os.path.join(os.pardir, os.pardir,
                                                  'models',
                                                  hparams['run_id'] + '.ckpt'),
                            monitor='val_R_at_10',
                            mode='max',
                            save_best_only=True,
                            save_weights_only=True,
                            verbose=True),
            TensorBoard(log_dir=os.path.join(os.pardir, os.pardir, 'logs',
                                             hparams['run_id']),
                        histogram_freq=1)
        ],
        validation_split=0.2)
    val_best_epoch = np.argmax(hst.history['val_R_at_10'])
    test_results = model.evaluate(test_x, test_y)
    with tf.summary.create_file_writer(
            os.path.join(os.pardir, os.pardir, 'logs', hparams['run_id'],
                         'hparams')).as_default():
        hp.hparams(hparams)
        # Log the validation metrics from the best epoch, then the test metrics
        # (model.evaluate returns [loss, *metrics] in compile order).
        metric_names = ['loss', 'P_at_1', 'P_at_3', 'P_at_5', 'P_at_10',
                        'R_at_10', 'R_at_50', 'R_at_100']
        for name in metric_names:
            tf.summary.scalar(f'train.final_{name}',
                              hst.history[f'val_{name}'][val_best_epoch],
                              step=val_best_epoch)
        for i, name in enumerate(metric_names):
            tf.summary.scalar(f'test.final_{name}',
                              test_results[i],
                              step=val_best_epoch)

    return val_best_epoch, test_results
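For reference, a hypothetical hparams dictionary containing every key train_and_validate reads; all values are placeholders:

hparams = {
    'emb_dim': 64,
    'lstm_units': 128,
    'lstm_activation': 'tanh',
    'lstm_recurrent_activation': 'sigmoid',
    'lstm_dropout': 0.2,
    'lstm_recurrent_dropout': 0.2,
    'dense_activation': 'sigmoid',
    'learning_rate': 1e-3,
    'adam_beta_1': 0.9,
    'adam_beta_2': 0.999,
    'adam_epsilon': 1e-7,
    'batch_size': 64,
    'run_id': 'lstm_rec_trial_0',
}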
def main(config=None):
    trial_name = os.path.splitext(__file__)[0]
    model_filename = os.path.sep.join(["output", trial_name,"model.h5"])
    checkpoint_folder = os.path.sep.join(["output", trial_name])
    from pathlib import Path
    Path(checkpoint_folder).mkdir(parents=True, exist_ok=True)

    #import numpy as np # linear algebra
    import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
    #import matplotlib.pyplot as plt
    import matplotlib
    matplotlib.use("Agg")


    from keras.models import Sequential,load_model
    #from keras.layers import Dense, , Flatten, , Conv2DTranspose, BatchNormalization, UpSampling2D, Reshape
    from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D
    from keras.metrics import Precision,Recall
    from helpers.overallperformance import OverallPerformance
    #from keras import backend as K
    #from keras.utils import to_categorical
    from keras.preprocessing.image import ImageDataGenerator
    from keras.optimizers import Adam
    import tensorflow as tf

    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        _ = tf.config.experimental.set_memory_growth(physical_devices[0], True)

    import wandb
    from wandb.keras import WandbCallback
    if config is None:
        wandb.init(project="minibar")
        config = wandb.config
    else:
        wandb.init(project="minibar", config=config)

    df_train = pd.read_csv('data/train_labels.csv')
    #df_test = pd.read_csv('data/test_labels.csv')

    from helpers.decouple import decouple
    matrix_train,_ = decouple(df_train)
    from helpers.matrix_to_df import matrix_to_df
    df_train_agg = matrix_to_df(matrix_train)

    train_datagen = ImageDataGenerator(
            validation_split=0.2,horizontal_flip=True)

    train_generator = train_datagen.flow_from_dataframe(
            dataframe=df_train_agg,
            directory='data/train',
            x_col='filename',
            y_col='class',
            target_size=(config['input_shape_height'], config['input_shape_width']),
            batch_size=config['batch_size'],
            class_mode='categorical',
            subset="training",)

    validation_generator = train_datagen.flow_from_dataframe(
            dataframe=df_train_agg,
            directory='data/train',
            x_col='filename',
            y_col='class',
            target_size=(config['input_shape_height'], config['input_shape_width']),
            batch_size=config['batch_size'],
            class_mode='categorical',
            subset="validation",)


    if os.path.isfile(model_filename) and config['continue_training']:
        model = load_model(model_filename)
    else:
        model = Sequential()

        # Step 1 - Convolution
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=(config['input_shape_height'], config['input_shape_width'], 3), activation='relu'))
        model.add(Conv2D(32, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5))  # was 0.25 before
        # Adding a second convolutional layer
        model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5))  # was 0.25 before
        # Adding a third convolutional layer
        model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5))  # was 0.25 before
        # Step 3 - Flattening
        model.add(Flatten())
        # Step 4 - Full connection
        model.add(Dense(units=512, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(units=40, activation='sigmoid'))

        model.compile(optimizer=Adam(learning_rate=config['learning_rate']), loss='binary_crossentropy', metrics=['accuracy',Precision(),Recall(),OverallPerformance()])
        model.save(model_filename)

    # construct the set of callbacks
    from helpers.epochcheckpoint import EpochCheckpoint
    callbacks = [
        EpochCheckpoint(checkpoint_folder, every=1, startAt=0),
        WandbCallback(save_model=False)
    ]

    model.fit(
        train_generator,
        #steps_per_epoch=100,
        epochs=config['epoch'],
        validation_data=validation_generator,
        #validation_steps=100,
        callbacks=callbacks,
        verbose=1,
        initial_epoch=config['initial_epoch']
        )
    model.save(model_filename)
Example #24
def model(x_train,
          y_train,
          X_test,
          y_test,
          embed_matrix,
          params=None,
          fit_model=True):
    ''' params is a dictionary containing hyperparameter values. See main.py
    for current definition.
    '''
    # input and embeddings for characters
    word_in = Input(shape=(params['max_num_words'], ))

    emb_word = Embedding(
        input_dim=params['vocab_size'],
        output_dim=300,
        input_length=params['max_num_words'],
        mask_zero=True,
        embeddings_initializer=Constant(embed_matrix))(word_in)

    char_in = Input(shape=(params['max_num_words'],
                           params['max_chars_in_word']),
                    name='input')
    emb_char = TimeDistributed(Embedding(
        input_dim=params['num_of_unique_chars'] + 2,
        output_dim=params['lstm_units_char_emb'],
        input_length=params['max_chars_in_word'],
        mask_zero=True,
        trainable=True),
                               name='embed_dense_char')(char_in)
    emb_char = TimeDistributed(LSTM(units=params['lstm_units_char_emb'],
                                    return_sequences=False,
                                    recurrent_dropout=0.5),
                               name='learn_embed_char')(emb_char)

    x = concatenate([emb_word, emb_char])
    bilstm = Bidirectional(LSTM(units=params['bilstm_units'],
                                recurrent_dropout=0.5,
                                return_sequences=False),
                           merge_mode='sum')(x)

    bilstm = Dense(params['bilstm_units'],
                   activation='relu',
                   name='linear_decode1')(bilstm)

    out = Dense(3, activation='softmax', name='output_softmax1')(bilstm)

    model = Model([word_in, char_in], out)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', Precision(),
                           Recall()])
    print(model.summary())
    if fit_model:
        history = model.fit(x_train,
                            y_train,
                            batch_size=params['batch_size'],
                            epochs=params['epochs'],
                            validation_data=(X_test, y_test),
                            verbose=2)
        return history, model
    else:
        return model
Example #25
def get_precision(y_true, y_predict):
    m = Precision()
    m.update_state(y_true, y_predict)

    return m.result().numpy()
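For example, with three predicted positives of which two are correct, the result is 2/3:

print(get_precision([0, 1, 1, 0], [1, 1, 1, 0]))  # ~0.6667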
Example #26
                                                    batch_size=40,
                                                    class_mode='categorical',
                                                    shuffle=True)
val_generator = train_datagen.flow_from_directory(proj_dir / "0" / "test",
                                                  target_size=(299, 299),
                                                  color_mode='rgb',
                                                  batch_size=40,
                                                  class_mode='categorical',
                                                  shuffle=True)

op = optimizers.SGD(learning_rate=0.1, momentum=0.1, decay=0.01, nesterov=False)  # unused below; 'lr' is deprecated
adam = optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=[
                  Precision(),
                  Recall(),
                  TruePositives(),
                  FalsePositives(),
                  FalseNegatives(),
                  TrueNegatives()
              ])
# Adam optimizer
# loss function will be categorical cross entropy

print("Beginning Training")
step_size_train = train_generator.n // train_generator.batch_size
validation_steps = val_generator.n // val_generator.batch_size
# Model.fit_generator is deprecated; Model.fit accepts generators directly.
history = model.fit(train_generator,
                    steps_per_epoch=step_size_train,
                    epochs=200,