Example #1
    model = Model(inputs=base_model.input, outputs=predictions)

    return model


model = get_pretrained()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

#%% Training and logging
from tensorflow.keras.callbacks import CSVLogger
logdir = 'C:\\Users\\Mikko Impiö\\Google Drive\\koulu_honmia\\kandi19\\logs'

csv_logger = CSVLogger(os.path.join(logdir, '09-01-2020_2.log'), append=True)

tr_steps = len(df_train) // BATCH_SIZE
val_steps = len(df_val) // BATCH_SIZE

model.fit_generator(train_ds,
                    validation_data=val_ds,
                    steps_per_epoch=tr_steps,
                    epochs=2,
                    validation_steps=val_steps,
                    callbacks=[csv_logger])

#%% Inference

test_ds = test_ds.batch(BATCH_SIZE)
model.evaluate(test_ds)
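
Model.fit_generator is deprecated in TensorFlow 2.x; Model.fit accepts tf.data datasets (and generators) directly, so the training call above can be rewritten as the following sketch, reusing the same objects from this example:

model.fit(train_ds,
          validation_data=val_ds,
          steps_per_epoch=tr_steps,
          epochs=2,
          validation_steps=val_steps,
          callbacks=[csv_logger])  # CSVLogger works unchanged with fit()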
Example #2
                                        dtw_clusters=0,
                                        file_prefix="Oweights",
                                        skip_array=[],
                                        weight_pred_ind=False,
                                        weighs_dtw_cluster_ind=True)

norm_model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])
norm_model_hist = norm_model.fit(
    train_images,
    train_labels,
    epochs=epochs,
    validation_data=(test_images, test_labels),
    callbacks=[CSVLogger(logs_norm, append=True), callback_weights_no_pred])
save_graph_plot(norm_model, project_paths["plots"] + "/norm_model.ps")
save_graph_json(norm_model, project_paths["plots"] + "/norm_model.json")
model_list.append(norm_model_hist)
model_name_list.append("norm_model")

logs_reg = project_paths["weights"] + "/" + "regModelE" + str(
    epochs) + "Skp" + str(total_skips) + ".csv"

#Callbacks
reg_train_steps = 3
clusters = 200
callback_weights_reg = StoreWeights(project_paths["weights"],
                                    reg_train_steps=reg_train_steps,
                                    dtw_clusters=clusters,
                                    file_prefix="Rweights",
Example #3
    color_mode="rgb",
    class_mode='binary')

dev_generator = test_datagen.flow_from_directory('./dataset/dev',
                                                 target_size=(target_side_len,
                                                              target_side_len),
                                                 batch_size=batch_size,
                                                 color_mode="rgb",
                                                 class_mode='binary')

initial_model = VGG16(include_top=False, weights='imagenet', pooling='max')
input = Input(shape=(224, 224, 3), name='image_input')
x = Flatten()(initial_model(input))
x = Dense(200, activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)  # sigmoid output to match the binary_crossentropy loss below
model = Model(inputs=input, outputs=x)
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy', f1])

csv_logger = CSVLogger('VGG16_results.csv', append=True, separator=';')

model.fit_generator(train_generator,
                    steps_per_epoch=2000,
                    epochs=epochs,
                    callbacks=[csv_logger],
                    validation_data=dev_generator,
                    validation_steps=800)
Example #4
    def train_model(self, runName, path_to_train_mix, path_to_train_speech, \
                    path_to_val_mix, path_to_val_speech):
        '''
        Method to train the DTLN model. 
        '''
        
        # create save path if not existent
        savePath = './models_'+ runName+'/' 
        if not os.path.exists(savePath):
            os.makedirs(savePath)
        # create log file writer
        csv_logger = CSVLogger(savePath+ 'training_' +runName+ '.log')
        # create callback for the adaptive learning rate
        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                              patience=3, min_lr=10**(-10), cooldown=1)
        # create callback for early stopping
        early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, 
            patience=10, verbose=0, mode='auto', baseline=None)
        # create model check pointer to save the best model
        checkpointer = ModelCheckpoint(savePath+runName+'.h5',
                                       monitor='val_loss',
                                       verbose=1,
                                       save_best_only=True,
                                       save_weights_only=True,
                                       mode='auto',
                                       save_freq='epoch'
                                       )

        # calculate length of audio chunks in samples
        len_in_samples = int(np.fix(self.fs * self.len_samples / 
                                    self.block_shift)*self.block_shift)
        # create data generator for training data
        generator_input = audio_generator(path_to_train_mix, 
                                          path_to_train_speech, 
                                          len_in_samples, 
                                          self.fs, train_flag=True)
        dataset = generator_input.tf_data_set
        dataset = dataset.batch(self.batchsize, drop_remainder=True).repeat()
        # calculate number of training steps in one epoch
        steps_train = generator_input.total_samples//self.batchsize
        # create data generator for validation data
        generator_val = audio_generator(path_to_val_mix,
                                        path_to_val_speech, 
                                        len_in_samples, self.fs)
        dataset_val = generator_val.tf_data_set
        dataset_val = dataset_val.batch(self.batchsize, drop_remainder=True).repeat()
        # calculate number of validation steps
        steps_val = generator_val.total_samples//self.batchsize
        # start the training of the model
        self.model.fit(
            x=dataset, 
            batch_size=None,
            steps_per_epoch=steps_train, 
            epochs=self.max_epochs,
            verbose=1,
            validation_data=dataset_val,
            validation_steps=steps_val, 
            callbacks=[checkpointer, reduce_lr, csv_logger, early_stopping],
            max_queue_size=50,
            workers=4,
            use_multiprocessing=True)
        # clear out garbage
        tf.keras.backend.clear_session()
Example #5
    tf.keras.layers.GlobalAveragePooling2D(
        activity_regularizer=tf.keras.regularizers.l2(0.001)),
    tf.keras.layers.Dense(len(types), activation='sigmoid')
])
# plot_model(model1, to_file='model_complex.png', show_shapes=True)
# exit()

model1.compile(
    optimizer=Adam(lr=0.0001),
    loss='binary_crossentropy',
    metrics=['acc', f1_m, precision_m, recall_m,
             tf.keras.metrics.AUC()])

date = datetime.now().strftime("_%m_%d_%Y_%H_%M_%S")

csv_logger = CSVLogger('logs/log_all' + date + '.csv')
early_stop = EarlyStopping(monitor='val_loss',
                           min_delta=0.01,
                           patience=5,
                           mode='min',
                           verbose=1,
                           restore_best_weights=True)
model_path = 'saved_models/best_model_all' + date + '.h5'
mc = ModelCheckpoint(model_path, monitor='val_loss', mode='min', verbose=1)
history = model1.fit_generator(train_generator,
                               epochs=25,
                               steps_per_epoch=steps_train,
                               validation_data=valid_generator,
                               validation_steps=steps_valid,
                               verbose=2,
                               callbacks=[csv_logger, mc, early_stop])
Example #6
    model.compile(
        loss="binary_crossentropy",
        optimizer=tf.keras.optimizers.Adam(lr),
        metrics=[
            tf.keras.metrics.MeanIoU(num_classes=2),
            tf.keras.metrics.Recall(),
            tf.keras.metrics.Precision()
        ]
    )

    # model.summary()

    callbacks = [
        ModelCheckpoint(model_path, monitor="val_loss", verbose=1),
        ReduceLROnPlateau(monitor="val_loss", patience=5, factor=0.1, verbose=1),
        CSVLogger(csv_path),
        EarlyStopping(monitor="val_loss", patience=10)
    ]

    train_steps = len(train_x)//batch_size
    if len(train_x) % batch_size != 0:
        train_steps += 1

    test_steps = len(test_x)//batch_size
    if len(test_x) % batch_size != 0:
        test_steps += 1

    model.fit(
        train_dataset,
        validation_data=test_dataset,
        epochs=epochs,
Example #7
def save_history(history):
    with open('history.db', 'wb') as file_pi:
        pickle.dump(history.history, file_pi)


if __name__ == "__main__":
    epochs = 50
    data_loader = load_data(
        "/home/ctadmin/data_drive/kits19/data/training_patches")
    validation_loader = load_data("/home/kits/kits19/data/validation_patches",
                                  is_validation=True)

    callbacks = list()
    callbacks.append(
        ModelCheckpoint("best_VNetW.h5",
                        monitor='val_loss',
                        save_weights_only=True,
                        save_best_only=True,
                        verbose=1))
    # callbacks.append(ReduceLROnPlateau(factor=0.5, patience=2, verbose=1))
    callbacks.append(EarlyStopping(monitor='val_loss', patience=10))
    callbacks.append(CSVLogger("training.log", append=True))

    model = unet()
    history = model.fit_generator(data_loader,
                                  validation_data=validation_loader,
                                  epochs=epochs,
                                  callbacks=callbacks)
    model.save_weights("weights/VNetW.h5")
    save_history(history)
Example #8
def trainModel(epochs, bn_layers, dropout_layers, l2_layers, padding,
               target_size, dense_sizes, architecture, conv_layers_over_5,
               use_maxpool_after_conv_layers_after_5th, version, load_existing,
               gpu_id, model_filename, lc_filename, data_dir):

    # Trains a model
    #   model = optional parameter; creates new if not passed; otherwise keeps training
    #   epochs - number of max epochs to train (subject to early stopping)
    #   bn_layers - list of indexes of Dense layers (-1 and down) and CNN layers (1 and up) where Batch Norm should be applied
    #   dropout_layers - list of indexes of Dense layers (-1 and down) where Dropout should be applied
    #   l2_layers - list of indexes of Dense layers (-1 and down) where L2 regularization should be applied
    #   padding - changed to "same" to keep 2^n feature map sizes
    #   dense_sizes - dictionary of dense layer sizes (cnt of neurons)
    #   architecture - one of:  Model_6classes_c4_d3_v1, Model_6classes_c5_d2_v1, Model_6classes_c5_d3_v1
    #   conv_layers_over_5 - number of convolutional layers after 5th
    #   use_maxpool_after_conv_layers_after_5th - list of boolean values whether to use maxpooling after 5th layer
    #   version - used to name a learning curve file
    #   load_existing - whether to load an existing model file
    # Returns:
    #   model: trained Keras model
    #
    # To call:
    #   model = Train_v1.trainModel(epochs=20)

    crop_range = 1  # number of pixels to crop image (if size is 235, crops are 0-223, 1-224, ... 11-234)
    #target_size = 224
    batch_size = 32
    #datasrc = "visible"

    # Manually copied to C: to speed up training
    #data_dir = os.path.join(Glb.images_folder, "Bal_v14", "Ind-{}".format(hier_lvl) )
    data_dir_train = os.path.join(data_dir, "Train")
    data_dir_val = os.path.join(data_dir, "Val")
    data_dir_test = os.path.join(data_dir, "Test")

    train_iterator = Glb_Iterators.get_iterator(data_dir_train, "div255")
    val_iterator = Glb_Iterators.get_iterator(data_dir_val, "div255")
    test_iterator = Glb_Iterators.get_iterator(
        data_dir_test, "div255", shuffle=False
    )  # dont shuffle in order to get proper actual/prediction pairs

    Softmax_size = len(train_iterator.class_indices)
    dense_sizes["d-1"] = Softmax_size

    #model_filename = os.path.join(Glb.results_folder,
    #                              "model_clsf_from_isVisible_{}_gpu{}_hier{}.h5".format(date.today().strftime("%Y%m%d"), gpu_id, hier_lvl))
    #lc_filename = os.path.join(Glb.results_folder,
    #                           "lc_clsf_from_isVisible_{}_gpu{}_hier{}.csv".format(date.today().strftime("%Y%m%d"), gpu_id, hier_lvl))
    # Create or load model
    if not load_existing:
        print("Creating model")
        prepModel = modelVersions_dic[architecture]
        prep_model_params = {
            "input_shape": (target_size, target_size, 3),
            "bn_layers": bn_layers,
            "dropout_layers": dropout_layers,
            "l2_layers": l2_layers,
            "padding": padding,
            "dense_sizes": dense_sizes,
            "conv_layers_over_5": conv_layers_over_5,
            "use_maxpool_after_conv_layers_after_5th": use_maxpool_after_conv_layers_after_5th
        }
        model = prepModel(**prep_model_params)
    else:
        print("Loading model")
        #model_filename = r"J:\Visible_models\6class\model_6classes_v" + str(version) + ".h5"
        model = load_model(model_filename)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=Adam(learning_rate=0.001),  # default LR: 0.001
            metrics=['accuracy'])

    print(model.summary())

    callback_earlystop = EarlyStopping(monitor='val_accuracy',
                                       min_delta=0.0001,
                                       patience=10,
                                       verbose=1,
                                       mode='max',
                                       restore_best_weights=True)
    callback_csv_logger = CSVLogger(lc_filename, separator=",", append=False)

    mcp_save = ModelCheckpoint(model_filename,
                               save_best_only=True,
                               monitor='val_accuracy',
                               mode='max')

    model.fit(train_iterator,
              steps_per_epoch=len(train_iterator),
              epochs=epochs,
              verbose=2,
              validation_data=val_iterator,
              validation_steps=len(val_iterator),
              callbacks=[callback_csv_logger, callback_earlystop, mcp_save])  #

    print("Evaluation on test set (1 frame)")
    test_metrics = model.evaluate(test_iterator)
    print("Test: {}".format(test_metrics))

    print("Evaluating F1 test set (1 frame)")
    y_pred = model.predict(test_iterator)
    y_pred_classes = np.argmax(y_pred, axis=1)
    y_true = test_iterator.classes
    test_acc = accuracy_score(y_true=y_true, y_pred=y_pred_classes)
    test_f1 = f1_score(y_true=y_true, y_pred=y_pred_classes, average='macro')
    print("acc:{}, f1:{}".format(test_acc, test_f1))

    # metrics to csv
    df_metrics = pd.DataFrame(
        data={
            "gpu": [gpu_id],
            "datetime": [datetime.now().strftime("%Y%m%d %H:%M:%S")],
            "data_dir": [data_dir],
            "test_acc": [test_acc],
            "test_f1": [test_f1]
        })
    df_metrics_filename = os.path.join(Glb.results_folder, "metrics_mrg.csv")
    df_metrics.to_csv(df_metrics_filename, index=False, header=False, mode='a')

    #print("Evaluation on validation set (1 frame)")
    #val_metrics = model.evaluate(val_iterator)
    #print("Val: {}".format(val_metrics))

    return model
Example #9
def main():

    good_path = "/home/osboxes/DeepLearningResearch/Data/badging_med/mal_badging_med.txt"

    mal_path = "/home/osboxes/DeepLearningResearch/Data/badging_med/ben_badging_med.txt"

    tr = .80
    batch = 100
    epochs = 10
    neurons = 20

    perm_inputs, feat_inputs, labels = vectorize(good_path, mal_path)
    print("returned from vectorize method" + str(perm_inputs) + "feat Inputs" +
          str(feat_inputs) + "labels" + str(labels))
    perm_width = int(len(perm_inputs[0]))
    print("perm_width: " + str(perm_width))
    feat_width = int(len(feat_inputs[0]))
    print("feat_width: " + str(feat_width))

    cm = np.zeros([2, 2], dtype=np.int64)
    #neurons = [10, 20, 30, 40]
    optimizers = ['nadam', 'adam', 'RMSprop', 'SGD']
    #for neuronVar in neurons:
    for optimizerVar in optimizers:
        if os.path.exists('log_' + optimizerVar + '.csv'):
            os.remove('log_' + optimizerVar + '.csv')
        else:
            print("The file does not exist")

        print("OPTIMIZER: " + str(optimizerVar))
        model = create_dualInputLarge(input_ratio=.125,
                                      neurons=20,
                                      perm_width=perm_width,
                                      feat_width=feat_width,
                                      optimizer=optimizerVar)
        plot_model(model, to_file='model.png')
        # model.summary()
        time.sleep(1)

        sss = StratifiedShuffleSplit(n_splits=1,
                                     random_state=0,
                                     test_size=1 - tr)
        i = 0
        print("stratified shuffle split")
        for train_index, test_index in sss.split(perm_inputs, labels):
            perm_train, perm_test = perm_inputs[train_index], perm_inputs[
                test_index]
            feat_train, feat_test = feat_inputs[train_index], feat_inputs[
                test_index]
            labels_train, labels_test = labels[train_index], labels[test_index]
            #print ("perm_width: " + str(perm_width))
            #print ("feat_width: " + str(feat_width))
            model = create_dualInputLarge(input_ratio=.125,
                                          neurons=20,
                                          perm_width=perm_width,
                                          feat_width=feat_width,
                                          optimizer=optimizerVar)

            print('\nsplit %i' % i)
            csv_logger = CSVLogger('log_' + optimizerVar + '.csv',
                                   append=True,
                                   separator=',')
            model.fit([perm_train, feat_train],
                      labels_train,
                      epochs=epochs,
                      batch_size=batch,
                      callbacks=[csv_logger])
            print("model trained")
            labels_pred = model.predict([perm_test, feat_test],
                                        batch_size=batch)
            # print("prediction made: " +str(labels_pred))
            labels_pred = (labels_pred > 0.5)
            #print("labels_pred" +str(labels_pred))
            cm = cm + confusion_matrix(labels_test, labels_pred)
            i += 1
        acc = calc_accuracy(cm)
        print('average accuracy was: ' + str(acc))

        precision = calc_precision(cm)
        print('Average precision was: ' + str(precision))

        recall = cal_recall(cm)
        print('Average recall value is: ' + str(recall))

    #scoring = ['precision', 'accuracy', 'recall', 'f1']

    #print("creating the loaded model")

#  loaded_model = KerasClassifier(build_fn=get_model(model), epochs=epochs, batch_size=batch, verbose=2)
# print("calling the cross_validate method")
# fit_params = dict(batch_size=batch, epochs=epochs)

#cv_result = cross_validate(loaded_model, perm_inputs, labels, fit_params=fit_params, cv=sss, return_train_score=True, n_jobs=1, verbose=2)
#df = pandas.DataFrame(cv_result)

#path1 = '/home/osboxes/DeepLearningResearch/Demo/test' + '.csv'
# file1 = open(path1, "a+")
#df.to_csv(file1, index=True)
#  file1.close()

    return
Example #10
    def fit_with_generator(
        self,
        train_data_raw: np.ndarray,
        labels_raw: np.ndarray,
        model_filepath: str,
        weights_filepath: str,
        logs_dir: str,
        training_log: str,
        resume: bool,
    ) -> Tuple[List[float], List[float]]:
        """Fit the training data to the network and save the network model as a HDF file.

        Arguments:
            train_data_raw {list} -- The HDF5 raw training data.
            labels_raw {list} -- The HDF5 raw training labels.
            model_filepath {string} -- The model file path.
            weights_filepath {string} -- The weights file path.
            logs_dir {string} -- The TensorBoard log file directory.
            training_log {string} -- The path to the log file of epoch results.
            resume {bool} -- True to continue with previous training result or False to start a new one (default: {False}).
        Returns:
            tuple -- A tuple contains validation losses and validation accuracies.
        """

        initial_epoch = 0
        batch_size = self.hyperparameters.batch_size
        validation_split = self.hyperparameters.validation_split
        csv_logger = (CSVLogger(training_log)
                      if not resume else CSVLogger(training_log, append=True))
        checkpoint = ModelCheckpoint(
            filepath=weights_filepath,
            monitor=self.hyperparameters.monitor,
            verbose=1,
            save_best_only=False,
            save_weights_only=True,
        )
        tensorboard = TensorBoard(
            log_dir=logs_dir,
            histogram_freq=0,
            write_graph=True,
            write_images=True,
        )
        earlyStopping = EarlyStopping(
            monitor=self.hyperparameters.monitor,
            min_delta=self.hyperparameters.es_min_delta,
            mode=self.hyperparameters.es_mode,
            patience=self.hyperparameters.es_patience,
            verbose=1)
        callbacks_list = [
            checkpoint,
            tensorboard,
            csv_logger,
            earlyStopping,
        ]
        if not resume:
            Optimizer = getattr(tf_optimizers, self.hyperparameters.optimizer)
            self.__model.compile(
                loss=self.hyperparameters.loss,
                optimizer=Optimizer(
                    learning_rate=self.hyperparameters.learning_rate),
                metrics=self.hyperparameters.metrics,
            )
        if resume:
            assert os.path.isfile(
                training_log
            ), "{} does not exist and is required by training resumption".format(
                training_log)
            training_log_file = open(training_log)
            initial_epoch += sum(1 for _ in training_log_file) - 1
            training_log_file.close()
            assert self.hyperparameters.epochs > initial_epoch, \
                "The existing model has been trained for {0} epochs. Make sure the total epochs are larger than {0}".format(initial_epoch)

        train_generator = self.__generator(train_data_raw,
                                           labels_raw,
                                           batch_size,
                                           validation_split,
                                           is_validation=False)
        test_generator = self.__generator(train_data_raw,
                                          labels_raw,
                                          batch_size,
                                          validation_split,
                                          is_validation=True)
        steps_per_epoch = math.ceil(
            float(train_data_raw.shape[0]) * (1 - validation_split) /
            batch_size)
        validation_steps = math.ceil(
            float(train_data_raw.shape[0]) * validation_split / batch_size)

        try:
            hist = self.__model.fit(
                train_generator,
                steps_per_epoch=steps_per_epoch,
                validation_data=test_generator,
                validation_steps=validation_steps,
                epochs=self.hyperparameters.epochs,
                shuffle=False,
                callbacks=callbacks_list,
                initial_epoch=initial_epoch,
            )
        except KeyboardInterrupt:
            Network.__LOGGER.warning("Training interrupted by the user")
            raise TerminalException("Training interrupted by the user")
        finally:
            self.__model.save(model_filepath)
            Network.__LOGGER.warning("Model saved to %s" % model_filepath)

        return hist.history["val_loss"], hist.history["val_acc"] if int(
            tf.__version__.split(".")[0]) < 2 else hist.history["val_accuracy"]
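
A hypothetical resumption call for the method above (a sketch; network, the file paths, and the raw HDF5 arrays are placeholders rather than names confirmed by the original): when resume=True the method re-reads training_log to derive initial_epoch, and CSVLogger(..., append=True) keeps extending the same file, so the same paths must be passed again on the second run.

val_losses, val_accuracies = network.fit_with_generator(
    train_data_raw,
    labels_raw,
    model_filepath="model.hdf5",
    weights_filepath="weights.hdf5",
    logs_dir="tb_logs",
    training_log="training.csv",
    resume=True,  # continue from the epoch count recorded in training.csv
)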
Example #11
model.add(Conv2D(64, (3, 3), strides = 1, activation = 'relu', kernel_initializer = 'he_uniform'))
model.add(MaxPooling2D((2, 2), strides = (2, 2), padding = 'same'))

model.add(Flatten())

model.add(Dense(100, activation = 'relu', kernel_initializer = 'he_uniform'))

model.add(Dense(10, activation = 'softmax'))

model.compile(optimizer = SGD(learning_rate = 0.01, momentum = 0.9), loss = 'categorical_crossentropy', metrics = ['accuracy'])

if not os.path.isdir('Model'):
    os.mkdir('Model')

callbacks = [ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1,
                              patience = 7, min_lr = 1e-5),
             EarlyStopping(patience = 9, # Patience should be larger than the one in ReduceLROnPlateau
                          min_delta = 1e-5),
             CSVLogger(os.path.join('Model', 'training.log'), append = True),
             ModelCheckpoint(os.path.join('Model', 'backup_last_model.hdf5')),
             ModelCheckpoint(os.path.join('Model', 'best_val_acc.hdf5'), monitor = 'val_accuracy', mode = 'max', save_best_only = True),
             ModelCheckpoint(os.path.join('Model', 'best_val_loss.hdf5'), monitor = 'val_loss', mode = 'min', save_best_only = True)]

model.fit(trainGenerator, epochs = 50, validation_data = validationGenerator, callbacks = callbacks)

model = load_model(os.path.join('Model', 'best_val_loss.hdf5'))
loss, acc = model.evaluate(validationGenerator)

print('Loss on Validation Data : ', loss)
print('Accuracy on Validation Data :', '{:.4%}'.format(acc))
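
A small follow-on sketch (not part of the original snippet): the best-validation-accuracy checkpoint saved above can be restored and compared in the same way.

best_acc_model = load_model(os.path.join('Model', 'best_val_acc.hdf5'))
acc_loss, acc_value = best_acc_model.evaluate(validationGenerator)
print('Accuracy on Validation Data (best_val_acc checkpoint) :', '{:.4%}'.format(acc_value))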
Example #12
    newModel.compile(
        loss="categorical_crossentropy",  #keras.losses.binary_crossentropy
        optimizer=model_opt,
        metrics=['accuracy'])
    newModel.summary()

if int(file_index) == 2:
    newModel = load_model("./Models/2CNN_1n1c1c_1.h5")

######################################################################################
# time counter
print(time.strftime("%a %b %d %H:%M:%S %Y", time.localtime()))
ticks_1 = time.time()
############################################################################################################################################################
check_list = []
csv_logger = CSVLogger("./Models/training_log_1n1c1c_" + str(file_index) +
                       ".csv")
checkpoint = ModelCheckpoint(filepath="./Models/checkmodel_1n1c1c_" +
                             str(file_index) + ".h5",
                             save_best_only=True,
                             verbose=1)
earlystopping = EarlyStopping(
    monitor="loss",
    min_delta=0.01,
    patience=50,
    verbose=1,
    mode="auto",
    baseline=None,
    restore_best_weights=False,
)
check_list.append(checkpoint)
check_list.append(csv_logger)
Example #13
from hessianlearn import *

# Memory issue with GPUs
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
for device in gpu_devices:
    tf.config.experimental.set_memory_growth(device, True)

import numpy as np
import resnet_hl

lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=5,
                               min_lr=0.5e-6)
early_stopper = EarlyStopping(min_delta=0.001, patience=10)
csv_logger = CSVLogger('resnet18_cifar10.csv')

batch_size = 32
nb_classes = 10
nb_epoch = 200
data_augmentation = True

# input image dimensions
img_rows, img_cols = 32, 32
# The CIFAR10 images are RGB.
img_channels = 3

# The data, shuffled and split between train and test sets:
(X_train, y_train), (X_test, y_test) = cifar10.load_data()

# Convert class vectors to binary class matrices.
Example #14
else:
    get_loss = {'dice': DiceLoss, 'jaccard': JaccardLoss, 'focal': FocalLoss}
    loss = get_loss[args.loss]()

# compiling model
model.compile(loss=loss, optimizer=Adam(), metrics=['mean_squared_error'])

# creating directory for experiment
callbacks = []
experiment_label = '_'.join([args.dataset, model.name, args.run_label])
experiment_path = os.path.join(args.save_path, experiment_label)
if not os.path.exists(experiment_path):
    os.makedirs(experiment_path)

# setting additional callbacks
log = CSVLogger(os.path.join(experiment_path, 'optimization.log'))
stop = EarlyStopping(patience=args.stop_patience)
plateau = ReduceLROnPlateau(patience=args.reduce_patience)
save_filename = os.path.join(experiment_path, 'model.hdf5')
save = ModelCheckpoint(save_filename, save_best_only=True)
callbacks.extend([log, stop, save, plateau])

# saving hyper-parameters and model summary
with open(os.path.join(experiment_path, 'hyperparameters.json'), 'w') as filer:
    json.dump(args.__dict__, filer, indent=4)
with open(os.path.join(experiment_path, 'model_summary.txt'), 'w') as filer:
    model.summary(print_fn=lambda x: filer.write(x + '\n'))

# starting optimization
model.fit(sequencers['train'],
          epochs=args.epochs,
Example #15
#############################################################
# Run training/testing
#############################################################

# save weights when loss is improved
checkpoint = ModelCheckpoint('{epoch:04d}-{val_loss:.4f}.hdf5',
                             monitor='val_loss',
                             verbose=0,
                             save_best_only=True)

# reduce learning rate after 2000 epochs
learning_rate_reduce = LearningRateScheduler(learningRateReduce, verbose=0)

# record logs after every epoch
today = str(date.today())
csv_logger = CSVLogger('training-' + today + '.log')

if TESTING:
    # record test loss (MAE)
    [loss, acc] = model.evaluate(DataGenerator(input_fns, input_gt_fns))

    print('Loss: ' + str(loss))

    # predict output images on test set
    out = model.predict(DataGenerator(input_fns, input_gt_fns), verbose=1)

    psnr = []
    ssim = []

    # record metrics and also save images to disk
    for idx, im in enumerate(out):
Example #16
model.add(Activation('relu'))
# Add Dropout
model.add(Dropout(0.4))

# Output Layer
model.add(Dense(2))
model.add(Activation('softmax'))

model.summary()

# Compile the model
model.compile(loss=tf.keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])

csv_logger = CSVLogger("metric_calc_5.csv", separator=",", append=True)
model_history = model.fit(x=input_x,
                          y=output_y,
                          epochs=50,
                          validation_data=(test_x, test_y),
                          verbose=2,
                          callbacks=[csv_logger])
model.save("alexnet_v4_calc.h5")

calc_metrics = pd.DataFrame(columns=["loss", "acc", "val_loss", "val_acc"])
calc_metrics = calc_metrics.append(pd.DataFrame(model_history.history),
                                   ignore_index=True)
calc_metrics.to_csv(r"calc_metrics_6_pd_to_csv.csv")

##y_pred = model.predict(input_x)
##y_pred = y_pred.ravel() #flatten both pred and true y
Example #17
def main(go_file, train_data_file, test_data_file, terms_file, model_file,
         out_file, split, batch_size, epochs, load, logger_file, threshold,
         device, params_index):
    params = {
        'max_kernel': 129,
        'initializer': 'glorot_normal',
        'dense_depth': 0,
        'nb_filters': 512,
        'optimizer': Adam(lr=3e-4),
        'loss': 'binary_crossentropy'
    }
    # SLURM JOB ARRAY INDEX
    pi = params_index
    if params_index != -1:
        kernels = [33, 65, 129, 257, 513]
        dense_depths = [0, 1, 2]
        nb_filters = [32, 64, 128, 256, 512]
        params['max_kernel'] = kernels[pi % 5]
        pi //= 5
        params['dense_depth'] = dense_depths[pi % 3]
        pi //= 3
        params['nb_filters'] = nb_filters[pi % 5]
        pi //= 5
        out_file = f'data/predictions_{params_index}.pkl'
        logger_file = f'data/training_{params_index}.csv'
        model_file = f'data/model_{params_index}.h5'
    print('Params:', params)

    go = Ontology(go_file, with_rels=True)
    terms_df = pd.read_pickle(terms_file)
    terms = terms_df['terms'].values.flatten()

    train_df, valid_df = load_data(train_data_file, terms, split)
    test_df = pd.read_pickle(test_data_file)
    terms_dict = {v: i for i, v in enumerate(terms)}
    nb_classes = len(terms)
    with tf.device('/' + device):
        test_steps = int(math.ceil(len(test_df) / batch_size))
        test_generator = DFGenerator(test_df, terms_dict, nb_classes,
                                     batch_size)
        if load:
            logging.info('Loading pretrained model')
            model = load_model(model_file)
        else:
            logging.info('Creating a new model')
            model = create_model(nb_classes, params)

            logging.info("Training data size: %d" % len(train_df))
            logging.info("Validation data size: %d" % len(valid_df))
            checkpointer = ModelCheckpoint(filepath=model_file,
                                           verbose=1,
                                           save_best_only=True)
            earlystopper = EarlyStopping(monitor='val_loss',
                                         patience=6,
                                         verbose=1)
            logger = CSVLogger(logger_file)

            logging.info('Starting training the model')

            valid_steps = int(math.ceil(len(valid_df) / batch_size))
            train_steps = int(math.ceil(len(train_df) / batch_size))
            train_generator = DFGenerator(train_df, terms_dict, nb_classes,
                                          batch_size)
            valid_generator = DFGenerator(valid_df, terms_dict, nb_classes,
                                          batch_size)

            model.summary()
            model.fit_generator(train_generator,
                                steps_per_epoch=train_steps,
                                epochs=epochs,
                                validation_data=valid_generator,
                                validation_steps=valid_steps,
                                max_queue_size=batch_size,
                                workers=12,
                                callbacks=[logger, checkpointer, earlystopper])
            logging.info('Loading best model')
            model = load_model(model_file)

        logging.info('Evaluating model')
        loss = model.evaluate_generator(test_generator, steps=test_steps)
        logging.info('Test loss %f' % loss)
        logging.info('Predicting')
        test_generator.reset()
        preds = model.predict_generator(test_generator, steps=test_steps)

        # valid_steps = int(math.ceil(len(valid_df) / batch_size))
        # valid_generator = DFGenerator(valid_df, terms_dict,
        #                               nb_classes, batch_size)
        # logging.info('Predicting')
        # valid_generator.reset()
        # preds = model.predict_generator(valid_generator, steps=valid_steps)
        # valid_df.reset_index()
        # valid_df['preds'] = list(preds)
        # train_df.to_pickle('data/train_data_train.pkl')
        # valid_df.to_pickle('data/train_data_valid.pkl')

    test_labels = np.zeros((len(test_df), nb_classes), dtype=np.int32)
    for i, row in enumerate(test_df.itertuples()):
        for go_id in row.prop_annotations:
            if go_id in terms_dict:
                test_labels[i, terms_dict[go_id]] = 1
    logging.info('Computing performance:')
    roc_auc = compute_roc(test_labels, preds)
    logging.info('ROC AUC: %.2f' % (roc_auc, ))
    test_df['labels'] = list(test_labels)
    test_df['preds'] = list(preds)

    logging.info('Saving predictions')
    test_df.to_pickle(out_file)
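
The helper compute_roc used above is not shown in this excerpt; a minimal sketch of one plausible implementation (an assumption, not the original code) computes a micro-averaged ROC AUC over the flattened label and prediction matrices:

from sklearn.metrics import auc, roc_curve


def compute_roc(labels, preds):
    # flatten the (n_proteins, n_terms) matrices and compute a single ROC curve
    fpr, tpr, _ = roc_curve(labels.flatten(), preds.flatten())
    return auc(fpr, tpr)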
Example #18
plot_model(model, 'model.jpg')
if training:
    checkpoint = ModelCheckpoint(model_path,
                                 'val_class_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True)
    reduce_lr = ReduceLROnPlateau('val_class_acc',
                                  0.5,
                                  10,
                                  verbose=1,
                                  min_lr=1e-6)
    early_stopping = EarlyStopping('val_class_acc',
                                   patience=50,
                                   restore_best_weights=True)
    logger = CSVLogger(model_path + '.csv', append=True)
    tensorboard = TensorBoard(model_path[:model_path.rfind('.')] + '_logs',
                              batch_size=1024,
                              update_freq='epoch')
    #model.fit(trainX, trainY, batch_size=128, epochs=500, validation_data=(validX, validY), verbose=2, callbacks=[checkpoint, reduce_lr, early_stopping, logger, tensorboard])
    model.fit(
        trainX, [trainY, missing_col],
        batch_size=128,
        epochs=500,
        validation_data=(validX, [validY, valid_missing_col]),
        verbose=2,
        callbacks=[checkpoint, reduce_lr, early_stopping, logger, tensorboard])

if submit:
    if svm:
        testX = utils.load_test_data(submit)
Example #19
    # all but last 5 layers
    for layer in model.layers[:-5]:
        layer.trainable = False
    # last 5 layers
    for layer in model.layers[-5:]:
        layer.trainable = True

    model.compile(optimizer=SGD(learning_rate=0.01, momentum=0.9), loss="categorical_crossentropy", metrics=["accuracy"])

    model.summary()

    output_folder_path = os.path.join("hyperparameter-tuning-mobilenetv2", set_folder)
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)

    csv_logger_filepath = os.path.join(output_folder_path, "history.log")
    csv_logger = CSVLogger(csv_logger_filepath)

    history = model.fit(train_generator,
                        epochs=num_epochs,
                        validation_data=val_generator,
                        verbose=1,
                        callbacks=[csv_logger]
    )

    # probably won't reach here because training would be stopped in between
    history_object_filepath = os.path.join(output_folder_path, "history_dict")
    with open(history_object_filepath, "wb") as history_object:
        pickle.dump(history.history, history_object)
Example #20
def train():
    max_features = 10000
    sequence_length = 250

    # Load all datasets
    raw_train_ds = load_train_dataset()
    raw_val_ds = load_val_dataset()
    raw_test_ds = load_test_dataset()

    # Define the vectorization layer
    global vectorization_layer
    vectorization_layer = TextVectorization(
        standardize='lower_and_strip_punctuation',
        max_tokens=max_features,
        output_mode='int',
        output_sequence_length=sequence_length)

    # Adapt the vectorization layer
    vectorization_layer.adapt(raw_train_ds.map(lambda x, y: x))

    # Vectorize the dataset
    train_ds = raw_train_ds.map(text_to_vector)
    val_ds = raw_val_ds.map(text_to_vector)
    test_ds = raw_test_ds.map(text_to_vector)

    auto_tune = tf.data.experimental.AUTOTUNE
    train_ds = train_ds.cache().prefetch(buffer_size=auto_tune)
    val_ds = val_ds.cache().prefetch(buffer_size=auto_tune)
    test_ds = test_ds.cache().prefetch(buffer_size=auto_tune)

    embedding_dim = 32

    # Define the model
    model = tf.keras.Sequential([
        layers.Embedding(max_features + 1, embedding_dim),
        layers.Dropout(0.2),
        layers.GlobalAveragePooling1D(),
        layers.Dropout(0.2),
        layers.Dense(embedding_dim, activation='relu'),
        layers.Dropout(0.2),
        layers.Dense(1)
    ])

    # Compile the model
    model.compile(loss=losses.BinaryCrossentropy(from_logits=True),
                  optimizer='adam',
                  metrics=tf.metrics.BinaryAccuracy(threshold=0.0))

    # Train the model
    epochs = 10
    csv_logger = CSVLogger('training.log')
    history = model.fit(train_ds,
                        validation_data=val_ds,
                        epochs=epochs,
                        callbacks=[csv_logger])

    # Test the model
    loss, accuracy = model.evaluate(test_ds)
    print("Model loss against test dataset = ", loss)
    print("Model accuracy against test dataset = ", accuracy)

    # Save the model
    export_model = tf.keras.Sequential([
        tf.keras.Input(shape=(1, ), dtype="string"), vectorization_layer,
        model,
        layers.Activation('sigmoid')
    ])

    export_model.compile(loss=losses.BinaryCrossentropy(from_logits=False),
                         optimizer="adam",
                         metrics=['accuracy'])
    export_model.save('saved_model')
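
A short usage sketch (an assumed follow-up, not part of the original): because the TextVectorization layer is baked into export_model, it can be called on raw strings directly.

examples = ["the movie was great", "the movie was terrible"]
print(export_model.predict(examples))  # sigmoid probabilities in [0, 1]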
Example #21
            # with CustomObjectScope({'loss': sm.losses.bce_jaccard_loss, 'metrics': sm.metrics.iou_score}):
            model = load_model(model_path,
                               custom_objects={
                                   'loss': loss,
                                   'metrics': metrics
                               })
        else:

            model = sm.Unet('seresnet18', classes=1, activation='sigmoid')
            model.compile(
                'Adam',
                loss=loss,
                metrics=metrics,
            )

    csv_logger = CSVLogger(
        f"{file_path}{model_name}_{batch_size}_{epochs}.csv", append=False)
    checkpoint = ModelCheckpoint(model_path, verbose=1, save_best_only=True)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=5,
                                  min_lr=1e-6,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=10,
                                   restore_best_weights=False)
    callbacks = [csv_logger, checkpoint, reduce_lr, early_stopping]

    model.fit_generator(train_gen,
                        validation_data=valid_gen,
                        steps_per_epoch=train_steps,
                        validation_steps=valid_steps,
Example #22
def callbacks_req(model_type='LSTM'):
    csv_logger = CSVLogger(model_folder+'/training-log-'+model_type+'-'+str(test_year)+'.csv')
    filepath = model_folder+"/model-" + model_type + '-' + str(test_year) + "-E{epoch:02d}.h5"
    model_checkpoint = ModelCheckpoint(filepath, monitor='val_loss',save_best_only=True)
    earlyStopping = EarlyStopping(monitor='val_loss',mode='min',patience=10,restore_best_weights=True)
    return [csv_logger,earlyStopping,model_checkpoint]

LRScheduler = LearningRateScheduler(lrSchedule)

# Create checkpoint for the training
# This checkpoint performs model saving when
# an epoch gives highest testing accuracy
filepath = modelname + ".hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_acc',
                             verbose=0,
                             save_best_only=True,
                             mode='max')

# Log the epoch detail into csv
csv_logger = CSVLogger(modelname + '.csv')
callbacks_list = [checkpoint, csv_logger, LRScheduler]

# Fit the model
datagen = ImageDataGenerator(width_shift_range=0.2,
                             height_shift_range=0.2,
                             rotation_range=20,
                             horizontal_flip=True,
                             vertical_flip=False)

model.fit_generator(
    datagen.flow(trDat, trLbl, batch_size=32),
    validation_data=(tsDat, tsLbl),
    epochs=200,  #originally 200
    verbose=1,
    steps_per_epoch=len(trDat) / 32,
Example #24
    def set_log_callback(self):
        '''Set a logger to write the accuracies and losses in each epoch to a file'''

        csv_logger = CSVLogger(self.output_dir + '\\training.log')
        self.callbacks.append(csv_logger)
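
The hard-coded backslash separator ties this snippet to Windows; a minimal portable variant of the same callback setup (a sketch, assuming os is imported at module level) uses os.path.join instead:

    def set_log_callback(self):
        '''Set a logger to write the accuracies and losses in each epoch to a file'''

        csv_logger = CSVLogger(os.path.join(self.output_dir, 'training.log'))
        self.callbacks.append(csv_logger)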
Example #25
                                     skip_array=skip_steps,
                                     weight_pred_ind=True,
                                     weighs_dtw_cluster_ind=True,
                                     replicate_csvs_at=0)

checkpoint_path = project_paths["checkpoints"] + "/weights_epoch-{epoch}.ckpt"
restore_path = project_paths["checkpoints"] + "/weights_epoch-" + str(
    FirstSkip) + ".ckpt"
callback_save_model_reg = ModelCheckpoint(filepath=checkpoint_path,
                                          save_weights_only=False,
                                          verbose=1,
                                          period=FirstSkip)

project_paths = get_project_paths(sys.argv[0], to_tmp=False)
logs = project_paths["weights"] + "/reg_model_history_log.csv"
csv_logger = CSVLogger(logs, append=True)

(x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()

x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.

csv_logger = CSVLogger(logs, append=True)
shutil.copy2(os.path.realpath(__file__),
             project_paths['code'])  # copying code file to logs


def create_model():
    model = models.Sequential()
    model.add(layers.Flatten())
    model.add(layers.Dense(latent_dim, activation='relu'))
Example #26
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model

def get_layers(numbers):
    options = [] 

    for a in numbers:
        for b in numbers:
            for c in numbers:
                options.append([a, b, c])

    return options

logger = CSVLogger('Ratio_logs.csv', append=True)

sizes=[16, 32, 64, 128]
models = []

for a, b, c in get_layers(sizes):
    model = create_model(a, b, c)
    models.append(model)

best_loss = 100

for nn in models:
    print('_'*50) 
    print(nn.summary())

    nn.fit(X_train, y_train, callbacks=[logger], batch_size=32, epochs=100)
Example #27
import numpy as np
import random
from dataloader import CaptchaSequence
import string
characters = string.digits + string.ascii_uppercase
data = CaptchaSequence(characters, batch_size=2, steps=1000)

from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import EarlyStopping, CSVLogger, ModelCheckpoint
from tensorflow.keras.optimizers import *

train_data = CaptchaSequence(characters, batch_size=128, steps=1000)
valid_data = CaptchaSequence(characters, batch_size=128, steps=100)
callbacks = [
    EarlyStopping(patience=3),
    CSVLogger('cnn.csv'),
    ModelCheckpoint('cnn_best.h5', save_best_only=True)
]

from model import model
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(1e-3, amsgrad=True),
              metrics=['accuracy'])
model.fit_generator(train_data,
                    epochs=100,
                    validation_data=valid_data,
                    workers=4,
                    use_multiprocessing=True,
                    callbacks=callbacks)
Example #28
def training(X_tr, Y_tr, X_v, Y_v):
    model = GRU_model()

    #compute weight, ideally, we don't need this if all classes are equal in number.

    weight = np.zeros(num_classes)  # class
    #counting classes
    for i in Y_tr:
        weight[int(i)] += 1
    print('Training class count')
    print(weight)
    d = np.min([1.0 / temp for temp in weight])
    weight = [1.0 / temp / d for temp in weight]

    rmsprop = keras.optimizers.RMSprop(lr=0.003,
                                       rho=0.9,
                                       epsilon=1e-08,
                                       decay=0.0)
    adam = keras.optimizers.Adam(lr=0.01,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08)
    sgd = keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=1e-6)
    # OBS rmsprop and adam cant get loss past 2, while sgd got as low as 0.2

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=rmsprop,
                  metrics=['sparse_categorical_accuracy'])
    reduce_lr = ReduceLROnPlateau(monitor='val_acc',
                                  factor=0.5,
                                  patience=1500,
                                  min_lr=0.0001)
    csv_logger = CSVLogger('allnode_PIN.log')
    filepath = "allnode_weights-v-{epoch:02d}.hdf5"
    checkpointer = ModelCheckpoint(monitor='val_loss',
                                   filepath=filepath,
                                   verbose=1,
                                   save_best_only=True)
    start = time()

    model.fit_generator(generator(X_tr, Y_tr, 256),
                        steps_per_epoch=4,
                        epochs=epochs,
                        validation_data=(X_v, Y_v),
                        callbacks=[checkpointer, csv_logger, reduce_lr],
                        max_queue_size=3,
                        use_multiprocessing=True,
                        class_weight=weight)
    end = time()
    print('time:', end - start)
    '''
    open log file to find best weight
    '''

    # open log file
    # log file contains loss of each epoch in each running step
    log_file = "./allnode_PIN.log"
    loss = []
    with open(log_file) as f:
        f = f.readlines()
    f[0:] = f[1:]  #delete header
    for line in f:
        loss.append([int(line.split(',')[0]),
                     float(line.split(',')[3])])  # save each epoch's loss value to the list

    # find minimum loss
    # Keep minimum lost and save its index and use it for testing
    min_loss = 100
    for data in range(len(loss)):
        if loss[data][1] < min_loss:
            min_loss = loss[data][1]
            best_model_index = loss[data][0]
    return best_model_index, end - start, loss[:][1]
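
The manual log parsing above can be written more directly with pandas (a minimal sketch, assuming the CSVLogger file contains epoch and val_loss columns, which it does when validation data is passed during training):

import pandas as pd

log = pd.read_csv('./allnode_PIN.log')
best_model_index = int(log.loc[log['val_loss'].idxmin(), 'epoch'])
min_loss = float(log['val_loss'].min())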
Example #29
def customTrainedModel(
        train_path,
        valid_path,
        model,
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        filepath='saved_models/weights-improvement-{epoch:02d}.h5',
        monitor='val_accuracy',
        verbose=1,
        save_best_only=True,
        mode='max',
        logfile='my_logs.csv',
        separator=',',
        append=False,
        batch_size=DEFAULT_BATCH_SIZE,
        class_mode='categorical',
        epochs=DEFAULT_EPOCHS,
        target_size=TARGET_SIZE,
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        zca_epsilon=1e-06,
        rotation_range=0,
        width_shift_range=CONSTANT_ZERO,
        height_shift_range=CONSTANT_ZERO,
        brightness_range=None,
        shear_range=CONSTANT_ZERO,
        zoom_range=CONSTANT_ZERO,
        channel_shift_range=CONSTANT_ZERO,
        fill_mode='nearest',
        cval=CONSTANT_ZERO,
        horizontal_flip=False,
        vertical_flip=False,
        rescale=None,
        preprocessing_function=None,
        data_format=None,
        validation_split=CONSTANT_ZERO,
        dtype=None,
        test_featurewise_center=False,
        test_samplewise_center=False,
        test_featurewise_std_normalization=False,
        test_samplewise_std_normalization=False,
        test_zca_whitening=False,
        test_zca_epsilon=1e-06,
        test_rotation_range=0,
        test_width_shift_range=CONSTANT_ZERO,
        test_height_shift_range=CONSTANT_ZERO,
        test_brightness_range=None,
        test_shear_range=CONSTANT_ZERO,
        test_zoom_range=CONSTANT_ZERO,
        test_channel_shift_range=CONSTANT_ZERO,
        test_fill_mode='nearest',
        test_cval=CONSTANT_ZERO,
        test_horizontal_flip=False,
        test_vertical_flip=False,
        test_rescale=None,
        test_preprocessing_function=None,
        test_data_format=None,
        test_validation_split=CONSTANT_ZERO,
        test_dtype=None,
        validation_freq=1,
        class_weight=None,
        max_queue_size=10,
        workers=1,
        use_multiprocessing=False,
        shuffle=True,
        initial_epoch=0):

    model = model

    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor=monitor,
                                 verbose=verbose,
                                 save_best_only=save_best_only,
                                 mode=mode)

    log_csv = CSVLogger(logfile, separator=separator, append=append)

    callable_list = [checkpoint, log_csv]

    train_datagen = ImageDataGenerator(
        featurewise_center=featurewise_center,
        samplewise_center=samplewise_center,
        featurewise_std_normalization=featurewise_std_normalization,
        samplewise_std_normalization=samplewise_std_normalization,
        zca_whitening=zca_whitening,
        zca_epsilon=zca_epsilon,
        rotation_range=rotation_range,
        width_shift_range=width_shift_range,
        height_shift_range=height_shift_range,
        brightness_range=brightness_range,
        shear_range=shear_range,
        zoom_range=zoom_range,
        channel_shift_range=channel_shift_range,
        fill_mode=fill_mode,
        cval=cval,
        horizontal_flip=horizontal_flip,
        vertical_flip=vertical_flip,
        rescale=rescale,
        preprocessing_function=preprocessing_function,
        data_format=data_format,
        validation_split=validation_split,
        dtype=dtype)

    test_datagen = ImageDataGenerator(
        featurewise_center=test_featurewise_center,
        samplewise_center=test_samplewise_center,
        featurewise_std_normalization=test_featurewise_std_normalization,
        samplewise_std_normalization=test_samplewise_std_normalization,
        zca_whitening=test_zca_whitening,
        zca_epsilon=test_zca_epsilon,
        rotation_range=test_rotation_range,
        width_shift_range=test_width_shift_range,
        height_shift_range=test_height_shift_range,
        brightness_range=test_brightness_range,
        shear_range=test_shear_range,
        zoom_range=test_zoom_range,
        channel_shift_range=test_channel_shift_range,
        fill_mode=test_fill_mode,
        cval=test_cval,
        horizontal_flip=test_horizontal_flip,
        vertical_flip=test_vertical_flip,
        rescale=test_rescale,
        preprocessing_function=test_preprocessing_function,
        data_format=test_data_format,
        validation_split=test_validation_split,
        dtype=test_dtype)

    training_set = train_datagen.flow_from_directory(train_path,
                                                     target_size=target_size,
                                                     batch_size=batch_size,
                                                     class_mode=class_mode)

    test_set = test_datagen.flow_from_directory(valid_path,
                                                target_size=target_size,
                                                batch_size=batch_size,
                                                class_mode=class_mode)

    history = model.fit_generator(training_set,
                                  validation_data=test_set,
                                  epochs=epochs,
                                  steps_per_epoch=len(training_set),
                                  validation_steps=len(test_set),
                                  callbacks=callable_list,
                                  validation_freq=validation_freq,
                                  class_weight=class_weight,
                                  max_queue_size=max_queue_size,
                                  workers=workers,
                                  use_multiprocessing=use_multiprocessing,
                                  shuffle=shuffle,
                                  initial_epoch=initial_epoch)

    return history, model
model.add(BatchNormalization())
model.add(Dense(1))
model.add(Activation('sigmoid'))


# In[9]:


my_optimizer = optimizer_choice(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0, amsgrad=False)
model.compile(optimizer=my_optimizer, loss='binary_crossentropy', metrics=['accuracy', f1, precision, recall])


# In[10]:


csv_logger = CSVLogger('VGG16_results_new_nofreeze_5dropout.csv', append=True, separator=';')
filepath="vgg_weights/VGG16_new_weights_5drop_regu-{epoch:02d}-{val_acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)


# In[11]:


model.fit_generator(
        train_generator,
        steps_per_epoch= train_generator.samples//train_generator.batch_size,
        epochs=epochs,
        callbacks=[csv_logger],
        validation_data=dev_generator,
        validation_steps=dev_generator.samples//dev_generator.batch_size)