Example #1
    def train(self, dataset, batch_size=20, epochs=10, data_up=True):
        # Create the SGD optimizer used for training
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        # Compile the model (the actual configuration step)
        self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

        # Without data augmentation, train directly on the raw images
        if not data_up:
            self.model.fit(dataset.train_images,
                           dataset.train_labels,
                           batch_size=batch_size,
                           epochs=epochs,
                           validation_data=(dataset.valid_images, dataset.valid_labels),
                           shuffle=True)
        # Use real-time data augmentation
        else:
            # Define the data generator
            datagen = ImageDataGenerator(
                rotation_range=20,                     # random rotation angle (degrees) during augmentation
                width_shift_range=0.2,                 # horizontal shift range, as a fraction of image width
                height_shift_range=0.2,                # vertical shift range, as a fraction of image height
                horizontal_flip=True,                  # randomly flip images horizontally
                vertical_flip=False)                   # do not flip images vertically
            # Fit the generator to the training data (computes any statistics it needs)
            datagen.fit(dataset.train_images)

            # Train the model with the augmented data stream
            self.model.fit_generator(datagen.flow(dataset.train_images, dataset.train_labels,
                                     batch_size=batch_size),
                                     steps_per_epoch=dataset.train_images.shape[0] // batch_size,
                                     epochs=epochs,
                                     validation_data=(dataset.valid_images, dataset.valid_labels))

            # Construct the TensorBoard callback
            tbCallBack = tb(log_dir="/Users/heyiheng/Desktop/biyelunwen/LastDemo/logs",
                            histogram_freq=1,
                            batch_size=batch_size,
                            write_grads=True)
            history = self.model.fit(dataset.train_images, dataset.train_labels,
                                     batch_size=batch_size,
                                     epochs=epochs,
                                     shuffle=True,
                                     verbose=2,
                                     validation_split=0.2,
                                     callbacks=[tbCallBack])
            return self.model, history
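
Note that the augmented branch above fits the model twice: once through fit_generator and then again with a plain fit call that carries the TensorBoard callback. A more compact single-pass variant is sketched below; it is only an illustration, assuming the Keras 2 generator API, an already-compiled model, the same dataset attributes as above, and a hypothetical log_dir argument.

from keras.callbacks import TensorBoard as tb
from keras.preprocessing.image import ImageDataGenerator

def train_augmented(model, dataset, batch_size=20, epochs=10, log_dir="./logs"):
    # Assumes `model` is already compiled, as in Example #1.
    # Real-time augmentation: small rotations, shifts and horizontal flips.
    datagen = ImageDataGenerator(rotation_range=20,
                                 width_shift_range=0.2,
                                 height_shift_range=0.2,
                                 horizontal_flip=True)
    datagen.fit(dataset.train_images)

    # Log training curves for TensorBoard while fitting on the augmented stream.
    tensorboard = tb(log_dir=log_dir, histogram_freq=0)
    history = model.fit_generator(
        datagen.flow(dataset.train_images, dataset.train_labels, batch_size=batch_size),
        steps_per_epoch=dataset.train_images.shape[0] // batch_size,
        epochs=epochs,
        validation_data=(dataset.valid_images, dataset.valid_labels),
        callbacks=[tensorboard])
    return model, history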
Example #2
sequence_data = paddingsequence(tokenizer,MAX_SEQUENCE_LENGTH,strings)

# Create the train and validation sets
x_train,y_train,x_val,y_val = train_validation_create(sequence_data,labels,VALIDATION_SPLIT)



#####################Model Fitting##########################################

save_location = wd + "/Predictions/" 

# Use pre-trained embeddings to build the embedding matrix
embedding_matrix = embeddingenerator(EMBEDDING_FILE,EMBEDDING_DIM,MAX_NB_WORDS,word_index)

# Initialize the TensorBoard callback
tbCallBack = tb(log_dir=wd + "/Log/" + MODEL_NAME, histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=True)

model = kerasmodelbuilder(EMBEDDING_DIM, MAX_SEQUENCE_LENGTH,
                          word_index, MAX_NB_WORDS, embedding_matrix)

model.fit(x_train, y_train,
          validation_data=(x_val, y_val),
          epochs=epochs,
          batch_size=batch_size,
          callbacks=[tbCallBack])


# Evaluate on the validation set: ROC curve and AUC
# (assumes `from sklearn import metrics` and `import matplotlib.pyplot as plt`)
y_val_proba = model.predict(x_val)
fpr, tpr, _ = metrics.roc_curve(y_val, y_val_proba)
auc = metrics.roc_auc_score(y_val, y_val_proba)
plt.plot(fpr, tpr, label="validation data, auc=" + str(auc))
Example #3
from model import VggRes64
import pandas as pd
from keras.callbacks import TensorBoard as tb, ModelCheckpoint
from keras.models import load_model

if __name__ == "__main__":

    model_name = "discriminator_2x2_tiny_label_25.h5"

    source_dir = "E:/Data/mtcnn_64/"
    df = pd.read_csv("./source/mtcnn_64_drop_col.txt", sep=" ")
    model = VggRes64.VggRes64()

    model.set_model(load_model("./save/{}".format(model_name)))
    tensorboard = tb(log_dir='./logs/{}'.format(model_name), histogram_freq=0)
    checkpoint = ModelCheckpoint('./save/{}'.format(model_name),
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 period=10)
    model.set_tensorboard(tensorboard)
    model.set_checkpoint(checkpoint)

    train_set = df[:-10000]
    validation_set = df[-10000:]
    generator_train = model.create_image_generator(train_set, "filename",
                                                   source_dir)
    generator_validation = model.create_image_generator(
        validation_set, "filename", source_dir)

    model.fit_generator(generator_train, generator_validation, epochs=500)
Example #4
# load YAML and create model
#with open(DATADIR+trained_model, 'r') as yaml_file:
#    loaded_model_yaml = yaml_file.read()
#    loaded_model = model_from_yaml(loaded_model_yaml)

# load weights into new model
#loaded_model.load_weights(DATADIR+trained_weights)
model = load_model(DATADIR + compiled_model)
print("Loaded model from disk")

from keras.callbacks import TensorBoard as tb
from datetime import datetime

t = datetime.now()
tensorboard = tb(
    log_dir='tensorboard_logs/{:%Y-%m-%d-%H-%M}'.format(t))  # timestamped log directory for this run

with h5py.File(DATADIR + training_file, "r") as f1:
    with h5py.File(DATADIR + label_file, "r") as f2:
        x_train = f1[
            'training_data']  # Note that Keras is special in being able to read the HDF5 _object_
        y_train = f2['training_labels']

        model.fit(
            x_train,
            y_train,
            batch_size=batch_size,
            epochs=epochs,
            shuffle='batch',  # Required for using HDF5
            callbacks=[tensorboard])
Example #5
def crearConfigTensorBoard(pathToLogs):
    # Build and return a TensorBoard callback configured for the given log directory
    return tb(log_dir=pathToLogs,
              histogram_freq=0,
              write_graph=True,
              write_images=False)
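
For completeness, a minimal usage sketch for the helper in Example #5 (the name roughly means "create TensorBoard config"): it simply returns a configured callback, so it can be passed straight to fit. The toy model and random data below are purely illustrative and assume tb is keras.callbacks.TensorBoard, as in the examples above.

from keras.callbacks import TensorBoard as tb
from keras.models import Sequential
from keras.layers import Dense
import numpy as np

def crearConfigTensorBoard(pathToLogs):
    # Same helper as in Example #5
    return tb(log_dir=pathToLogs, histogram_freq=0, write_graph=True, write_images=False)

# Toy model and data, purely for illustration
model = Sequential([Dense(8, activation='relu', input_shape=(4,)),
                    Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

x = np.random.rand(100, 4)
y = np.random.randint(0, 2, size=(100, 1))

# Pass the configured callback to fit; the logs can be inspected afterwards
# with: tensorboard --logdir ./logs
model.fit(x, y, epochs=5, batch_size=16,
          callbacks=[crearConfigTensorBoard("./logs")])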