def train():

        # compile the model with all of the training parameters (should be done *after* setting layers to non-trainable)
        model.compile(optimizer=training_config['optimizer'], loss=training_config['loss_function'],
                  metrics=training_config['metrics'])


        # log training metrics to a CSV file
        csv_logging = callbacks.CSVLogger(training_filepath, separator=',', append=False)
        model_checkpoint = callbacks.ModelCheckpoint(snapshots_dir + 'weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                                                     monitor='val_loss', verbose=0,
                                                     save_best_only=False, save_weights_only=False,
                                                     mode='auto', period=1)

        # train the model on the new data for a few epochs
        print "training model with full model"
        model.fit_generator(
            train_generator,
            samples_per_epoch=nb_train_samples,
            nb_epoch=training_config['nb_epoch'],
            validation_data=val_generator,
            nb_val_samples=nb_val_samples,
            callbacks=[csv_logging, model_checkpoint]
        )
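
Note: samples_per_epoch, nb_epoch, and nb_val_samples are Keras 1 argument
names. A minimal sketch of the same call under Keras 2 naming, assuming the
same generators and an available batch_size variable:

        # Keras 2 counts steps (batches) per epoch rather than samples
        model.fit_generator(
            train_generator,
            steps_per_epoch=nb_train_samples // batch_size,  # batch_size is assumed
            epochs=training_config['nb_epoch'],
            validation_data=val_generator,
            validation_steps=nb_val_samples // batch_size,
            callbacks=[csv_logging, model_checkpoint]
        )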
Example #2
from dataloader_test import test
from dataloader_train import train
from model import model

import matplotlib.pyplot as plt
%matplotlib inline


model.summary()

model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
history = model.fit_generator(train, epochs=25, validation_data=test, verbose=1)

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()
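# The loss curves computed above are otherwise unused; a minimal sketch of the
# presumably intended second figure:
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend(loc=0)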


plt.show()


Example #3
# Imports assumed by this snippet:
import tensorflow as tf
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint

from preprocess.generator import train_generator, valid_generator


# Custom loss function
def huber_loss(y_true, y_pred):
    return tf.losses.huber_loss(y_true, y_pred)
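
# tf.losses.huber_loss is the TF1 API; a TF2 equivalent (assuming tf.keras is
# in use) would be the built-in loss class instead of the wrapper above:
#   model.compile(optimizer=adam, loss=tf.keras.losses.Huber())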


# Creating the model
model = model()
model.summary()

# Compile
adam = Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=1e-10, decay=0.0)
model.compile(optimizer=adam, loss=huber_loss)

# Train
epochs = 10
train_gen = train_generator(sample_per_batch=2, batch_number=1690)
val_gen = valid_generator(sample_per_batch=50, batch_number=10)

checkpoints = ModelCheckpoint('weights/weights{epoch:03d}.h5', save_weights_only=True, period=1)
history = model.fit_generator(train_gen, steps_per_epoch=1690, epochs=epochs, verbose=1, shuffle=True,
                              validation_data=val_gen, validation_steps=10,
                              callbacks=[checkpoints], max_queue_size=100)

with open('history.txt', 'a+') as f:
    print(history.history, file=f)

print('All Done!')
Example #4

# Fragment: this is the batch-assembly loop body of a custom generator
# (dgen, used below); the loop header and the source of `r` are elided.
            train_ = train(r)
            p.append(train_[1].tolist())
            q.append(train_[2].tolist())

            # a_t = []
            # for ii in train_[0]:
            #     for iii in ii:
            #         a_t.extend(iii)
            # a.append(a_t)

            a_t = []
            for ii in train_[0].tolist():
                a_t.extend(ii)
            a.append(a_t)
            an.append(train_[3].tolist())
        p = np.array(p)
        q = np.array(q)
        a = np.array(a)
        an = np.array(an)
        print(p.shape, q.shape, a.shape, an.shape)
        yield ([p, q, a], [a, an])


model = model()
model.fit_generator(dgen(),
                    steps_per_epoch=100,
                    epochs=30,
                    validation_data=dgen(),
                    validation_steps=20)
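# Note: dgen() feeds both training and validation here; unless the generator
# internally draws from separate splits, validation sees the same data stream
# as training.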
model.save('oporc_1.h5')
Example #5
        preprocess_function=preprocess_input)  # tail of an elided generator-setup call
    reduce_lr_01 = ReduceLROnPlateau(monitor='val_1st_acc',
                                     factor=0.2,
                                     patience=5,
                                     min_lr=0,
                                     mode='max')
    reduce_lr_02 = ReduceLROnPlateau(monitor='val_2nd_acc',
                                     factor=0.2,
                                     patience=5,
                                     min_lr=0,
                                     mode='max')
    reduce_lr_03 = ReduceLROnPlateau(monitor='val_3rd_acc',
                                     factor=0.2,
                                     patience=5,
                                     min_lr=0,
                                     mode='max')
    reduce_lr_04 = ReduceLROnPlateau(monitor='val_4th_acc',
                                     factor=0.2,
                                     patience=5,
                                     min_lr=0,
                                     mode='max')
    for i in range(30):
        f = model.fit_generator(
            train_gen,
            steps_per_epoch=7210 // 32,  # integer number of batches
            epochs=1,
            validation_data=test_gen,
            validation_steps=1000 // 32,
            callbacks=[reduce_lr_01, reduce_lr_02, reduce_lr_03, reduce_lr_04])
        model.save_weights('weights/classifier.h5')
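    # Fitting one epoch at a time only to re-save weights each round can be a
    # single call with a checkpoint callback; a sketch, assuming the same
    # generators and callbacks:
    #   cp = ModelCheckpoint('weights/classifier.h5', save_weights_only=True)
    #   model.fit_generator(train_gen, steps_per_epoch=7210 // 32, epochs=30,
    #                       validation_data=test_gen, validation_steps=1000 // 32,
    #                       callbacks=[reduce_lr_01, reduce_lr_02, reduce_lr_03,
    #                                  reduce_lr_04, cp])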
Example #6
import dataset
from model import model


step_size_train = dataset.train_generator.n // dataset.train_generator.batch_size
epoch = 5

model.fit_generator(generator=dataset.train_generator, steps_per_epoch=step_size_train, epochs=epoch)

model.save('model.h5')
Example #7
File: train.py  Project: aMrHunter/AI-poem
# Fragment: the class and __init__ signatures of this Keras callback
# (Evaluate, instantiated below) are elided in the original.
        # super().__init__() runs the parent constructor; use it when the
        # subclass needs to build on what the parent constructor sets up
        super().__init__()
        # start from a large initial value
        self.lowest = 1e10

    def on_epoch_end(self, epoch, logs=None):
        # called after each epoch: if the current loss is the lowest so far,
        # save the model (training could also be stopped once logs['loss']
        # drops below some threshold)
        loss_value.append(logs['loss'])  # loss_value: a list defined outside this fragment
        print(loss_value)
        if logs['loss'] <= self.lowest:
            self.lowest = logs['loss']
            model.save(BEST_MODEL_PATH)

        # randomly generate a few poems to inspect the results
        # print()
        # for i in range(5):
        #     print(write.generate_random(tokenizer, model))


# Create the dataset generator
dataset = PDGenerator(poetry, random=True)

# The generator runs in parallel with the model to improve efficiency
model.fit_generator(dataset.for_fit(),
                    steps_per_epoch=dataset.steps,
                    epochs=TRAIN_EPOCHS,
                    callbacks=[Evaluate()])
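# A sketch of the same save-best behavior using the built-in callback instead
# of the custom Evaluate class (assuming a keras import is available):
#   best_cp = keras.callbacks.ModelCheckpoint(BEST_MODEL_PATH, monitor='loss',
#                                             mode='min', save_best_only=True)
#   model.fit_generator(dataset.for_fit(), steps_per_epoch=dataset.steps,
#                       epochs=TRAIN_EPOCHS, callbacks=[best_cp])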
Example #8
File: training.py  Project: WxlSky/frostAV
import keras

import path
from model import model
import batch
import logger

path.run.make()
run = path.run.loadCurrent()

model.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])

model.fit_generator(batch.trainIterator,
                    validation_data=batch.validationIterator,
                    steps_per_epoch=batch.trainIterator.n // batch.size,
                    epochs=8,
                    callbacks=[
                        keras.callbacks.CSVLogger(run.log,
                                                  separator=',',
                                                  append=False)
                    ])

model.save(run.model)
logger.addModelDiagram(run)
logger.addModelSummary(run)
logger.addAccuracyPlot(run)
Example #9

# Imports assumed by this snippet; DataLoadHelper and model come from
# project-local modules whose names are elided here.
import matplotlib.pyplot as plt
from keras import backend as K

csv_data_path = "../data/driving_log.csv"
output_model_file_path = '../results/models/model.h5'

simulator_data = DataLoadHelper(csv_data_path)
train_generator = simulator_data.load_train_data_from_generator()
validation_generator = simulator_data.load_validation_data_from_generator()

#Clear any previous keras sessions
K.clear_session()

#Create an object to the model, compile, fit and save it to .h5 file
model = model(input_size=(160, 320, 3))

model.compile(loss='mse', optimizer='adam')
history_object = model.fit_generator(train_generator,
                                     samples_per_epoch=simulator_data.sample_training_size(),
                                     validation_data=validation_generator,
                                     nb_val_samples=simulator_data.sample_validation_size(),
                                     nb_epoch=3, verbose=1)

model.save(output_model_file_path)

### print the keys contained in the history object
print(history_object.history.keys())

### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
Example #10

    # Reconstructed call head (elided in the original; modelCP is used below,
    # and the ModelCheckpoint import is assumed):
    modelCP = ModelCheckpoint(
        log_dir + "\\weights_epoch_{epoch:02d}-loss_{val_loss:.2f}.hdf5",
        monitor='val_loss',
        verbose=0,
        save_best_only=False,
        save_weights_only=False,
        mode='auto',
        period=1)
    # lr_sched = keras.callbacks.LearningRateScheduler(lambda epoch: 0.002* 0.75 ** (epoch-1) )
    cfm_callback = confusion_matrix_callback(train_generator,
                                             validation_generator)
    callbacks = [tensorboard_callback, reduce_lr, modelCP,
                 csv_logger]  # , lr_sched] #, cfm_callback ]
    # ------------------------------

    # saved_weights = r"C:\Users\User\PycharmProjects\PlantPathology\logs\fit\20200331-120037\weights_epoch_17-loss_1.00.hdf5"
    # saved_weights = r"C:\Users\User\PycharmProjects\PlantPathology\logs\fit\saved_weights\weights_epoch_34-loss_1.06.hdf5"
    if C.MODEL == 'VGG' and C.PRETRAINED_VGG:
        model.load_weights(C.PRETRAINED_VGG)

    model.fit_generator(
        train_generator,
        validation_data=validation_generator,
        epochs=500,
        callbacks=callbacks,
        verbose=True,
        # class_weight=get_class_weights()
        # class_weight is documented as a dict mapping class index to weight;
        # .to_dict() makes that explicit
        class_weight=(1 / pd.Series(train_generator.classes).value_counts()).to_dict(),
        # initial_epoch = 20,
        # class_weight= 'auto',
    )
Example #11
import string

from dataloader import CaptchaSequence
from tensorflow.keras.callbacks import EarlyStopping, CSVLogger, ModelCheckpoint
from tensorflow.keras.optimizers import Adam

characters = string.digits + string.ascii_uppercase

train_data = CaptchaSequence(characters, batch_size=128, steps=1000)
valid_data = CaptchaSequence(characters, batch_size=128, steps=100)
callbacks = [
    EarlyStopping(patience=3),
    CSVLogger('cnn.csv'),
    ModelCheckpoint('cnn_best.h5', save_best_only=True)
]
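# Note: with save_best_only=True and no explicit monitor, ModelCheckpoint
# tracks val_loss by default, so cnn_best.h5 keeps the weights from the epoch
# with the lowest validation loss.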

from model import model
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(1e-3, amsgrad=True),
              metrics=['accuracy'])
model.fit_generator(train_data,
                    epochs=100,
                    validation_data=valid_data,
                    workers=4,
                    use_multiprocessing=True,
                    callbacks=callbacks)
Example #12
import tensorflow as tf
from dataset import PoetryDataGenerator, tokenizer, poetry
from model import model
import settings
import utils


class Evaluate(tf.keras.callbacks.Callback):
    """
    在每个epoch训练完成后,保留最有权重,并随机生成settings.SHOW_NUM首古诗展示
    """
    def __init__(self):
        super().__init__()
        self.lowest = 1e10

    def on_epoch_end(self, epoch, logs=None):
        if logs['loss'] <= self.lowest:
            self.lowest = logs['loss']
            model.save_weights(settings.BEST_MODEL_PATH)
        for i in range(settings.SHOW_NUM):
            print(utils.generate_random_poetry(tokenizer, model))


# Create the data generator
data_generator = PoetryDataGenerator(poetry, batch_size=settings.BATCH_SIZE)
# Start training
model.fit_generator(data_generator.forfit(),
                    steps_per_epoch=data_generator.steps,
                    epochs=settings.TRAIN_EPOCHS,
                    callbacks=[Evaluate()])
Example #13
import os
import sys

import pandas as pd
import numpy as np
import cv2 as cv
import h5py
# Imports assumed by this snippet:
from keras.callbacks import EarlyStopping
from keras.preprocessing import image


ROOT_DIR = os.path.abspath('')
dataset_dir = os.path.join(ROOT_DIR, 'datasets/Affectnet')
sys.path.append(dataset_dir)
import data_loading as data

model_dir = os.path.join(ROOT_DIR, 'models/DeXpression')
sys.path.append(model_dir)
from model import model

image_size = (128, 128)
batch_size = 300

early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

train_datagen = image.ImageDataGenerator(rescale=1./255)
val_datagen = image.ImageDataGenerator(rescale=1./255)

dataset_imgs_dir = data.dataset_dir

train_generator = train_datagen.flow_from_dataframe(data.train_dataframe, directory=f'{dataset_imgs_dir}Manually_Annotated_Images', x_col='subDirectory_filePath', y_col='expression', target_size=image_size, batch_size=batch_size, drop_duplicates=False)
val_generator = val_datagen.flow_from_dataframe(data.val_dataframe, directory=f'{dataset_imgs_dir}Manually_Annotated_Images', x_col='subDirectory_filePath', y_col='expression', target_size=image_size, batch_size=batch_size)

model.fit_generator(train_generator,
                    steps_per_epoch=int(np.ceil(414798 / 300)),  # cover every sample
                    epochs=38,
                    callbacks=[early_stopping],
                    validation_data=val_generator,
                    workers=12)

model.save('model_epoch_38_image_size_128_batch_300_1.h5')
Example #14
# Imports assumed by this snippet; data_gen, train_dir, test_dir, and model
# come from elided project code.
import os
import jdatetime
import keras as k

BATCH_SIZE = 64
EPOCHS = 20

train_gen = data_gen(train_dir, BATCH_SIZE)
val_gen = data_gen(test_dir, BATCH_SIZE)
train_gen.build_data()
val_gen.build_data()

now = jdatetime.datetime.today()

model_folder_name = '%s-%s[%s-%s]__Model' % (now.month, now.day, now.hour,
                                             now.minute)
os.mkdir('data\\models\\%s' % model_folder_name)

tensorboard_callback = k.callbacks.TensorBoard(log_dir='data\\models\\%s' %
                                               (model_folder_name),
                                               histogram_freq=1)

adam = k.optimizers.Adam()
model.compile(loss='mean_squared_error', optimizer=adam)

history = model.fit_generator(
    generator=train_gen.next_batch(),
    steps_per_epoch=int(train_gen.n / train_gen.batch_size),
    epochs=EPOCHS,
    validation_data=val_gen.next_batch(),
    validation_steps=int(val_gen.n / val_gen.batch_size),
    verbose=1,
    callbacks=[tensorboard_callback])

model.save_weights('data\\models\\%s\\model.h5' % model_folder_name)
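# Note: the hard-coded 'data\\models\\...' strings are Windows-only; a portable
# alternative would be os.path.join('data', 'models', model_folder_name).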
Example #15
File: training.py  Project: WxlSky/frostAV
from keras.optimizers import SGD

import batch
from model import model

model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=1e-3),
              metrics=['accuracy'])

model.fit_generator(batch.trainingBatchIterator,
                    steps_per_epoch=batch.sampleSize // batch.size,
                    epochs=2)

model.save('fine_tune.h5')
Example #16
# Imports assumed by this snippet; DataGen, indexes, train_dataset_info, the
# constants, and the callbacks come from elided code.
import os
import numpy as np
import pandas as pd
from tqdm import tqdm

train_indexes = indexes[:int(len(indexes) * 0.8)]
valid_indexes = indexes[int(len(indexes) * 0.8):]

train_datagen = DataGen.create_set(train_dataset_info[train_indexes],
                                   BATCH_SIZE,
                                   SHAPE,
                                   augment=True)
valid_datagen = DataGen.create_set(train_dataset_info[valid_indexes],
                                   BATCH_SIZE,
                                   SHAPE,
                                   augment=False)

history = model.fit_generator(train_datagen,
                              validation_data=next(valid_datagen),
                              steps_per_epoch=STEPS_PER_EPOCH,
                              epochs=EPOCHS,
                              verbose=1,
                              callbacks=[checkpointer, tensorboard])
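# Note: validation_data=next(valid_datagen) pins validation to a single fixed
# batch; passing valid_datagen itself together with a validation_steps argument
# would validate over multiple batches instead.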

submit = pd.read_csv('./res/sample_submission.csv')

predicted = []
for name in tqdm(submit["Id"]):
    path = os.path.join('./res/test/', name)
    image = DataGen.load_image(path, SHAPE)
    score_predict = model.predict(image[np.newaxis])[0]
    label_predict = np.arange(28)[score_predict >= 0.5]
    str_predict_label = ' '.join(str(l) for l in label_predict)
    predicted.append(str_predict_label)

submit['Predicted'] = predicted
Example #17
from model import model
from properties import *
from keras.callbacks import BaseLogger, ModelCheckpoint

from data_generator import midi_input_generator, generate_song_array, save_array_to_midi

print "Training model"

#print generate_song_array(model)
#save_array_to_midi([generate_song_array(model)], 'Generated_zero.mid')
cp = ModelCheckpoint(model_check_point_file,
                     monitor='loss',
                     verbose=1,
                     save_best_only=True,
                     mode='min')  # loss should be minimized; 'max' would keep the worst model
model.fit_generator(midi_input_generator(),
                    num_seq_in_epoch,
                    nb_epoch=num_epochs,
                    callbacks=[BaseLogger(), cp])

model.save_weights('net_dump.nw')

save_array_to_midi([generate_song_array(model)], 'Generated.mid')
Example #18

train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)


train_generator = train_datagen.flow_from_directory(
    md.train_dir,
    target_size=(150, 150),  # resize every image to 150x150
    batch_size=10,
    class_mode='binary'  # binary labels, since binary crossentropy is the loss
)

validation_generator = train_datagen.flow_from_directory(
    md.validation_dir,
    target_size=(150, 150),
    batch_size=10,
    class_mode='binary'
)

history = model.fit_generator(
      train_generator,
      steps_per_epoch=200,
      epochs=30,
      validation_data=validation_generator,
      validation_steps=50)
Example #19
# Imports assumed by this snippet; train_images, train_labels, model,
# checkpoints, and the constants come from elided code.
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split

train_labels = to_categorical(train_labels, num_classes=10)
# Split the training portion of the dataset into training and validation
X_train, X_val, Y_train, Y_val = train_test_split(train_images,
                                                  train_labels,
                                                  test_size=0.1,
                                                  random_state=RANDOM_SEED)
# image augmentation
datagen = ImageDataGenerator(featurewise_center=False,
                             samplewise_center=False,
                             featurewise_std_normalization=False,
                             samplewise_std_normalization=False,
                             zca_whitening=False,
                             rotation_range=15,
                             zoom_range=0.2,
                             width_shift_range=0.15,
                             height_shift_range=0.15,
                             horizontal_flip=False,
                             vertical_flip=False)
datagen.fit(X_train)

# Training
history = model.fit_generator(datagen.flow(X_train,
                                           Y_train,
                                           batch_size=BATCH_SIZE),
                              epochs=EPOCHS,
                              validation_data=(X_val, Y_val),
                              verbose=1,
                              steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
                              callbacks=checkpoints)  # checkpoints: list of callbacks defined elsewhere
Example #20
from datetime import datetime
from tensorflow import keras

logdir = "./folder" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)

from get_image_generator import train_generator, validation_generator, test_generator
from model import model

from meta_parameters import EPOCHS, STEPS_PER_EPOCH, VALIDATION_STEPS, BATCH_SIZE

model.fit_generator(
    train_generator,
    steps_per_epoch=STEPS_PER_EPOCH,
    epochs=EPOCHS,
    validation_data=validation_generator,
    validation_steps=VALIDATION_STEPS,
    use_multiprocessing=True,
    callbacks=[tensorboard_callback],
)

model.save_weights('my_model_weights.h5')
print("evaluate", model.metrics_names)
print(model.evaluate_generator(
    test_generator,
    use_multiprocessing=True,
))

print("parameter count:", model.count_params())
model.summary()
Example #21
# Reconstructed call head (elided in the original): train_generator is used
# below; train_datagen and train_data_dir are assumed names, parallel to the
# validation generator that follows.
train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        color_mode = 'grayscale',
        target_size = (img_rows, img_cols),
        batch_size = batch_size,
        class_mode = 'categorical',
        shuffle = True
        )

validation_generator = validation_datagen.flow_from_directory(
        validation_data_dir,
        color_mode = 'grayscale',
        target_size = (img_rows, img_cols),
        batch_size = batch_size,
        class_mode = 'categorical',
        shuffle = True
        )

from model import model
#print(model.summary())

nb_train_samples = 28273
nb_validation_samples = 3534
epochs = 30

history = model.fit_generator(
    train_generator,
    steps_per_epoch = nb_train_samples // batch_size,
    epochs = epochs,
    callbacks = callbacks,  # list of callbacks defined outside this fragment
    validation_data = validation_generator,
    validation_steps = nb_validation_samples // batch_size)