Example #1
    def __init__(self, monitor="acc", patience=10, verbose=0, desired="high"):
        EarlyStopping.__init__(self, monitor=monitor, patience=patience, verbose=verbose)
        self.logs = {}
        if desired == "high":
            self.check_improved = lambda new, old: new > old
            self.best = -self.best
        else:
            self.check_improved = lambda new, old: new < old
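A hedged usage sketch for the callback above; the subclass name, model, and data are assumptions not shown in the snippet, so it is kept commented out like the other illustrative code in this collection:
# stopper = MyEarlyStopping(monitor="val_acc", patience=5, desired="high")  # class name assumed
# model.fit(x_train, y_train, validation_split=0.1, epochs=50, callbacks=[stopper])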
Example #2
    p.add_argument('sigma', type=float)

    p.add_argument('--history')
    
    p.add_argument('--batch_size', default=4, type=int)
    p.add_argument('--epochs', default=64, type=int)
    
    p.add_argument('--cont', action='store_true')
    args = p.parse_args()
    
    train_generator, val_generator, train_batches, val_batches = \
            create_generators(args.data_directory, \
                              args.density_scale, args.sigma, args.batch_size)
    
    # Setup callbacks
    early_stopping = EarlyStopping(patience=5, verbose=2)
    model_checkpoint = ModelCheckpoint(args.model, save_best_only=True, verbose=2)
    callbacks = [early_stopping, model_checkpoint]
    if args.history is not None:
        csv_logger = CSVLogger(args.history, append=True)
        callbacks.append(csv_logger)
    
    # Load model
    custom_objects = dict(inspect.getmembers(losses, inspect.isfunction))
    model = models.load_model(args.model, custom_objects=custom_objects)
    model.summary()

    # Get score
    if args.cont:
        # note: rebinding the name `losses` here shadows the losses module used above for custom_objects
        losses = model.evaluate_generator(val_generator, val_batches)
        val_loss_idx = model.metrics_names.index('loss')
Example #3
model.add(BatchNormalization())
model.add(Dropout(0.25))

model.add(Flatten())
#model.add(Dense(1024, activation = "relu"))
#model.add(Dropout(0.5))
model.add(Dense(512, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(2, activation = "softmax"))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

# model callbacks
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau(monitor='val_acc',
                              patience=50,
                              verbose=1,
                              factor=0.5,
                              min_lr=0.00001)
# csv_logger is referenced in the callbacks list below but is not defined in this
# snippet; a minimal definition (filename assumed) would be:
# csv_logger = CSVLogger(trained_models_path + '_training.log', append=False)

model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names,
                                   monitor='val_loss',
                                   verbose=1,
                                   save_best_only=True,
                                   save_weights_only=False)
callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

# training model
Example #4
model_checkpoint = ModelCheckpoint(
    filepath=
    'ssd7_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',
    monitor='val_loss',
    verbose=1,
    save_best_only=True,
    save_weights_only=False,
    mode='auto',
    period=1)

csv_logger = CSVLogger(filename='ssd_training_log.csv',
                       separator=',',
                       append=True)

early_stopping = EarlyStopping(monitor='val_loss',
                               min_delta=0.0,
                               patience=10,
                               verbose=1)

reduc_learning_rate = ReduceLROnPlateau(monitor='val_loss',
                                        factor=0.2,
                                        patience=8,
                                        verbose=1,
                                        epsilon=0.001,
                                        cooldown=0,
                                        min_lr=0.00001)

callbacks = [model_checkpoint, csv_logger, early_stopping, reduc_learning_rate]

initial_epoch = 0
final_epoch = 20
steps_per_epoch = 1000
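A hedged sketch (not in the original snippet) of how the callbacks and epoch settings above are typically passed to training; model, train_generator, val_generator, and the validation step count are assumptions:
# history = model.fit_generator(generator=train_generator,
#                               steps_per_epoch=steps_per_epoch,
#                               epochs=final_epoch,
#                               callbacks=callbacks,
#                               validation_data=val_generator,
#                               validation_steps=100,
#                               initial_epoch=initial_epoch)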
# (opening line reconstructed; the generator and directory names are assumed)
train_generator = train_datagen.flow_from_directory(
        train_dir,
        # All images will be resized to 150x150
        target_size=(150, 150),
        batch_size=20,
        # class_mode='categorical' yields one-hot labels for categorical_crossentropy
        class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='categorical')
#Early stopping is required when the system sees no improvement for a certain number of epochs
from keras.callbacks import ModelCheckpoint, EarlyStopping

early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')   
#Save weights in model.h5
save_weights = ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True)

history = model.fit_generator(train_generator, steps_per_epoch=2000//20, epochs=2, validation_data=validation_generator, 
    validation_steps=1000//20,
    callbacks=[early_stopping, save_weights])
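As a hedged follow-up (not part of the original snippet), the best model written by the ModelCheckpoint above could be reloaded after training:
from keras.models import load_model
best_model = load_model('model.h5')  # file produced by the ModelCheckpoint defined above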








        targets_train_a = np.array(a[t])

print(inputs_train.shape)
print(targets_train_v.shape)
print(targets_train_a.shape)
print(inputs_validation.shape)
print(targets_validation_v.shape)
print(targets_validation_a.shape)

cb_bestModel = ModelCheckpoint('model_checkpoints/model_best.h5',
                               monitor='val_loss',
                               mode='min',
                               verbose=1,
                               save_best_only=True)
cb_earlyStop = EarlyStopping(
    monitor='val_loss', mode='min', verbose=1, patience=5
)  # waiting for X consecutive epochs that don't reduce the val_loss
cb_learningRate = LearningRateScheduler(scheduler)

# datagen = ImageDataGenerator(
#     rotation_range=30,
#     width_shift_range=0.25,
#     height_shift_range=0.25,
#     horizontal_flip=True,
#     brightness_range=[0.5, 1.5],
#     zoom_range=0.3)

# inputs_train = np.array(inputs_train)
# datagen.fit(inputs_train[1])
# gen1 = datagen.flow(inputs_train, targets_train, batch_size=BATCH_SIZE)
# train_steps = len(gen1)
Example #7
for x in range(num_hidden_layers):
    model.add(Dense(512, activation=activationFN))
    #model.add(Dropout(0.2))

# Final output layer. Use softmax for probability.
model.add(Dense(num_classes, activation='softmax'))

model.summary()

# Compile the model with loss function and optimiser.
model.compile(loss=lossFN,
              optimizer=optimiserFN,
              metrics=['accuracy'])

#Early stopping of the training if loss increases too often.
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=15)
# Add below to only use the best weights (on keras 2.3+ which means no AMD support)
#, restore_best_weights=True)
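A hedged sketch of the Keras 2.3+ variant hinted at in the comments above, kept commented out like the original:
# early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.0001,
#                                patience=15, restore_best_weights=True)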

#Fit model based on training and validation datasets.
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_split=1.0/12.0,
                    callbacks=[TimeHistory(), early_stopping])
                   # callbacks=[PlotLossesKeras()])
                   # callbacks=[early_stopping])

#Test the model based on testing dataset.
score = model.evaluate(x_test, y_test, verbose=0)
Example #8
model.add(Activation('softmax'))

print(model.summary())

from keras.optimizers import RMSprop, SGD, Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau

checkpoint = ModelCheckpoint('Emotion_little_vgg.h5',
                             monitor='val_loss',
                             mode='min',
                             save_best_only=True,
                             verbose=1)

earlystop = EarlyStopping(monitor='val_loss',
                          min_delta=0,
                          patience=3,
                          verbose=1,
                          restore_best_weights=True)

reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.2,
                              patience=3,
                              verbose=1,
                              min_delta=0.0001)

callbacks = [earlystop, checkpoint, reduce_lr]

model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.001),
              metrics=['accuracy'])
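A hedged sketch (not part of the snippet) showing how the callbacks list above would typically be passed to training; train_generator, validation_generator, batch_size, and the epoch count are assumptions:
# history = model.fit_generator(train_generator,
#                               steps_per_epoch=train_generator.samples // batch_size,
#                               epochs=25,
#                               callbacks=callbacks,
#                               validation_data=validation_generator,
#                               validation_steps=validation_generator.samples // batch_size)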
Example #9
model.add(Dropout(0.2))
model.add(Dense(200, activation='linear'))
model.add(Dropout(0.2))
model.add(Dense(200, activation='linear'))
model.add(Dropout(0.2))
model.add(Dense(200, activation='linear'))
model.add(Dropout(0.2))
model.add(Dense(200, activation='linear'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='linear'))

model.summary()

# EarlyStopping & ModelCheckpoint (not used) & TensorBoard (not used)
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
es = EarlyStopping(monitor='val_loss', patience=50, mode='auto')

modelpath = './model/{epoch:02d}-{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(filepath=modelpath,
                     monitor='val_loss',
                     save_best_only=True,
                     mode='auto')

tb = TensorBoard(log_dir='graph',
                 histogram_freq=0,
                 write_graph=True,
                 write_images=True)

### 3. Training
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
model.fit(x_train,
Example #10
np.random.seed(113) #set seed before any keras import
model = Sequential()
model.add(Dense(200, input_shape=(vocab_size,), kernel_initializer='he_uniform',  kernel_constraint=maxnorm(5)))
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Dense(1))
model.add(Activation('sigmoid'))
optimizer =Adadelta(lr=0.22)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
print(model.summary())

model.fit(X_train_nhot, y_train,validation_split=0.1 , epochs=15, verbose=1, batch_size=128)
loss, accuracy = model.evaluate(X_test_nhot,y_test)
# Note: this EarlyStopping instance is created after training and never passed to
# model.fit, so it has no effect; to use it, define it before fit and pass
# callbacks=[early_stopping].
early_stopping = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=2,
                               verbose=0, mode='auto')
print("Accuracy: ", accuracy *100)




feed_forward = model.predict_classes(X_test_nhot, verbose=1)



t = Tokenizer()
t.fit_on_texts(X_train)
vocab_size = len(t.word_index) + 1
encoded_docs = t.texts_to_sequences(X_train)
encoded_doc = t.texts_to_sequences(X_test)
Example #11
y = np.zeros((yards.shape[0], 199))
for idx, target in enumerate(list(yards)):
    y[idx][99 + target] = 1

# Use train/test index to split target variable
train_inds, val_inds = X_train.index, X_val.index
y_train, y_val = y[train_inds], y[val_inds]

X_train = [np.absolute(X_train[i]) for i in cat] + [X_train[num]]
X_val = [np.absolute(X_val[i]) for i in cat] + [X_val[num]]
model = model_NN()
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=[])

es = EarlyStopping(monitor='val_CRPS',
                   mode='min',
                   restore_best_weights=True,
                   verbose=2,
                   patience=5)
es.set_model(model)

# cb = keras.callbacks.ReduceLROnPlateau(
#         monitor='val_CRPS',
#         factor=0.01,
#         patience=5
#     )

metric = Metric(model, [es], [(X_train, y_train), (X_val, y_val)])

# for i in range(1):
#     model.fit(X_train, y_train, verbose=False)
# for i in range(1):
Example #12
                                                 shuffle=False,
                                                 class_mode='categorical')

test_set = test_datagen.flow_from_directory('../dataset/test',
                                            target_size=(64, 64),
                                            color_mode='rgb',
                                            batch_size=32,
                                            shuffle=False,
                                            class_mode='categorical')

# initialize and optimize the model
print("[INFO] Initializing and optimizing the CNN...")
start = time.time()

early_stopping_monitor = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       verbose=1,
                                       patience=15)

model = Convolucao.build(64, 64, 3, CLASS)
model.compile(optimizer=SGD(0.01),
              loss="categorical_crossentropy",
              metrics=["accuracy"])

# train the CNN
print("[INFO] Training the CNN...")
classifier = model.fit_generator(
    training_set,
    steps_per_epoch=(training_set.n // training_set.batch_size),
    epochs=EPOCHS,
    validation_data=test_set,
    validation_steps=(test_set.n // test_set.batch_size))
def predict(epochs=20,batch_size=256,**kwargs):
    
    seed(1)
    mymodel = build_model_att(**kwargs)
    
    class TEST_MSE(Callback):
        def __init__(self):
            self.x_test = x_test
            self.y_test = y_test
    
        def on_train_begin(self, logs={}):
            self.mse = []

        def on_epoch_end(self, batch, logs={}):
            def _mse(y_pred,y_test):
                return np.mean((y_pred-y_test)**2)
            epoch_end_predict = mymodel.predict(self.x_test)
            self.mse.append(_mse(epoch_end_predict[:,0],self.y_test))
    
    # callback functions
    mse_hist = TEST_MSE()
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                              patience=5, min_lr=1e-5)
    # restore_best_weights=True -- restore the weights from the epoch with the best monitored metric
    early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=2, 
                               restore_best_weights=True)
    
    history = mymodel.fit(x_train, y_train,epochs=epochs, batch_size=batch_size,
                          validation_data = (x_val,y_val),verbose=True,
                          callbacks=[mse_hist,reduce_lr,early_stopping])
    y_pred = mymodel.predict(x_test)
    
    # compute the model's MSE on the test set
    # (_mse above is local to on_epoch_end, so recompute it inline here)
    print('mse:%.4f' % np.mean((y_pred[:, 0] - y_test) ** 2))
    
    # plot how the loss evolves on the training and validation sets
    plt.figure()
    plt.plot(history.history['loss'],label='train_loss')
    plt.plot(history.history['val_loss'],label='val_loss')
    plt.plot(mse_hist.mse,label = 'test_loss')
    
    # mark the minimum points of the val loss and test loss curves
    def _highlight(x,y,color,loc,upx=0.8,upy=1.4,downx=0.8,downy=0.6):
        # plot the point
        plt.scatter(x,y,s=20,marker='x')
        # draw dashed horizontal and vertical guide lines to the point
        plt.vlines(x, 0, y, colors=color, linestyles="dashed", linewidth=1)
        plt.hlines(y, 0, x, colors=color, linestyles="dashed", linewidth=1)
        # annotate the point with its coordinates
        if loc == 'up':
            plt.annotate('(%d,%.4f)'%(x,y), xy=(x,y), xytext=(x*upx,y*upy), 
                         arrowprops=dict(facecolor='black', shrink=0.05, width=0.2, headwidth=0.5, headlength=0.5))
        if loc == 'down':
            plt.annotate('(%d,%.4f)'%(x,y), xy=(x,y), xytext=(x*downx,y*downy), 
                         arrowprops=dict(facecolor='black', shrink=0.05, width=0.2, headwidth=0.5, headlength=0.5))
        
    val_min_x = np.argmin(history.history['val_loss'])
    val_min_y = history.history['val_loss'][val_min_x]
    _highlight(val_min_x,val_min_y,color='black',loc='down')
    _highlight(val_min_x,mse_hist.mse[val_min_x],color='black',loc='up')
    
    test_min_x = np.argmin(mse_hist.mse)
    test_min_y = mse_hist.mse[test_min_x]
    _highlight(test_min_x,test_min_y,color='black',loc='up',upy=1.2)
    
    plt.grid()
    plt.legend()
    name = ['%s_%s' % (i.replace('_','-'),j) for i,j in kwargs.items()]
    name = '_'.join(name[:(len(name)//3)]) + '\n' + '_'.join(name[(len(name)//3):])
    name = name.replace('.','')
    plt.title(name)
    plt.savefig('result/fig/'+name,dpi=800)
    
    return y_pred
def main():
    #hyperparameters:
    num_types = 4
    n_epochs = 100
    start_frame = 10
    jump_frame = 2
    class_samples = {0: 6000, 1: 6000, 2: 5000, 3: 2000}
    es_patience = 20
    dropout = 0.5
    n_splits = 5

    X = np.load('/sonidata/flatworms/dchaike1_scratch/dchaike1/new_shape_stack_X.npy')
    y = np.load('/sonidata/flatworms/dchaike1_scratch/dchaike1/new_shape_stack_y.npy')

    print("hello")

    #filtering some of the classes from the dataset
    X, y = filter_classes(X, y)

    X = np.expand_dims(X, -1)
    print('Original dataset shape %s' % Counter(y))
    print(X.shape)
    print(y.shape)

    X, y = shuffle(X, y)
    matrices = np.zeros((n_splits, num_types, num_types))
    total_test_pred = None
    total_test_ans = None
    #implementing cross-validation:
    skf = StratifiedKFold(n_splits = n_splits)
    i_fold = 0
    for rest_ind, test_ind in skf.split(X, y):
        X_test, y_test = X[test_ind], y[test_ind]
        X_rest, y_rest = X[rest_ind], y[rest_ind]
        #X_train, y_train = X_rest, y_rest

        val_skf = StratifiedKFold(n_splits = n_splits - 1)
        for train_ind, val_ind in val_skf.split(X_rest, y_rest):
            X_val, y_val = X_rest[val_ind], y_rest[val_ind]
            X_train, y_train = X_rest[train_ind], y_rest[train_ind]
            break
        print('split trainset shape %s' % Counter(y_train))
        print(X_train.shape)
        print(y_train.shape)
        #converting the labels to one-hot-vector
        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)
        y_val = to_categorical(y_val)

        X_train, y_train = extract_frames(X_train, y_train, start_frame, jump_frame)

        print('extracted trainset shape %s' % Counter(y_train.argmax(axis=-1)))
        print(X_train.shape)
        print(y_train.shape)
        X_val, y_val = extract_frames(X_val, y_val, num_types, jump_frame)

        #starting from start_frame in the test set
        X_new_test = np.zeros((X_test.shape[0], X_test.shape[1]-start_frame, X_test.shape[2], X_test.shape[3], X_test.shape[4]))
        y_new_test = np.ones((X_test.shape[0], num_types))
        for i, sequence in enumerate(X_test):
            X_new_test[i] = sequence[start_frame:]
            y_new_test[i] = y_test[i]
        X_test, y_test = X_new_test, y_new_test

        #solving class imbalance in the training set
        print('balanced trainset shape %s' % Counter(y_train.argmax(axis=-1)))
        print(X_train.shape)

        base_model = keras.applications.xception.Xception(weights="imagenet", include_top=False)
        avg = keras.layers.GlobalAveragePooling2D()(base_model.output)
        output = keras.layers.Dense(num_types, activation="softmax")(avg)
        model = keras.Model(inputs=base_model.input, outputs=output)
        model.compile(optimizer= "Nadam", loss = focal_loss(),
                           metrics=['accuracy', metrics.recall, metrics.precision])

        X_train = np.concatenate([X_train, X_train, X_train], axis=-1)
        X_val = np.concatenate([X_val, X_val, X_val], axis=-1)
        history = model.fit(X_train, y_train,
                            validation_data=[X_val, y_val],
                            epochs=n_epochs,
                            callbacks=[EarlyStopping(patience=es_patience)])

        X_train = None
        X_val = None
        X_test = np.concatenate([X_test, X_test, X_test], axis=-1)

        print("fold " + str(i_fold))

        y_test = [np.argmax(truth) for truth in y_test]
        test(model, X_test, y_test)
        test_pred = probability_predict(model, X_test)
        test_ans = y_test

        if(i_fold == 0):
            total_test_pred = test_pred
            total_test_ans = test_ans
        else:
            total_test_pred = np.concatenate([total_test_pred, test_pred], axis = 0)
            total_test_ans = np.concatenate([total_test_ans, test_ans], axis = 0)

        test_pred = None
        test_ans = None

        # summarize history for accuracy
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()

        # summarize history for loss
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()

        save_command = input("Save this model? (yes/no)")
        if save_command == 'yes':
            model_name = input("Name your model: ")
            model.save(model_name + str(i_fold) + ".h5")
            print("The model is saved!")

        i_fold += 1

    print("Overall Performance using Probabilities Method")
    analyze(total_test_pred, total_test_ans)
    if len(sys.argv) != 2:
        print('Usage: python train_flip_augmented.py <augmented_array_index_no>')
        sys.exit(1)

    index = int(sys.argv[1])

    image_file = 'augmented_images_{}.npy'.format(index)
    mask_file = 'augmented_masks_{}.npy'.format(index)

    print('Index: {}, Image File: {}, Mask File: {}'.format(index, image_file, mask_file))
    
    image_array = np.load(os.path.join('train/generated/flipped_images', image_file))
    mask_array = np.load(os.path.join('train/generated/flipped_masks', mask_file))

    image_shape = image_array.shape

    indices = np.arange(0, image_shape[0])
    np.random.shuffle(indices)

    image_array = image_array[indices]
    mask_array = mask_array[indices]

    unet_model = load_model('model-tgs-salt-dropout-2.h5', custom_objects={'mean_iou': mean_iou})

    earlystopper = EarlyStopping(patience=5, verbose=1)
    checkpointer = ModelCheckpoint('model-tgs-flip-augmented-2.h5', verbose=1, save_best_only=True)
    results = unet_model.fit(image_array, mask_array, validation_split=0.3, batch_size=64, epochs=70,
                        callbacks=[earlystopper, checkpointer])

Example #16
    def __init__(self):
        # Variables to hold the description of the experiment
        self.config_description = "This is the template config file."

        # System dependent variable
        self._workers = 5
        self._multiprocessing = True

        # Variables for comet.ml
        self._project_name = "jpeg_deep"
        self._workspace = "ssd"

        # Network variables
        self._weights = "/dlocal/home/2017018/bdegue01/weights/jpeg_deep/classification_dct/resnet_deconv/classification_dct_jpeg-deep_GYftBmXMdjdxoMksyI3e9VqB5IriBC9T/checkpoints/epoch-87_loss-0.7459_val_loss-1.5599.h5"
        self._network = SSD300_resnet(
            backbone="deconv_rfa", dct=True, image_shape=(38, 38))

        # Training variables
        self._epochs = 240
        self._batch_size = 32
        self._steps_per_epoch = 1000

        self.optimizer_parameters = {
            "lr": 0.001, "momentum": 0.9}
        self._optimizer = SGD(**self.optimizer_parameters)
        self._loss = SSDLoss(neg_pos_ratio=3, alpha=1.0).compute_loss
        self._metrics = None

        dataset_path = environ["DATASET_PATH"]
        images_2007_path = join(dataset_path, "VOC2007/JPEGImages")
        self.train_sets = [(images_2007_path, join(
            dataset_path, "VOC2007/ImageSets/Main/train.txt"))]
        self.validation_sets = [(images_2007_path, join(
            dataset_path, "VOC2007/ImageSets/Main/val.txt"))]
        self.test_sets = [(images_2007_path, join(
            dataset_path, "VOC2007/ImageSets/Main/test.txt"))]

        # Keras stuff
        self.model_checkpoint = None
        self.reduce_lr_on_plateau = ReduceLROnPlateau(patience=5, verbose=1)
        self.terminate_on_nan = TerminateOnNaN()
        self.early_stopping = EarlyStopping(monitor='val_loss',
                                            min_delta=0,
                                            patience=15)

        self._callbacks = [self.reduce_lr_on_plateau, self.early_stopping,
                           self.terminate_on_nan]

        self.input_encoder = SSDInputEncoder()

        self.train_transformations = [SSDDataAugmentation()]
        self.validation_transformations = [
            ConvertTo3Channels(), Resize(height=300, width=300)]
        self.test_transformations = [ConvertTo3Channels(), Resize(
            height=300, width=300)]

        self._train_generator = None
        self._validation_generator = None
        self._test_generator = None

        self._horovod = None
        self._displayer = DisplayerObjects()
Example #17
def get_callbacks(path):
    early_stop = EarlyStopping('val_loss', patience=5, mode="min")
    model_ckpt = ModelCheckpoint(path, save_best_only=True)
    return [early_stop, model_ckpt]
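A hedged usage sketch for get_callbacks above; the model and training arrays are assumptions:
# callbacks = get_callbacks('best_model.h5')
# model.fit(x_train, y_train, validation_split=0.2, epochs=100, callbacks=callbacks)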
test_generator = test_datagen.flow_from_directory( test_data_dir, 
                   target_size =(img_width, img_height), shuffle=False,
          batch_size = 1, class_mode ='categorical') 


# confirm the iterator works
#batchX, batchy = train_generator.next()
#print('Batch shape=%s, min=%.3f, max=%.3f' % (batchX.shape, batchX.min(), batchX.max()))
#batchX, batchy = validation_generator.next()
#print('Batch shape=%s, min=%.3f, max=%.3f' % (batchX.shape, batchX.min(), batchX.max()))
#batchX, batchy = test_generator.next()
#print('Batch shape=%s, min=%.3f, max=%.3f' % (batchX.shape, batchX.min(), batchX.max()))

# simple early stopping
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=30)
mc = ModelCheckpoint('best_model.h5', monitor='val_acc', mode='max', verbose=1, save_best_only=True)
  
model.fit_generator(train_generator, 
    steps_per_epoch = train_generator.samples // batch_size, callbacks=[es, mc], 
    epochs = epochs, validation_data = validation_generator, 
    validation_steps = validation_generator.samples // batch_size)
"""
    epochs = epochs, validation_data = validation_generator, 
    validation_steps = nb_validation_samples // batch_size) 
"""

# load the saved model
saved_model = load_model('best_model.h5')

# make a prediction
Example #19
    def _kappa_disambiguation(self, X, output):
        '''
        :param X:
        :param output:
        '''
        self.metric_plot = None
        self.patience = 20  # enough for cbEarly, from observation; @todo move to init

        if self.metrics[0] == "accuracy":
            self.metric_plot = "acc"
            raise "min_delta must be redefined according to val_acc"
            if self.use_smooth_cb:
                raise NotImplementedError('not available for acc self.use_smooth_cb')
            if self.cbEarly == "metric":
                self.cbEarly = EarlyStopping(
                    monitor='val_acc' if self.validation else "acc",
                    min_delta=0.0001,
                    patience=self.patience,
                    verbose=0,
                    mode='auto')
            self.kappa_logger = None

        elif self.metrics[0] == 'cohen_kappa':
            self.metrics = None  # 'cohen_kappa_metric' cannot be supported @see explication in Cohen_kappa_logger
            self.metric_plot = 'cohen_kappa'
            if self.cbEarly == "metric":
                if self.validation:
                    monitor = "val_cohen_kappa_smoothed" if self.smooth_cb else "val_cohen_kappa"
                else:
                    if not self.smooth_cb:
                        monitor = "cohen_kappa"
                    else:
                        raise "No cohen_kappa_smoothed"
                print("monitor", monitor)
                self.cbEarly = EarlyStopping(
                    monitor=monitor if self.validation else "cohen_kappa",
                    min_delta=0.00000001,
                    patience=self.patience,  # a large patience is necessary!
                    verbose=0,
                    mode='max',
                    restore_best_weights=True)

            if type(self.validation) is float:
                X, X_val, output, y_val = train_test_split(
                    X, output, test_size=self.validation)
            elif type(self.validation) is tuple:
                assert self.validation[0].shape[1] == X.shape[
                    1], "X_validation must be transformed with prep first"
                X_val = self.validation[0]
                y_val = self.__category_to_output(self.validation[1])
            elif self.validation is not None:
                raise TypeError("unknown validation type")

            #             self.validation = None # can slightly reduce computation but need val_loss for callback LRReduceOnPlateau

            self.kappa_logger = Cohen_kappa_logger(
                output_to_category=self.__output_to_category,
                X_train=X,
                y_train=output,
                X_val=X_val,
                y_val=y_val,
                kappa_weights=self.kappa_weights)

        else:
            print(self.metrics[0])
            raise "not implemented"
        return X, output
Example #20
    plot_model(model, 'semiArcFork.png', show_shapes=True, rankdir='TB')
    model.summary()
    cpr = os.path.join(config.expRoot, 'checkPoint')

    if not os.path.exists(cpr):
        os.makedirs(cpr)
    mcp = ModelCheckpoint(
        os.path.join(cpr,
                     r'AugClassifyFork_{epoch:03d}-{val_loss:.6f}-{val_dense_5_loss:.6f}-{val_conv3d_2_loss:.6f}-{dense_5_recall:.6f}-{dense_5_precision:.6f}.hdf5'),
        'val_loss', period=2)
    logP = os.path.join(config.expRoot, 'log')
    if not os.path.exists(logP):
        os.makedirs(logP)
    logger = CSVLogger(os.path.join(logP, 'log.txt'))
    lrReduce = ReduceLROnPlateau(factor=config.lrReduceRate, patience=config.lrReducePatience, verbose=1)
    estp = EarlyStopping(patience=config.estpPatient, verbose=1, min_delta=config.estpDelta, )
    model.fit_generator(semiDatagene(mode='train', batchSize=batchSize), steps_per_epoch=int(np.ceil(3020 / batchSize)),
                        epochs=config.epochs,
                        callbacks=[logger, mcp, lrReduce, estp, ],
                        validation_data=semiDatagene(mode='test', batchSize=batchSize),
                        validation_steps=int(np.ceil(4809 / batchSize)))

    # model.fit_generator(semiDatagene(mode='train', batchSize=batchSize), steps_per_epoch=10,
    #                     epochs=config.epochs,
    #                     callbacks=[logger, mcp, lrReduce, estp, ],
    #                     validation_data=semiDatagene(mode='test', batchSize=batchSize),
    #                     validation_steps=10)

    visualLoss(os.path.join(logP, 'log.txt'))
Example #21
File: tcn.py  Project: zhupeiru/dts
def main(_run):
    ################################
    # Load Experiment's parameters #
    ################################
    params = vars(args)
    logger.info(params)

    ################################
    #         Load Dataset         #
    ################################
    dataset_name = params['dataset']
    if dataset_name == 'gefcom':
        dataset = gefcom2014
    else:
        dataset = uci_single_households

    data = dataset.load_data(fill_nan='median',
                             preprocessing=True,
                             split_type='simple',
                             is_train=params['train'],
                             detrend=params['detrend'],
                             exogenous_vars=params['exogenous'],
                             use_prebuilt=True)
    scaler, train, test, trend = data['scaler'], data['train'], data['test'], data['trend']
    if not params['detrend']:
        trend = None

    X_train, y_train = get_rnn_inputs(train,
                                      window_size=params['input_sequence_length'],
                                      horizon=params['output_sequence_length'],
                                      shuffle=True,
                                      multivariate_output=params['exogenous'])

    ################################
    #     Build & Train Model      #
    ################################

    tcn = TCNModel(layers=params['layers'],
                   filters=params['out_channels'],
                   kernel_size=params['kernel_size'],
                   kernel_initializer='glorot_normal',
                   kernel_regularizer=l2(params['l2_reg']),
                   bias_regularizer=l2(params['l2_reg']),
                   dilation_rate=params['dilation'],
                   use_bias=False,
                   return_sequence=True,
                   tcn_type=params['tcn_type'])

    if params['exogenous']:
        exog_var_train = y_train[:, :, 1:]  # [n_samples, horizon, n_features]
        y_train = y_train[:, :, 0]          # [n_samples, horizon]
        conditions_shape = (exog_var_train.shape[1], exog_var_train.shape[-1])

        X_test, y_test = get_rnn_inputs(test,
                                        window_size=params['input_sequence_length'],
                                        horizon=params['output_sequence_length'],
                                        shuffle=False,
                                        multivariate_output=True)
        exog_var_test = y_test[:, :, 1:]  # [n_samples, horizon, n_features]
        y_test = y_test[:, :, 0]          # [n_samples, horizon]
    else:
        X_test, y_test = get_rnn_inputs(test,
                                        window_size=params['input_sequence_length'],
                                        horizon=params['output_sequence_length'],
                                        shuffle=False)
        exog_var_train = None
        exog_var_test = None
        conditions_shape = None

    # IMPORTANT: Remember to pass the trend values through the same ops as the inputs values
    if params['detrend']:
        X_trend_test, y_trend_test = get_rnn_inputs(trend[1],
                                                    window_size=params['input_sequence_length'],
                                                    horizon=params['output_sequence_length'],
                                                    shuffle=False)
        trend = y_trend_test

    model = tcn.build_model(input_shape=(X_train.shape[1], X_train.shape[-1]),
                            horizon=params['output_sequence_length'],
                            conditions_shape=conditions_shape,
                            use_final_dense=True)

    if params['load'] is not None:
        logger.info("Loading model's weights from disk using {}".format(params['load']))
        model.load_weights(params['load'])

    optimizer = Adam(params['learning_rate'])
    model.compile(optimizer=optimizer, loss=['mse'], metrics=metrics)
    callbacks = [EarlyStopping(patience=50, monitor='val_loss')]

    if params['exogenous'] and params['tcn_type'] == 'conditional_tcn':
        history = model.fit([X_train, exog_var_train], y_train,
                            validation_split=0.1,
                            batch_size=params['batch_size'],
                            epochs=params['epochs'],
                            callbacks=callbacks,
                            verbose=2)
    else:
        history = model.fit(X_train, y_train,
                            validation_split=0.1,
                            batch_size=params['batch_size'],
                            epochs=params['epochs'],
                            callbacks=callbacks,
                            verbose=2)

    ################################
    #          Save weights        #
    ################################
    model_filepath = os.path.join(
        config['weights'],'{}_{}_{}'
            .format(params['tcn_type'], params['dataset'], time.time()))
    model.save_weights(model_filepath)
    logger.info("Model's weights saved at {}".format(model_filepath))

    #################################
    # Evaluate on Validation & Test #
    #################################
    fn_inverse_val = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=None)
    fn_inverse_test = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=trend)
    fn_plot = lambda x: plot(x, dataset.SAMPLES_PER_DAY, save_at=None)

    if params['exogenous'] and params['tcn_type'] == 'conditional_tcn':
        val_scores = tcn.evaluate(history.validation_data[:-1], fn_inverse=fn_inverse_val)
        test_scores = tcn.evaluate([[X_test, exog_var_test], y_test], fn_inverse=fn_inverse_test, fn_plot=fn_plot)
    else:
        val_scores = tcn.evaluate(history.validation_data[:-1], fn_inverse=fn_inverse_val)
        test_scores = tcn.evaluate([X_test, y_test], fn_inverse=fn_inverse_test, fn_plot=fn_plot)

    metrics_names = [m.__name__ if not isinstance(m, str) else m for m in model.metrics]
    return dict(zip(metrics_names, val_scores)), \
           dict(zip(metrics_names, test_scores)), \
           model_filepath
for layer in base_model.layers:
    layer.trainable = False

#this is the model we will train
model = Model(inputs=base_model.inputs, outputs=pred)

checkpoint = ModelCheckpoint("resnet50_1.h5",
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             save_weights_only=False,
                             mode='auto',
                             period=1)
early = EarlyStopping(monitor='val_acc',
                      min_delta=0,
                      patience=10,
                      verbose=1,
                      mode='auto')

#freeze all lower level layers
for layer in base_model.layers:
    layer.trainable = False

model.compile(optimizer='sgd',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

img_rows, img_cols = 256, 256  # Resolution of inputs
channel = 3
batch_size = 16
nb_epoch = 12
def main():
    args = firstPassCommandLine()
    trainF = args.pruneInpTrain
    validF = args.pruneInpValid
    weightF = args.pruneWeight
    validWeightF = args.pruneValidWeight
    testF = args.pruneInpTest
    modelF = args.pruneF
    nepoch = args.nepoch
    batchSize = args.batchSize

    # Load sample weights
    weights = load_weight(weightF)
    valid_weights = load_weight(validWeightF)

    # Load data in the svm rank file format
    trainfeats, trainlabels = load_svmlight(trainF)
    validfeats, validlabels = load_svmlight(validF)
    testfeats_exists = False

    # weights = weights[:len(trainlabels)]
    # valid_weights = valid_weights[:len(validlabels)]

    if testF != None:
        testfeats, testlabels = load_svmlight(testF)
        testfeats_exists = True
    else:
        testfeats = None
        testlabels = None

    # convert to dense array
    trainfeats = np.array(trainfeats.todense())
    validfeats = np.array(validfeats.todense())
    if testfeats_exists:
        testfeats = np.array(testfeats.todense())

    # concatenate weights to data points for oversampling
    trainfeats_weights = np.hstack(
        (trainfeats, np.reshape(weights, (len(weights), 1))))
    validfeats_weights = np.hstack(
        (validfeats, np.reshape(valid_weights, (len(valid_weights), 1))))

    # Apply the random over-sampling
    ros = RandomOverSampler()
    trainfeats_weights_ros, trainlabels_ros = ros.fit_sample(
        trainfeats_weights, trainlabels)
    validfeats_weights_ros, validlabels_ros = ros.fit_sample(
        validfeats_weights, validlabels)
    trainfeats_ros = trainfeats_weights_ros[:, :-1]
    trainweights_ros = trainfeats_weights_ros[:, -1]
    validfeats_ros = validfeats_weights_ros[:, :-1]
    validweights_ros = validfeats_weights_ros[:, -1]

    if testfeats_exists:
        testfeats_ros, testlabels_ros = ros.fit_sample(testfeats, testlabels)

    # Reset labels (not needed anymore since for neural nets, we use labels of 0 and 1 in trj files)
    #trainlabels[np.where(trainlabels==-1)[0]] = 0
    #validlabels[np.where(validlabels==-1)[0]] = 0
    #if testfeats != None:
    #  testlabels[np.where(testlabels==-1)[0]] = 0

    # Create model
    INPUT_DIM = trainfeats.shape[1]
    pruneModel = getPruneModel(INPUT_DIM)
    pruneModel.summary()

    ## Train model.
    early_stopping = EarlyStopping(monitor='val_loss', patience=20)
    history = pruneModel.fit(trainfeats_ros, trainlabels_ros,
                             batch_size=batchSize, \
                             sample_weight=trainweights_ros,
                             epochs=nepoch, verbose=1,
                             validation_data=(validfeats_ros, validlabels_ros, validweights_ros),
                             callbacks=[early_stopping],
                             shuffle=True)
    # Evaluate on training
    train_preds = pruneModel.predict_classes(trainfeats)
    train_eval_ = pruneModel.evaluate(trainfeats, trainlabels, verbose=1)
    print("[Loss, accuracy] = " + str(train_eval_))
    print("Precision = " + str(precision_score(trainlabels, train_preds)))
    print("Recall = " + str(recall_score(trainlabels, train_preds)))

    train_preds = pruneModel.predict_classes(trainfeats_ros)
    train_eval_ = pruneModel.evaluate(trainfeats_ros, trainlabels_ros, verbose=1)
    print("[Loss, accuracy] (randomly oversampled) = " + str(train_eval_))
    print("Precision (randomly oversampled) = " +
          str(precision_score(trainlabels_ros, train_preds)))
    print("Recall (randomly oversampled) = " +
          str(recall_score(trainlabels_ros, train_preds)))

    # Evaluate on validation
    eval_ = pruneModel.evaluate(validfeats, validlabels, verbose=1)

    if testfeats_exists:
        eval_ = pruneModel.evaluate(testfeats, testlabels, verbose=1)
        print("[Test loss, test accuracy] = " + str(eval_))
        eval_ = pruneModel.evaluate(testfeats_ros, testlabels_ros, verbose=1)
        print("[Test loss, test accuracy] (randomly oversampled) = " +
              str(eval_))

    # Save model
    print('Creating: ' + modelF)
    export_model(pruneModel, modelF)
    pruneModel.save(modelF[:-3] + "_keras.h5")
    del pruneModel
    be.clear_session()
    return
Example #24
    _, sPath = os.path.splitdrive(sOutPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)
    model_name = sPath + '/' + sFilename + str(patchSize[0, 0]) + str(patchSize[0, 1]) + '_lr_' + str(
        learningRate) + '_bs_' + str(batchSize)
    weight_name = model_name + '_weights.h5'
    model_json = model_name + '_json'
    model_all = model_name + '_model.h5'
    model_mat = model_name + '.mat'

    if (os.path.isfile(model_mat)):  # no training if output file exists
        return

    cnn = createModel(patchSize, 1)
    opti = keras.optimizers.Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss', patience=20, verbose=1), ModelCheckpoint(filepath=model_name+'bestweights.hdf5',monitor='val_acc',verbose=0,save_best_only=True,save_weights_only=False)]
    #callbacks = [ModelCheckpoint(filepath=model_name+'bestweights.hdf5',monitor='val_acc',verbose=0,save_best_only=True,save_weights_only=False)]

    cnn.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    cnn.summary()
    result = cnn.fit(X_train,
                     y_train,
                     validation_data=[X_test, y_test],
                     epochs=iEpochs,
                     batch_size=batchSize,
                     callbacks=callbacks,
                     verbose=1)

    score_test, acc_test = cnn.evaluate(X_test, y_test, batch_size=batchSize)

    prob_test = cnn.predict(X_test, batchSize, 0)
            mod.add(GRU(j, return_sequences=True))
            mod.add(GRU(j))
        if i == 4:
            mod.add(GRU(j, return_sequences=True))
            mod.add(GRU(j, return_sequences=True))
            mod.add(GRU(j))
        if i == 5:
            mod.add(GRU(j, return_sequences=True))
            mod.add(GRU(j, return_sequences=True))
            mod.add(GRU(j, return_sequences=True))
            mod.add(GRU(j))

        mod.add(Dense(1))

        # compile
        mod.compile(optimizer='adam', loss='mean_squared_error')

        # fit
        mod_fitted = mod.fit(
            X,
            y,
            validation_split=0.4,
            shuffle=True,
            batch_size=256,
            epochs=max_epochs,
            callbacks=[EarlyStopping(patience=stopping_patience)])

        validation_history.append(mod_fitted.history['val_loss'])

        # reset graph
        K.clear_session()
    try:
        with open(csv_file, 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(csv_cols)
            for i in range(len(image_ids)):
                writer.writerow([image_ids[i]] + output[i])
    except IOError:
        print("I/O error")


from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
checkpointer = ModelCheckpoint(filepath='./checkpoints/vanilla_model.h5',
                               verbose=1,
                               save_best_only=True)
tb = TensorBoard(log_dir='./logs')
early_stopper = EarlyStopping(patience=3)

if test_flag and os.path.exists('./checkpoints/vanilla_model.h5'):
    model = architecture(models.load_model('./checkpoints/vanilla_model.h5'))
    X_test = []
    image_ids = []
    temp_x_test_paths = []
    """Sorting the image paths"""
    X_test_paths = glob.glob(os.path.join(test_dir, '*.jpg'))
    for path in X_test_paths:
        temp = re.split('/|-', path)
        temp[-1] = (
            temp[-1].split('.')[0].zfill(4)) + '.' + temp[-1].split('.')[1]
        path = os.path.join(temp[0], temp[1], temp[2], temp[3] + '-' + temp[4])
        temp_x_test_paths.append(path)
    temp_x_test_paths = sorted(temp_x_test_paths)
Example #27
nData = min(df['sig'].shape[0], df['bkg'].shape[0])

df['sig'] = df['sig'].sample(frac=1)  #[:nData]
df['bkg'] = df['bkg'].sample(frac=1)  #[:nData]

# add isSignal variable
df['sig']['isSignal'] = np.ones(len(df['sig']))
df['bkg']['isSignal'] = np.zeros(len(df['bkg']))

df_all = pd.concat([df['sig'],
                    df['bkg']]).sample(frac=1).reset_index(drop=True)
dataset = df_all.values
X_data = dataset[:, 0:input_dim]
Y_data = dataset[:, input_dim]

early_stopping = EarlyStopping(monitor='val_loss', patience=100)

model_checkpoint = ModelCheckpoint('dense_model.h5',
                                   monitor='val_loss',
                                   verbose=0,
                                   save_best_only=True,
                                   save_weights_only=False,
                                   mode='auto',
                                   period=1)

begt = time.time()
print("Begin Bayesian optimization")
best_auc = 0.0
best_config = {}
res_gp = gp_minimize(objective,
                     space,
Example #28
#Variable holding the number of columns
numero_colunas = rede.shape[1]

#Adding the layers
model.add(Dense(10, activation='relu', input_shape=(numero_colunas, )))
model.add(Dense(20, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.add(Dense(3))

#Compile the model and define the optimizer and loss function
model.compile(optimizer='adam', loss='mean_squared_error')

#Stop the iterations when the network stops improving
from keras.callbacks import EarlyStopping

early_stopping_monitor = EarlyStopping(patience=3)

#Train the model
teste = model.fit(rede_treino,
                  saida_treino,
                  validation_data=(rede_teste, saida_teste),
                  epochs=150,
                  callbacks=[early_stopping_monitor])

import matplotlib.pyplot as plt

fig, graf_perda = plt.subplots()

graf_perda.plot(teste.history['loss'],
                'r',
                marker='.',
Example #29
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

model = Sequential()
model.add(Dense(
    25,
    input_dim=784,
))
model.add(Dense(10))
model.add(Dense(7))
model.add(Dense(5))
model.add(Dense(10))
model.add(Dense(10))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='loss', patience=20)

model.fit(x_train,
          y_train,
          validation_split=0.2,
          epochs=100,
          batch_size=1,
          verbose=1,
          callbacks=[early_stopping])

acc = model.evaluate(x_test, y_test)

print(acc)
Example #30
# Just change K.square(z_mean) to K.square(z_mean - yh), i.e. pull the latent variables toward their class means
kl_loss = -0.5 * K.sum(
    1 + z_plus_log_var - K.square(z_plus_mean - yh) - K.exp(z_plus_log_var),
    axis=-1)

# cosine-similarity loss, to keep the class vectors spread apart
vae_loss = K.mean(xent_loss + kl_loss)

# add_loss is a newer method that allows adding various losses more flexibly
vae.add_loss(vae_loss)
vae.compile(optimizer=keras.optimizers.RMSprop(1e-2))
vae.summary()

history = LossHistory()
early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0)

learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss',
                                            patience=10,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.0001)

vae.fit(
    [x_train, y_train],
    shuffle=True,
    epochs=epochs,
    batch_size=batch_size,
    validation_data=([x_test, y_test], None),
    #validation_split=(0.2),
    callbacks=[history, learning_rate_reduction, early_stopping])
Example #31
model.add(Dropout(0.5))

#Output layer.
model.add(Dense(NUM_CLASSES, activation="softmax"))

#Compile model.
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])

print("\n\nTraining....\n\n")

#Declare callbacks.
filepath = "./models/model-{epoch:02d}-{val_acc:.2f}.h5"
callbacks = [
    EarlyStopping(monitor="val_loss", patience=0),
    ModelCheckpoint(filepath=filepath,
                    monitor="val_loss",
                    save_best_only=False,
                    mode="auto",
                    period=1)
]

#Fit model and store statistics.
stats = model.fit(np.array(X_train),
                  Y_train,
                  epochs=EPOCHS,
                  batch_size=BATCH_SIZE,
                  validation_data=(X_test, Y_test),
                  shuffle=True,
                  verbose=1,
Example #32
def train(model, annotation_path, input_shape, anchors, num_classes, log_dir):
    '''
    Train the network
    :param model: the network
    :param annotation_path: path to the annotation file
    :param input_shape: size of the input tensor
    :param anchors: array of shape (N, 2) holding the cluster-center widths and heights
    :param num_classes: number of classes
    :param log_dir: save path
    :return: None
    '''
    # Reduce the learning rate when the monitored metric stops improving
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    # This callback stops training when the monitored value stops improving
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)
    # 9:1 train/validation split
    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.shuffle(lines)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val
    print('Train on {} samples, validate on {} samples, with batch size {}.'.
          format(num_train, num_val, batch_size))

    history = model.fit_generator(
        generator=data_generator(lines[:num_train], batch_size, input_shape,
                                 anchors, num_classes),
        steps_per_epoch=max(1, num_train // batch_size),
        # callbacks=[logging, checkpoint, reduce_lr, early_stopping],
        validation_data=data_generator(lines[num_train:], batch_size,
                                       input_shape, anchors, num_classes),
        validation_steps=max(1, num_val // batch_size),
        epochs=iterations)
    # Plot training & validation accuracy
    # print(history.history.keys())
    # plt.plot(history.history['acc'])
    # plt.plot(history.history['val_acc'])
    # plt.title('Model accuracy')
    # plt.ylabel('Accuracy')
    # plt.xlabel('Epoch')
    # plt.legend(['Train', 'Test'], loc='upper left')
    # plt.show()

    # Plot training & validation loss
    # plt.plot(history.history['loss'])
    # plt.plot(history.history['val_loss'])
    # plt.title('Model loss')
    # plt.ylabel('Loss')
    # plt.xlabel('Epoch')
    # plt.legend(['Train', 'Test'], loc='upper left')
    # plt.show()

    # plot_model(model, to_file='model.png')

    model.save_weights(log_dir)

    print('model has been trained!\n')