save_arch(model, arch_path)  # persist the model architecture to disk

# --- Training setup ---
# NOTE(review): this section was collapsed onto a single physical line in the
# original, which made everything after the first '#' dead commented-out code;
# restored to one statement per line.
checkpoint_callback = ModelCheckpoint(filepath=weights_path,
                                      monitor='val_loss',
                                      save_best_only=True,
                                      mode='auto')
# NOTE(review): `learning_rates` is not defined in this section — presumably a
# per-epoch schedule (e.g. np.linspace) built earlier in the file; confirm.
change_lr = LearningRateScheduler(lambda epoch: float(learning_rates[epoch]))
flip_gen = FlippedImageDataGenerator()
sgd = SGD(lr=lr, momentum=momentum, nesterov=nesterov)
model.compile(loss=loss_method, optimizer=sgd)

# --- Run training ---
start_time = time.time()
print('start_time: %s' % (datetime.now()))
hist = model.fit_generator(flip_gen.flow(X_train, y_train),
                           samples_per_epoch=X_train.shape[0],
                           nb_epoch=nb_epoch,
                           validation_data=(X_val, y_val),
                           callbacks=[checkpoint_callback, change_lr])
# Divide first, then truncate: elapsed whole minutes.
print('end_time: %s, duracion(min): %d' % (datetime.now(),
                                           int((time.time() - start_time) / 60)))

# --- Plot and save results ---
# plot_hist(hist, model_name)
# plot_model_arch(model, model_name)
save_history(hist, model_name)
save_arch(model, arch_path)  # persist the model architecture to disk

# --- Training setup ---
# NOTE(review): this section was collapsed onto a single physical line in the
# original, which made everything after the first '#' dead commented-out code;
# restored to one statement per line.
sgd = SGD(lr=start, momentum=momentum, nesterov=nesterov)
model.compile(loss=loss_method, optimizer=sgd)
# plot(model, to_file="model_{}.png".format(cols[0]), show_shapes=True)
flipgen = FlippedImageDataGenerator()
flipgen.flip_indices = setting['flip_indices']
early_stop = EarlyStopping(patience=patience)
# Linearly decay the learning rate from `start` to `stop` over all epochs.
learning_rates = np.linspace(start, stop, nb_epoch)
change_lr = LearningRateScheduler(
    lambda epoch: float(learning_rates[epoch]))
# Checkpoint path encodes model name, setting id and epoch count.
weights_path = 'model/' + model_name + '-' + setting[
    'id'] + '-weights-' + str(nb_epoch) + '.hdf5'
checkpoint_callback = ModelCheckpoint(filepath=weights_path,
                                      monitor='val_loss',
                                      save_best_only=True,
                                      mode='auto')
print("Training model for columns {} for {} epochs".format(cols, nb_epoch))

# --- Run training ---
hist = model.fit_generator(
    flipgen.flow(X_train, y_train),
    samples_per_epoch=X_train.shape[0],
    nb_epoch=nb_epoch,
    validation_data=(X_val, y_val),
    callbacks=[checkpoint_callback, change_lr, early_stop])
save_history(hist, model_name + '-' + setting['id'])