def main():
    """Train the gender-classification CNN with on-the-fly augmentation.

    Relies on module-level configuration (``base_path``, ``data_path``,
    ``patience``, ``batch_size``, ``num_epochs``, ``validation_split``) and
    project helpers (``CNN``, ``load_data``, ``preprocess_input``,
    ``split_data``) -- assumed defined elsewhere in this file; confirm.
    """
    # Data generator: light geometric augmentation only.
    data_generator = ImageDataGenerator(
        featurewise_center=False,
        featurewise_std_normalization=False,
        rotation_range=0,
        width_shift_range=0.1,
        height_shift_range=0.1,
        zoom_range=.1,
        horizontal_flip=True)

    model = CNN()
    opt = optimizers.Adam(lr=0.0001)
    model.compile(opt, loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()

    # Callbacks: truncate any previous log first (CSVLogger with
    # append=False would overwrite anyway; kept for parity with prior runs).
    log_file_path = base_path + 'gender_classification_training.log'
    with open(log_file_path, 'w'):
        pass
    csv_logger = CSVLogger(log_file_path, append=False)
    early_stop = EarlyStopping('val_loss', patience=patience)
    reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
                                  patience=int(patience / 4), verbose=1)
    trained_models = base_path + 'CNN.{epoch:02d}-{val_loss:.3f}-{val_acc:.2f}.hdf5'
    model_cp = ModelCheckpoint(trained_models, 'val_loss',
                               verbose=1, save_best_only=True)
    callbacks = [model_cp, csv_logger, early_stop, reduce_lr]

    # Load, preprocess, and shuffle the dataset.
    faces, labels = load_data(data_path)
    print(len(faces))
    print(len(labels))
    faces = preprocess_input(faces)
    order = np.argsort(np.random.random(len(faces)))
    faces = faces[order]
    labels = labels[order]
    train_data, val_data = split_data(faces, labels, validation_split)
    train_faces, train_labels = train_data

    model.fit_generator(
        data_generator.flow(train_faces, train_labels, batch_size),
        # BUGFIX: floor-divide so steps_per_epoch is an int, not a float
        # (float values are rejected by newer Keras releases).
        steps_per_epoch=len(train_faces) // batch_size,
        epochs=num_epochs,
        verbose=1,
        callbacks=callbacks,
        validation_data=val_data)
def run():
    """Train the CNN classifier with real-time image augmentation.

    Uses module-level settings (``nb_classes``, ``batch_size``, ``nb_epoch``)
    and project helpers (``datasets``, ``CNN``, ``preprocess_input``) that
    are defined elsewhere in this file.
    """
    # Fetch the raw split and one-hot encode the integer labels.
    (X_train, y_train), (X_test, y_test) = datasets.load_data(img_rows=32, img_cols=32)
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    # Build and compile the network before normalizing the inputs.
    net = CNN(input_shape=X_train.shape[1:], nb_classes=nb_classes)
    net.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                metrics=['accuracy'])

    X_train = preprocess_input(X_train)
    X_test = preprocess_input(X_test)

    # Training callbacks: CSV log plus best-weights checkpointing.
    logger = CSVLogger('../log/cnn.log')
    ckpt = ModelCheckpoint(filepath="/tmp/weights.hdf5", monitor="val_acc",
                           verbose=1, save_best_only=True)

    # Augmentation policy: small shifts + horizontal flips, no normalization.
    aug_config = dict(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=0,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=False)
    augmenter = ImageDataGenerator(**aug_config)
    augmenter.fit(X_train)

    net.fit_generator(augmenter.flow(X_train, Y_train, batch_size=batch_size),
                      samples_per_epoch=X_train.shape[0],
                      nb_epoch=nb_epoch,
                      validation_data=(X_test, Y_test),
                      callbacks=[logger, ckpt])