# Hyper-parameters for gender-classification training on the IMDB crop set.
num_epochs = 1000
validation_split = .2          # fraction of samples held out for validation
do_random_crop = False
patience = 100                 # epochs without improvement before early stop
num_classes = 2                # male / female
dataset_name = 'imdb'
input_shape = (64, 64, 1)
# Fix: derive grayscale from the channel count. The original only assigned it
# inside `if input_shape[2] == 1:`, which leaves `grayscale` undefined (and a
# NameError below) whenever a 3-channel shape is configured.
grayscale = input_shape[2] == 1
images_path = '../datasets/imdb_crop/'
log_file_path = '../trained_models/gender_models/gender_training.log'
trained_models_path = '../trained_models/gender_models/gender_mini_XCEPTION'

# data loader
data_loader = DataManager(dataset_name)
ground_truth_data = data_loader.get_data()
train_keys, val_keys = split_imdb_data(ground_truth_data, validation_split)
print('Number of training samples:', len(train_keys))
print('Number of validation samples:', len(val_keys))
# NOTE(review): `batch_size` is not defined in this fragment — presumably set
# earlier in the file; confirm before running this chunk in isolation.
image_generator = ImageGenerator(ground_truth_data, batch_size,
                                 input_shape[:2],
                                 train_keys, val_keys, None,
                                 path_prefix=images_path,
                                 vertical_flip_probability=0,
                                 grayscale=grayscale,
                                 do_random_crop=do_random_crop)
# model parameters/compilation
# Callbacks: CSV epoch log, early stopping, LR reduction on val_loss plateau,
# and best-model checkpointing keyed on validation loss.
log_file_path = base_path + dataset_name + '_emotion_mobilenetv3_training.log'
csv_logger = CSVLogger(log_file_path, append=False)
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
                              patience=int(patience / 4), verbose=1)
trained_models_path = base_path + dataset_name + '_mobilenet_v3'
model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names, 'val_loss', verbose=1,
                                   save_best_only=True)
callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

# loading dataset
data_loader = DataManager(dataset_name, image_size=input_shape[:2])
train_faces, train_emotions, test_faces, test_emotions = data_loader.get_data()
train_faces = preprocess_input(train_faces)
test_faces = preprocess_input(test_faces)
num_samples, num_classes = train_emotions.shape
# Fix: steps_per_epoch must be an integer; `len(...) / batch_size` yields a
# float under Python 3. Use floor division.
model.fit_generator(data_generator.flow(train_faces, train_emotions,
                                        batch_size),
                    steps_per_epoch=len(train_faces) // batch_size,
                    epochs=num_epochs, verbose=1, callbacks=callbacks,
                    validation_data=(test_faces, test_emotions))
# Callbacks: CSV epoch log, early stopping, LR reduction on val_loss plateau,
# and best-model checkpointing keyed on validation loss.
log_file_path = base_path + dataset_name + '_emotion_training.log'
csv_logger = CSVLogger(log_file_path, append=False)
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
                              patience=int(patience / 4), verbose=1)
trained_models_path = base_path + dataset_name + '_simple_CNN'
model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names, 'val_loss', verbose=1,
                                   save_best_only=True)
callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

# loading dataset
data_loader = DataManager(dataset_name, image_size=input_shape[:2])
faces, emotions = data_loader.get_data()
faces = preprocess_input(faces)
num_samples, num_classes = emotions.shape
train_data, val_data = split_data(faces, emotions, validation_split)
train_faces, train_emotions = train_data
# Fix: steps_per_epoch must be an integer; `len(...) / batch_size` yields a
# float under Python 3. Use floor division.
model.fit_generator(data_generator.flow(train_faces, train_emotions,
                                        batch_size),
                    steps_per_epoch=len(train_faces) // batch_size,
                    epochs=num_epochs, verbose=1, callbacks=callbacks,
                    validation_data=val_data)
# Train a mini-XCEPTION emotion model over each configured dataset.
datasets = ['fer2013']
for dataset_name in datasets:
    print('Training dataset:', dataset_name)

    # callbacks
    log_file_path = base_path + dataset_name + '_emotion_training.log'
    csv_logger = CSVLogger(log_file_path, append=False)
    early_stop = EarlyStopping('val_loss', patience=patience)
    reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
                                  patience=int(patience / 4), verbose=1)
    trained_models_path = base_path + dataset_name + '_mini_XCEPTION'
    model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names, 'val_loss', verbose=1,
                                       save_best_only=True)
    callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

    # loading dataset
    data_loader = DataManager(dataset_name, image_size=input_shape[:2])
    faces, emotions = data_loader.get_data()
    faces = preprocess_input(faces)
    num_samples, num_classes = emotions.shape
    train_data, val_data = split_data(faces, emotions, validation_split)
    train_faces, train_emotions = train_data
    # Fix: steps_per_epoch must be an integer; `len(...) / batch_size` yields
    # a float under Python 3. Use floor division.
    model.fit_generator(data_generator.flow(train_faces, train_emotions,
                                            batch_size),
                        steps_per_epoch=len(train_faces) // batch_size,
                        epochs=num_epochs, verbose=1, callbacks=callbacks,
                        validation_data=val_data)
def train_emotion_classifier():
    """Train a mini-XCEPTION emotion classifier on fer2013.

    Loads the dataset, augments it with random affine transforms, and trains
    with early stopping, learning-rate reduction, and best-model
    checkpointing.
    """
    # hyper-parameters
    batch_size = 32              # mini-batch size
    epochs = 10000               # maximum number of training epochs
    input_shape = (64, 64, 1)    # input image shape (grayscale)
    validation_split = .2        # fraction held out for validation
    num_classes = 7              # emotion categories
    patience = 50                # epochs without improvement tolerated before
                                 # early stopping / LR reduction kick in
    base_path = '../datasets/trained_models/emotion_models/'

    # data augmentation generator
    data_generator = ImageDataGenerator(
        featurewise_center=False,             # no dataset-wide mean centering
        featurewise_std_normalization=False,  # no dataset-wide std scaling
        rotation_range=10,        # random rotation, in degrees
        width_shift_range=0.1,    # horizontal shift, fraction of image width
        height_shift_range=0.1,   # vertical shift, fraction of image height
        zoom_range=.1,            # random zoom in [1 - 0.1, 1 + 0.1]
        horizontal_flip=True,     # random horizontal flips
    )

    # model definition / compilation
    model = mini_XCEPTION(input_shape, num_classes)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    datasets = ['fer2013']
    for dataset_name in datasets:
        print('Training dataset:', dataset_name)

        # callbacks
        log_file_path = base_path + dataset_name + '_emotion_training.log'
        # save per-epoch training results as CSV
        csv_logger = CSVLogger(log_file_path, append=False)
        # stop training when val_loss has not improved for `patience` epochs
        early_stop = EarlyStopping('val_loss', patience=patience)
        # shrink the learning rate by 10x when val_loss plateaus
        reduce_lr = ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.1,
            patience=int(patience / 4),
            verbose=1,
        )
        trained_models_path = base_path + dataset_name + '_mini_XCEPTION'
        model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
        model_checkpoint = ModelCheckpoint(
            model_names,
            monitor='val_loss',
            verbose=1,
            save_best_only=True,   # keep only the best-performing weights
        )
        callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

        # load dataset
        data_loader = DataManager(dataset_name, image_size=input_shape[:2])
        faces, emotions = data_loader.get_data()
        faces = preprocess_input(faces)
        num_samples, num_classes = emotions.shape
        train_data, val_data = split_data(faces, emotions, validation_split)
        train_faces, train_emotions = train_data
        # Fix: steps_per_epoch must be an integer; floor-divide instead of
        # true division (which yields a float under Python 3).
        model.fit_generator(data_generator.flow(train_faces, train_emotions,
                                                batch_size),
                            steps_per_epoch=len(train_faces) // batch_size,
                            epochs=epochs, verbose=1, callbacks=callbacks,
                            validation_data=val_data)
# SSD300 training setup on VOC2007 (fragment — continues past this chunk).
image_prefix = root_prefix + 'JPEGImages/'
image_shape = (300, 300, 3)
dataset_name = 'VOC2007'
weights_path = '../trained_models/weights_SSD300.hdf5'
trained_models_path = '../trained_models/model_checkpoints/'
trained_models_filename = (trained_models_path +
                           'ssd300_weights.{epoch:03d}-{val_loss:.3f}.hdf5')
# Layer names to freeze during fine-tuning — presumably the VGG base;
# confirm against the SSD300 constructor.
frozen_layers = [
    'input_1', 'conv1_1', 'conv1_2', 'pool1', 'conv2_1', 'conv2_2', 'pool2',
    'conv3_1', 'conv3_2', 'conv3_3', 'pool3'
]
box_scale_factors = [.1, .1, .2, .2]
# loading and splitting data
data_manager = DataManager(dataset_name)
ground_truth_data = data_manager.get_data()
train_keys, validation_keys = split_data(ground_truth_data,
                                         training_ratio=.8)
# instantiating model
# NOTE(review): `num_classes` and `optimizer` are not defined in this
# fragment — presumably set earlier in the file; confirm.
model = SSD300(image_shape, num_classes, weights_path, frozen_layers)
multibox_loss = MultiboxLoss(num_classes, neg_pos_ratio=2.0).compute_loss
model.compile(optimizer, loss=multibox_loss, metrics=['acc'])
# setting parameters for data augmentation generator
prior_boxes = create_prior_boxes(model)
# NOTE(review): this call is truncated in the visible chunk — its remaining
# arguments continue beyond this fragment.
image_generator = ImageGenerator(ground_truth_data, prior_boxes, num_classes,
                                 box_scale_factors, batch_size,
                                 image_shape[0:2],
def train_gender_classifier():
    """Train a mini-XCEPTION gender classifier on the IMDB crop dataset."""
    # hyper-parameters
    batch_size = 32              # mini-batch size
    epochs = 10000               # maximum number of training epochs
    input_shape = (64, 64, 1)    # input image shape
    validation_split = .2        # fraction held out for validation
    num_classes = 2              # male / female
    patience = 100               # epochs without improvement tolerated before
                                 # early stopping / LR reduction kick in
    dataset_name = 'imdb'
    do_random_crop = False
    # Fix: derive grayscale from the channel count. The original only bound
    # it inside `if input_shape[2] == 1:`, leaving it undefined (NameError)
    # for any 3-channel configuration.
    grayscale = input_shape[2] == 1

    # paths
    images_path = '../datasets/imdb_crop/'
    log_file_path = '../datasets/trained_models/gender_models/gender_training.log'
    trained_models_path = '../datasets/trained_models/gender_models/gender_mini_XCEPTION'

    # data loading
    data_loader = DataManager(dataset_name)
    ground_truth_data = data_loader.get_data()
    train_keys, val_keys = split_imdb_data(ground_truth_data,
                                           validation_split)
    print('Number of training samples:', len(train_keys))
    print('Number of validation samples:', len(val_keys))
    image_generator = ImageGenerator(ground_truth_data, batch_size,
                                     input_shape[:2],
                                     train_keys, val_keys, None,
                                     path_prefix=images_path,
                                     vertical_flip_probability=0,
                                     grayscale=grayscale,
                                     do_random_crop=do_random_crop)

    # model parameters / compilation
    model = mini_XCEPTION(input_shape, num_classes)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    # model callbacks
    # stop training when val_loss has not improved for `patience` epochs
    early_stop = EarlyStopping(monitor='val_loss', patience=patience)
    # shrink the learning rate by 10x when val_loss plateaus
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                  patience=int(patience / 2), verbose=1)
    # save per-epoch training results as CSV
    csv_logger = CSVLogger(log_file_path, append=False)
    model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names,
                                       monitor='val_loss',
                                       verbose=1,
                                       save_best_only=True,
                                       save_weights_only=False)
    callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

    # train the model
    model.fit_generator(image_generator.flow(mode='train'),
                        steps_per_epoch=int(len(train_keys) / batch_size),
                        epochs=epochs, verbose=1,
                        callbacks=callbacks,
                        validation_data=image_generator.flow('val'),
                        validation_steps=int(len(val_keys) / batch_size))
# Evaluate a pre-trained mini-XCEPTION emotion model on the fer2013
# validation split and print loss, accuracy, and predicted class indices.
input_shape = (48, 48, 1)
validation_split = 0.1
model = load_model(
    'D:\\workplace\\fighting\\trained_models\\71.5\\fer2013_mini_XCEPTION.84-0.715.hdf5'
)
datasets = ['fer2013']
# for-loop drives the callbacks and dataset loading
for dataset_name in datasets:
    # take each dataset name from `datasets` in turn, then run the loop body
    print('Training dataset:', dataset_name)
    # DataManager loads the dataset selected by name and returns an object
    # whose get_data() yields the ground-truth arrays; image_size slices the
    # (height, width) out of input_shape
    data_loader = DataManager(
        dataset_name, image_size=input_shape[:2]
    )
    # get_data() returns the ground-truth data for the chosen dataset
    faces, emotions = data_loader.get_data(
    )
    #faces = cv2.resize(faces.astype('uint8'), input_shape, cv2.COLOR_GRAY2BGR)  ###!!!!
    # preprocess_input converts to float32 and divides by 255.0
    faces = preprocess_input(
        faces)
    # label matrix shape: (num_samples, num_classes)
    num_samples, num_classes = emotions.shape
    # split_data partitions faces/emotions into train and validation sets
    train_data, val_data = split_data(
        faces, emotions, validation_split)
    train_faces, train_emotions = train_data
    val_x, val_y = val_data
    # evaluate on the validation split; score = [loss, accuracy]
    score = model.evaluate(val_x, val_y, verbose=0)
    print('Test Loss:', score[0])
    print('Test accuracy:', score[1])
    # class indices: ground truth vs. model predictions
    test_true = np.argmax(val_y, axis=1)
    test_pred = np.argmax(model.predict(val_x), axis=1)
    print(test_pred)
# Gender-training setup fragment (truncated — the model.compile call at the
# end continues beyond this chunk).
num_epochs = 1000
validation_split = .2          # fraction of samples held out for validation
do_random_crop = False
patience = 100                 # epochs without improvement before early stop
num_classes = 2                # male / female
dataset_name = 'imdb'
input_shape = (64, 64, 1)
# NOTE(review): `grayscale` is only bound when the channel count is 1 — a
# 3-channel input_shape would leave it undefined below; confirm intent.
if input_shape[2] == 1: grayscale = True
images_path = '../datasets/imdb_crop/'
log_file_path = '../trained_models/gender_models/gender_training.log'
trained_models_path = '../trained_models/gender_models/gender_mini_XCEPTION'
# data loader
data_loader = DataManager(dataset_name)
ground_truth_data = data_loader.get_data()
train_keys, val_keys = split_imdb_data(ground_truth_data, validation_split)
print('Number of training samples:', len(train_keys))
print('Number of validation samples:', len(val_keys))
# NOTE(review): `batch_size` is not defined in this fragment — presumably set
# earlier in the file; confirm.
image_generator = ImageGenerator(ground_truth_data, batch_size,
                                 input_shape[:2],
                                 train_keys, val_keys, None,
                                 path_prefix=images_path,
                                 vertical_flip_probability=0,
                                 grayscale=grayscale,
                                 do_random_crop=do_random_crop)
# model parameters/compilation
model = mini_XCEPTION(input_shape, num_classes)
# NOTE(review): this call is truncated in the visible chunk — remaining
# arguments continue past this fragment.
model.compile(optimizer='adam', loss='categorical_crossentropy',
def main():
    """Train a mini-XCEPTION classifier for the task selected on the CLI.

    Supported modes: gender (2 classes), age (101), emotion (7), otherwise 5.
    Optionally resumes from a pretrained model given via --load_model.
    """
    # command-line parameters
    param = args()
    batch_size = param.batch_size
    num_epochs = param.num_epochs
    validation_split = param.val_ratio
    do_random_crop = False
    patience = param.patience
    dataset_name = param.dataset_name
    grayscale = param.graymode
    mode = param.mode
    anno_file = param.anno_file

    # number of output classes depends on the selected task
    if mode == "gender":
        num_classes = 2
    elif mode == "age":
        num_classes = 101
    elif mode == "emotion":
        num_classes = 7
    else:
        num_classes = 5

    # single- vs three-channel input depending on grayscale mode
    input_shape = (64, 64, 1) if grayscale else (64, 64, 3)

    images_path = param.img_dir
    # Fix: log filename was misspelled 'raining.log'; corrected to
    # 'training.log'.
    log_file_path = '../trained_models/%s_models/%s_model/training.log' % (
        mode, dataset_name)
    trained_models_path = '../trained_models/%s_models/%s_model/%s_mini_XCEPTION' % (
        mode, dataset_name, mode)
    pretrained_model = param.load_model
    print("-------begin to load data------", input_shape)

    # data loader
    data_loader = DataManager(dataset_name, anno_file)
    ground_truth_data = data_loader.get_data()
    train_keys, val_keys = split_imdb_data(ground_truth_data,
                                           validation_split)
    print('Number of training samples:', len(train_keys))
    print('Number of validation samples:', len(val_keys))
    train_image_generator = ImageGenerator(ground_truth_data, batch_size,
                                           input_shape[:2], train_keys,
                                           path_prefix=images_path,
                                           grayscale=grayscale)
    val_image_generator = ImageGenerator(ground_truth_data, batch_size,
                                         input_shape[:2], val_keys,
                                         path_prefix=images_path,
                                         grayscale=grayscale)

    # model parameters / compilation
    # Fix: compare against None with `is not None` rather than `!=`.
    if pretrained_model is not None:
        model = load_model(pretrained_model, compile=False)
        print("pretrained model:", model.input_shape)
    else:
        model = mini_XCEPTION(input_shape, num_classes)
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    # model callbacks — all keyed on validation accuracy
    early_stop = EarlyStopping('val_acc', patience=patience)
    reduce_lr = ReduceLROnPlateau('val_acc', factor=0.1,
                                  patience=int(patience), verbose=1,
                                  min_lr=0.0000001)
    csv_logger = CSVLogger(log_file_path, append=False)
    model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names, monitor='val_acc',
                                       verbose=1, save_best_only=True,
                                       save_weights_only=False)
    callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

    # training
    print("-----begin to train model----")
    model.fit_generator(
        train_image_generator.flow(),
        steps_per_epoch=int(np.ceil(len(train_keys) / batch_size)),
        epochs=num_epochs, verbose=1, callbacks=callbacks,
        validation_data=val_image_generator.flow(),
        validation_steps=int(np.ceil(len(val_keys) / batch_size)))