if input_shape[2] == 1:
    grayscale = True
images_path = '../datasets/imdb_crop/'
log_file_path = '../trained_models/gender_models/gender_training.log'
trained_models_path = '../trained_models/gender_models/gender_mini_XCEPTION'

# data loader
data_loader = DataManager(dataset_name)
ground_truth_data = data_loader.get_data()
train_keys, val_keys = split_imdb_data(ground_truth_data, validation_split)
print('Number of training samples:', len(train_keys))
print('Number of validation samples:', len(val_keys))
image_generator = ImageGenerator(ground_truth_data, batch_size,
                                 input_shape[:2],
                                 train_keys, val_keys, None,
                                 path_prefix=images_path,
                                 vertical_flip_probability=0,
                                 grayscale=grayscale,
                                 do_random_crop=do_random_crop)

# model parameters/compilation
model = mini_XCEPTION(input_shape, num_classes)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

# model callbacks
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
                              patience=int(patience / 2), verbose=1)
import numpy as np
from keras.models import load_model
from keras.callbacks import CSVLogger, EarlyStopping
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
# DataManager, split_imdb_data, ImageGenerator, mini_XCEPTION and args()
# come from the repository's own utility modules.


def main():
    # parameters
    param = args()
    batch_size = param.batch_size
    num_epochs = param.num_epochs
    validation_split = param.val_ratio
    do_random_crop = False
    patience = param.patience
    dataset_name = param.dataset_name
    grayscale = param.graymode
    mode = param.mode
    anno_file = param.anno_file
    if mode == "gender":
        num_classes = 2
    elif mode == "age":
        num_classes = 101
    elif mode == "emotion":
        num_classes = 7
    else:
        num_classes = 5
    if grayscale:
        input_shape = (64, 64, 1)
    else:
        input_shape = (64, 64, 3)
    images_path = param.img_dir
    log_file_path = '../trained_models/%s_models/%s_model/training.log' % (
        mode, dataset_name)
    trained_models_path = '../trained_models/%s_models/%s_model/%s_mini_XCEPTION' % (
        mode, dataset_name, mode)
    pretrained_model = param.load_model
    print("-------begin to load data------", input_shape)

    # data loader
    data_loader = DataManager(dataset_name, anno_file)
    ground_truth_data = data_loader.get_data()
    train_keys, val_keys = split_imdb_data(ground_truth_data, validation_split)
    print('Number of training samples:', len(train_keys))
    print('Number of validation samples:', len(val_keys))
    train_image_generator = ImageGenerator(ground_truth_data, batch_size,
                                           input_shape[:2], train_keys,
                                           path_prefix=images_path,
                                           grayscale=grayscale)
    val_image_generator = ImageGenerator(ground_truth_data, batch_size,
                                         input_shape[:2], val_keys,
                                         path_prefix=images_path,
                                         grayscale=grayscale)

    # model parameters/compilation
    if pretrained_model is not None:
        model = load_model(pretrained_model, compile=False)
        print("pretrained model:", model.input_shape)
    else:
        model = mini_XCEPTION(input_shape, num_classes)
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    # model callbacks
    early_stop = EarlyStopping('val_acc', patience=patience)
    reduce_lr = ReduceLROnPlateau('val_acc', factor=0.1,
                                  patience=int(patience), verbose=1,
                                  min_lr=0.0000001)
    csv_logger = CSVLogger(log_file_path, append=False)
    model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names, monitor='val_acc',
                                       verbose=1, save_best_only=True,
                                       save_weights_only=False)
    callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

    # training model
    print("-----begin to train model----")
    model.fit_generator(
        train_image_generator.flow(),
        steps_per_epoch=int(np.ceil(len(train_keys) / batch_size)),
        epochs=num_epochs, verbose=1, callbacks=callbacks,
        validation_data=val_image_generator.flow(),
        validation_steps=int(np.ceil(len(val_keys) / batch_size)))
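# The script above depends on an `args()` helper that is not shown in this
# excerpt. The argparse sketch below is a hypothetical reconstruction inferred
# from the attributes read off `param` (batch_size, num_epochs, val_ratio,
# patience, dataset_name, graymode, mode, anno_file, img_dir, load_model); the
# flag names and defaults are assumptions, not the project's actual CLI.
import argparse


def args():
    parser = argparse.ArgumentParser(
        description='Train a mini_XCEPTION age/gender/emotion classifier')
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--num_epochs', type=int, default=1000)
    parser.add_argument('--val_ratio', type=float, default=0.2)
    parser.add_argument('--patience', type=int, default=50)
    parser.add_argument('--dataset_name', type=str, default='imdb')
    parser.add_argument('--graymode', action='store_true',
                        help='use single-channel (grayscale) 64x64 inputs')
    parser.add_argument('--mode', type=str, default='gender',
                        help='gender, age, emotion or a 5-class custom task')
    parser.add_argument('--anno_file', type=str, default=None)
    parser.add_argument('--img_dir', type=str, required=True)
    parser.add_argument('--load_model', type=str, default=None,
                        help='path to a pretrained .hdf5 model to fine-tune')
    return parser.parse_args()


# Typical entry point (assumed, not part of the original excerpt):
# if __name__ == '__main__':
#     main()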
image_array = load_image(image_path, input_shape)
plot_box_data(unregressed_positive_boxes, image_array,
              arg_to_class, colors=colors)
plt.imshow(image_array)
plt.show()

# data augmentations
# ------------------------------------------------------------------
data_manager = DataManager(dataset_name, 'train')
train_data = data_manager.load_data()
arg_to_class = data_manager.arg_to_class
colors = get_colors(25)
val_data = DataManager(dataset_name, 'val').load_data()
# image_prefix = dataset_manager.images_path
generator = ImageGenerator(train_data, val_data, prior_boxes,
                           batch_size=21)  # , path_prefix=image_prefix)
generated_data = next(generator.flow('train'))
transformed_image_batch = generated_data[0]['input_1']
generated_output = generated_data[1]['predictions']
for batch_arg, transformed_image in enumerate(transformed_image_batch):
    positive_mask = generated_output[batch_arg, :, 4] != 1
    regressed_boxes = generated_output[batch_arg]
    unregressed_boxes = unregress_boxes(regressed_boxes, prior_boxes)
    unregressed_positive_boxes = unregressed_boxes[positive_mask]
    plot_box_data(unregressed_positive_boxes, transformed_image,
                  arg_to_class, colors=colors)
    plt.imshow(transformed_image.astype('uint8'))
    plt.show()
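# For reference: `unregress_boxes` above maps the (cx, cy, w, h) offsets that
# the network predicts relative to each prior box back to absolute corner
# coordinates so they can be drawn. The helper below is only a minimal NumPy
# sketch of that decoding step, assuming priors stored as (cx, cy, w, h) and no
# variance scaling; the project's actual `unregress_boxes` may use a different
# layout. The name `decode_boxes_sketch` is hypothetical.
import numpy as np


def decode_boxes_sketch(regressed_boxes, prior_boxes):
    """Sketch of SSD-style box decoding (assumed encoding, no variances).

    regressed_boxes: (num_priors, 4 + num_classes) offsets plus class scores.
    prior_boxes:     (num_priors, 4) priors as (cx, cy, w, h).
    Returns (x_min, y_min, x_max, y_max) boxes with the class columns kept.
    """
    prior_cx, prior_cy = prior_boxes[:, 0], prior_boxes[:, 1]
    prior_w, prior_h = prior_boxes[:, 2], prior_boxes[:, 3]
    # undo the center offsets and the log-scaled sizes
    cx = regressed_boxes[:, 0] * prior_w + prior_cx
    cy = regressed_boxes[:, 1] * prior_h + prior_cy
    w = prior_w * np.exp(regressed_boxes[:, 2])
    h = prior_h * np.exp(regressed_boxes[:, 3])
    # convert centers/sizes to corner coordinates, keep the class columns
    return np.concatenate([
        (cx - w / 2.)[:, None], (cy - h / 2.)[:, None],
        (cx + w / 2.)[:, None], (cy + h / 2.)[:, None],
        regressed_boxes[:, 4:]], axis=1)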