# Example #1
def generator(dataset_name, batch_size, classes):
    """Build the training batch generator for a re-id dataset.

    Args:
        dataset_name: dataset key understood by DataManager (e.g. 'market').
        batch_size: number of samples per training batch.
        classes: number of identity classes passed to ImageGenerator.

    Returns:
        (train_generator, train_num): the generator yielding training
        batches and the integer number of steps per epoch.
    """
    data_loader = DataManager(dataset_name)
    ground_truth_data = data_loader.get_data()
    images_path = data_loader.dataset_path
    # validation_split=0: every key goes to training, val_keys stays empty.
    train_keys, val_keys = split_data(ground_truth_data, 0)
    image_generator = ImageGenerator(ground_truth_data,
                                     batch_size, [224, 224, 3],
                                     train_keys,
                                     val_keys,
                                     classes,
                                     None,
                                     path_prefix=images_path,
                                     grayscale=False)
    train_generator = image_generator.flow(mode='train')
    # Ceiling integer division: plain '/' returned a float (invalid as a
    # Keras step count) and would also drop the final partial batch.
    train_num = (len(train_keys) + batch_size - 1) // batch_size
    return train_generator, train_num
# Training callbacks: CSV logging, early stopping, LR decay on plateau,
# and best-model checkpointing -- all monitoring validation loss.
log_base_path = f'{base_path}{dataset_name}_emotion_training.log'
csv_logger = CSVLogger(log_base_path, append=False)
early_stop = EarlyStopping(monitor='val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.1,
                              patience=int(patience / 4),
                              verbose=1)
trained_models_path = f'{base_path}{dataset_name}_mini_XCEPTION'
# The braces here are Keras filename placeholders, filled in per epoch --
# this string must NOT be an f-string.
model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names,
                                   monitor='val_loss',
                                   verbose=1,
                                   save_best_only=True)
callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]
# Load the dataset, preprocess it, and launch training.
data_loader = DataManager(dataset_name, image_size=input_shape[:2])
faces, emotions = data_loader.get_data()
faces = preprocess_input(faces)
num_samples, num_classes = emotions.shape
train_data, val_data = split_data(faces, emotions, validation_split)
train_faces, train_emotions = train_data
model.fit_generator(data_generator.flow(train_faces, train_emotions,
                                        batch_size),
                    # Ceiling integer division: '/' yielded a float, which
                    # Keras rejects for steps_per_epoch, and floor division
                    # would silently skip the final partial batch.
                    steps_per_epoch=(len(train_faces) + batch_size - 1)
                                    // batch_size,
                    epochs=num_epochs,
                    verbose=1,
                    callbacks=callbacks,
                    validation_data=val_data)
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input

sys.path.append('../utils')
from data_augmentation_reid import ImageGenerator
from datasets import DataManager
from datasets import split_data

if __name__ == '__main__':
    # Smoke-test of the Market-1501 data pipeline: load the ground-truth
    # annotations, split them 80/20, and dump them to a file for inspection.
    dataset_name = 'market'
    batch_size = 32
    input_shape = (224, 224, 3)
    classes = 751  # presumably the Market-1501 identity count -- TODO confirm
    data_loader = DataManager(dataset_name)
    images_path = data_loader.dataset_path
    ground_truth_data = data_loader.get_data()
    train_keys, val_keys = split_data(ground_truth_data, 0.2)
    # Context manager guarantees the file is closed even if the write fails
    # (the original open/write/close could leak the handle on error).
    with open('../result/market_ground_truth.txt', 'w') as f:
        f.write(str(ground_truth_data))