def create_model(image_size, depth, k, freeze_layers=0):
    model = WideResNet(image_size, depth=depth, k=k)()

    # Load weights
    weight_file = os.path.join("pretrained_models", "weights.18-4.06.hdf5")
    model.load_weights(weight_file, by_name=True)

    # # set the first 50 layers
    # # to non-trainable (weights will not be updated)
    # print(len(model.layers))
    # if freeze_layers > 0 :
    #     for layer in model.layers[:freeze_layers]:
    #         layer.trainable = False

    # Compile model
    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss=["binary_crossentropy"],
                  metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    return model
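# create_model is shown above as a self-contained helper that takes image_size,
# depth, k and freeze_layers explicitly. A minimal usage sketch (a hedged
# illustration; the values match the WRN-16-8 / 64x64 setup used elsewhere in
# this file, and the pretrained weight file must exist under pretrained_models/):
#
#     model = create_model(image_size=64, depth=16, k=8)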
def main():
    args = get_args()
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    opt_name = args.opt
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    output_path.mkdir(parents=True, exist_ok=True)

    logging.debug("Loading data...")

    train_gen = ImageGenerator(
        "/home/dodat/Documents/python-projects/age-gender-estimation/data/imdb_crop",
        "imdb",
        image_size=64,
        batch_size=batch_size)
    val_gen = ImageGenerator(
        "/home/dodat/Documents/python-projects/age-gender-estimation/data/wiki_crop",
        "wiki",
        image_size=64,
        batch_size=batch_size)
    print(train_gen.len())
    print(val_gen.len())
    print(train_gen.img_path())
    model = WideResNet(64, depth=16, k=8)()
    opt = get_optimizer(opt_name, lr)
    model.compile(
        optimizer=opt,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs, lr)),
        ModelCheckpoint(str(output_path) +
                        "/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]
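    # Schedule is a learning-rate helper defined elsewhere in the repository.
    # A minimal sketch of a compatible step-decay schedule (an assumption about
    # its shape, not the repository's exact implementation):
    #
    #     class Schedule:
    #         def __init__(self, nb_epochs, initial_lr=0.1):
    #             self.epochs = nb_epochs
    #             self.initial_lr = initial_lr
    #
    #         def __call__(self, epoch_idx):
    #             # step decay at 25% / 50% / 75% of training
    #             if epoch_idx < self.epochs * 0.25:
    #                 return self.initial_lr
    #             if epoch_idx < self.epochs * 0.50:
    #                 return self.initial_lr * 0.2
    #             if epoch_idx < self.epochs * 0.75:
    #                 return self.initial_lr * 0.04
    #             return self.initial_lr * 0.008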

    logging.debug("Running training...")

    hist = model.fit_generator(generator=train_gen,
                               epochs=nb_epochs,
                               validation_data=val_gen,
                               verbose=1,
                               callbacks=callbacks)

    logging.debug("Saving history...")
    pd.DataFrame(hist.history).to_hdf(
        output_path.joinpath("history_{}_{}.h5".format(16, 8)), "history")
Example #3
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    depth = args.depth
    k = args.width
    validation_split = args.validation_split

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    model = WideResNet(image_size, depth=depth, k=k)()
    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)),
              "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)),
        ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")
    hist = model.fit(X_data, [y_data_g, y_data_a],
                     batch_size=batch_size,
                     epochs=nb_epochs,
                     callbacks=callbacks,
                     validation_split=validation_split)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")
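# To reload the model saved above (a hedged sketch, assuming depth=16 and k=8;
# model_from_json needs the same custom objects, if any, used by WideResNet):
#
#     from keras.models import model_from_json
#     with open("models/WRN_16_8.json") as f:
#         model = model_from_json(f.read())
#     model.load_weights("models/WRN_16_8.h5")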
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    depth = args.depth
    k = args.width
    validation_split = args.validation_split
    use_augmentation = args.aug
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    output_path.mkdir(parents=True, exist_ok=True)

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    model = WideResNet(image_size, depth=depth, k=k)()
    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)),
        ModelCheckpoint(str(output_path) +
                        "/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")

    data_num = len(X_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_g = y_data_g[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    if use_augmentation:
        datagen = ImageDataGenerator(width_shift_range=0.1,
                                     height_shift_range=0.1,
                                     horizontal_flip=True,
                                     preprocessing_function=get_random_eraser(
                                         v_l=0, v_h=255))
        training_generator = MixupGenerator(X_train, [y_train_g, y_train_a],
                                            batch_size=batch_size,
                                            alpha=0.2,
                                            datagen=datagen)()
        hist = model.fit_generator(generator=training_generator,
                                   steps_per_epoch=train_num // batch_size,
                                   validation_data=(X_test,
                                                    [y_test_g, y_test_a]),
                                   epochs=nb_epochs,
                                   verbose=1,
                                   callbacks=callbacks)
    else:
        hist = model.fit(X_train, [y_train_g, y_train_a],
                         batch_size=batch_size,
                         epochs=nb_epochs,
                         callbacks=callbacks,
                         validation_data=(X_test, [y_test_g, y_test_a]))

    logging.debug("Saving history...")
    pd.DataFrame(hist.history).to_hdf(
        output_path.joinpath("history_{}_{}.h5".format(depth, k)), "history")
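# MixupGenerator and get_random_eraser are helpers imported elsewhere in these
# scripts. The core of the mixup step is roughly the following (an illustrative
# sketch under that assumption, not the repository's implementation): two
# shuffled batches are blended with weights drawn from a Beta(alpha, alpha)
# distribution, and the same weights are applied to every label head.
# (Assumes numpy is already imported as np at the top of the script.)
def _mixup_batch_sketch(x1, y1_list, x2, y2_list, alpha=0.2):
    lam = np.random.beta(alpha, alpha, size=(len(x1), 1, 1, 1))
    x = lam * x1 + (1 - lam) * x2
    lam_y = lam.reshape(len(x1), 1)
    y = [lam_y * a + (1 - lam_y) * b for a, b in zip(y1_list, y2_list)]
    return x, y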
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    freeze_layers = args.freeze_layers
    depth = args.depth
    k = args.width
    validation_split = args.validation_split

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    #Load weights
    weight_file = os.path.join("pretrained_models", "weights.18-4.06.hdf5")

    model = WideResNet(image_size, depth=depth, k=k)()
    model.load_weights(weight_file)

    # set the first 50 layers
    # to non-trainable (weights will not be updated)
    print(len(model.layers))
    if freeze_layers > 0:
        for layer in model.layers[:freeze_layers]:
            layer.trainable = False

    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)),
              "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)),
        ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")
    # print('length of X', len(X_data))
    # print('length of y_data_g', y_data_g)
    # print('length of y_data_a', len(y_data_a))
    hist = model.fit(X_data, [y_data_g, y_data_a],
                     batch_size=batch_size,
                     epochs=nb_epochs,
                     callbacks=callbacks,
                     validation_split=validation_split)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")
Example #6
def main():
    logging.debug("Reading Configuration...")
    args = get_args()    
    Config = configparser.ConfigParser()
    Config.read(args.config_path)
    def ConfigSectionMap(section):
        dict1 = {}
        options = Config.options(section)
        for option in options:
            try:
                dict1[option] = Config.get(section, option)
                if dict1[option] == -1:
                    logging.debug("skip: %s" % option)
            except Exception:
                print("exception on %s!" % option)
                dict1[option] = None
        return dict1

    #Loading Fixed parameters
    input_path          = ConfigSectionMap("Fixed")['input_path']
    batch_size          = int(ConfigSectionMap("Fixed")['batch_size'])
    nb_epochs           = int(ConfigSectionMap("Fixed")['nb_epochs'])
    validation_split    = float(ConfigSectionMap("Fixed")['validation_split'])
    dim                 = int(ConfigSectionMap("Fixed")['dimension'])
    # Note: bool() of any non-empty string is True, so parse the flag explicitly
    use_augmentation    = ConfigSectionMap("Fixed")['use_augmentation'].lower() in ("true", "1", "yes")
    metrics_type        = ConfigSectionMap("Fixed")['metrics']
    history             = ConfigSectionMap("Fixed")['history_save_path']
    checkpoints         = ConfigSectionMap("Fixed")['checkpoint_save_path']
    logs_dir            = ConfigSectionMap("Fixed")['log_save_path']

    #Loading parameters that vary over different configurations
    config_number       = args.config_number
    distribution        = ConfigSectionMap(config_number)['distribution']
    feature_extractor   = ConfigSectionMap(config_number)['feature_extractor']
    sigma               = float(ConfigSectionMap(config_number)['sigma'])
    optimizer_type      = ConfigSectionMap(config_number)['optimizer']
    loss_type           = ConfigSectionMap(config_number)['loss']


    logging.debug("Loading data...")
    image, _, age, _, image_size, _ = load_data(input_path)
    X_data = image

    #Alter age according to distribution type
    if distribution == "GaussBins":
        age = age[:, np.newaxis]
        lin_y = np.linspace(0, 100, dim)[:, np.newaxis]
        y_data_a = (1 / (sigma * np.sqrt(2 * np.pi))) * \
            np.exp(-np.square((age - lin_y.T) / (np.sqrt(2) * sigma)))
    elif distribution == "Cls":
        y_data_a = np_utils.to_categorical(age, dim)
    print(y_data_a.shape)
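    # With distribution == "GaussBins", each age label becomes a soft target:
    # a Gaussian density evaluated at `dim` points spread evenly over [0, 100]
    # and centred on the true age. For example (an illustration, assuming
    # dim=101 and sigma=2), an age of 30 gives a vector whose entry for bin 30
    # is 1/(2*sqrt(2*pi)) ~= 0.20, falling off smoothly for neighbouring bins.
    # With distribution == "Cls" the target is a plain one-hot vector instead.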

    data_num = len(X_data)
    #Randomly shuffle data
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_a = y_data_a[indexes]
    #Split in test train set
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    #Choose network
    if feature_extractor == "WideResNet":
        model = WideResNet(image_size, depth=16, k=8)()
    
    #Choose optimizer
    if optimizer_type == "sgd":
        optimizer = SGD(lr=0.1, momentum=0.9, nesterov=True)

    #Choose loss
    if loss_type == "kullback_leibler_divergence":
        loss = "kullback_leibler_divergence"
    elif loss_type == "Wasserstein":
        loss = Wasserstein  
    elif loss_type == "wass1":
        loss = Wass1  
    elif loss_type == "loss1":
        loss = custom_loss1      
    elif loss_type == "loss2":
        loss = custom_loss2     
    elif loss_type == "loss3":
        loss = "mean_squared_error"                        
    elif loss_type == "loss4":
        loss = "mean_absolute_error"                        
    elif loss_type == "categorical_crossentropy":
        loss = "categorical_crossentropy"

    #Choose metric
    if metrics_type == "mean_absolute_error":
        metric = mean_absolute_error

    #Final compilation
    model.compile(optimizer=optimizer, loss=[loss], metrics=[metric])

    logging.debug("Model summary...")
    model.count_params()
    # model.summary()

    #Callbacks
    json_log = open(os.path.join(logs_dir,"{}_{}_{:.5}_{}_{}.log".format(distribution,feature_extractor,loss_type,optimizer_type,sigma)),
                    mode='wt',
                    buffering=1)
    logging_callback = LambdaCallback(
        on_train_begin=lambda logs: json_log.write(
            json.dumps({'distribution': distribution, 'feature_extractor': feature_extractor,
                         'loss_type': loss_type, 'optimizer_type': optimizer_type, 'sigma': sigma}) + '\n'),
        on_epoch_end=lambda epoch, logs: json_log.write(
            json.dumps({'epoch': epoch, 'val_mean_absolute_error': logs['val_mean_absolute_error'], 'val_loss': logs['val_loss'], 'mean_absolute_error': logs['mean_absolute_error'], 'loss': logs['loss']}) + '\n'),
        on_train_end=lambda logs: json_log.close()
    )
    callbacks = [LearningRateScheduler(schedule=Schedule(nb_epochs)),
                 ModelCheckpoint(os.path.join(checkpoints,"weights.{}_{}_{:.5}_{}_{}.hdf5".format(distribution,feature_extractor,loss_type,optimizer_type,sigma)),
                                 monitor="val_mean_absolute_error",
                                 verbose=1,
                                 save_best_only=True,
                                 mode="auto"),
                 logging_callback,
                 TensorBoard(log_dir=os.path.join(logs_dir, "{}_{}_{:.5}_{}_{}/".format(distribution, feature_extractor, loss_type, optimizer_type, sigma)),
                             histogram_freq=0, batch_size=batch_size, write_graph=False, write_grads=False, write_images=False)]


    logging.debug("Running training...")
    if use_augmentation:
        datagen = ImageDataGenerator(
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
            preprocessing_function=get_random_eraser(v_l=0, v_h=255))
        training_generator = MixupGenerator(X_train, y_train_a, batch_size=batch_size, alpha=0.2,
                                            datagen=datagen)()
        hist = model.fit_generator(generator=training_generator,
                                   steps_per_epoch=train_num // batch_size,
                                   validation_data=(X_test, y_test_a),
                                   epochs=nb_epochs, verbose=1,
                                   callbacks=callbacks)
    else:
        hist = model.fit(X_train, y_train_a, batch_size=batch_size, epochs=nb_epochs, verbose=1, callbacks=callbacks,
                         validation_data=(X_test, y_test_a))

    logging.debug("Saving history and graphs...")
    pd.DataFrame(hist.history).to_hdf(os.path.join(history, "history.{}_{}_{:.5}_{}_{}.hdf5".format(distribution,feature_extractor,loss_type,optimizer_type,sigma)), "history")
Example #7
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    depth = args.depth
    k = args.width
    validation_split = args.validation_split
    use_augmentation = args.aug

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    model = WideResNet(image_size, depth=depth, k=k)()
    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss=["categorical_crossentropy", "categorical_crossentropy"],
                  metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)), "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    callbacks = [LearningRateScheduler(schedule=Schedule(nb_epochs)),
                 ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                                 monitor="val_loss",
                                 verbose=1,
                                 save_best_only=True,
                                 mode="auto")
                 ]

    logging.debug("Running training...")

    data_num = len(X_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_g = y_data_g[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    if use_augmentation:
        datagen = ImageDataGenerator(
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
            preprocessing_function=get_random_eraser(v_l=0, v_h=255))
        training_generator = MixupGenerator(X_train, [y_train_g, y_train_a], batch_size=batch_size, alpha=0.2,
                                            datagen=datagen)()
        hist = model.fit_generator(generator=training_generator,
                                   steps_per_epoch=train_num // batch_size,
                                   validation_data=(X_test, [y_test_g, y_test_a]),
                                   epochs=nb_epochs, verbose=1,
                                   callbacks=callbacks)
    else:
        hist = model.fit(X_train, [y_train_g, y_train_a], batch_size=batch_size, epochs=nb_epochs, callbacks=callbacks,
                         validation_data=(X_test, [y_test_g, y_test_a]))

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)), overwrite=True)
    pd.DataFrame(hist.history).to_hdf(os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")
Example #8
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    max_age = args.max_age + 1
    depth = args.depth
    k = args.width
    transfer_learning = args.transfer_learning
    validation_split = args.validation_split
    use_augmentation = args.aug
    initial_weights = '/home/paula/THINKSMARTER_/Model/demographics-model-prediction/pretrained_models/weights.18-4.06.hdf5'
    # weight_file = '/home/paula/THINKSMARTER_/Model/age-gender-estimation-adapted/checkpoints/weights.09-4.32.hdf5'

    _weight_decay = 0.0005
    _use_bias = False
    _weight_init = "he_normal"

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, max_age)

    if transfer_learning:

        model = WideResNet(image_size, depth=depth, k=k, units_age=101)()
        model.load_weights(initial_weights)

        inputs = model.input
        flatten = model.layers[-3].output  # flatten layer
        dense1 = Dense(units=2,
                       kernel_initializer=_weight_init,
                       use_bias=_use_bias,
                       kernel_regularizer=l2(_weight_decay),
                       activation="softmax")(flatten)
        dense2 = Dense(units=117,
                       kernel_initializer=_weight_init,
                       use_bias=_use_bias,
                       kernel_regularizer=l2(_weight_decay),
                       activation="softmax")(flatten)
        model = Model(inputs=inputs, outputs=[dense1, dense2])
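        # The pretrained classification heads (2-way gender, 101-way age) are
        # discarded: two freshly initialised softmax heads are attached to the
        # flatten output, while the convolutional trunk keeps its pretrained
        # weights. Note that the 117 units above are hard-coded and should
        # match max_age (= args.max_age + 1) used to build the labels earlier.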

        # ---------------------------------
        # IDEA: fine-tuning (train only the last two layers)
        # for layer in model.layers[:-2]:
        #     layer.trainable = False

    else:
        model = WideResNet(image_size, depth=depth, k=k, units_age=max_age)()

    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    if args.plot_model:
        plot_model(model,
                   to_file='experiments_pictures/model_plot.png',
                   show_shapes=True,
                   show_layer_names=True)

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)),
              "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    # tensorBoard = TensorBoard(log_dir='events', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None)

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)),
        ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")

    data_num = len(X_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_g = y_data_g[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    if use_augmentation:
        datagen = ImageDataGenerator(width_shift_range=0.1,
                                     height_shift_range=0.1,
                                     horizontal_flip=True,
                                     preprocessing_function=get_random_eraser(
                                         v_l=0, v_h=255))
        training_generator = MixupGenerator(X_train, [y_train_g, y_train_a],
                                            batch_size=batch_size,
                                            alpha=0.2,
                                            datagen=datagen)()

        hist = model.fit_generator(generator=training_generator,
                                   steps_per_epoch=train_num // batch_size,
                                   validation_data=(X_test,
                                                    [y_test_g, y_test_a]),
                                   epochs=nb_epochs,
                                   verbose=1,
                                   callbacks=callbacks)
    else:
        hist = model.fit(X_train, [y_train_g, y_train_a],
                         batch_size=batch_size,
                         epochs=nb_epochs,
                         callbacks=callbacks,
                         validation_data=(X_test, [y_test_g, y_test_a]))

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")

    with open('history_tmp.txt', 'w') as f:
        for key in hist.history:
            print(key, file=f)
        f.write('\n')
        json.dump(hist.history, f)
Example #9
def main():
    nb_class = 2

    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    opt_name = args.opt
    depth = args.depth
    k = args.width
    validation_split = args.validation_split
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    output_path.mkdir(parents=True, exist_ok=True)

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    # vggface = VGGFace(model='resnet50')

    # Build a custom VGGFace (ResNet50) classifier head.
    # Note: custom_vgg_model is constructed here but not used below; training
    # proceeds with the WideResNet model defined next.
    vgg_model = VGGFace(model='resnet50',
                        include_top=False,
                        input_shape=(224, 224, 3))
    last_layer = vgg_model.get_layer('avg_pool').output
    x = Flatten(name='flatten')(last_layer)
    out = Dense(nb_class, activation='softmax', name='classifier')(x)
    custom_vgg_model = Model(vgg_model.input, out)

    model = WideResNet(image_size, depth=depth, k=k)()
    opt = get_optimizer(opt_name, lr)
    model.compile(
        optimizer=opt,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs, lr)),
        ModelCheckpoint(str(output_path) +
                        "/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=False,
                        mode="auto")
    ]

    logging.debug("Running training...")

    data_num = len(X_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_g = y_data_g[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    hist = model.fit(X_train, [y_train_g, y_train_a],
                     batch_size=batch_size,
                     epochs=nb_epochs,
                     callbacks=callbacks,
                     validation_data=(X_test, [y_test_g, y_test_a]))

    logging.debug("Saving history...")
    pd.DataFrame(hist.history).to_hdf(
        output_path.joinpath("history_{}_{}.h5".format(depth, k)), "history")