def create_model():
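    # NOTE: image_size, depth and k are not defined inside this snippet; they
    # are assumed to be module-level settings (e.g. parsed CLI arguments) in
    # the original script.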
    model = WideResNet(image_size, depth=depth, k=k)()

    # Load weights
    weight_file = os.path.join("pretrained_models", "weights.18-4.06.hdf5")
    model.load_weights(weight_file, by_name=True)

    # # set the first 50 layers
    # # to non-trainable (weights will not be updated)
    # print(len(model.layers))
    # if freeze_layers > 0 :
    #     for layer in model.layers[:freeze_layers]:
    #         layer.trainable = False

    # Compile model
    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss=["binary_crossentropy"],
                  metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    return model

def main():
    args = get_args()
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    opt_name = args.opt
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    output_path.mkdir(parents=True, exist_ok=True)

    logging.debug("Loading data...")

    train_gen = ImageGenerator(
        "/home/dodat/Documents/python-projects/age-gender-estimation/data/imdb_crop",
        "imdb",
        image_size=64,
        batch_size=batch_size)
    val_gen = ImageGenerator(
        "/home/dodat/Documents/python-projects/age-gender-estimation/data/wiki_crop",
        "wiki",
        image_size=64,
        batch_size=batch_size)
    print(train_gen.len())
    print(val_gen.len())
    print(train_gen.img_path())
    model = WideResNet(64, depth=16, k=8)()
    opt = get_optimizer(opt_name, lr)
    model.compile(
        optimizer=opt,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs, lr)),
        ModelCheckpoint(str(output_path) +
                        "/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")

    hist = model.fit_generator(generator=train_gen,
                               epochs=nb_epochs,
                               validation_data=val_gen,
                               verbose=1,
                               callbacks=callbacks)

    logging.debug("Saving history...")
    pd.DataFrame(hist.history).to_hdf(
        output_path.joinpath("history_{}_{}.h5".format(16, 8)), "history")
Example #3
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    depth = args.depth
    k = args.width
    validation_split = args.validation_split

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    model = WideResNet(image_size, depth=depth, k=k)()
    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)),
              "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)),
        ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")
    hist = model.fit(X_data, [y_data_g, y_data_a],
                     batch_size=batch_size,
                     epochs=nb_epochs,
                     callbacks=callbacks,
                     validation_split=validation_split)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    depth = args.depth
    k = args.width
    validation_split = args.validation_split
    use_augmentation = args.aug
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    output_path.mkdir(parents=True, exist_ok=True)

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    model = WideResNet(image_size, depth=depth, k=k)()
    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)),
        ModelCheckpoint(str(output_path) +
                        "/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")

    data_num = len(X_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_g = y_data_g[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    if use_augmentation:
        datagen = ImageDataGenerator(width_shift_range=0.1,
                                     height_shift_range=0.1,
                                     horizontal_flip=True,
                                     preprocessing_function=get_random_eraser(
                                         v_l=0, v_h=255))
        training_generator = MixupGenerator(X_train, [y_train_g, y_train_a],
                                            batch_size=batch_size,
                                            alpha=0.2,
                                            datagen=datagen)()
        hist = model.fit_generator(generator=training_generator,
                                   steps_per_epoch=train_num // batch_size,
                                   validation_data=(X_test,
                                                    [y_test_g, y_test_a]),
                                   epochs=nb_epochs,
                                   verbose=1,
                                   callbacks=callbacks)
    else:
        hist = model.fit(X_train, [y_train_g, y_train_a],
                         batch_size=batch_size,
                         epochs=nb_epochs,
                         callbacks=callbacks,
                         validation_data=(X_test, [y_test_g, y_test_a]))

    logging.debug("Saving history...")
    pd.DataFrame(hist.history).to_hdf(
        output_path.joinpath("history_{}_{}.h5".format(depth, k)), "history")
def main():
    args = get_args()
    input_agender_path = args.input_agender
    input_emotion_path = args.input_emotion
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    staircase_decay_at_epochs = args.staircase_decay_at_epochs
    opt_name = args.opt
    depth = args.depth
    k = args.width
    validation_split = args.validation_split
    use_augmentation = args.aug
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    output_path.mkdir(parents=True, exist_ok=True)

    logging.debug("Loading data...")
    # load imdb: 171852 images
    # load wiki: 38138 images
    imdb_imgs, imdb_genders, imdb_ages, _, _, _ = load_imdb_or_wiki(
        input_agender_path)
    # gender_class_weight = class_weight.compute_class_weight('balanced', np.unique(imdb_genders), imdb_genders)
    # age_class_weight = class_weight.compute_class_weight('balanced', np.unique(imdb_ages), imdb_ages)
    if small_volume != 0:
        imdb_imgs = imdb_imgs[0:small_volume]
        imdb_genders = imdb_genders[0:small_volume]
        imdb_ages = imdb_ages[0:small_volume]
    imdb_genders = np_utils.to_categorical(imdb_genders, 2)
    imdb_ages = np_utils.to_categorical(imdb_ages, 101)
    imdb_emotions = np.full((len(imdb_imgs), emotion_class_num), 0)
    # imdb_emotions = np.full((len(imdb_imgs), emotion_class_num), -1, dtype=np.int8)
    print(
        'imdb_imgs.shape: {}, imdb_genders.shape: {}, imdb_ages.shape: {}, imdb_emotions.shape: {}'
        .format(imdb_imgs.shape, imdb_genders.shape, imdb_ages.shape,
                imdb_emotions.shape))

    # load fer2013: 35887 images
    fer_imgs, fer_emotions = load_fer2013(input_emotion_path,
                                          resize=(image_size, image_size))
    # emotion_class_weight = class_weight.compute_class_weight('balanced', np.unique(fer_emotions), fer_emotions)
    if small_volume != 0:
        fer_imgs = fer_imgs[0:small_volume]
        fer_emotions = fer_emotions[0:small_volume]
    fer_imgs = np.squeeze(np.stack((fer_imgs, ) * 3,
                                   -1))  # replicate grayscale into 3 channels
    fer_emotions = pd.get_dummies(fer_emotions).values  # one-hot encode emotion labels
    fer_genders = np.full((len(fer_imgs), 2), 0)
    fer_ages = np.full((len(fer_imgs), 101), 0)
    # fer_genders = np.full((len(fer_imgs), 2), -1, dtype=np.int8)
    # fer_ages = np.full((len(fer_imgs), 101), -1, dtype=np.int8)
    print(
        'fer_imgs.shape: {}, fer_genders.shape: {}, fer_ages.shape: {}, fer_emotions.shape: {}'
        .format(fer_imgs.shape, fer_genders.shape, fer_ages.shape,
                fer_emotions.shape))

    logging.debug("Splitting data...")
    # split imdb into train and validate set
    imdb_imgs_train, imdb_imgs_val, imdb_genders_train, imdb_genders_val, imdb_ages_train, imdb_ages_val, imdb_emotions_train, imdb_emotions_val \
        = train_test_split(
        imdb_imgs,
        imdb_genders,
        imdb_ages,
        imdb_emotions,
        test_size=validation_split,
        shuffle=False)
    print(
        'imdb_imgs_train.shape: {}, imdb_imgs_val.shape: {}, imdb_genders_train.shape: {}, imdb_genders_val.shape: {}, imdb_ages_train.shape: {} \
        , imdb_ages_val.shape: {}, imdb_emotions_train.shape: {}, imdb_emotions_val.shape: {}'
        .format(imdb_imgs_train.shape, imdb_imgs_val.shape,
                imdb_genders_train.shape, imdb_genders_val.shape,
                imdb_ages_train.shape, imdb_ages_val.shape,
                imdb_emotions_train.shape, imdb_emotions_val.shape))
    # split fer2013 into train and validate set
    fer_imgs_train, fer_imgs_val, fer_genders_train, fer_genders_val, fer_ages_train, fer_ages_val, fer_emotions_train, fer_emotions_val \
        = train_test_split(
        fer_imgs,
        fer_genders,
        fer_ages,
        fer_emotions,
        test_size=validation_split,
        shuffle=False)
    print(
        'fer_imgs_train.shape: {}, fer_imgs_val.shape: {}, fer_genders_train.shape: {}, fer_genders_val.shape: {}, fer_ages_train.shape: {} \
        , fer_ages_val.shape: {}, fer_emotions_train.shape: {}, fer_emotions_val.shape: {}'
        .format(fer_imgs_train.shape, fer_imgs_val.shape,
                fer_genders_train.shape, fer_genders_val.shape,
                fer_ages_train.shape, fer_ages_val.shape,
                fer_emotions_train.shape, fer_emotions_val.shape))

    # merge imdb and fer2013 validate set
    logging.debug("Merge validation set...")
    X_val = np.vstack((imdb_imgs_val, fer_imgs_val))
    y_genders_val = np.vstack((imdb_genders_val, fer_genders_val))
    y_ages_val = np.vstack((imdb_ages_val, fer_ages_val))
    y_emotions_val = np.vstack((imdb_emotions_val, fer_emotions_val))

    model = WideResNet(image_size, depth=depth, k=k)()
    opt = get_optimizer(opt_name, lr)
    # model.compile(optimizer=opt,
    #               loss=[myloss, myloss, myloss],
    #               metrics=['accuracy'])
    model.compile(optimizer=opt,
                  loss={
                      'output_gender': 'categorical_crossentropy',
                      'output_age': 'categorical_crossentropy',
                      'output_emotion': 'categorical_crossentropy'
                  },
                  metrics={
                      'output_gender': 'accuracy',
                      'output_age': myMAE,
                      'output_emotion': 'accuracy'
                  })

    logging.debug("Model summary...")
    # model.count_params()
    model.summary()
    # Total params: 25,381,360
    # Trainable params: 25,374,160
    # Non-trainable params: 7,200

    # print(model.get_layer(name='conv2d_1').kernel_regularizer)

    lr_schedule = LearningRateScheduler(
        schedule=StaircaseSchedule(staircase_decay_at_epochs, lr))
    # lr_schedule = LearningRateScheduler(schedule=Schedule(nb_epochs, lr))
    callbacks = [
        lr_schedule,
        ModelCheckpoint(str(output_path) +
                        "/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto"),
        TensorBoard(log_dir='./logs', write_graph=True, write_images=True),
        CSVLogger('logs/train_log.csv', append=False)
    ]

    logging.debug("Running training...")

    # if use_augmentation:
    # datagen_agender = ImageDataGenerator(
    #     width_shift_range=0.1,
    #     height_shift_range=0.1,
    #     horizontal_flip=True,
    #     preprocessing_function=get_random_eraser(v_l=0, v_h=255))
    # datagen_emotion = ImageDataGenerator(
    #     featurewise_center=False,
    #     featurewise_std_normalization=False,
    #     rotation_range=10,
    #     width_shift_range=0.1,
    #     height_shift_range=0.1,
    #     zoom_range=.1,
    #     horizontal_flip=True)
    # datagen_agender = ImageDataGenerator(
    #     featurewise_center=False,
    #     featurewise_std_normalization=False,
    #     rotation_range=10,
    #     width_shift_range=0.1,
    #     height_shift_range=0.1,
    #     zoom_range=.1,
    #     horizontal_flip=True,
    #     preprocessing_function=get_random_eraser(v_l=0, v_h=255))
    # training_generator = MixupGenerator(X_train, [y_genders_train, y_ages_train, y_emotions_train],
    #                                     batch_size=batch_size,
    #                                     alpha=0.2,
    #                                     datagen=datagen_agender)()
    # else:
    # hist = model.fit(X_train, [y_genders_train, y_ages_train, y_emotions_train], batch_size=batch_size,
    #                  epochs=nb_epochs,
    #                  callbacks=callbacks,
    #                  validation_data=(X_val, [y_genders_val, y_ages_val, y_emotions_val]))

    # fit_class_weight = {'output_gender': dict(enumerate(gender_class_weight)),
    #                     'output_age': dict(enumerate(age_class_weight)),
    #                     'output_emotion': dict(enumerate(emotion_class_weight))}
    hist = model.fit_generator(
        generator=sample_generator((imdb_imgs_train, imdb_genders_train,
                                    imdb_ages_train, imdb_emotions_train),
                                   (fer_imgs_train, fer_genders_train,
                                    fer_ages_train, fer_emotions_train),
                                   batch_size=batch_size),
        steps_per_epoch=(len(imdb_imgs_train) + len(fer_imgs_train)) //
        batch_size,
        validation_data=(X_val, [y_genders_val, y_ages_val, y_emotions_val]),
        epochs=nb_epochs,
        verbose=1,
        callbacks=callbacks)

    logging.debug("Saving history...")
    pd.DataFrame(hist.history).to_hdf(
        output_path.joinpath("history_{}_{}.h5".format(depth, k)), "history")
def train_from_db():
    '''
    Train the CNN used by this solution: "WideResNet".
    WideResNet is designed to predict both age and gender; I am omitting
    gender prediction for my project.
    '''

    data = scipy.io.loadmat('utk.mat')

    # initialize model for training
    model = WideResNet(data['img_size'][0, 0])()
    model.compile(
        optimizer=SGD(lr=0.1, momentum=0.9, nesterov=True),
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'],
    )
    model.summary()

    nb_epochs = 32
    lr = 0.1

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs, lr)),
        ModelCheckpoint(
            "weights.{epoch:02d}-{val_loss:.2f}.hdf5",
            monitor="val_loss",
            verbose=1,
            save_best_only=True,
            mode="auto",
        )
    ]

    X_data = data['image']
    y_data_a = np_utils.to_categorical(data['age'][0], 101)
    y_data_g = np_utils.to_categorical(
        [0] * len(data['age'][0]), 2)  # placeholder for the unused gender output

    indexes = np.arange(len(X_data))
    np.random.shuffle(indexes)

    # shuffle the dataset
    X_data = X_data[indexes]
    y_data_a = y_data_a[indexes]

    # train on 90%; validate on 10%
    train_num = int(len(X_data) * (1 - 0.1))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    # training
    hist = model.fit(
        X_train,
        [y_train_g, y_train_a],
        batch_size=32,
        epochs=nb_epochs,
        callbacks=callbacks,
        validation_data=(X_test, [y_test_g, y_test_a]),
    )
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    freeze_layers = args.freeze_layers
    depth = args.depth
    k = args.width
    validation_split = args.validation_split

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    #Load weights
    weight_file = os.path.join("pretrained_models", "weights.18-4.06.hdf5")

    model = WideResNet(image_size, depth=depth, k=k)()
    model.load_weights(weight_file)

    # set the first 50 layers
    # to non-trainable (weights will not be updated)
    print(len(model.layers))
    if freeze_layers > 0:
        for layer in model.layers[:freeze_layers]:
            layer.trainable = False

    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)),
              "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)),
        ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")
    # print('length of X', len(X_data))
    # print('length of y_data_g', y_data_g)
    # print('length of y_data_a', len(y_data_a))
    hist = model.fit(X_data, [y_data_g, y_data_a],
                     batch_size=batch_size,
                     epochs=nb_epochs,
                     callbacks=callbacks,
                     validation_split=validation_split)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")
Example #8
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size * args.gpus
    nb_epochs = args.nb_epochs
    depth = args.depth
    k = args.width
    validation_split = args.validation_split
    img_size = args.img_size
    val_path = args.val_path
    pretrained_fil = args.pretrained_fil
    input_shape = [img_size, img_size, 3]
    patience = 30
    gpu_num = args.gpus
    train_db = args.db

    logging.debug("Loading data...")
    '''
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)
    '''
    batchdataload = BatchLoader(input_path, batch_size, img_size, train_db)
    valdataload = BatchLoader(val_path, batch_size, img_size)
    model = WideResNet(img_size, depth=depth, k=k)()
    #model = mini_XCEPTION(input_shape,101)
    with open(os.path.join("ag_models", "WRN_{}_{}.json".format(depth, k)), "w") as f:
        f.write(model.to_json())
    if pretrained_fil:
        model.load_weights(pretrained_fil)
    # sgd = SGD(lr=0.001, momentum=0.7, nesterov=True)
    adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-5)
    #model.compile(optimizer=sgd, loss=["categorical_crossentropy", "categorical_crossentropy"],
    #              metrics=['accuracy'])
    #if gpu_num >1:
        #model = multi_gpu_model(model,gpu_num)
    model.compile(optimizer=adam, loss=["categorical_crossentropy"],
                  metrics=['accuracy'])
    logging.debug("Model summary...")
    #model.count_params()
    model.summary()
    logging.debug("Saving model...")
    if not os.path.exists("./ag_models"):
        mk_dir("ag_models")

    reduce_lr = ReduceLROnPlateau(monitor="val_loss", factor=0.1, patience=patience * 2,
                                  verbose=1, min_lr=0.0000001)
    early_stop = EarlyStopping('val_loss', patience=patience)
    modelcheckpoint = ModelCheckpoint("ag_models/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                                      monitor="val_loss", verbose=1, save_best_only=True,
                                      mode="auto", period=1000)
    #mk_dir("checkpoints")
    #reduce_lr = LearningRateScheduler(schedule=reduce_lr)
    #callbacks = [modelcheckpoint,early_stop,reduce_lr]
    callbacks = [modelcheckpoint, reduce_lr]
    logging.debug("Running training...")
    #whole training
    error_min = 0
    if whole_data:  # whole_data is assumed to be a module-level flag in the original script
        hist = model.fit_generator(data_geneter(batchdataload),
                                   steps_per_epoch=batchdataload.batch_num,
                                   epochs=nb_epochs, verbose=1,
                                   callbacks=callbacks,
                                   validation_data=data_geneter(valdataload),
                                   validation_steps=valdataload.batch_num,
                                   workers=1)
        logging.debug("Saving weights...")
        model.save_weights(os.path.join("ag_models", "WRN_{}_{}.h5".format(depth, k)),overwrite=True)
        #pd.DataFrame(hist.history).to_hdf(os.path.join("ag_models", "history_{}_{}.h5".format(depth, k)), "history")
    else:
        epoch_step = 0
        while epoch_step < nb_epochs:
            step = 0
            while step < batchdataload.batch_num:
                # X_data, y_data_g, y_data_a = batch_geneter(batchdataload)
                X_data, y_data_g = batch_geneter(batchdataload)
                # hist = model.fit(X_data, [y_data_g, y_data_a], batch_size=batch_size, epochs=1, verbose=2)
                hist = model.fit(X_data, y_data_g, batch_size=batch_size, epochs=1, verbose=2)
                step += 1
                if step % 100 == 0:
                    # val_data, val_g, val_a = batch_geneter(valdataload)
                    val_data, val_g = batch_geneter(valdataload)
                    # error_t = model.evaluate(val_data, [val_g, val_a], batch_size=batch_size, verbose=1)
                    error_t = model.evaluate(val_data, val_g, batch_size=batch_size, verbose=1)
                    print("****** Epoch {} Step {}: ***********".format(epoch_step, step))
                    print(" loss: {}".format(error_t))
                    if epoch_step % 5 == 0:
                        # logging.debug("Saving weights...")
                        # val_data, val_g, val_a = batch_geneter(valdataload)
                        # error_t = model.evaluate(val_data, [val_g, val_a], batch_size=batch_size, verbose=1)
                        # with a single output, evaluate() returns [loss, accuracy]; index 1 is the accuracy
                        if error_t[1] > error_min:
                            logging.debug("Saving weights...")
                            model.save_weights(os.path.join("ag_models", "WRN_{}_{}_epoch{}_step{}.h5".format(depth, k, epoch_step, step)))
                            error_min = error_t[1]
            epoch_step += 1
            if epoch_step % 5 == 0:
                logging.debug("Saving weights...")
                # val_data, val_g, val_a = batch_geneter(valdataload)
                val_data, val_g = batch_geneter(valdataload)
                # error_t = model.evaluate(val_data, [val_g, val_a], batch_size=batch_size, verbose=1)
                error_t = model.evaluate(val_data, val_g, batch_size=batch_size, verbose=1)
                if error_t[1] > error_min:
                    model.save_weights(os.path.join("ag_models", "WRN_{}_{}_epoch{}.h5".format(depth, k, epoch_step)))
                    error_min = error_t[1]
                error_min = 0
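
# data_geneter and batch_geneter are project helpers not shown here. Judging
# only from how they are called above, data_geneter is presumably a thin
# endless-generator wrapper around batch_geneter; a hypothetical sketch:
def data_geneter(loader):
    while True:  # fit_generator expects a generator that never terminates
        x_batch, y_batch = batch_geneter(loader)
        yield x_batch, y_batch
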
Example #9
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    depth = args.depth
    k = args.width
    validation_split = args.validation_split
    use_augmentation = args.aug

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    model = WideResNet(image_size, depth=depth, k=k)()
    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss=["categorical_crossentropy", "categorical_crossentropy"],
                  metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)), "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    callbacks = [LearningRateScheduler(schedule=Schedule(nb_epochs)),
                 ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                                 monitor="val_loss",
                                 verbose=1,
                                 save_best_only=True,
                                 mode="auto")
                 ]

    logging.debug("Running training...")

    data_num = len(X_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_g = y_data_g[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    if use_augmentation:
        datagen = ImageDataGenerator(
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
            preprocessing_function=get_random_eraser(v_l=0, v_h=255))
        training_generator = MixupGenerator(X_train, [y_train_g, y_train_a], batch_size=batch_size, alpha=0.2,
                                            datagen=datagen)()
        hist = model.fit_generator(generator=training_generator,
                                   steps_per_epoch=train_num // batch_size,
                                   validation_data=(X_test, [y_test_g, y_test_a]),
                                   epochs=nb_epochs, verbose=1,
                                   callbacks=callbacks)
    else:
        hist = model.fit(X_train, [y_train_g, y_train_a], batch_size=batch_size, epochs=nb_epochs, callbacks=callbacks,
                         validation_data=(X_test, [y_test_g, y_test_a]))

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)), overwrite=True)
    pd.DataFrame(hist.history).to_hdf(os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")
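
# MixupGenerator (used when use_augmentation is set) comes from a separate
# module. The core per-batch transform it applies is mixup; this standalone
# sketch shows the idea only and is not the project's actual generator:
import numpy as np

def mixup_batch(x, ys, alpha=0.2):
    lam = np.random.beta(alpha, alpha)   # mixing coefficient
    idx = np.random.permutation(len(x))  # partner samples
    x_mix = lam * x + (1 - lam) * x[idx]
    ys_mix = [lam * y + (1 - lam) * y[idx] for y in ys]
    return x_mix, ys_mix
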
Example #10
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    max_age = args.max_age + 1
    depth = args.depth
    k = args.width
    transfer_learning = args.transfer_learning
    validation_split = args.validation_split
    use_augmentation = args.aug
    initial_weights = '/home/paula/THINKSMARTER_/Model/demographics-model-prediction/pretrained_models/weights.18-4.06.hdf5'
    # weight_file = '/home/paula/THINKSMARTER_/Model/age-gender-estimation-adapted/checkpoints/weights.09-4.32.hdf5'

    _weight_decay = 0.0005
    _use_bias = False
    _weight_init = "he_normal"

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, max_age)

    if transfer_learning:

        model = WideResNet(image_size, depth=depth, k=k, units_age=101)()
        model.load_weights(initial_weights)

        inputs = model.input
        flatten = model.layers[-3].output  # flatten layer
        dense1 = Dense(units=2,
                       kernel_initializer=_weight_init,
                       use_bias=_use_bias,
                       kernel_regularizer=l2(_weight_decay),
                       activation="softmax")(flatten)
        dense2 = Dense(units=117,
                       kernel_initializer=_weight_init,
                       use_bias=_use_bias,
                       kernel_regularizer=l2(_weight_decay),
                       activation="softmax")(flatten)
        model = Model(inputs=inputs, outputs=[dense1, dense2])

        # ---------------------------------
        # IDEA: fine-tuning (train only the last two layers)
        # for layer in model.layers[:-2]:
        #     layer.trainable = False

    else:
        model = WideResNet(image_size, depth=depth, k=k, units_age=max_age)()

    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    if args.plot_model:
        plot_model(model,
                   to_file='experiments_pictures/model_plot.png',
                   show_shapes=True,
                   show_layer_names=True)

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)),
              "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    # tensorBoard = TensorBoard(log_dir='events', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None)

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)),
        ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")

    data_num = len(X_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_g = y_data_g[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    if use_augmentation:
        datagen = ImageDataGenerator(width_shift_range=0.1,
                                     height_shift_range=0.1,
                                     horizontal_flip=True,
                                     preprocessing_function=get_random_eraser(
                                         v_l=0, v_h=255))
        training_generator = MixupGenerator(X_train, [y_train_g, y_train_a],
                                            batch_size=batch_size,
                                            alpha=0.2,
                                            datagen=datagen)()

        hist = model.fit_generator(generator=training_generator,
                                   steps_per_epoch=train_num // batch_size,
                                   validation_data=(X_test,
                                                    [y_test_g, y_test_a]),
                                   epochs=nb_epochs,
                                   verbose=1,
                                   callbacks=callbacks)
    else:
        hist = model.fit(X_train, [y_train_g, y_train_a],
                         batch_size=batch_size,
                         epochs=nb_epochs,
                         callbacks=callbacks,
                         validation_data=(X_test, [y_test_g, y_test_a]))

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")

    with open('history_tmp.txt', 'w') as f:
        for key in hist.history:
            print(key, file=f)
        f.write('\n')
        json.dump(hist.history, f)
Example #11
def main():
    nb_class = 2

    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    lr = args.lr
    opt_name = args.opt
    depth = args.depth
    k = args.width
    validation_split = args.validation_split
    output_path = Path(__file__).resolve().parent.joinpath(args.output_path)
    output_path.mkdir(parents=True, exist_ok=True)

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    #vggface = VGGFace(model='resnet50')

    #
    vgg_model = VGGFace(model='resnet50',
                        include_top=False,
                        input_shape=(224, 224, 3))
    last_layer = vgg_model.get_layer('avg_pool').output
    x = Flatten(name='flatten')(last_layer)
    out = Dense(nb_class, activation='softmax', name='classifier')(x)
    custom_vgg_model = Model(vgg_model.input, out)
    #

    model = WideResNet(image_size, depth=depth, k=k)()
    opt = get_optimizer(opt_name, lr)
    model.compile(
        optimizer=opt,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs, lr)),
        ModelCheckpoint(str(output_path) +
                        "/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=False,
                        mode="auto")
    ]

    logging.debug("Running training...")

    data_num = len(X_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_g = y_data_g[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    hist = model.fit(X_train, [y_train_g, y_train_a],
                     batch_size=batch_size,
                     epochs=nb_epochs,
                     callbacks=callbacks,
                     validation_data=(X_test, [y_test_g, y_test_a]))

    logging.debug("Saving history...")
    pd.DataFrame(hist.history).to_hdf(
        output_path.joinpath("history_{}_{}.h5".format(depth, k)), "history")