Example #1
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    depth = args.depth
    k = args.width
    validation_split = args.validation_split

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    model = WideResNet(image_size, depth=depth, k=k)()
    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)),
              "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)),
        ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")
    hist = model.fit(X_data, [y_data_g, y_data_a],
                     batch_size=batch_size,
                     epochs=nb_epochs,
                     callbacks=callbacks,
                     validation_split=validation_split)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")
Example #2
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    depth = args.depth
    k = args.width
    validation_split = args.validation_split
    use_augmentation = args.aug

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    model = WideResNet(image_size, depth=depth, k=k)()
    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)),
              "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)),
        ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")

    data_num = len(X_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_g = y_data_g[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    if use_augmentation:
        datagen = ImageDataGenerator(width_shift_range=0.1,
                                     height_shift_range=0.1,
                                     horizontal_flip=True,
                                     preprocessing_function=get_random_eraser(
                                         v_l=0, v_h=255))
        training_generator = MixupGenerator(X_train, [y_train_g, y_train_a],
                                            batch_size=batch_size,
                                            alpha=0.2,
                                            datagen=datagen)()
        hist = model.fit_generator(generator=training_generator,
                                   steps_per_epoch=train_num // batch_size,
                                   validation_data=(X_test,
                                                    [y_test_g, y_test_a]),
                                   epochs=nb_epochs,
                                   verbose=1,
                                   callbacks=callbacks)
    else:
        hist = model.fit(X_train, [y_train_g, y_train_a],
                         batch_size=batch_size,
                         epochs=nb_epochs,
                         callbacks=callbacks,
                         validation_data=(X_test, [y_test_g, y_test_a]))

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")
Example #3
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    freeze_layers = args.freeze_layers
    depth = args.depth
    k = args.width
    validation_split = args.validation_split

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    #Load weights
    weight_file = os.path.join("pretrained_models", "weights.18-4.06.hdf5")

    model = WideResNet(image_size, depth=depth, k=k)()
    model.load_weights(weight_file)

    # freeze the first `freeze_layers` layers
    # (their weights will not be updated during training)
    print(len(model.layers))
    if freeze_layers > 0:
        for layer in model.layers[:freeze_layers]:
            layer.trainable = False

    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)),
              "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)),
        ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")
    # print('length of X', len(X_data))
    # print('length of y_data_g', y_data_g)
    # print('length of y_data_a', len(y_data_a))
    hist = model.fit(X_data, [y_data_g, y_data_a],
                     batch_size=batch_size,
                     epochs=nb_epochs,
                     callbacks=callbacks,
                     validation_split=validation_split)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")
Example #4
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size * args.gpus
    nb_epochs = args.nb_epochs
    depth = args.depth
    k = args.width
    validation_split = args.validation_split
    img_size = args.img_size
    val_path = args.val_path
    pretrained_fil = args.pretrained_fil
    input_shape = [img_size, img_size, 3]
    patience = 30
    gpu_num = args.gpus
    train_db = args.db

    logging.debug("Loading data...")
    '''
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)
    '''
    batchdataload = BatchLoader(input_path, batch_size, img_size, train_db)
    valdataload = BatchLoader(val_path, batch_size, img_size)
    model = WideResNet(img_size, depth=depth, k=k)()
    #model = mini_XCEPTION(input_shape,101)
    with open(os.path.join("ag_models", "WRN_{}_{}.json".format(depth, k)), "w") as f:
        f.write(model.to_json())
    if pretrained_fil :
        model.load_weights(pretrained_fil)
    # sgd = SGD(lr=0.001, momentum=0.7, nesterov=True)
    adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-5)
    # model.compile(optimizer=sgd, loss=["categorical_crossentropy", "categorical_crossentropy"],
    #               metrics=['accuracy'])
    # if gpu_num > 1:
    #     model = multi_gpu_model(model, gpu_num)
    model.compile(optimizer=adam, loss=["categorical_crossentropy"],
                  metrics=['accuracy'])
    logging.debug("Model summary...")
    #model.count_params()
    model.summary()
    logging.debug("Saving model...")
    if not os.path.exists("./ag_models"):
        mk_dir("ag_models")

    reduce_lr = ReduceLROnPlateau(monitor="val_loss",factor=0.1,patience=patience*2,verbose=1,min_lr=0.0000001)
    early_stop = EarlyStopping('val_loss', patience=patience)
    modelcheckpoint = ModelCheckpoint("ag_models/weights.{epoch:02d}-{val_loss:.2f}.hdf5",\
                        monitor="val_loss",verbose=1,save_best_only=True,mode="auto",period=1000)
    #mk_dir("checkpoints")
    #reduce_lr = LearningRateScheduler(schedule=reduce_lr)
    #callbacks = [modelcheckpoint,early_stop,reduce_lr]
    callbacks = [modelcheckpoint, reduce_lr]
    logging.debug("Running training...")
    # whole training
    error_min = 0
    # assumption: `whole_data` was undefined in the original snippet; read it from args
    whole_data = args.whole_data
    if whole_data:
        hist = model.fit_generator(data_geneter(batchdataload),
                                   steps_per_epoch=batchdataload.batch_num,
                                   epochs=nb_epochs, verbose=1,
                                   callbacks=callbacks,
                                   validation_data=data_geneter(valdataload),
                                   validation_steps=valdataload.batch_num,  # Keras 2 name (was nb_val_samples)
                                   workers=1)  # Keras 2 name (was nb_worker)
        logging.debug("Saving weights...")
        model.save_weights(os.path.join("ag_models", "WRN_{}_{}.h5".format(depth, k)),overwrite=True)
        #pd.DataFrame(hist.history).to_hdf(os.path.join("ag_models", "history_{}_{}.h5".format(depth, k)), "history")
    else:
        epoch_step = 0
        while epoch_step < nb_epochs:
            step = 0
            while step < batchdataload.batch_num:
                # X_data, y_data_g, y_data_a = batch_geneter(batchdataload)
                X_data, y_data_g = batch_geneter(batchdataload)
                # hist = model.fit(X_data, [y_data_g, y_data_a], batch_size=batch_size, epochs=1, verbose=2)
                hist = model.fit(X_data, y_data_g, batch_size=batch_size, epochs=1, verbose=2)
                step += 1
                if step % 100 == 0:
                    # val_data, val_g, val_a = batch_geneter(valdataload)
                    val_data, val_g = batch_geneter(valdataload)
                    # evaluate() on this single-output model returns [loss, accuracy]
                    error_t = model.evaluate(val_data, val_g, batch_size=batch_size, verbose=1)
                    print("****** Epoch {} Step {}: ***********".format(epoch_step, step))
                    print(" loss: {}".format(error_t))
                    if epoch_step % 5 == 0:
                        # index 1 is accuracy; indexing error_t[4] would be out of range here
                        if error_t[1] > error_min:
                            logging.debug("Saving weights...")
                            model.save_weights(os.path.join(
                                "ag_models",
                                "WRN_{}_{}_epoch{}_step{}.h5".format(depth, k, epoch_step, step)))
                            error_min = error_t[1]
            epoch_step += 1
            if epoch_step % 5 == 0:
                logging.debug("Saving weights...")
                val_data, val_g = batch_geneter(valdataload)
                error_t = model.evaluate(val_data, val_g, batch_size=batch_size, verbose=1)
                if error_t[1] > error_min:
                    model.save_weights(os.path.join(
                        "ag_models", "WRN_{}_{}_epoch{}.h5".format(depth, k, epoch_step)))
                    error_min = error_t[1]
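data_geneter and batch_geneter come from elsewhere in the project. For fit_generator the wrapper only has to loop forever and yield (images, labels) batches; a hypothetical sketch assuming batch_geneter returns one (images, gender_labels) pair per call:

def data_geneter(loader):
    # Keras generators must never terminate; fit_generator pulls
    # steps_per_epoch batches from this each epoch
    while True:
        x_batch, y_batch = batch_geneter(loader)
        yield x_batch, y_batch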
Example #5
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    depth = args.depth
    k = args.width
    validation_split = args.validation_split
    use_augmentation = args.aug

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, 101)

    model = WideResNet(image_size, depth=depth, k=k)()
    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss=["categorical_crossentropy", "categorical_crossentropy"],
                  metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)), "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    callbacks = [LearningRateScheduler(schedule=Schedule(nb_epochs)),
                 ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                                 monitor="val_loss",
                                 verbose=1,
                                 save_best_only=True,
                                 mode="auto")
                 ]

    logging.debug("Running training...")

    data_num = len(X_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_g = y_data_g[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    if use_augmentation:
        datagen = ImageDataGenerator(
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
            preprocessing_function=get_random_eraser(v_l=0, v_h=255))
        training_generator = MixupGenerator(X_train, [y_train_g, y_train_a], batch_size=batch_size, alpha=0.2,
                                            datagen=datagen)()
        hist = model.fit_generator(generator=training_generator,
                                   steps_per_epoch=train_num // batch_size,
                                   validation_data=(X_test, [y_test_g, y_test_a]),
                                   epochs=nb_epochs, verbose=1,
                                   callbacks=callbacks)
    else:
        hist = model.fit(X_train, [y_train_g, y_train_a],
                         batch_size=batch_size, epochs=nb_epochs,
                         callbacks=callbacks,
                         validation_data=(X_test, [y_test_g, y_test_a]))

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)), overwrite=True)
    pd.DataFrame(hist.history).to_hdf(os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")
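get_random_eraser returns a preprocessing function that ImageDataGenerator applies to each image. Below is a sketch in the spirit of Random Erasing (Zhong et al.); the area/aspect parameters and the constant fill value are assumptions, not necessarily the repository's exact implementation:

import numpy as np

def random_eraser(v_l=0, v_h=255, p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=3.33):
    def eraser(img):
        # with probability p, overwrite a random rectangle of the image
        if np.random.rand() > p:
            return img
        h, w = img.shape[:2]
        s = np.random.uniform(s_l, s_h) * h * w   # target erase area
        r = np.random.uniform(r_1, r_2)           # target aspect ratio
        eh = min(int(np.sqrt(s / r)), h - 1)
        ew = min(int(np.sqrt(s * r)), w - 1)
        top = np.random.randint(0, h - eh)
        left = np.random.randint(0, w - ew)
        img[top:top + eh, left:left + ew] = np.random.uniform(v_l, v_h)
        return img
    return eraser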
Example #6
def main():
    args = get_args()
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    max_age = args.max_age + 1
    depth = args.depth
    k = args.width
    transfer_learning = args.transfer_learning
    validation_split = args.validation_split
    use_augmentation = args.aug
    initial_weights = '/home/paula/THINKSMARTER_/Model/demographics-model-prediction/pretrained_models/weights.18-4.06.hdf5'
    # weight_file = '/home/paula/THINKSMARTER_/Model/age-gender-estimation-adapted/checkpoints/weights.09-4.32.hdf5'

    _weight_decay = 0.0005
    _use_bias = False
    _weight_init = "he_normal"

    logging.debug("Loading data...")
    image, gender, age, _, image_size, _ = load_data(input_path)
    X_data = image
    y_data_g = np_utils.to_categorical(gender, 2)
    y_data_a = np_utils.to_categorical(age, max_age)

    if transfer_learning:

        model = WideResNet(image_size, depth=depth, k=k, units_age=101)()
        model.load_weights(initial_weights)

        inputs = model.input
        flatten = model.layers[-3].output  # the flatten layer
        dense1 = Dense(units=2,
                       kernel_initializer=_weight_init,
                       use_bias=_use_bias,
                       kernel_regularizer=l2(_weight_decay),
                       activation="softmax")(flatten)
        dense2 = Dense(units=max_age,  # must match to_categorical(age, max_age) above
                       kernel_initializer=_weight_init,
                       use_bias=_use_bias,
                       kernel_regularizer=l2(_weight_decay),
                       activation="softmax")(flatten)
        model = Model(inputs=inputs, outputs=[dense1, dense2])

        # ---------------------------------
        # IDEA: fine-tuning (train only the last two layers)
        # for layer in model.layers[:-2]:
        #     layer.trainable = False

    else:
        model = WideResNet(image_size, depth=depth, k=k, units_age=max_age)()

    sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=["categorical_crossentropy", "categorical_crossentropy"],
        metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    if args.plot_model:
        plot_model(model,
                   to_file='experiments_pictures/model_plot.png',
                   show_shapes=True,
                   show_layer_names=True)

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k)),
              "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")
    # tensorBoard = TensorBoard(log_dir='events', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None)

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)),
        ModelCheckpoint("checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ]

    logging.debug("Running training...")

    data_num = len(X_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    X_data = X_data[indexes]
    y_data_g = y_data_g[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    X_train = X_data[:train_num]
    X_test = X_data[train_num:]
    y_train_g = y_data_g[:train_num]
    y_test_g = y_data_g[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]

    if use_augmentation:
        datagen = ImageDataGenerator(width_shift_range=0.1,
                                     height_shift_range=0.1,
                                     horizontal_flip=True,
                                     preprocessing_function=get_random_eraser(
                                         v_l=0, v_h=255))
        training_generator = MixupGenerator(X_train, [y_train_g, y_train_a],
                                            batch_size=batch_size,
                                            alpha=0.2,
                                            datagen=datagen)()

        hist = model.fit_generator(generator=training_generator,
                                   steps_per_epoch=train_num // batch_size,
                                   validation_data=(X_test,
                                                    [y_test_g, y_test_a]),
                                   epochs=nb_epochs,
                                   verbose=1,
                                   callbacks=callbacks)
    else:
        hist = model.fit(X_train, [y_train_g, y_train_a],
                         batch_size=batch_size,
                         epochs=nb_epochs,
                         callbacks=callbacks,
                         validation_data=(X_test, [y_test_g, y_test_a]))

    logging.debug("Saving weights...")
    model.save_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join("models", "history_{}_{}.h5".format(depth, k)), "history")

    with open('history_tmp.txt', 'w') as f:
        for key in hist.history:
            print(key, file=f)  # list the logged metric names first
        f.write('\n')
        json.dump(hist.history, f)
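To use the artefacts these scripts save, the JSON architecture and the H5 weights can be reloaded with standard Keras calls. A sketch, where depth and k must be the values used at training time and images is any preprocessed batch shaped like the training data:

import os
from keras.models import model_from_json

with open(os.path.join("models", "WRN_{}_{}.json".format(depth, k))) as f:
    model = model_from_json(f.read())
model.load_weights(os.path.join("models", "WRN_{}_{}.h5".format(depth, k)))
pred_gender, pred_age = model.predict(images)  # two outputs: gender and age softmaxes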