def main():
    args = get_args()
    input_path1 = args.input1
    input_path2 = args.input2
    db_name = args.db
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    netType = args.netType

    logging.debug("Loading training data...")
    image1, age1, image_size = load_data_npz(input_path1)
    logging.debug("Loading testing data...")
    image2, age2, image_size = load_data_npz(input_path2)

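    # Epochs at which the DecayLearningRate callback (set up below) lowers the learning rate.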
    start_decay_epoch = [30, 60]

    optMethod = Adam()

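    # netType selects the regression backbone: 1/2 = MobileNet (alpha 0.25 or 0.5), 3/4 = DenseNet (depth 13 or 19).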
    if netType == 1:
        model_type = 'MobileNet'
        alpha = 0.25
        model = TYY_MobileNet_reg(image_size, alpha)()
        save_name = 'mobilenet_reg_%s_%d' % (alpha, image_size)
        model.compile(optimizer=optMethod,
                      loss=["mae"],
                      metrics={'pred_a': 'mae'})

    elif netType == 2:
        model_type = 'MobileNet'
        alpha = 0.5
        model = TYY_MobileNet_reg(image_size, alpha)()
        save_name = 'mobilenet_reg_%s_%d' % (alpha, image_size)
        model.compile(optimizer=optMethod,
                      loss=["mae"],
                      metrics={'pred_a': 'mae'})

    elif netType == 3:
        model_type = 'DenseNet'
        N_densenet = 3
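        # Usual DenseNet depth formula: 3 blocks of N layers plus the initial conv, two transition layers, and the classifier.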
        depth_densenet = 3 * N_densenet + 4
        model = TYY_DenseNet_reg(image_size, depth_densenet)()
        save_name = 'densenet_reg_%d_%d' % (depth_densenet, image_size)
        model.compile(optimizer=optMethod,
                      loss=["mae"],
                      metrics={'pred_a': 'mae'})

    elif netType == 4:
        model_type = 'DenseNet'
        N_densenet = 5
        depth_densenet = 3 * N_densenet + 4
        model = TYY_DenseNet_reg(image_size, depth_densenet)()
        save_name = 'densenet_reg_%d_%d' % (depth_densenet, image_size)
        model.compile(optimizer=optMethod,
                      loss=["mae"],
                      metrics={'pred_a': 'mae'})

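    # For megaage, warm-start from wiki-pretrained weights stored under the same save_name.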
    if db_name == "meagaage":
        weight_file = "../pre-trained/wiki/" + save_name + "/" + save_name + ".h5"
        model.load_weights(weight_file)

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")

    mk_dir(db_name + "_models")
    mk_dir(db_name + "_models/" + model_type + "/")
    mk_dir(db_name + "_models/" + model_type + "/batch_size_%d/" %
           (batch_size))
    mk_dir(db_name + "_models/" + model_type + "/batch_size_%d/" %
           (batch_size) + save_name)
    mk_dir(db_name + "_checkpoints")
    mk_dir(db_name + "_checkpoints/" + model_type)
    mk_dir(db_name + "_checkpoints/" + model_type + "/batch_size_%d/" %
           (batch_size))
    plot_model(model,
               to_file=db_name + "_models/" + model_type + "/batch_size_%d/" %
               (batch_size) + save_name + "/" + save_name + ".png")

    with open(
            os.path.join(
                db_name + "_models/" + model_type + "/batch_size_%d/" %
                (batch_size) + save_name, save_name + '.json'), "w") as f:
        f.write(model.to_json())

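    # Custom callback from TYY_callbacks that decays the learning rate at the epochs listed in start_decay_epoch.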
    decaylearningrate = TYY_callbacks.DecayLearningRate(start_decay_epoch)

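    # Keep only the best model by validation loss; Keras fills the epoch and val_loss placeholders in the filename.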
    callbacks = [
        ModelCheckpoint(db_name + "_checkpoints/" + model_type +
                        "/batch_size_%d/" % (batch_size) +
                        "weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto"), decaylearningrate
    ]

    logging.debug("Running training...")

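    # image1/age1 form the (shuffled) training split; image2/age2 are held out for validation.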
    data_num = len(image1) + len(image2)
    indexes1 = np.arange(len(image1))
    indexes2 = np.arange(len(image2))
    np.random.shuffle(indexes1)
    np.random.shuffle(indexes2)
    x_train = image1[indexes1]
    x_test = image2[indexes2]
    y_train_a = age1[indexes1]
    y_test_a = age2[indexes2]
    train_num = len(image1)

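    # Stream training batches from data_generator_reg; validate on the in-memory test arrays after each epoch.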
    hist = model.fit_generator(generator=data_generator_reg(
        X=x_train, Y=y_train_a, batch_size=batch_size),
                               steps_per_epoch=train_num // batch_size,
                               validation_data=(x_test, [y_test_a]),
                               epochs=nb_epochs,
                               verbose=1,
                               callbacks=callbacks)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join(
        db_name + "_models/" + model_type + "/batch_size_%d/" % (batch_size) +
        save_name, save_name + '.h5'),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join(
            db_name + "_models/" + model_type + "/batch_size_%d/" %
            (batch_size) + save_name, 'history_' + save_name + '.h5'),
        "history")
Example #2
def main():
    args = get_args()
    input_path = args.input
    db_name = args.db
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    validation_split = args.validation_split
    netType1 = args.netType1
    netType2 = args.netType2

    logging.debug("Loading data...")
    image, gender, age, image_size = load_data_npz(input_path)
    
    x_data = image
    y_data_a = age

    start_decay_epoch = [30,60]

    optMethod = Adam()

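    # SSR-Net configuration: three stages of 3 bins each; netType1/netType2 (1..4) map to lambda_local/lambda_d values of 0.25, 0.5, 0.75 or 1.0.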
    stage_num = [3,3,3]
    lambda_local = 0.25*(netType1%5)
    lambda_d = 0.25*(netType2%5)

    model = SSR_net(image_size,stage_num, lambda_local, lambda_d)()
    save_name = 'ssrnet_%d_%d_%d_%d_%s_%s' % (stage_num[0],stage_num[1],stage_num[2], image_size, lambda_local, lambda_d)
    model.compile(optimizer=optMethod, loss=["mae"], metrics={'pred_a':'mae'})


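    # Warm-start chain: wiki fine-tunes from imdb weights and morph from wiki weights, when a previous run produced them.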
    if db_name == "wiki":
        weight_file = f'imdb_models/{save_name}/{save_name}.h5'
        if os.path.isfile(weight_file):  # load only when the pre-trained file exists
            print("previous weight loading...")
            model.load_weights(weight_file)
    elif db_name == "morph": 
        weight_file = f'wiki_models/{save_name}/{save_name}.h5'
        if os.path.isfile(weight_file):
            print("previous weight loading...")
            model.load_weights(weight_file) 

    
    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir(db_name+"_models")
    mk_dir(db_name+"_models/"+save_name)
    mk_dir(db_name+"_checkpoints")
    plot_model(model, to_file=db_name+"_age_models/"+save_name+"/"+save_name+".png")

    with open(os.path.join(db_name+"_age_models/"+save_name, save_name+'.json'), "w") as f:
        f.write(model.to_json())

    
    decaylearningrate = TYY_callbacks.DecayLearningRate(start_decay_epoch)

    callbacks = [ModelCheckpoint(db_name+"_checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                                 monitor="val_loss",
                                 verbose=1,
                                 save_best_only=True,
                                 mode="auto"), decaylearningrate
                        ]

    logging.debug("Running training...")
    


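    # Shuffle once, then split off the trailing validation_split fraction as the test set.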
    data_num = len(x_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    x_data = x_data[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    
    x_train = x_data[:train_num]
    x_test = x_data[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]


    hist = model.fit_generator(generator=data_generator_reg(X=x_train, Y=y_train_a, batch_size=batch_size),
                                   steps_per_epoch=train_num // batch_size,
                                   validation_data=(x_test, [y_test_a]),
                                   epochs=nb_epochs, verbose=1,
                                   callbacks=callbacks)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join(db_name+"_models/"+save_name, save_name+'.h5'), overwrite=True)
    pd.DataFrame(hist.history).to_hdf(os.path.join(db_name+"_models/"+save_name, 'history_'+save_name+'.h5'), "history")
Example #3
def main():
    args = get_args()
    input_path = args.input
    db_name = args.db
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    validation_split = args.validation_split
    netType = args.netType

    logging.debug("Loading data...")
    image, gender, age, image_size = load_data_npz(input_path)
    
    x_data = image
    y_data_a = age


    start_decay_epoch = [30,60]

    optMethod = Adam()

    if netType == 1:
        alpha = 0.25
        model = TYY_MobileNet_reg(image_size,alpha)()
        save_name = 'mobilenet_reg_%s_%d' % (alpha, image_size)
        model.compile(optimizer=optMethod, loss=["mae"], metrics={'pred_a':'mae'})

    elif netType == 2:
        alpha = 0.5
        model = TYY_MobileNet_reg(image_size,alpha)()
        save_name = 'mobilenet_reg_%s_%d' % (alpha, image_size)
        model.compile(optimizer=optMethod, loss=["mae"], metrics={'pred_a':'mae'})

    elif netType == 3:
        N_densenet = 3
        depth_densenet = 3*N_densenet+4
        model = TYY_DenseNet_reg(image_size,depth_densenet)()
        save_name = 'densenet_reg_%d_%d' % (depth_densenet, image_size)
        model.compile(optimizer=optMethod, loss=["mae"], metrics={'pred_a':'mae'})

    elif netType == 4:
        N_densenet = 5
        depth_densenet = 3*N_densenet+4
        model = TYY_DenseNet_reg(image_size,depth_densenet)()
        save_name = 'densenet_reg_%d_%d' % (depth_densenet, image_size)
        model.compile(optimizer=optMethod, loss=["mae"], metrics={'pred_a':'mae'})

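    # These loads are unguarded: load_weights raises if the pre-trained file from the earlier stage is missing.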
    if db_name == "wiki":
        weight_file = "imdb_models/"+save_name+"/"+save_name+".h5"
        model.load_weights(weight_file)
        #start_decay_epoch = range(0,nb_epochs,30)
    elif db_name == "morph": 
        weight_file = "wiki_models/"+save_name+"/"+save_name+".h5"
        model.load_weights(weight_file) 

    
    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir(db_name+"_models")
    mk_dir(db_name+"_models/"+save_name)
    mk_dir(db_name+"_checkpoints")
    plot_model(model, to_file=db_name+"_models/"+save_name+"/"+save_name+".png")

    with open(os.path.join(db_name+"_models/"+save_name, save_name+'.json'), "w") as f:
        f.write(model.to_json())

    decaylearningrate = TYY_callbacks.DecayLearningRate(start_decay_epoch)

    callbacks = [ModelCheckpoint(db_name+"_checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                                 monitor="val_loss",
                                 verbose=1,
                                 save_best_only=True,
                                 mode="auto"), decaylearningrate
                        ]

    logging.debug("Running training...")
    


    data_num = len(x_data)
    indexes = np.arange(data_num)
    np.random.shuffle(indexes)
    x_data = x_data[indexes]
    y_data_a = y_data_a[indexes]
    train_num = int(data_num * (1 - validation_split))
    
    x_train = x_data[:train_num]
    x_test = x_data[train_num:]
    y_train_a = y_data_a[:train_num]
    y_test_a = y_data_a[train_num:]


    hist = model.fit_generator(generator=data_generator_reg(X=x_train, Y=y_train_a, batch_size=batch_size),
                               steps_per_epoch=train_num // batch_size,
                               validation_data=(x_test, [y_test_a]),
                               epochs=nb_epochs, verbose=1,
                               callbacks=callbacks)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join(db_name+"_models/"+save_name, save_name+'.h5'), overwrite=True)
    pd.DataFrame(hist.history).to_hdf(os.path.join(db_name+"_models/"+save_name, 'history_'+save_name+'.h5'), "history")

def MAE(a, b):
    """Mean absolute error between two equal-length arrays."""
    return np.mean(np.absolute(a - b))
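
# Illustrative check with made-up values: MAE([25, 31], [24, 35]) = (1 + 4) / 2 = 2.5.
# assert MAE(np.array([25.0, 31.0]), np.array([24.0, 35.0])) == 2.5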


'''
  file name
'''
test_file = sys.argv[1]
netType = int(sys.argv[2])

logging.debug("Loading testing data...")
image2, age2, image_size = load_data_npz(test_file)

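# Rebuild the DenseNet used at training time and point model_file at the weights the training script saved.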
if netType == 3:
    N_densenet = 3
    depth_densenet = 3 * N_densenet + 4
    model_file = 'megaage_models/DenseNet/batch_size_50/densenet_reg_%d_64/densenet_reg_%d_64.h5' % (
        depth_densenet, depth_densenet)
    model = TYY_DenseNet_reg(image_size, depth_densenet)()
    mk_dir('Results_csv')
    save_name = 'Results_csv/densenet_reg_%d_%d.csv' % (depth_densenet,
                                                        image_size)

elif netType == 4:
    N_densenet = 5
    depth_densenet = 3 * N_densenet + 4
    model_file = 'megaage_models/DenseNet/batch_size_50/densenet_reg_%d_64/densenet_reg_%d_64.h5' % (
        depth_densenet, depth_densenet)
    model = TYY_DenseNet_reg(image_size, depth_densenet)()
    mk_dir('Results_csv')
    save_name = 'Results_csv/densenet_reg_%d_%d.csv' % (depth_densenet,
                                                        image_size)

Example #4
def main():
    args = get_args()
    input_path1 = args.input1
    input_path2 = args.input2
    db_name = args.db
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    netType1 = args.netType1
    netType2 = args.netType2

    logging.debug("Loading training data...")
    image1, age1, image_size = load_data_npz(input_path1)
    logging.debug("Loading testing data...")
    image2, age2, image_size = load_data_npz(input_path2)

    start_decay_epoch = [30, 60]

    optMethod = Adam()

    stage_num = [3, 3, 3]
    lambda_local = 0.25 * (netType1 % 5)
    lambda_d = 0.25 * (netType2 % 5)

    model = SSR_net(image_size, stage_num, lambda_local, lambda_d)()
    save_name = 'ssrnet_%d_%d_%d_%d_%s_%s' % (stage_num[0], stage_num[1],
                                              stage_num[2], image_size,
                                              lambda_local, lambda_d)
    model.compile(optimizer=optMethod, loss=["mae"], metrics={'pred_a': 'mae'})

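    # Fine-tune on megaage starting from the wiki-pretrained SSR-Net weights.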
    if db_name == "megaage":
        weight_file = "./pre-trained/wiki/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"
        #weight_file = "./pre-trained/imdb/ssrnet_3_3_3_64_1.0_1.0/ssrnet_3_3_3_64_1.0_1.0.h5"
        model.load_weights(weight_file)

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir(db_name + "_models")
    mk_dir(db_name + "_models/batch_size_%d/" % (batch_size))
    mk_dir(db_name + "_models/batch_size_%d/" % (batch_size) + save_name)
    mk_dir(db_name + "_checkpoints")
    mk_dir(db_name + "_checkpoints/batch_size_%d/" % (batch_size))
    plot_model(model,
               to_file=db_name + "_models/batch_size_%d/" % (batch_size) +
               save_name + "/" + save_name + ".png")

    with open(
            os.path.join(
                db_name + "_models/batch_size_%d/" % (batch_size) + save_name,
                save_name + '.json'), "w") as f:
        f.write(model.to_json())

    decaylearningrate = TYY_callbacks.DecayLearningRate(start_decay_epoch)

    callbacks = [
        ModelCheckpoint(db_name + "_checkpoints/batch_size_%d/" %
                        (batch_size) +
                        "weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto"), decaylearningrate
    ]
    logging.debug("Running training...")

    data_num = len(image1) + len(image2)
    indexes1 = np.arange(len(image1))
    indexes2 = np.arange(len(image2))
    np.random.shuffle(indexes1)
    np.random.shuffle(indexes2)
    x_train = image1[indexes1]
    x_test = image2[indexes2]
    y_train_a = age1[indexes1]
    y_test_a = age2[indexes2]
    train_num = len(image1)

    hist = model.fit_generator(generator=data_generator_reg(
        X=x_train, Y=y_train_a, batch_size=batch_size),
                               steps_per_epoch=train_num // batch_size,
                               validation_data=(x_test, [y_test_a]),
                               epochs=nb_epochs,
                               verbose=1,
                               callbacks=callbacks)

    logging.debug("Saving weights...")
    model.save_weights(os.path.join(
        db_name + "_models/batch_size_%d/" % (batch_size) + save_name,
        save_name + '.h5'),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(
        os.path.join(
            db_name + "_models/batch_size_%d/" % (batch_size) + save_name,
            'history_' + save_name + '.h5'), "history")