import tensorflow as tf
import os

# Command-line flags (TF1-style tf.app.flags, a thin wrapper over argparse).
# --configfile: path to the .cfg file parsed by process_config().
# --loadmodel: checkpoint name to resume training from; None starts fresh.
tf.app.flags.DEFINE_string("configfile", "config/config_mpii.cfg",
                           "config file name")
tf.app.flags.DEFINE_string("loadmodel", None,
                           "model name used to continue training")

# Parsed flag values, read below as FLAGS.configfile / FLAGS.loadmodel.
FLAGS = tf.app.flags.FLAGS

if __name__ == '__main__':
    print('--Parsing Config File')
    # process_config is expected to return a dict of hyper-parameters and
    # paths keyed by the option names used below (project helper).
    params = process_config(FLAGS.configfile)

    # Keep a copy of the config next to the checkpoints so every run is
    # reproducible from its saved artifacts.  os.makedirs/shutil.copy
    # replace the former `os.system('mkdir -p ...')`/`os.system('cp ...')`
    # shell calls: portable (works on Windows too) and immune to shell
    # injection through a crafted directory path.
    import shutil
    os.makedirs(params['saver_directory'], exist_ok=True)
    shutil.copy(FLAGS.configfile, params['saver_directory'])

    print('--Creating Dataset')
    dataset = DataGenerator(params['joint_list'], params['img_directory'],
                            params['training_txt_file'], params['img_size'])
    # Build the sample tables, shuffle them, then split train/validation.
    dataset._create_train_table()
    dataset._randomize()
    dataset._create_sets()

    # Build the network and run training; `load` resumes from a checkpoint
    # when --loadmodel is given.
    model = HourglassModel(params=params, dataset=dataset, training=True)
    model.create_model()
    model.do_train(nEpochs=params['nepochs'],
                   epochSize=params['epoch_size'],
                   saveStep=params['saver_step'],
                   dataset=None,
                   load=FLAGS.loadmodel)
# ---- Exemple #2 (separate scraped snippet; extraction artifact) ----
# The fragment below is truncated at both ends and does not parse on its own.
    # NOTE(review): this fragment is truncated by the extraction — the
    # enclosing `if __name__ == ...:` header above and the closing of the
    # HourglassModel(...) call below were lost.
    params = process_config('config.cfg')

    print('--Creating Dataset')
    # Variant of DataGenerator taking explicit validation paths plus
    # head-annotation options (presumably head bounding boxes — TODO confirm
    # against the DataGenerator definition).
    dataset = DataGenerator(params['img_directory'],
                            params['training_txt_file'],
                            params['num_joints'],
                            params['val_directory'],
                            params['val_txt_file'],
                            resolutions=params['resolutions'],
                            headed=params['headed'],
                            head_train=params['head_train'],
                            head_test=params['head_test'],
                            head_val=params['head_val'])
    dataset._create_train_table(
    )  # builds the per-sample dicts: box coords, joint coords, weights
    dataset._randomize()  # shuffles the sample tables in place
    dataset._create_sets()  # splits into training and validation lists

    # Network hyper-parameters come straight from the parsed config.
    model = HourglassModel(nFeat=params['nfeats'],
                           nStack=params['nstacks'],
                           nModules=params['nmodules'],
                           nLow=params['nlow'],
                           outputDim=params['num_joints'],
                           batch_size=params['batch_size'],
                           attention=params['mcam'],
                           training=True,
                           drop_rate=params['dropout_rate'],
                           lear_rate=params['learning_rate'],
                           decay=params['learning_rate_decay'],
                           decay_step=params['decay_step'],
                           dataset=dataset,
# ---- Exemple #3 (separate scraped snippet; extraction artifact) ----
# The fragment below is truncated at the start (mid-call) and does not
# parse on its own.
                             img_dir_test=params['img_directory_test2'],
                             test_data_file=params['test_txt_file2'])
    # NOTE(review): truncated fragment — the construction of dataset1 and
    # dataset2, and the code following the camera loads, were lost in
    # extraction.  Four DataGenerator instances (one per image directory,
    # presumably one per camera view — TODO confirm) are prepared the same
    # way and collected into a list.
    dataset3 = DataGenerator(params['joint_list'],
                             params['img_directory3'],
                             params['training_txt_file3'],
                             remove_joints=params['remove_joints'],
                             img_dir_test=params['img_directory_test3'],
                             test_data_file=params['test_txt_file3'])
    dataset4 = DataGenerator(params['joint_list'],
                             params['img_directory4'],
                             params['training_txt_file4'],
                             remove_joints=params['remove_joints'],
                             img_dir_test=params['img_directory_test4'],
                             test_data_file=params['test_txt_file4'])
    # Identical table-build / shuffle / split sequence for each generator.
    dataset1._create_train_table()
    dataset1._randomize()
    dataset1._create_sets()
    dataset2._create_train_table()
    dataset2._randomize()
    dataset2._create_sets()
    dataset3._create_train_table()
    dataset3._randomize()
    dataset3._create_sets()
    dataset4._create_train_table()
    dataset4._randomize()
    dataset4._create_sets()
    dataset = [dataset1, dataset2, dataset3, dataset4]

    # Camera calibration data loaded from MATLAB .mat files.
    camera1 = scipy.io.loadmat(params['camera1'])
    camera2 = scipy.io.loadmat(params['camera2'])
    camera3 = scipy.io.loadmat(params['camera3'])
				params[option] = eval(config.get(section, option))
		# NOTE(review): truncated view — the `def process_config(...)` header,
		# the ConfigParser setup, and the first section branch(es) are above
		# this chunk.  Each branch evaluates every option of its section into
		# the flat `params` dict.
		# SECURITY: eval() executes arbitrary Python read from the config
		# file — acceptable only for trusted, locally-authored configs;
		# consider ast.literal_eval for plain literals.
		if section == 'Network':
			for option in config.options(section):
				params[option] = eval(config.get(section, option))
		if section == 'Train':
			for option in config.options(section):
				params[option] = eval(config.get(section, option))
		if section == 'Validation':
			for option in config.options(section):
				params[option] = eval(config.get(section, option))
		if section == 'Saver':
			for option in config.options(section):
				params[option] = eval(config.get(section, option))
	# Flat dict of every option from every section, values eval'd to Python
	# objects.
	return params


if __name__ == '__main__':
    print('--Parsing Config File')
    params = process_config('config.cfg')

    print('--Creating Dataset')
    # Prepare the generator: build the sample tables, shuffle them, then
    # split into training and validation sets.
    dataset = DataGenerator(params['joint_list'],
                            params['img_directory'],
                            params['training_txt_file'],
                            remove_joints=params['remove_joints'])
    dataset._create_train_table()
    dataset._randomize()
    dataset._create_sets()

    # Assemble the stacked-hourglass network from the parsed configuration
    # and launch training.
    model = HourglassModel(nFeat=params['nfeats'],
                           nStack=params['nstacks'],
                           nModules=params['nmodules'],
                           nLow=params['nlow'],
                           outputDim=params['num_joints'],
                           batch_size=params['batch_size'],
                           attention=params['mcam'],
                           training=True,
                           drop_rate=params['dropout_rate'],
                           lear_rate=params['learning_rate'],
                           decay=params['learning_rate_decay'],
                           decay_step=params['decay_step'],
                           dataset=dataset,
                           name=params['name'],
                           logdir_train=params['log_dir_train'],
                           logdir_test=params['log_dir_test'],
                           tiny=params['tiny'],
                           w_loss=params['weighted_loss'],
                           joints=params['joint_list'],
                           modif=False)
    model.generate_model()
    model.training_init(nEpochs=params['nepochs'],
                        epochSize=params['epoch_size'],
                        saveStep=params['saver_step'],
                        dataset=None)