# Example #1
                       name=params['name'],
                       data_stream_train=input_data,
                       data_stream_valid=valid_data,
                       data_stream_test=None,
                       logdir_train=params['log_dir_train'],
                       logdir_test=params['log_dir_test'],
                       saver_directory=params['saver_directory'],
                       tiny=params['tiny'],
                       w_loss=params['weighted_loss'],
                       w_summary=True,
                       joints=params['joint_list'],
                       modif=False)
# Build the graph, then train from scratch (no checkpoint is loaded).
model.generate_model()
load_file = None  # no pretrained checkpoint; training starts from scratch
model.training_init(nEpochs=params['nepochs'],
                    epochSize=epochSize,
                    saveStep=summary_steps,
                    load=load_file)

# Run inference on the validation stream and report the raw heatmap shape.
heatmaps = model.get_heatmaps(load=load_file)
print("Output heatmaps result. Shape: " + str(heatmaps.shape))

# Ground-truth landmark coordinates from the validation dataframe.
# FIX: DataFrame.as_matrix() was removed in pandas 1.0 — use to_numpy().
df_file_names = valid_data.df[valid_data.file_name_col]  # NOTE(review): appears unused below — confirm before removing
gt_coords = valid_data.df[valid_data.coords_cols].to_numpy()
lm_cnt = valid_data.lm_cnt

# Convert the predicted heatmaps back to (x, y) image coordinates.
pred_coord = heatmap_to_coord(heatmaps, valid_data.img_width,
                              valid_data.img_height)

# Persist per-point predictions vs. ground truth for offline evaluation.
write_point_result(pred_coord, gt_coords, lm_cnt, params,
                   params['valid_result_dir'])
# Example #2
    print('--Creating Dataset')
    # Assemble the data pipeline: load the annotation table, shuffle it,
    # then split it into train/validation sets.
    data_gen = DataGenerator(params['joint_list'], params['img_directory'],
                             params['training_txt_file'])
    data_gen._create_train_table()
    data_gen._randomize()
    data_gen._create_sets()

    # Instantiate the stacked hourglass network from the parsed config.
    hg_model = HourglassModel(
        nFeat=params['nfeats'], nStack=params['nstacks'],
        nModules=params['nmodules'], nLow=params['nlow'],
        outputDim=params['num_joints'], batch_size=params['batch_size'],
        training=True, drop_rate=params['dropout_rate'],
        lear_rate=params['learning_rate'], decay=params['learning_rate_decay'],
        decay_step=params['decay_step'], dataset=data_gen,
        name=params['name'], logdir_train=params['log_dir_train'],
        logdir_test=params['log_dir_test'], tiny=params['tiny'],
        modif=False)
    hg_model.generate_model()
    hg_model.training_init(nEpochs=params['nepochs'],
                           epochSize=params['epoch_size'],
                           saveStep=params['saver_step'],
                           dataset=None)
# Example #3
	# Command-line interface: only a --cfg path (plus --version) is accepted.
	arg_parser = argparse.ArgumentParser(description='Launch the training of the Hourglass model.', add_help=True, epilog='Just a test for this parameter')
	arg_parser.add_argument('--version', action='version', version='Version 1.0')
	arg_parser.add_argument('--cfg', required=False, default='./config.cfg', help='The path for your config file')
	cli_args = arg_parser.parse_args()

	print('>>>>> Parsing Config File From %s' % (cli_args.cfg))
	cfg = process_config(cli_args.cfg)

	print('>>>>> Creating Dataset Now')
	# data_gen.train_set holds the file names that make up the training set.
	data_gen = DataGenerator(joints_name=cfg['joint_list'], img_dir=cfg['img_directory'],
							 train_data_file=cfg['training_txt_file'],
							 camera_extrinsic=cfg['camera_extrinsic'],
							 camera_intrinsic=cfg['camera_intrinsic'])
	data_gen._create_train_table()
	# Typical config values: nfeats=256, nstacks=4, nmodules=1 (not used),
	# nlow=4 (number of downsamplings in one stack), mcam=false (attention
	# system, not needed), name=pretrained model, tiny=false,
	# weighted_loss=false.

	os.environ["CUDA_VISIBLE_DEVICES"] = "0"
	net = HourglassModel(nFeat=cfg['nfeats'], nStack=cfg['nstacks'], nModules=cfg['nmodules'],
						 nLow=cfg['nlow'], outputDim=cfg['num_joints'], batch_size=cfg['batch_size'],
						 training=True, drop_rate=cfg['dropout_rate'], lear_rate=cfg['learning_rate'],
						 decay=cfg['learning_rate_decay'], decay_step=cfg['decay_step'],
						 dataset=data_gen, name=cfg['name'], w_summary=True,
						 logdir_train=cfg['log_dir_train'], logdir_test=cfg['log_dir_test'],
						 tiny=cfg['tiny'], w_loss=cfg['weighted_loss'], joints=cfg['joint_list'],
						 gpu_frac=cfg['gpu_frac'], model_save_dir=cfg['model_save_dir'])

	print('>>>>> Creating Hourglass Model')
	net.generate_model()
	net.training_init(nEpochs=cfg['nepochs'], epochSize=cfg['epoch_size'],
					  saveStep=cfg['saver_step'], valid_iter=cfg['valid_iteration'],
					  pre_trained=cfg['pretrained_model'],
					  human_pretrained_model=cfg['human_pretrained_model'])
                            outputDim=3,
                            batch_size=16,
                            training=True,
                            drop_rate=0.2,
                            lear_rate=2.5 * 1e-4,
                            decay=0.96,
                            decay_step=1000,
                            logdir_train='./logdir_train',
                            logdir_test='./logdir_test',
                            tiny=True,
                            w_loss=False,
                            modif=False)
     # Build the graph, then train: 50 epochs of 1000 iterations with
     # batch size 16, checkpointing every 500 steps.
     model.generate_model()
     model.training_init(data_gen,
                         nEpochs=50,
                         epochSize=1000,
                         batchSize=16,
                         saveStep=500,
                         load=None)  # load=None — presumably no checkpoint restore; TODO confirm
 else:
     data_gen = SFSTestDataProvider(str(data_dir + "test/"))
     model_test = HourglassModel(nFeat=256,
                                 nStack=4,
                                 nLow=4,
                                 outputDim=3,
                                 batch_size=16,
                                 training=False,
                                 drop_rate=0.2,
                                 lear_rate=2.5 * 1e-4,
                                 decay=0.96,
                                 decay_step=1000,
                                 logdir_train='./logdir_train',
# Example #5
                            params['train_img_directory'],
                            params['training_data_file'])
    dataset.creator()
    # Configure the stacked hourglass network from the parsed config file.
    hg = HourglassModel(
        nFeat=params['nfeats'], nStack=params['nstacks'],
        nModules=params['nmodules'], nLow=params['nlow'],
        outputDim=params['num_joints'], batch_size=params['batch_size'],
        attention=params['mcam'], training=True,
        drop_rate=params['dropout_rate'], lear_rate=params['learning_rate'],
        decay=params['learning_rate_decay'], decay_step=params['decay_step'],
        dataset=dataset, name=params['name'],
        logdir_train=params['log_dir_train'], logdir_test=params['log_dir_test'],
        tiny=params['tiny'], w_loss=params['weighted_loss'],
        joints=params['joint_list'], modif=False)
    hg.generate_model()
    # load: checkpoint path assembled from model_file + load_file.
    hg.training_init(nEpochs=params['nepochs'],
                     epochSize=params['epoch_size'],
                     saveStep=params['saver_step'],
                     saver_dir=params['model_file'],
                     dataset=None,
                     load=params['model_file'] + params['load_file'])
# Example #6
    # Build the hourglass model; f_loss/dataset/modif come from the caller,
    # everything else from the parsed config.
    net = HourglassModel(
        f_loss=f_loss,
        nFeat=params['nfeats'], nStack=params['nstacks'],
        nModules=params['nmodules'], nLow=params['nlow'],
        outputDim=params['num_joints'], batch_size=params['batch_size'],
        attention=params['mcam'], training=True,
        drop_rate=params['dropout_rate'], lear_rate=params['learning_rate'],
        decay=params['learning_rate_decay'], decay_step=params['decay_step'],
        dataset=dataset, name=params['name'],
        logdir_train=params['log_dir_train'], logdir_test=params['log_dir_test'],
        saver_dir=params['saver_directory'], tiny=params['tiny'],
        w_loss=params['weighted_loss'], joints=params['joint_list'],
        modif=modif)
    net.generate_model()
    # NOTE(review): the original author flagged 'load' as possibly needing a
    # change — confirm model_n is the intended checkpoint to restore.
    net.training_init(nEpochs=params['nepochs'],
                      epochSize=params['epoch_size'],
                      saveStep=params['saver_step'],
                      config=config,
                      dataset=dataset,
                      load=model_n,
                      reset=reset)
				# SECURITY NOTE(review): eval() executes arbitrary Python from
				# the config file — safe only for trusted, local config files.
				params[option] = eval(config.get(section, option))
		# Each recognised section is handled identically: every option's raw
		# string is eval'd into a Python value and stored under the option name.
		if section == 'Network':
			for option in config.options(section):
				params[option] = eval(config.get(section, option))
		if section == 'Train':
			for option in config.options(section):
				params[option] = eval(config.get(section, option))
		if section == 'Validation':
			for option in config.options(section):
				params[option] = eval(config.get(section, option))
		if section == 'Saver':
			for option in config.options(section):
				params[option] = eval(config.get(section, option))
	# Flat dict of all parsed options across every section.
	return params


if __name__ == '__main__':
	print('--Parsing Config File')
	cfg = process_config('config.cfg')

	print('--Creating Dataset')
	# Load the annotation table, shuffle, and split into train/valid sets.
	data_gen = DataGenerator(cfg['joint_list'], cfg['img_directory'],
							 cfg['training_txt_file'],
							 remove_joints=cfg['remove_joints'])
	data_gen._create_train_table()
	data_gen._randomize()
	data_gen._create_sets()

	# All hyper-parameters come straight from the parsed config file.
	net = HourglassModel(nFeat=cfg['nfeats'], nStack=cfg['nstacks'],
						 nModules=cfg['nmodules'], nLow=cfg['nlow'],
						 outputDim=cfg['num_joints'], batch_size=cfg['batch_size'],
						 attention=cfg['mcam'], training=True,
						 drop_rate=cfg['dropout_rate'], lear_rate=cfg['learning_rate'],
						 decay=cfg['learning_rate_decay'], decay_step=cfg['decay_step'],
						 dataset=data_gen, name=cfg['name'],
						 logdir_train=cfg['log_dir_train'], logdir_test=cfg['log_dir_test'],
						 tiny=cfg['tiny'], w_loss=cfg['weighted_loss'],
						 joints=cfg['joint_list'], modif=False)
	net.generate_model()
	net.training_init(nEpochs=cfg['nepochs'], epochSize=cfg['epoch_size'],
					  saveStep=cfg['saver_step'], dataset=None)