def main(argv):
    """Run training of the Deeppose stg-1 regression network.

    Parses command-line arguments, dumps them to the output directory,
    builds the regression net, constructs train/test (and optional
    validation) datasets + iterators, and enters the training loop.

    Args:
        argv: raw command-line argument list passed to
            ``cmd_options.get_arguments``.
    """
    args = cmd_options.get_arguments(argv)

    # Make sure the output directory exists before we write anything into it.
    if not os.path.exists(args.o_dir):
        os.makedirs(args.o_dir)

    # Persist the full argument namespace for reproducibility of this run.
    suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    with open(os.path.join(args.o_dir,
                           'params.dump_{}.txt'.format(suffix)), 'w') as f:
        f.write('{}\n'.format(pprint.pformat(args)))

    net, loss_op, pose_loss_op, train_op = regressionnet.create_regression_net(
        n_joints=args.n_joints,
        init_snapshot_path=args.snapshot,
        is_resume=args.resume,
        reset_iter_counter=args.reset_iter_counter,
        reset_moving_averages=args.reset_moving_averages,
        optimizer_type=args.optimizer,
        gpu_memory_fraction=0.32,  # how much GPU memory to reserve for the network
        net_type=args.net_type)

    # The saver must be created inside the net's graph to capture its variables.
    with net.graph.as_default():
        saver = tf.train.Saver()

    # Parenthesized form prints identically under Python 2 and 3.
    print('args.resume: {}\nargs.snapshot: {}'.format(args.resume, args.snapshot))

    bbox_extension_range = (args.bbox_extension_min, args.bbox_extension_max)
    if bbox_extension_range[0] is None or bbox_extension_range[1] is None:
        # No bbox augmentation at all when either bound is missing.
        bbox_extension_range = None
        test_bbox_extension_range = None
    else:
        # At test time use the max extension for both bounds, so test-time
        # crops are deterministic (no random extension range).
        test_bbox_extension_range = (bbox_extension_range[1],
                                     bbox_extension_range[1])

    # Training dataset: full augmentation (flips, rotations, shifts, bbox jitter).
    train_dataset = dataset.PoseDataset(
        args.train_csv_fn, args.img_path_prefix, args.im_size,
        fliplr=args.fliplr,
        rotate=args.rotate,
        rotate_range=args.rotate_range,
        shift=args.shift,
        bbox_extension_range=bbox_extension_range,
        min_dim=args.min_dim,
        coord_normalize=args.coord_normalize,
        gcn=args.gcn,
        fname_index=args.fname_index,
        joint_index=args.joint_index,
        symmetric_joints=args.symmetric_joints,
        ignore_label=args.ignore_label,
        should_downscale_images=args.should_downscale_images,
        downscale_height=args.downscale_height
    )

    # Test dataset: augmentation disabled; also returns bboxes for evaluation.
    test_dataset = dataset.PoseDataset(
        args.test_csv_fn, args.img_path_prefix, args.im_size,
        fliplr=False, rotate=False,
        shift=None,
        bbox_extension_range=test_bbox_extension_range,
        coord_normalize=args.coord_normalize,
        gcn=args.gcn,
        fname_index=args.fname_index,
        joint_index=args.joint_index,
        symmetric_joints=args.symmetric_joints,
        ignore_label=args.ignore_label,
        should_return_bbox=True,
        should_downscale_images=args.should_downscale_images,
        downscale_height=args.downscale_height
    )

    np.random.seed(args.seed)

    train_iterator = iterators.MultiprocessIterator(
        train_dataset, args.batch_size,
        n_processes=args.workers, n_prefetch=3)
    test_iterator = iterators.MultiprocessIterator(
        test_dataset, args.batch_size,
        repeat=False, shuffle=False,
        n_processes=1, n_prefetch=1)

    # Optional validation set (a held-out slice of train data): only built
    # when a non-empty val CSV path was supplied.
    val_iterator = None
    if args.val_csv_fn is not None and args.val_csv_fn != '':
        small_train_dataset = dataset.PoseDataset(
            args.val_csv_fn, args.img_path_prefix, args.im_size,
            fliplr=False, rotate=False,
            shift=None,
            bbox_extension_range=test_bbox_extension_range,
            coord_normalize=args.coord_normalize,
            gcn=args.gcn,
            fname_index=args.fname_index,
            joint_index=args.joint_index,
            symmetric_joints=args.symmetric_joints,
            ignore_label=args.ignore_label,
            should_return_bbox=True,
            should_downscale_images=args.should_downscale_images,
            downscale_height=args.downscale_height
        )
        val_iterator = iterators.MultiprocessIterator(
            small_train_dataset, args.batch_size,
            repeat=False, shuffle=False,
            n_processes=1, n_prefetch=1)

    train_loop(net, saver, loss_op, pose_loss_op, train_op,
               args.dataset_name,
               train_iterator, test_iterator,
               val_iterator=val_iterator,
               max_iter=args.max_iter,
               test_step=args.test_step,
               log_step=args.log_step,
               snapshot_step=args.snapshot_step,
               batch_size=args.batch_size,
               conv_lr=args.conv_lr,
               fc_lr=args.fc_lr,
               fix_conv_iter=args.fix_conv_iter,
               output_dir=args.o_dir
               )
# NOTE(review): this fragment appears to belong to a *different* training
# script than main() above (Chainer-style setup: get_optimizer with adam_*
# params, `args.batchsize` vs `args.batch_size`, `args.img_dir` vs
# `args.img_path_prefix`). Its enclosing `def` is outside this view, so the
# code is left byte-identical; confirm provenance before merging or removing.
result_dir = create_result_dir(args.model, args.resume_model) create_logger(args, result_dir) model = get_model(args.model, args.n_joints, result_dir, args.resume_model) model = loss.PoseEstimationError(model) opt = get_optimizer(model, args.opt, args.lr, adam_alpha=args.adam_alpha, adam_beta1=args.adam_beta1, adam_beta2=args.adam_beta2, adam_eps=args.adam_eps, weight_decay=args.weight_decay, resume_opt=args.resume_opt) train_dataset = dataset.PoseDataset( args.train_csv_fn, args.img_dir, args.im_size, args.fliplr, args.rotate, args.rotate_range, args.zoom, args.base_zoom, args.zoom_range, args.translate, args.translate_range, args.min_dim, args.coord_normalize, args.gcn, args.n_joints, args.fname_index, args.joint_index, args.symmetric_joints, args.ignore_label) test_dataset = dataset.PoseDataset( args.test_csv_fn, args.img_dir, args.im_size, args.fliplr, args.rotate, args.rotate_range, args.zoom, args.base_zoom, args.zoom_range, args.translate, args.translate_range, args.min_dim, args.coord_normalize, args.gcn, args.n_joints, args.fname_index, args.joint_index, args.symmetric_joints, args.ignore_label) train_iter = iterators.MultiprocessIterator(train_dataset, args.batchsize) test_iter = iterators.MultiprocessIterator(test_dataset, args.batchsize, repeat=False, shuffle=False)