def load_cape_network():
    """Build and return a CAPE model configured for demo inference.

    Loads the template mesh, precomputes the per-level graph pooling and
    Laplacian matrices for the generator, loads precomputed matrices for
    the discriminator, assembles the hyper-parameter dict from the YAML
    config, and builds the model graph in 'demo' phase.

    NOTE(review): relies on module-level names defined elsewhere in this
    file (Mesh, mesh_sampling, load_graph_mtx, project_dir, models, yaml).

    Returns:
        models.CAPE: the constructed model with its graph built.

    Raises:
        NotImplementedError: if the config's num_conv_layers is not 4/6/8.
    """
    reference_mesh_file = 'data/template_mesh.obj'
    reference_mesh = Mesh(filename=reference_mesh_file)

    # Per-layer downsampling factors: 1 keeps resolution, 2 halves it.
    ds_factors = [1, 2, 1, 2, 1, 2, 1, 1]
    print("Pre-computing mesh pooling matrices ..")
    M, A, D, U, _ = mesh_sampling.generate_transform_matrices(reference_mesh, ds_factors)

    p = [a.shape[0] for a in A]           # vertex count per resolution level
    A = [a.astype('float32') for a in A]  # adjacency matrices
    D = [d.astype('float32') for d in D]  # downsampling transforms
    U = [u.astype('float32') for u in U]  # upsampling transforms
    L = [mesh_sampling.laplacian(a, normalized=True) for a in A]

    # Pre-computed graph Laplacian and pooling matrices for the
    # discriminator (the upsampling matrices are not needed here).
    L_ds2, D_ds2, _ = load_graph_mtx(project_dir)

    with open('configs/CAPE-affineconv_nz64_pose32_clotype32_male.yaml') as fl:
        params = yaml.load(fl, Loader=yaml.FullLoader)

    # Demo-time overrides of the training configuration.
    params['lr_scaler'] = 1e-1
    params['lambda_gan'] = 0.1
    params['regularization'] = 2e-3

    # Per-layer filter counts derived from the base width nf.
    nf = params["nf"]
    if params["num_conv_layers"] == 4:
        params['F'] = [nf, 2 * nf, 2 * nf, nf]
    elif params["num_conv_layers"] == 6:
        params['F'] = [nf, nf, 2 * nf, 2 * nf, 4 * nf, 4 * nf]
    elif params["num_conv_layers"] == 8:
        params['F'] = [nf, nf, 2 * nf, 2 * nf, 4 * nf, 4 * nf, 8 * nf, 8 * nf]
    else:
        raise NotImplementedError

    params['p'] = p
    params['K'] = [2] * params["num_conv_layers"]  # polynomial order per conv layer
    params['restart'] = 1
    params['nn_input_channel'] = 3                 # xyz coordinates per vertex
    params['Kd'] = 3
    params['cond_dim'] = 14 * 9   # presumably 14 joints x flattened 3x3 rotation -- TODO confirm
    params['cond2_dim'] = 4
    params['n_layer_cond'] = 1
    params['optimizer'] = 'sgd'
    params['optim_condnet'] = 1

    # Keys consumed by the CLI/driver, not by the model constructor.
    non_model_params = ['demo_n_sample', 'mode', 'dataset', 'num_conv_layers',
                        'ds_factor', 'nf', 'config', 'pose_type', 'decay_every',
                        'gender', 'save_obj', 'vis_demo', 'smpl_model_folder']
    for key in non_model_params:
        params.pop(key, None)

    print("Building model graph...")
    model = models.CAPE(L=L, D=D, U=U, L_d=L_ds2, D_d=D_ds2, **params)
    model.build_graph(model.input_num_verts, model.nn_input_channel, phase='demo')
    print('Model loaded')
    return model
def init_sampling(refer_mesh, data_dir, dataname, ds_factors=(4, 4, 4, 4)):
    """Compute (or load cached) multi-scale mesh sampling operators.

    The reference mesh is decimated once per entry of ``ds_factors``,
    producing adjacency matrices, downsampling/upsampling transforms and
    normalized graph Laplacians for each level. Results are cached as
    ``.npz`` files in ``data_dir`` and reloaded on subsequent calls.

    Args:
        refer_mesh: dict with 'vertices' and 'faces' arrays (only used
            when the cache has to be computed).
        data_dir: directory holding / receiving the cached matrices.
        dataname: dataset identifier forwarded to
            mesh_sampling.generate_transform_matrices.
        ds_factors: decimation factor for each sampling stage.

    Returns:
        (laplacians, downsamp_trans, upsamp_trans, pool_size) where
        pool_size is the vertex count at each level.
    """
    adj_path = os.path.join(data_dir, 'adjacency')
    ds_path = os.path.join(data_dir, 'downsamp_trans')
    us_path = os.path.join(data_dir, 'upsamp_trans')
    lap_path = os.path.join(data_dir, 'laplacians')

    # The first Laplacian file is used as the cache sentinel.
    if not os.path.isfile(lap_path + '0.npz'):
        logger = logging.getLogger('x')
        logger.info('Computing Sampling Parameters')
        adjacencies, downsamp_trans, upsamp_trans = mesh_sampling.generate_transform_matrices(
            dataname, refer_mesh['vertices'], refer_mesh['faces'], ds_factors)
        adjacencies = [x.astype('float32') for x in adjacencies]
        downsamp_trans = [x.astype('float32') for x in downsamp_trans]
        upsamp_trans = [x.astype('float32') for x in upsamp_trans]
        laplacians = [graph.laplacian(a, normalized=True) for a in adjacencies]

        os.makedirs(data_dir, exist_ok=True)
        for prefix, mats in ((adj_path, adjacencies), (ds_path, downsamp_trans),
                             (us_path, upsamp_trans), (lap_path, laplacians)):
            for i, m in enumerate(mats):
                sp.save_npz('{}{}.npz'.format(prefix, i), m)
    else:
        def _load_all(prefix):
            # Sort numerically by the trailing level index: a plain
            # lexicographic sort would order '10' before '2' when there
            # are ten or more sampling stages.
            files = sorted(glob('{}*.npz'.format(prefix)),
                           key=lambda f: int(f[len(prefix):-len('.npz')]))
            return [sp.load_npz(f) for f in files]

        adjacencies = _load_all(adj_path)
        downsamp_trans = _load_all(ds_path)
        upsamp_trans = _load_all(us_path)
        laplacians = _load_all(lap_path)

    pool_size = [x.shape[0] for x in adjacencies]
    return laplacians, downsamp_trans, upsamp_trans, pool_size
print("Loading data .. ")
reference_mesh_file = 'data/template_ceasar.obj'
# Dataset wrapper: exposes train/val/test vertex arrays and the reference mesh.
facedata = FaceData(
    nVal=100,
    train_file=args.data + '/train.npy',
    test_file=args.data + '/test.npy',
    reference_mesh_file=reference_mesh_file,
    pca_n_comp=nz,
)

# Sampling factor of the mesh at each stage of sampling: the template is
# decimated four times, each time by a factor of 4.
ds_factors = [4, 4, 4, 4]
print("Generating Transform Matrices ..")
M, A, D, U = mesh_sampling.generate_transform_matrices(facedata.reference_mesh, ds_factors)

D = [d.astype('float32') for d in D]
U = [u.astype('float32') for u in U]
# NOTE(review): `x.v` implies the elements of A expose a vertex array,
# yet graph.laplacian below consumes them directly -- confirm what
# generate_transform_matrices actually returns in this code base.
p = [x.v.shape[0] for x in A]

X_train = facedata.vertices_train.astype('float32')
X_val = facedata.vertices_val.astype('float32')
X_test = facedata.vertices_test.astype('float32')

print("Computing Graph Laplacians ..")
# Cotangent-weighted normalized Laplacian per level, cast to float32.
L = [graph.laplacian(a, mode='cotan', normalized=True).astype('float32') for a in A]
# NOTE(review): the original passed `help=True or False`, which evaluates
# to the bool True; argparse %-formats help strings, so `--help` would
# raise a TypeError. Replaced with a proper string. Also, a bool default
# with no custom `type=` means any value given on the CLI arrives as a
# non-empty string and is therefore truthy -- TODO confirm intended usage.
parser.add_argument('--val_mode', default=True, help='True or False')
parser.add_argument('--train_data_path', default='data/train/df4305outlier.tfrecords', type=str)
parser.add_argument('--checkpoint_path', default='checkpoints/doublefusion/model-53700', type=str)
args = parser.parse_args()

np.random.seed(args.seed)  # make runs reproducible

# Sampling factor of the mesh at each of the 3 sampling stages; each
# stage decimates the mesh by a factor of 4, yielding adjacency matrices
# A, downsampling matrices D and upsampling matrices U per level.
ds_factors = [4, 4, 4]
M, A, D, U = mesh_sampling.generate_transform_matrices(args.mesh_path, ds_factors)
A = [x.astype('float32') for x in A]
adjacency = utils.get_adjs(A)
D = [x.astype('float32') for x in D]
U = [x.astype('float32') for x in U]
p = [x.shape[0] for x in A]  # vertex count at each resolution level

# Hyper-parameters handed to the model.
params = dict()
params['dir_name'] = args.name
params['num_epochs'] = args.num_epochs
params['batch_size'] = args.batch_size
params['eval_frequency'] = args.eval_frequency
params['Ti_min'] = smpl_min_max[0]
params['Ti_max'] = smpl_min_max[1]
# (continuation of a data-loader call that opens above this chunk)
test_mesh_fn=data_dir + '/test/test_disp.npy',  # test-set displacements
test_cond1_fn=data_dir + '/test/test_{}.npy'.format(args.pose_type),  # pose condition
test_cond2_fn=data_dir + '/test/test_{}.npy'.format('clo_label'),     # clothing-type condition
reference_mesh_file=reference_mesh_file)

# Per-layer downsampling factors: 1 keeps the resolution, args.ds_factor
# reduces it; only 4/6/8 conv layers are handled (no else branch, so any
# other value leaves ds_factors unset -- presumably validated upstream).
if args.num_conv_layers == 4:
    ds_factors = [1, args.ds_factor, 1, 1]
elif args.num_conv_layers == 6:
    ds_factors = [1, args.ds_factor, 1, args.ds_factor, 1, 1]
elif args.num_conv_layers == 8:
    ds_factors = [
        1, args.ds_factor, 1, args.ds_factor, 1, args.ds_factor, 1, 1
    ]
print("Pre-computing mesh pooling matrices ..")
M, A, D, U, _ = mesh_sampling.generate_transform_matrices(
    reference_mesh, ds_factors)
p = list(map(lambda x: x.shape[0], A))             # vertex count per level
A = list(map(lambda x: x.astype('float32'), A))    # adjacency matrices
D = list(map(lambda x: x.astype('float32'), D))    # downsampling transforms
U = list(map(lambda x: x.astype('float32'), U))    # upsampling transforms
L = [mesh_sampling.laplacian(a, normalized=True) for a in A]

# load pre-computed graph laplacian and pooling matrices for discriminator
L_ds2, D_ds2, U_ds2 = load_graph_mtx(project_dir)

# pass params and build model
params = copy.deepcopy(args_dict)
params['restart'] = bool(args.restart)
params['use_res_block'], params['use_res_block_dec'] = bool(
    args.use_res_block), bool(args.use_res_block_dec)
params['nn_input_channel'] = 3  # xyz coordinates per vertex