def __init__(self,
                 garment_class,
                 shape_idx,
                 style_idx,
                 split,
                 gender='female',
                 smooth_level=0):
        """Load all simulated pose sequences for one (shape, style) pivot.

        Args:
            garment_class: garment type (e.g. 't-shirt'); together with
                `gender` it selects the data directory under DATA_DIR.
            shape_idx: index of the body-shape (beta) pivot.
            style_idx: index of the garment-style (gamma) pivot.
            split: None to keep all frames, else 'train' or 'test'.
            gender: SMPL gender, used to pick the dataset directory.
            smooth_level: 0 = raw displacements only; 1 with
                global_var.SMOOTH_STORED loads precomputed smoothed
                displacements from disk; any other non-zero, non -1 value
                sets up on-the-fly DiffusionSmoothing + SMPL instead.
        """
        super(OneStyleShape, self).__init__()

        self.garment_class = garment_class
        self.split, self.gender = split, gender
        self.style_idx, self.shape_idx = style_idx, shape_idx
        self.smooth_level = smooth_level

        data_dir = os.path.join(global_var.DATA_DIR,
                                '{}_{}'.format(garment_class, gender))

        # Per-pivot shape (beta) and style (gamma) parameters.
        beta = np.load(
            os.path.join(data_dir, 'shape/beta_{}.npy'.format(shape_idx)))
        gamma = np.load(
            os.path.join(data_dir, 'style/gamma_{}.npy'.format(style_idx)))
        # I had a hard time figuring out the bug in the following line:
        # gamma = np.load(os.path.join(data_dir, 'style/gamma_{}.npy'.format(shape_idx)))

        # Accumulate frames across every sequence file found for this pivot:
        # pose/<shape>_<style>/poses_000.npz, poses_001.npz, ... until a
        # sequence index is missing.
        thetas = []
        pose_order = []
        verts_d = []
        smooth_verts_d = []
        seq_idx = 0
        while True:
            seq_path = os.path.join(
                data_dir,
                'pose/{}_{}/poses_{:03d}.npz'.format(shape_idx, style_idx,
                                                     seq_idx))
            if not os.path.exists(seq_path):
                break
            data = np.load(seq_path)
            verts_d_path = os.path.join(
                data_dir,
                'pose/{}_{}/unposed_{:03d}.npy'.format(shape_idx, style_idx,
                                                       seq_idx))
            # A poses file without its unposed-displacement file means the
            # simulation of that sequence failed; skip it, don't abort.
            if not os.path.exists(verts_d_path):
                print("{} doesn't exist. This is not an error. "
                      "It's just that this sequence was not simulated well.".
                      format(verts_d_path))
                seq_idx += 1
                continue

            thetas.append(data['thetas'])
            pose_order.append(data['pose_order'])
            verts_d.append(np.load(verts_d_path))

            # Precomputed smoothed displacements are mandatory in this mode;
            # a missing file is a hard error (run save_smooth first).
            if smooth_level == 1 and global_var.SMOOTH_STORED:
                smooth_verts_d_path = os.path.join(
                    global_var.SMOOTH_DATA_DIR,
                    '{}_{}'.format(garment_class, gender),
                    'pose/{}_{}/smooth_unposed_{:03d}.npy'.format(
                        shape_idx, style_idx, seq_idx))
                if not os.path.exists(smooth_verts_d_path):
                    print("{} doesn't exist.".format(smooth_verts_d_path))
                    exit(-1)
                smooth_verts_d.append(np.load(smooth_verts_d_path))

            seq_idx += 1
            # print("Using just one sequence file")
            # break

        thetas = np.concatenate(thetas, axis=0)
        pose_order = np.concatenate(pose_order, axis=0)
        verts_d = np.concatenate(verts_d, axis=0)
        if smooth_level == 1 and global_var.SMOOTH_STORED:
            smooth_verts_d = np.concatenate(smooth_verts_d, axis=0)

        if split is not None:
            assert (split in ['test', 'train'])
            # SMPL has 1782 poses. We set aside 350 poses as test set and remaining in train set. So if a frame has a
            # pose from these 1782 poses, it's easy to classify them as train or test.
            # But during simulation of these poses, we add some intermediate poses for simulation stability.
            # To classify these intermediate poses in test and train split, we follow this policy:
            # - For train pivots, intermediate poses go into train set because there are significant amount of
            #   intermediate poses and we can't afford to give them away during training.
            # - For test pivots, we add intermediate poses to test set. Assuming that intermediate poses are randomly
            #   distributed, it's fair to assume that any intermediate test pose will be unseen from training.
            split_file_path = os.path.join(global_var.DATA_DIR,
                                           global_var.POSE_SPLIT_FILE)
            # NOTE: more than one sequence file is taken to mean a train
            # pivot, a single sequence a test pivot — seq_idx here is the
            # count of sequence files probed above.
            if seq_idx > 1:  # train pivot
                test_orig_idx = np.load(split_file_path)['test']
                test_idx = np.in1d(pose_order, test_orig_idx)
                chosen_idx = np.where(
                    test_idx)[0] if split == 'test' else np.where(~test_idx)[0]
            else:  # test pivot
                train_orig_idx = np.load(split_file_path)['train']
                train_idx = np.in1d(pose_order, train_orig_idx)
                chosen_idx = np.where(
                    train_idx)[0] if split == 'train' else np.where(
                        ~train_idx)[0]

            thetas = thetas[chosen_idx]
            verts_d = verts_d[chosen_idx]
            if smooth_level == 1 and global_var.SMOOTH_STORED:
                smooth_verts_d = smooth_verts_d[chosen_idx]

        # Expose everything as float32 torch tensors. Only the first 10 shape
        # coefficients of beta are kept.
        self.verts_d = torch.from_numpy(verts_d.astype(np.float32))
        self.thetas = torch.from_numpy(thetas.astype(np.float32))
        self.beta = torch.from_numpy(beta[:10].astype(np.float32))
        self.gamma = torch.from_numpy(gamma.astype(np.float32))
        if smooth_level == 1 and global_var.SMOOTH_STORED:
            self.smooth_verts_d = torch.from_numpy(
                smooth_verts_d.astype(np.float32))
            return

        # On-the-fly smoothing path: build the smoothing operator and an SMPL
        # layer; levels 0 and -1 need neither.
        if self.smooth_level != 0 and self.smooth_level != -1:
            with open(
                    os.path.join(global_var.DATA_DIR,
                                 global_var.GAR_INFO_FILE), 'rb') as f:
                class_info = pickle.load(f)
            num_v = len(class_info[garment_class]['vert_indices'])
            self.smoothing = DiffusionSmoothing(np.zeros((num_v, 3)),
                                                class_info[garment_class]['f'])
            self.smpl = TorchSMPL4Garment(gender=gender)
        else:
            self.smoothing = None
            self.smpl = None
def save_smooth():
    """Precompute and cache smoothed unposed garment displacements.

    For every (shape, style) pivot listed in pivots.txt of the configured
    garment class, loads each simulated pose sequence, smooths the
    per-frame unposed displacements with `smooth_it`, and writes
    smooth_unposed_XXX.npy files under global_var.SMOOTH_DATA_DIR so that
    OneStyleShape can load them when smooth_level == 1 and SMOOTH_STORED
    is set. Already-existing outputs are skipped, making reruns cheap.
    """
    garment_class = 'shirt'
    gender = 'male'
    smooth_level = 1
    OUT_DIR = global_var.SMOOTH_DATA_DIR

    data_dir = os.path.join(global_var.DATA_DIR,
                            '{}_{}'.format(garment_class, gender))
    with open(os.path.join(data_dir, "pivots.txt"), "r") as f:
        train_pivots = [l.strip().split('_') for l in f.readlines()]

    with open(os.path.join(global_var.DATA_DIR, global_var.GAR_INFO_FILE),
              'rb') as f:
        class_info = pickle.load(f)
    num_v = len(class_info[garment_class]['vert_indices'])
    smoothing = DiffusionSmoothing(np.zeros((num_v, 3)),
                                   class_info[garment_class]['f'])
    smpl = TorchSMPL4Garment(gender=gender)

    for shape_idx, style_idx in train_pivots:
        beta = torch.from_numpy(
            np.load(
                os.path.join(data_dir,
                             'shape/beta_{}.npy'.format(shape_idx))).astype(
                                 np.float32)[:10])
        # BUG FIX: gamma was loaded with format(shape_idx); the style
        # parameters are indexed by style_idx (this is exactly the bug the
        # OneStyleShape loader warns about). gamma is currently unused below
        # but must at least point at the right file.
        gamma = torch.from_numpy(
            np.load(
                os.path.join(data_dir,
                             'style/gamma_{}.npy'.format(style_idx))).astype(
                                 np.float32))
        outdir = os.path.join(OUT_DIR, "{}_{}".format(garment_class, gender),
                              "pose/{}_{}".format(shape_idx, style_idx))
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        # Walk sequence files poses_000.npz, poses_001.npz, ... until one
        # is missing (same convention as OneStyleShape).
        seq_idx = 0
        while True:
            seq_path = os.path.join(
                data_dir,
                'pose/{}_{}/poses_{:03d}.npz'.format(shape_idx, style_idx,
                                                     seq_idx))
            if not os.path.exists(seq_path):
                break
            data = np.load(seq_path)
            verts_d_path = os.path.join(
                data_dir,
                'pose/{}_{}/unposed_{:03d}.npy'.format(shape_idx, style_idx,
                                                       seq_idx))
            # Sequence simulated badly (no unposed file): skip it.
            if not os.path.exists(verts_d_path):
                print("{} doesn't exist.".format(verts_d_path))
                seq_idx += 1
                continue
            outpath = os.path.join(outdir,
                                   "smooth_unposed_{:03d}.npy".format(seq_idx))
            # Output already cached: skip recomputation.
            if os.path.exists(outpath):
                print("{} exists.".format(outpath))
                seq_idx += 1
                continue
            print(verts_d_path)
            thetas = torch.from_numpy(data['thetas'].astype(np.float32))
            verts_d = torch.from_numpy(
                np.load(verts_d_path).astype(np.float32))
            # Smooth frame by frame, then stack into (frames, verts, 3).
            smooth_verts_d = []
            for theta, vert_d in zip(thetas, verts_d):
                svert_d = smooth_it(smoothing, smooth_level, smpl, theta, beta,
                                    vert_d, garment_class)
                smooth_verts_d.append(svert_d.numpy())
            smooth_verts_d = np.stack(smooth_verts_d)
            np.save(outpath, smooth_verts_d)

            seq_idx += 1
        type=str)
    parser.add_argument(
        '-out_dir',
        '--out_dir',
        default='/BS/cloth3d/static00/nasa_data/smpl_pose/train_data',
        type=str)

    args = parser.parse_args()

    nasa_data_dir = args.out_dir
    if not os.path.exists(nasa_data_dir):
        os.makedirs(nasa_data_dir)

    pose_file = args.pose_file
    poses = pkl.load(open(pose_file, 'rb'), encoding="latin1")
    smpl_torch = TorchSMPL4Garment('female')
    sample_num = 1000000
    beta = np.load(args.beta_file)
    sub_id = '{:03}'.format(args.frame)

    sub_folder = os.path.join(nasa_data_dir, sub_id)
    if not os.path.exists(sub_folder):
        os.makedirs(sub_folder)
    mesh_folder = os.path.join(
        '/BS/cloth3d/static00/nasa_data/smpl_pose/meshes/{}'.format(sub_id))
    if not os.path.exists(mesh_folder):
        os.makedirs(mesh_folder)
    print(len(poses))
    for j in range(len(poses)):
        theta_normalized = normalize_y_rotation(poses[j])
        frame_num = '{:06}'.format(j)
Exemple #4
0
    def __init__(self, ckpt, params):
        """Evaluation runner for the EncDec garment network.

        Sets up logging, the SIZER test dataset/loader, SMPL and garment
        meshes, then restores the model weights from `ckpt` and switches
        the model to eval mode.

        Args:
            ckpt: path to a saved state_dict of the EncDec network.
            params: dict with keys 'gender', 'garment_class', 'batch_size',
                'garment_layer', 'res' ('lres'/'hres') and optionally
                'iter_nums'.
        """
        self.device = device
        self.params = params
        self.gender = params['gender']
        self.garment_class = params['garment_class']
        self.bs = params['batch_size']
        self.garment_layer = params['garment_layer']
        self.res_name = params['res']
        self.hres = True
        if self.res_name == 'lres':
            self.hres = False

        # log and backup
        LOG_DIR = '/scratch/BS/pool1/garvita/sizer'
        self.model_name = "EncDec_{}".format(self.res_name)

        log_name = os.path.join(
            self.garment_class, '{}_{}'.format(self.garment_layer,
                                               self.res_name))
        self.log_dir = os.path.join(LOG_DIR, log_name)
        if not os.path.exists(self.log_dir):
            print('making %s' % self.log_dir)
            os.makedirs(self.log_dir)

        with open(os.path.join(self.log_dir, "params.json"), 'w') as f:
            json.dump(params, f)

        self.iter_nums = 0 if 'iter_nums' not in params else params['iter_nums']

        #load smpl data
        self.layer_size, self.smpl_size = get_res_vert(params['garment_class'],
                                                       self.hres,
                                                       params['garment_layer'])

        # get active vert id
        input_dim = self.layer_size * 3
        output_dim = input_dim

        self.vert_indices = get_vid(self.garment_layer, self.garment_class,
                                    self.hres)
        # np.int64 instead of np.long: np.long was removed in numpy >= 1.24
        # and was always an alias of the same 64-bit integer type.
        self.vert_indices = torch.tensor(self.vert_indices.astype(
            np.int64)).long().cuda()

        # dataset and dataloader
        # NOTE(review): res/gender are hard-coded to 'hres'/'male' here even
        # though self.res_name/self.gender exist — confirm this is intended.
        self.test_dataset = SizerData(garment_class=self.garment_class,
                                      garment_layer=self.garment_layer,
                                      mode='test',
                                      batch_size=self.bs,
                                      res='hres',
                                      gender='male')
        self.test_loader = DataLoader(
            self.test_dataset,
            batch_size=self.bs,
            num_workers=12,
            shuffle=True,
            # BUG FIX: was len(self.train_dataset), but no train_dataset is
            # ever created in this evaluation-time __init__ — it raised
            # AttributeError. The intent is to drop a ragged last batch of
            # the *test* set.
            drop_last=True if len(self.test_dataset) > self.bs else False)

        #create smpl
        self.smpl = TorchSMPL4Garment(gender=self.gender).to(device)
        self.smpl_faces_np = self.smpl.faces
        # NOTE(review): faces pass through float32 before the long tensor;
        # lossless only while vertex indices stay below 2**24 — TODO confirm
        # or cast via an integer dtype instead.
        self.smpl_faces = torch.tensor(self.smpl_faces_np.astype('float32'),
                                       dtype=torch.long).cuda()

        #interpenetraion loss term
        self.body_f_np = self.smpl.faces
        self.garment_f_np = Mesh(filename=os.path.join(
            DATA_DIR, 'real_{}_{}_{}.obj'.format(
                self.garment_class, self.res_name, self.garment_layer))).f

        self.garment_f_torch = torch.tensor(self.garment_f_np.astype(
            np.int64)).long().to(device)
        # models and optimizer
        latent_dim = 50
        self.model = getattr(network_layers,
                             self.model_name)(input_size=input_dim,
                                              latent_size=latent_dim,
                                              output_size=output_dim)

        self.model.to(device)

        print("loading {}".format(ckpt))
        state_dict = torch.load(ckpt)
        self.model.load_state_dict(state_dict)
        self.model.eval()
Exemple #5
0
    def __init__(self, train_dataset, val_dataset, opt):
        """Trainer setup driven by an `opt` configuration dict.

        Builds the SMPL layer, loads the garment template mesh, derives the
        experiment name from the loss weights, creates checkpoint/summary
        directories, and instantiates model, optimizer and loss.

        Args:
            train_dataset: dataset used for training iterations.
            val_dataset: dataset used for validation.
            opt: nested config with 'experiment', 'train', 'data' and
                'model' sections (see keys accessed below).
        """
        self.device = opt['train']['device']

        ### garment data from experiment params
        self.garment_class = opt['experiment']['garment_class']
        self.garment_layer = opt['experiment']['garment_layer']
        self.res = opt['experiment']['resolution']
        self.gender = opt['experiment']['gender']
        self.feat = opt['experiment']['feat']
        self.num_neigh = opt['experiment']['num_neigh']

        ##create smpl layer from TailorNet, you can use any SMPL pytorch implementation(TailorNet has hres SMPL also)
        self.smpl = TorchSMPL4Garment(gender=self.gender).to(self.device)

        # load training parameters etc
        self.layer_size, _ = get_res_vert(self.garment_class, self.res,
                                          self.garment_layer)

        # 'vn' input features carry normals alongside positions (6 per vert).
        input_dim = self.layer_size * 3
        if self.feat == 'vn':
            input_dim = self.layer_size * 6
        output_dim = self.layer_size * 3

        mesh = load_objs_as_meshes([
            os.path.join(
                opt['data']['meta_data'], "{}/{}_{}.obj".format(
                    self.garment_class, self.garment_layer, self.res))
        ],
                                   device=self.device)
        mesh_verts, mesh_faces = mesh.get_mesh_verts_faces(0)
        self.garment_f_torch = mesh_faces

        # geo_weights = np.load(os.path.join(DATA_DIR, 'real_g5_geo_weights.npy'))  todo: do we need this???
        self.d_tol = 0.002

        # create exp name based on experiment params
        self.loss_weight = {
            'wgt': opt['train']['wgt_wgt'],
            'data': opt['train']['data_wgt'],
            'spr_wgt': opt['train']['spr_wgt']
        }

        self.exp_name = '{}_{}_{}_{}_{}_{}_{}'.format(
            self.loss_weight['wgt'], self.loss_weight['data'],
            self.loss_weight['spr_wgt'], self.garment_layer,
            self.garment_class, self.feat, self.num_neigh)
        self.exp_path = '{}/{}/'.format(opt['experiment']['root_dir'],
                                        self.exp_name)
        # CLEANUP: the originals called .format(self.exp_name) on literals
        # with no placeholders — a no-op that suggested substitution where
        # there was none. exp_name is already part of exp_path.
        self.checkpoint_path = self.exp_path + 'checkpoints/'
        if not os.path.exists(self.checkpoint_path):
            print(self.checkpoint_path)
            os.makedirs(self.checkpoint_path)
        self.writer = SummaryWriter(self.exp_path + 'summary')

        self.val_min = None
        self.train_min = None
        self.loss = opt['train']['loss_type']
        self.n_part = opt['experiment']['num_part']
        self.loss_mse = torch.nn.MSELoss()
        self.batch_size = opt['train']['batch_size']
        # weight initialiser

        ## train and val dataset
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset

        ### load model and optimizer
        self.model = getattr(net_modules, opt['model']['name'])
        latent_dim = 100
        self.model = self.model(opt['model'], input_dim, latent_dim,
                                output_dim).to(self.device)
        self.optimizer = getattr(optim, opt['train']['optimizer'])
        self.optimizer = self.optimizer(self.model.parameters(),
                                        opt['train']['optimizer_param'])

        # NOTE(review): loss_l1 holds an MSE loss when loss_type == 'l2';
        # the attribute name is misleading but kept for interface stability.
        if self.loss == 'l1':
            self.loss_l1 = torch.nn.L1Loss()
        elif self.loss == 'l2':
            self.loss_l1 = torch.nn.MSELoss()
Exemple #6
0
    def __init__(self, ckpt, params):
        """Evaluation-time setup for the FC correspondence network.

        Loads the precomputed neighborhood heuristics, the test dataset and
        loader, the SMPL layer and garment mesh, then restores the model
        weights from `ckpt` and switches the model to eval mode.

        Args:
            ckpt: path to a saved state_dict of the correspondence net.
            params: dict with keys 'gender', 'garment_class', 'batch_size',
                'garment_layer', 'res' ('lres'/'hres'), 'num_neigh', 'feat'.
        """
        self.device = device
        self.params = params
        self.gender = params['gender']
        self.garment_class = params['garment_class']
        self.bs = params['batch_size']
        self.garment_layer = params['garment_layer']
        self.res_name = params['res']
        self.num_neigh = params['num_neigh']
        self.feat = params['feat']
        self.hres = True
        if self.res_name == 'lres':
            self.hres = False
        self.model_name = 'FC_correspondence_{}'.format(self.res_name)
        self.layer_size, self.smpl_size = get_res_vert(params['garment_class'],
                                                       self.hres,
                                                       params['garment_layer'])

        # Precomputed k-nearest-neighbor candidate ids per garment vertex.
        layer_neigh = np.array(
            np.load(
                os.path.join(
                    DATA_DIR,
                    "real_{}_neighborheuristics_{}_{}_{}_gar_order2.npy".
                    format(self.garment_class, self.res_name,
                           self.garment_layer, self.num_neigh))))

        # Flatten to one id per row, then reshape to (verts, num_neigh).
        all_neighbors = np.array([[vid] for k in layer_neigh for vid in k])
        self.neigh_id2 = all_neighbors
        # NOTE(review): self.body_vert is never assigned in this __init__
        # (the training-time variant computes it from get_vid); taking the
        # 'Body' branch here would raise AttributeError — confirm 'Body' is
        # not used with this evaluation class.
        if self.garment_layer == 'Body':
            self.idx2 = torch.from_numpy(self.neigh_id2).view(
                len(self.body_vert), self.num_neigh).cuda()
        else:
            self.idx2 = torch.from_numpy(self.neigh_id2).view(
                self.layer_size, self.num_neigh).cuda()

        self.test_dataset = ParserData(garment_class=self.garment_class,
                                       garment_layer=self.garment_layer,
                                       mode='test',
                                       batch_size=self.bs,
                                       res=self.res_name,
                                       gender=self.gender,
                                       feat=self.feat)
        self.test_loader = DataLoader(self.test_dataset,
                                      batch_size=self.bs,
                                      num_workers=12,
                                      shuffle=True,
                                      drop_last=False)
        # #create smpl
        self.smpl = TorchSMPL4Garment(gender=self.gender).to(device)
        self.smpl_faces_np = self.smpl.faces
        # NOTE(review): faces are cast to float32 before building a long
        # tensor; lossless only below 2**24 vertices — TODO confirm.
        self.smpl_faces = torch.tensor(self.smpl_faces_np.astype('float32'),
                                       dtype=torch.long).cuda()

        # NOTE(review): self.body_f_np is also never set in this __init__,
        # so the 'Body' branch below would fail for a second reason.
        if self.garment_layer == 'Body':
            self.garment_f_np = self.body_f_np
            self.garment_f_torch = self.smpl_faces
        else:
            self.garment_f_np = Mesh(filename=os.path.join(
                DATA_DIR, 'real_{}_{}_{}.obj'.format(
                    self.garment_class, self.res_name, self.garment_layer))).f
            self.garment_f_torch = torch.tensor(
                self.garment_f_np.astype(np.long)).long().to(device)

        # Softmax over the neighbor dimension of the predicted weights.
        self.out_layer = torch.nn.Softmax(dim=2)
        # 'vn' input features carry normals alongside positions (6 per vert).
        input_dim = self.smpl_size * 3
        if self.feat == 'vn':
            input_dim = self.smpl_size * 6
        output_dim = self.layer_size * self.num_neigh

        self.model = getattr(network_layers,
                             self.model_name)(input_size=input_dim,
                                              output_size=output_dim)
        self.model.to(self.device)
        print("loading {}".format(ckpt))
        state_dict = torch.load(ckpt)
        self.model.load_state_dict(state_dict)
        self.model.eval()
Exemple #7
0
    def __init__(self, params):
        """Training-time setup for the FC correspondence network.

        Builds logging, neighborhood heuristics, train/val datasets and
        loaders, SMPL layer, garment mesh, model, optimizer (optionally
        restored from a checkpoint), geodesic weights and the initial
        correspondence weights.

        Args:
            params: dict with keys 'gender', 'garment_class', 'batch_size',
                'garment_layer', 'res' ('lres'/'hres'), 'num_neigh', 'feat',
                'log_dir', 'lr', 'checkpoint' and optionally 'iter_nums'.
        """
        self.device = device
        self.params = params
        self.gender = params['gender']
        self.garment_class = params['garment_class']
        self.bs = params['batch_size']
        self.garment_layer = params['garment_layer']
        self.res_name = params['res']
        self.num_neigh = params['num_neigh']
        self.feat = params['feat']
        self.hres = True
        if self.res_name == 'lres':
            self.hres = False
        # log
        LOG_DIR = params['log_dir']

        self.model_name = 'FC_correspondence_{}'.format(self.res_name)
        self.note = "FC_corr_{}_{}_{}".format(self.garment_class,
                                              self.garment_layer,
                                              self.res_name)
        log_name = os.path.join(
            self.garment_class,
            '{}_{}_{}_{}'.format(self.garment_layer, self.feat, self.num_neigh,
                                 self.res_name))

        self.log_dir = os.path.join(LOG_DIR, log_name)
        if not os.path.exists(self.log_dir):
            print('making %s' % self.log_dir)
            os.makedirs(self.log_dir)

        # Persist the run configuration alongside the logs.
        with open(os.path.join(self.log_dir, "params.json"), 'w') as f:
            json.dump(params, f)

        self.iter_nums = 0 if 'iter_nums' not in params else params['iter_nums']

        #load smpl and garment data

        self.layer_size, self.smpl_size = get_res_vert(params['garment_class'],
                                                       self.hres,
                                                       params['garment_layer'])
        # 'Body' layer overrides the vertex count with a fixed value.
        if self.garment_layer == 'Body':
            self.layer_size = 4448
        # get active vert id
        # 'vn' input features carry normals alongside positions (6 per vert).
        input_dim = self.smpl_size * 3
        if self.feat == 'vn':
            input_dim = self.smpl_size * 6
        output_dim = self.layer_size * self.num_neigh

        # Precomputed k-nearest-neighbor candidate ids per garment vertex.
        layer_neigh = np.array(
            np.load(
                os.path.join(
                    DATA_DIR,
                    "real_{}_neighborheuristics_{}_{}_{}_gar_order2.npy".
                    format(self.garment_class, self.res_name,
                           self.garment_layer, self.num_neigh))))
        self.layer_neigh = torch.from_numpy(layer_neigh).cuda()

        #separate for body layer
        # Body vertices are SMPL vertices not covered by upper or lower
        # garment layers.
        body_vert = range(self.smpl_size)
        vert_id_upper = get_vid('UpperClothes', self.garment_class, self.hres)
        vert_id_lower = get_vid('Pants', self.garment_class, self.hres)
        body_vert2 = [i for i in body_vert if i not in vert_id_upper]
        body_vert2 = [i for i in body_vert2 if i not in vert_id_lower]
        self.body_vert = body_vert2

        # Flatten to one id per row, then reshape to (verts, num_neigh).
        all_neighbors = np.array([[vid] for k in layer_neigh for vid in k])
        self.neigh_id2 = all_neighbors
        if self.garment_layer == 'Body':
            self.idx2 = torch.from_numpy(self.neigh_id2).view(
                len(self.body_vert), self.num_neigh).cuda()
        else:
            self.idx2 = torch.from_numpy(self.neigh_id2).view(
                self.layer_size, self.num_neigh).cuda()

        #get vert indixed of layer
        self.vert_indices = get_vid(self.garment_layer, self.garment_class,
                                    self.hres)
        # NOTE(review): np.long was removed in numpy >= 1.24; this line
        # requires an older numpy (np.int64 is the drop-in replacement).
        self.vert_indices = torch.tensor(self.vert_indices.astype(
            np.long)).long().cuda()

        # dataset and dataloader
        self.train_dataset = ParserData(garment_class=self.garment_class,
                                        garment_layer=self.garment_layer,
                                        mode='train',
                                        batch_size=self.bs,
                                        res=self.res_name,
                                        gender=self.gender,
                                        feat=self.feat)
        self.train_loader = DataLoader(
            self.train_dataset,
            batch_size=self.bs,
            num_workers=12,
            shuffle=True,
            # Drop the ragged last batch only when there is more than one.
            drop_last=True if len(self.train_dataset) > self.bs else False)

        self.val_dataset = ParserData(garment_class=self.garment_class,
                                      garment_layer=self.garment_layer,
                                      mode='val',
                                      batch_size=self.bs,
                                      res=self.res_name,
                                      gender=self.gender,
                                      feat=self.feat)
        self.val_loader = DataLoader(self.val_dataset,
                                     batch_size=self.bs,
                                     num_workers=12,
                                     shuffle=True,
                                     drop_last=False)

        #create smpl
        self.smpl = TorchSMPL4Garment(gender=self.gender).to(device)
        self.smpl_faces_np = self.smpl.faces
        # NOTE(review): faces are cast to float32 before building a long
        # tensor; lossless only below 2**24 vertices — TODO confirm.
        self.smpl_faces = torch.tensor(self.smpl_faces_np.astype('float32'),
                                       dtype=torch.long).cuda()

        # NOTE(review): self.body_f_np is read here but never assigned in
        # this __init__ — the 'Body' branch would raise AttributeError;
        # confirm whether a subclass or earlier code provides it.
        if self.garment_layer == 'Body':
            self.garment_f_np = self.body_f_np
            self.garment_f_torch = self.smpl_faces
        else:
            self.garment_f_np = Mesh(filename=os.path.join(
                DATA_DIR, 'real_{}_{}_{}.obj'.format(
                    self.garment_class, self.res_name, self.garment_layer))).f
            self.garment_f_torch = torch.tensor(
                self.garment_f_np.astype(np.long)).long().to(device)

        self.num_faces = len(self.garment_f_np)

        self.model = getattr(network_layers,
                             self.model_name)(input_size=input_dim,
                                              output_size=output_dim)
        self.model.to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=params['lr'],
                                          weight_decay=1e-6)
        # Softmax over the neighbor dimension of the predicted weights.
        self.out_layer = torch.nn.Softmax(dim=2)
        # Optionally resume model and optimizer state from a checkpoint dir.
        if params['checkpoint']:
            ckpt_path = params['checkpoint']
            print('loading ckpt from {}'.format(ckpt_path))
            state_dict = torch.load(os.path.join(ckpt_path, 'lin.pth.tar'))
            self.model.load_state_dict(state_dict)
            state_dict = torch.load(
                os.path.join(ckpt_path, 'optimizer.pth.tar'))
            self.optimizer.load_state_dict(state_dict)

        # Geodesic weights restricted to the body-only vertices.
        geo_weights = np.load(os.path.join(DATA_DIR,
                                           'real_g5_geo_weights.npy'))
        self.geo_weights = torch.tensor(geo_weights[body_vert2].astype(
            np.float32)).cuda()
        self.best_error = np.inf
        self.best_epoch = -1
        self.logger = tensorboardX.SummaryWriter(os.path.join(self.log_dir))
        self.val_min = None
        self.d_tol = 0.002

        self.sideddistance = SidedDistance()
        self.relu = nn.ReLU()
        #weight initialiser
        # Initial weights are 1 where a neighbor candidate is the vertex's
        # own id, 0 elsewhere, replicated across the batch.
        vert_id = self.vert_indices.cpu().numpy()
        init_weights = torch.from_numpy(
            np.array([
                layer_neigh[i] == vert_id[i] for i in range(self.layer_size)
            ]).astype('int64'))
        self.init_weight = torch.stack([init_weights
                                        for _ in range(self.bs)]).cuda()