Example #1

import os

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter  # or: from tensorboardX import SummaryWriter

# ManoLayer from the manopth package (https://github.com/hassony2/manopth)
from manopth.manolayer import ManoLayer

class TrainerP0(object):
    def __init__(self, batch_size, dataloader, model, build_id):
        self.batch_size = batch_size
        self.dataloader = dataloader
        self.model = model

        self.save_path = f'results/{build_id}.pt'

        self.writer = SummaryWriter(f'results/{build_id}')
        input_example = next(iter(dataloader))['uv']

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)

        # The example input must live on the same device as the model,
        # otherwise add_graph fails when CUDA is available.
        self.writer.add_graph(self.model, input_example.to(self.device))
        # Flush instead of close: the writer is reused in train().
        self.writer.flush()

        self.criterion = nn.MSELoss()
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=0.0001,
                                   momentum=0.9)

        self.load_state()

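        # NOTE: with use_pca=False, manopth's ManoLayer uses the full 45-dim
        # axis-angle pose, so ncomps is effectively ignored here.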
        ncomps = 45
        self.mano_layer = ManoLayer(mano_root='mano/models',
                                    use_pca=False,
                                    ncomps=ncomps,
                                    flat_hand_mean=False)

        self.mano_layer.to(self.device)
        self.mano_layer.eval()

    def train(self, epochs, save_rate):
        # NOTE: save_rate is accepted but unused; state is saved every epoch.
        self.running_loss = 0.0

        for epoch in range(epochs):
            for i, sample in enumerate(self.dataloader, 0):
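                # Each sample is a dict with 2D keypoints ('uv') as input and
                # target MANO parameters ('mano') as supervision.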
                uv = sample['uv']
                outputs_gt = sample['mano']

                uv = uv.to(self.device)
                outputs_gt = outputs_gt.to(self.device)

                self.optimizer.zero_grad()

                # Scale the network output; 3.12 is a magic constant kept
                # from the original code.
                outputs = self.model(uv) * 3.12

                loss = self.criterion(outputs, outputs_gt)

                # Optional (disabled): decode predictions and ground truth
                # through the MANO layer and also penalize vertex/joint error.
                # poses_pred = outputs[:, :48]
                # shapes_pred = outputs[:, 48:]
                # hand_verts_p, hand_joints_p = self.mano_layer(poses_pred, shapes_pred)
                # poses_gt = outputs_gt[:, :48]
                # shapes_gt = outputs_gt[:, 48:]
                # hand_verts_gt, hand_joints_gt = self.mano_layer(poses_gt, shapes_gt)
                # loss = (loss
                #         + self.criterion(hand_verts_p, hand_verts_gt)
                #         + self.criterion(hand_joints_p, hand_joints_gt))

                loss.backward()
                self.optimizer.step()

                self.running_loss += loss.item()
                self.g_step += 1

            # Average the accumulated loss over the epoch; 128 appears to be
            # the number of batches per epoch in the original setup. Dividing
            # once here keeps logging, printing, and checkpointing consistent.
            self.running_loss /= 128
            self.save_state()
            self.writer.add_scalar('training loss', self.running_loss,
                                   self.g_step)
            print(self.running_loss, self.g_step)
            self.running_loss = 0.0

    def save_state(self):
        torch.save(
            {
                'g_step': self.g_step,
                'model_state_dict': self.model.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict(),
                'running_loss': self.running_loss,
            }, self.save_path)

        print(
            f'Model saved at step {self.g_step} with running loss {self.running_loss}.'
        )

    def load_state(self):
        if os.path.exists(self.save_path):
            checkpoint = torch.load(self.save_path, map_location=self.device)
            self.g_step = checkpoint['g_step'] + 1
            self.running_loss = checkpoint['running_loss']

            self.model.load_state_dict(checkpoint['model_state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            print(
                f'Model loaded. g_step: {self.g_step}; running_loss: {self.running_loss}'
            )
        else:
            print(
                f'File "{self.save_path}" does not exist. Initializing parameters from scratch.'
            )
            self.g_step = 0
            self.running_loss = 0.0
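
A minimal usage sketch, not part of the original listing. HandUVDataset and ManoRegressor are hypothetical placeholder names for a dataset yielding {'uv': ..., 'mano': ...} sample dicts and a network regressing the 58 MANO parameters (48 pose + 10 shape) implied by the commented-out loss above:

# Hypothetical usage; HandUVDataset and ManoRegressor are placeholders.
from torch.utils.data import DataLoader

dataset = HandUVDataset('data/train')      # yields {'uv': ..., 'mano': ...}
loader = DataLoader(dataset, batch_size=32, shuffle=True)
model = ManoRegressor(out_dim=58)          # 48 pose + 10 shape parameters

trainer = TrainerP0(batch_size=32, dataloader=loader,
                    model=model, build_id='baseline_run')
trainer.train(epochs=100, save_rate=1)     # save_rate is currently unused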
Example #2

import os
import pickle
import random

import imageio
import numpy as np
import torch
from tqdm import tqdm

from manopth.manolayer import ManoLayer
# Project-local helpers assumed to be importable from this repository:
# Encoder_BH2MANO, get_hand_colors, gen_3d_pose_flow, get_ss_tu_tv,
# get_bg, create_synth.
def main():
       mano_layer = ManoLayer(mano_root='mano/models', use_pca=True, ncomps=6, flat_hand_mean=False, side='right')
       gpu = True
       num_flows = 2
       flow_length = 10

       pd = np.load('normalized_bh_inMANOorder.npy')  # needs to be downloaded separately
       model_path = 'BH2MANO_model/model_BH2MANO.pth'

       bg_path = 'BG_data/'
       bg_files = os.listdir(os.path.join(os.getcwd(), bg_path))

       data_dir = 'results/'

       enc_layer = Encoder_BH2MANO()
       enc_layer.load_state_dict(torch.load(model_path))
       if gpu:
              enc_layer.cuda()
              mano_layer.cuda()

       mano_layer.eval()
       enc_layer.eval()

       colors = get_hand_colors()

       gt = {'joints_2d': [], 'cam_params': [], 'joints_3d': [], 'verts_3d': []}
       for frame_idx in tqdm(range(num_flows)):

              pose_flow = gen_3d_pose_flow(pd.reshape((pd.shape[0], pd.shape[1] * pd.shape[2])), flow_length=flow_length)

              # Encode the 3D joint flow into MANO parameters. Note that the
              # gpu flag above is not honored by these .cuda() calls, so CUDA
              # is effectively required.
              input_pose_joints_ = torch.tensor(pose_flow).float().cuda()
              input_pose_joints = input_pose_joints_.view(input_pose_joints_.size(0), -1)
              pose_param, shape_param = enc_layer(input_pose_joints)
              # Zero out the predicted global rotation, then apply a half-turn
              # about the first rotation axis.
              pose_param[:, :3] *= 0.
              pose_param[:, 0] += np.pi
              # Replace the predicted shape with a random one in [-2, 2),
              # shared across all frames of this flow.
              shape_param = torch.rand(1, 10).expand(pose_param.size(0), -1).cuda().float() * 4. - 2.

              hand_verts, hand_joints = mano_layer(pose_param.clone(), shape_param.clone())
              # ManoLayer outputs are in millimetres; convert to metres.
              hand_verts = hand_verts.cpu().detach().numpy() / 1000.
              hand_joints = hand_joints.cpu().detach().numpy() / 1000.

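              # Triangle faces of the MANO template mesh (constant across frames).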
              f = mano_layer.th_faces.cpu().detach().numpy()

              # Pick one of the 27 predefined hand colors.
              color = colors[random.randint(0, 26)]
              size = 224
              w, h = size, size

              assert flow_length == pose_flow.shape[0]

              # Placement parameters at the first and last frame; per-frame
              # values are interpolated between them in the loop below.
              ss, tu, tv, rot = get_ss_tu_tv(hand_verts[0], hand_joints[0], w, h)
              ss_end, tu_end, tv_end, rot_end = get_ss_tu_tv(hand_verts[-1], hand_joints[-1], w, h)
              rot_var_speed = random.uniform(0, 0.6)  # random rotation interpolation speed

              ## Get Background
              while True:
                     bg_orig = imageio.imread(os.path.join(bg_path, random.choice(bg_files)))
                     # Require an RGB image large enough for a size x size crop.
                     if (bg_orig.ndim == 3) and (bg_orig.shape[0] > size) and (bg_orig.shape[1] > size):
                            break
              bg_cent_x, bg_cent_y = get_bg(bg_orig.shape, size)
              bg_cent_end_x, bg_cent_end_y = get_bg(bg_orig.shape, size)
              bg = bg_orig[bg_cent_x - int(size / 2): bg_cent_x + int(size / 2),
                   bg_cent_y - int(size / 2): bg_cent_y + int(size / 2), :]

              ## Collect GTs
              images = []
              masks = []
              joints_2d = np.zeros((flow_length, 42))      # 21 joints x (u, v)
              cam_params = np.zeros((flow_length, 27))     # scale/trans, rotation, pose, shape
              joints_3d = np.zeros((flow_length, 21, 3))
              verts_3d = np.zeros((flow_length, 778, 3))   # 778 MANO mesh vertices
              for i in range(flow_length):
                     # Render one synthetic frame and collect its ground truth.
                     img, mask, vert_3d, joint_3d, vert, joint = create_synth(hand_verts[i], hand_joints[i], color, f, ss, tu, tv, rot, w, h, bg)
                     images.append(img)
                     masks.append(mask * 255)
                     cam_params[i, :] = np.concatenate([np.array([1., ss, tu, tv]), rot,
                                                        pose_param[i, 3:].detach().cpu().numpy(),
                                                        shape_param[i].detach().cpu().numpy()], 0)
                     joints_2d[i, :] = joint[:, :2].reshape((42))
                     joints_3d[i, :, :] = joint_3d
                     verts_3d[i, :, :] = vert_3d

                     # Step the placement parameters and the background crop
                     # towards their end-of-flow values.
                     ss = ss + (ss_end - ss) / flow_length * 0.5
                     tu = tu + (tu_end - tu) / flow_length * 0.2
                     tv = tv + (tv_end - tv) / flow_length * 0.2
                     rot = rot + (rot_end - rot) / flow_length * rot_var_speed
                     bg_cent_x = int(bg_cent_x + (bg_cent_end_x - bg_cent_x) / flow_length)
                     bg_cent_y = int(bg_cent_y + (bg_cent_end_y - bg_cent_y) / flow_length)
                     bg = bg_orig[bg_cent_x - int(size / 2):bg_cent_x + int(size / 2),
                          bg_cent_y - int(size / 2):bg_cent_y + int(size / 2), :]

              gt['joints_2d'].append(joints_2d)
              gt['joints_3d'].append(joints_3d)
              gt['verts_3d'].append(verts_3d)
              gt['cam_params'].append(cam_params)
              imageio.mimsave(os.path.join(data_dir, 'gifs', '%s.gif' % frame_idx), images)
              imageio.mimsave(os.path.join(data_dir, 'masks', 'synth_%s_mask.gif' % frame_idx), masks)
       with open(data_dir + 'ground_truths.pickle', 'wb') as fo:
              pickle.dump(gt, fo, protocol=pickle.HIGHEST_PROTOCOL)
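
A minimal entry point, assuming the directories this script writes into (results/gifs/ and results/masks/) may not exist yet:

if __name__ == '__main__':
    # Create the output directories written to by main().
    for sub in ('gifs', 'masks'):
        os.makedirs(os.path.join('results', sub), exist_ok=True)
    main()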