Esempio n. 1
0
    def _convert_input_data(self, data_tuple):
        """Unpack ``data_tuple`` and convert its arrays to tensors on the device.

        Only positions 0-3 (x, nan_masks, tasks, tasks_mask) and 6 (towards)
        of the 10-tuple are used; the remaining entries are discarded.

        Returns:
            tuple: ``(input_data, input_info)`` where ``input_data`` is
            ``(x, towards)`` and ``input_info`` is
            ``(x, nan_masks, tasks, tasks_mask)``.
        """
        x, nan_masks, tasks, tasks_mask, _, _, towards, _, _, _ = data_tuple

        device = self.device
        eps = 1e-5  # keeps the float masks strictly positive downstream

        # Direction labels are one-hot expanded along the sequence dimension,
        # then converted together with x by the shared helper.
        towards_arr = expand1darr(towards.astype(np.int64), 3, self.seq_dim)
        x, towards = numpy2tensor(device, x, towards_arr)

        tasks = torch.from_numpy(tasks).long().to(device)
        tasks_mask = torch.from_numpy(tasks_mask * 1 + eps).float().to(device)
        nan_masks = torch.from_numpy(nan_masks * 1 + eps).float().to(device)

        return (x, towards), (x, nan_masks, tasks, tasks_mask)
Esempio n. 2
0
    def _convert_input_data(self, data_tuple):
        """Unpack ``data_tuple`` and convert its arrays to tensors on the device.

        Positions 10-11 of the 13-tuple are unused. Note that ``input_data``
        deliberately carries the raw numpy versions (``fut_np``,
        ``fut_mask_np``, ``tasks_np``, ``tasks_mask_np``, ``phenos_np``,
        ``phenos_mask_np``, ``idpatients_np``) alongside the tensors, while
        ``input_info`` holds only tensors.

        Returns:
            tuple: ``(input_data, input_info)``.
        """
        (x, nan_masks, fut_np, fut_mask_np, fut_avail_mask_np, tasks_np,
         tasks_mask_np, phenos_np, phenos_mask_np, towards, _, _,
         idpatients_np) = data_tuple

        device = self.device
        eps = 1e-5  # keeps the float masks strictly positive downstream

        # One-hot expand the direction labels, then convert x / future / towards
        # in one call via the shared helper.
        towards_arr = expand1darr(towards.astype(np.int64), 3, self.seq_dim)
        x, fut, towards = numpy2tensor(device, x, fut_np, towards_arr)

        tasks = torch.from_numpy(tasks_np).long().to(device)
        tasks_mask = torch.from_numpy(tasks_mask_np * 1 + eps).float().to(device)
        nan_masks = torch.from_numpy(nan_masks * 1 + eps).float().to(device)
        fut_mask = torch.from_numpy(fut_mask_np * 1 + eps).float().to(device)
        fut_avail_mask = torch.from_numpy(fut_avail_mask_np.astype(int)).to(device)

        input_data = (x, towards, fut_np, fut_mask_np, tasks_np, tasks_mask_np,
                      idpatients_np, phenos_np, phenos_mask_np)
        input_info = (x, nan_masks, fut, fut_mask, fut_avail_mask, tasks,
                      tasks_mask)
        return input_data, input_info
Esempio n. 3
0
    def forward_decode_only(self, motion_info, towards, num_var_dim, num_datapoints, num_kld):
        """Decode latent traversals along the ``num_kld`` highest-KLD latent dims.

        Args:
            motion_info: tuple ``(motion_z, motion_mu, motion_logvar)``;
                each appears to be of shape (batch, latent_dim) — the
                ``motion_z[:, val]`` indexing below requires 2-D.
            towards: 1-D numpy array of direction labels; one-hot expanded
                along the sequence dimension before decoding.
            num_var_dim: number of variation steps per latent dimension.
            num_datapoints: number of data points per variation step.
            num_kld: how many top-KLD latent dimensions to traverse.

        Returns:
            Tensor of shape (num_kld, num_var_dim * num_datapoints,
            self.fea_dim, motion_z.shape[1]) holding the decoded motions.
        """
        motion_z, motion_mu, motion_logvar = motion_info
        towards = torch.from_numpy(
            expand1darr(towards.astype(np.int64), 3, self.seq_dim)
        ).float().to(self.device)

        # Per-dimension KLD of the diagonal-Gaussian posterior vs. N(0, I),
        # averaged over the batch.
        kld = torch.mean(
            -0.5 * (1 + motion_logvar - motion_mu.pow(2) - motion_logvar.exp()),
            dim=0)
        # BUGFIX: the topk result was previously clobbered by a hard-coded
        # debug list [42, 90, 12, 70, 108], which made the KLD ranking dead
        # code and broke every num_kld != 5 (mismatch with recon_motion's
        # first dimension). Use the actual top-num_kld indices.
        _, sorted_ind = torch.topk(kld, num_kld)
        sorted_ind = sorted_ind.tolist()

        recon_motion = torch.zeros(
            num_kld, num_var_dim * num_datapoints, self.fea_dim, motion_z.shape[1])

        for idx, val in enumerate(sorted_ind):
            # The 10th/90th percentiles of the selected latent dimension bound
            # the traversal range passed to the decoder.
            motion_z_one = motion_z[:, val].view(motion_z.shape[0]).cpu().numpy()
            max_kld = np.percentile(motion_z_one, 90)
            min_kld = np.percentile(motion_z_one, 10)
            # NOTE(review): the original also called motion_z_one.cuda() and
            # discarded the result — a no-op, removed.
            recon_motion[idx, :, :, :] = self.model.decode_only(
                motion_z, towards, sorted_ind, min_kld, max_kld,
                num_var_dim, num_datapoints)

        return recon_motion