Example #1
class CryptoPatcher:
    def __init__(self, exec_path):
        self.path = exec_path
        self.patcher = Patcher(self.path)

    def test_inject_patch(self, binary):
        # Inject via a throwaway patcher just to learn where the blob lands
        dummy = Patcher(self.path)
        with dummy.bin.collect() as patchset:
            addr = patchset.inject(raw=binary)
        return addr

    def apply_patch(self, patch_desc, old_entry, entry_name):
        # First get addresses of code and data by calling dummy patcher
        dummy = Patcher(self.path)
        dummy_addr1 = hex(0x800000)
        dummy_addr2 = hex(0x900000)
        dummy_addr3 = hex(0xa00000)
        self._run_script(patch_desc.patch_dir, patch_desc.script_name,
                         dummy_addr1, dummy_addr2, dummy_addr3,
                         patch_desc.data_name, patch_desc.code_name,
                         entry_name)
        with dummy.bin.collect() as patchset:
            data_addr = self._inject_exec(patchset, patch_desc.patch_dir, patch_desc.data_name)
            code_addr = self._inject_exec(patchset, patch_desc.patch_dir, patch_desc.code_name)
            entry_addr = self._inject_exec(patchset, patch_desc.patch_dir, entry_name)

        # Now inject the real patch with correct addresses
        self._run_script(patch_desc.patch_dir, patch_desc.script_name,
                         hex(data_addr), hex(code_addr), hex(entry_addr),
                         patch_desc.data_name, patch_desc.code_name,
                         entry_name)
        with self.patcher.bin.collect() as patchset:
            data_addr = self._inject_exec(patchset, patch_desc.patch_dir, patch_desc.data_name)
            code_addr = self._inject_exec(patchset, patch_desc.patch_dir, patch_desc.code_name)
            entry_addr = self._inject_exec(patchset, patch_desc.patch_dir, entry_name)
            patchset.patch(old_entry, jmp=entry_addr)

    def _run_script(self, script_dir, script_name, *script_args):
        pwd = os.getcwd()
        os.chdir(script_dir)
        script = [os.path.join(os.getcwd(), script_name)] + list(script_args)
        Log.debug('Script input: ' + str(list(script_args)))
        popen = subprocess.Popen(script, stdout=subprocess.PIPE)
        # communicate() drains the pipe (wait() alone can deadlock on a full
        # PIPE) and the bytes are decoded before concatenating with str
        stdout, _ = popen.communicate()
        Log.debug('Script output: ' + stdout.decode(errors='replace'))
        os.chdir(pwd)

    def _inject_exec(self, pt, base_dir, file_name):
        with open(os.path.join(base_dir, file_name), "rb") as f:
            binary = f.read()
            addr = pt.inject(raw=binary)
        return addr

    def save(self, path):
        self.patcher.save(path)
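
A minimal usage sketch for CryptoPatcher, assuming the Patcher API above; the descriptor layout, paths, and entry values are hypothetical placeholders:

from collections import namedtuple

# hypothetical descriptor mirroring the attributes apply_patch reads
PatchDesc = namedtuple('PatchDesc', 'patch_dir script_name data_name code_name')
desc = PatchDesc('patches/aes', 'build.sh', 'data.bin', 'code.bin')

patcher = CryptoPatcher('./target.bin')  # hypothetical input binary
patcher.apply_patch(desc, old_entry=0x401000, entry_name='entry.bin')
patcher.save('./target_patched.bin')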
Example #2
 def writeindexhtm(self):
     text = Scraper.getIndexhtm()
     if not os.path.isfile(self.path() + 'index.htm'):
         with open(self.path() + 'index.htm', 'w') as f:
             f.write(Patcher.patchindexhtm(text))
     else:
         print('index.htm already saved')
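
A minimal pathlib-based sketch of the same write-if-absent guard, assuming the Scraper and Patcher helpers above:

from pathlib import Path

def write_index_htm(base_dir):
    target = Path(base_dir) / 'index.htm'
    if target.exists():
        print('index.htm already saved')
        return
    # fetch, patch, and persist only when the file is missing
    target.write_text(Patcher.patchindexhtm(Scraper.getIndexhtm()))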
Example #3
    def apply_patch(self, patch_desc, old_entry, entry_name):
        # First get addresses of code and data by calling dummy patcher
        dummy = Patcher(self.path)
        dummy_addr1 = hex(0x800000)
        dummy_addr2 = hex(0x900000)
        dummy_addr3 = hex(0xa00000)
        self._run_script(patch_desc.patch_dir, patch_desc.script_name,
                         dummy_addr1, dummy_addr2, dummy_addr3,
                         patch_desc.data_name, patch_desc.code_name,
                         entry_name)
        with dummy.bin.collect() as patchset:
            data_addr = self._inject_exec(patchset, patch_desc.patch_dir,
                                          patch_desc.data_name)
            code_addr = self._inject_exec(patchset, patch_desc.patch_dir,
                                          patch_desc.code_name)
            entry_addr = self._inject_exec(patchset, patch_desc.patch_dir,
                                           entry_name)

        # Now inject the real patch with correct addresses
        self._run_script(patch_desc.patch_dir, patch_desc.script_name,
                         hex(data_addr), hex(code_addr), hex(entry_addr),
                         patch_desc.data_name, patch_desc.code_name,
                         entry_name)
        with self.patcher.bin.collect() as patchset:
            data_addr = self._inject_exec(patchset, patch_desc.patch_dir,
                                          patch_desc.data_name)
            code_addr = self._inject_exec(patchset, patch_desc.patch_dir,
                                          patch_desc.code_name)
            entry_addr = self._inject_exec(patchset, patch_desc.patch_dir,
                                           entry_name)
            patchset.patch(old_entry, jmp=entry_addr)
Example #4
def do_format(args):
    global patcher
    global doc_repo

    if not doc_repo:
        doc_repo = DocRepo()
    doc_repo.setup(args)
    if not patcher:
        patcher = Patcher(doc_repo.git_repo_path)

    modpath = os.path.dirname(__file__)
    output = os.path.join(modpath, '..', 'static', 'html')
    doc_repo.output = output
    doc_repo.format()
Example #5
 def test_inject_patch(self, binary):
     # Inject via a throwaway patcher just to learn where the blob lands
     dummy = Patcher(self.path)
     with dummy.bin.collect() as patchset:
         addr = patchset.inject(raw=binary)
     return addr
Example #6
 def __init__(self, exec_path):
     self.path = exec_path
     self.patcher = Patcher(self.path)
Example #7
        tmpfd, tmpname = tempfile.mkstemp(dir=os.path.dirname(jar))
        os.close(tmpfd)

        with zipfile.ZipFile(jar, 'r') as z_in, zipfile.ZipFile(tmpname,
                                                                'w') as z_out:
            readFunc = functools.partial(readArchive, z_in)

            targets_dsm = disassembleSub(readFunc,
                                         out_dsm,
                                         targets=targets,
                                         roundtrip=True)

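            # patch each disassembled class file in place and record the result per patch id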
            for i, cls in enumerate(patches):
                with open('dsm_classes/' + str(cls['class']) + '.j',
                          'r+') as file:
                    patcher = Patcher(cls, file)
                    patch_res = patcher.patch()

                    patch_id = str(cls['id'])

                    if 'depends' in cls:
                        patch_id += '/d' + str(cls['depends'])

                    patches_finished.append([patch_id, patch_res])

                    gauage += iter_cnt
                    d.gauge_update(percent=int(gauage))

            for item in z_in.infolist():
Example #8
    def save_segmentation_examples(self, nr_cubes=3, inference_full_image=True):

        # deal with recursion when defaulting to patching

        if "lidc" in self.dataset_name:
            return

        torch.cuda.ipc_collect()
        torch.cuda.empty_cache()
        dump_tensors()

        if hasattr(self.trainer, "model"):
            del self.trainer.model
            del self.trainer
            sleep(15)
            self.trainer = Trainer(config=self.config, dataset=None)
            dump_tensors()
            torch.cuda.ipc_collect()
            torch.cuda.empty_cache()
            dump_tensors()
            sleep(5)
        if inference_full_image is False:
            print("PATCHING Will be Done")

        dump_tensors()
        torch.cuda.ipc_collect()
        torch.cuda.empty_cache()
        dump_tensors()

        self.trainer.load_model(from_path=True, path=self.model_path, phase="sup", ensure_sup_is_completed=True)

        cubes_to_use = []
        cubes_to_use.extend(self.sample_k_full_cubes_which_were_used_for_testing(nr_cubes))
        cubes_to_use.extend(self.sample_k_full_cubes_which_were_used_for_training(nr_cubes))

        cubes_to_use_path = [os.path.join(self.dataset_dir, i) for i in cubes_to_use]
        label_cubes_of_cubes_to_use_path = [os.path.join(self.dataset_labels_dir, i) for i in cubes_to_use]

        for cube_idx, cube_path in enumerate(cubes_to_use_path):
            np_array = self._load_cube_to_np_array(cube_path)  # (x,y,z)
            self.original_cube_dimensions = np_array.shape
            if sum(np_array.shape) > 550 and self.two_dim is False:
                inference_full_image = False

            if self.dataset_name.lower() in ("task04_sup", "task01_sup", "cellari_heart_sup_10_192", "cellari_heart_sup"):
                if self.tried is False:
                    inference_full_image = True
                else:
                    inference_full_image = False

            if inference_full_image is False:
                print("CUBE TOO BIG, PATCHING")
                patcher = Patcher(np_array, two_dim=self.two_dim)

                with torch.no_grad():
                    self.trainer.model.eval()
                    for idx, patch in patcher:

                        patch = torch.unsqueeze(patch, 0)  # (1,C,H,W or 1) -> (1,1,C,H,W or 1)
                        if self.config.model.lower() in (
                            "vnet_mg",
                            "unet_3d",
                            "unet_acs",
                            "unet_acs_axis_aware_decoder",
                            "unet_acs_with_cls",
                        ):
                            patch, pad_tuple = pad_if_necessary_one_array(patch, return_pad_tuple=True)

                        pred = self.trainer.model(patch)
                        assert pred.shape == patch.shape, "{} vs {}".format(pred.shape, patch.shape)
                        # need to then unpad to reconstruct
                        if self.two_dim is True:
                            raise RuntimeError("SHOULD  NOT BE USED HERE")

                        pred = self._unpad_3d_array(pred, pad_tuple)
                        pred = torch.squeeze(pred, dim=0)  # (1, 1, C,H,W) -> (1,C,H,W)
                        pred_mask = pred  # self._make_pred_mask_from_pred(pred)
                        del pred

                        patcher.predicitons_to_reconstruct_from[
                            :, idx
                        ] = pred_mask  # update array in patcher that will construct full cube predicted mask

                        dump_tensors()
                        torch.cuda.ipc_collect()
                        torch.cuda.empty_cache()
                        dump_tensors()

                pred_mask_full_cube = patcher.get_pred_mask_full_cube()
                # segmentations.append(patcher.get_pred_mask_full_cube())
            else:

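                # full-image path: push the whole cube through the model at once, falling back to patching on OOM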
                full_cube_tensor = torch.Tensor(np_array)
                full_cube_tensor = torch.unsqueeze(full_cube_tensor, 0)  # (C,H,W) -> (1,C,H,W)
                full_cube_tensor = torch.unsqueeze(full_cube_tensor, 0)  # (1,C,H,W) -> (1,1,C,H,W)

                with torch.no_grad():
                    self.trainer.model.eval()
                    if self.two_dim is False:
                        if self.config.model.lower() in (
                            "vnet_mg",
                            "unet_3d",
                            "unet_acs",
                            "unet_acs_axis_aware_decoder",
                            "unet_acs_with_cls",
                        ):
                            full_cube_tensor, pad_tuple = pad_if_necessary_one_array(full_cube_tensor, return_pad_tuple=True)
                            try:
                                p = self.trainer.model(full_cube_tensor)
                                p.to("cpu")
                                pred = p
                                del p
                                dump_tensors()
                                torch.cuda.ipc_collect()
                                torch.cuda.empty_cache()
                                dump_tensors()
                                torch.cuda.empty_cache()
                                pred = self._unpad_3d_array(pred, pad_tuple)
                                pred = torch.squeeze(pred, dim=0)  # (1, 1, C,H,W) -> (1,C,H,W)
                                pred = torch.squeeze(pred, dim=0)
                                pred_mask_full_cube = pred  # self._make_pred_mask_from_pred(pred)
                                torch.cuda.ipc_collect()
                                torch.cuda.empty_cache()
                                del pred

                            except RuntimeError as e:
                                if "out of memory" in str(e) or "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED" in str(e):
                                    print("TOO BIG FOR MEMORY, DEFAULTING TO PATCHING")
                                    # exit(0)
                                    dump_tensors()
                                    torch.cuda.ipc_collect()
                                    torch.cuda.empty_cache()
                                    dump_tensors()
                                    self.tried = True
                                    self.save_segmentation_examples(nr_cubes=nr_cubes, inference_full_image=False)
                                    return

                            # segmentations.append(pred_mask_full_cube)
                    else:
                        pred_mask_full_cube = torch.zeros(self.original_cube_dimensions)
                        for z_idx in range(full_cube_tensor.size()[-1]):
                            tensor_slice = full_cube_tensor[..., z_idx]  # SLICE : (1,1,C,H,W) -> (1,1,C,H)
                            assert tensor_slice.shape == (1, 1, self.original_cube_dimensions[0], self.original_cube_dimensions[1])
                            pred = self.trainer.model(tensor_slice)
                            pred = torch.squeeze(pred, dim=0)  # (1, 1, C,H) -> (1,C,H)
                            pred = torch.squeeze(pred, dim=0)  # (1,C,H) -> (C,H)
                            pred_mask_slice = pred  # self._make_pred_mask_from_pred(pred)
                            pred_mask_full_cube[..., z_idx] = pred_mask_slice

                        # segmentations.append(pred_mask_full_cube)

            # for idx, pred_mask_full_cube in enumerate(segmentations):

            print(cube_idx)

            if cube_idx < nr_cubes:
                if inference_full_image is True:
                    save_dir = os.path.join(self.save_dir, self.dataset_name, "testing_examples_full/", cubes_to_use[cube_idx][:-4])
                else:
                    save_dir = os.path.join(
                        self.save_dir, self.dataset_name, "testing_examples_full/", cubes_to_use[cube_idx][:-4] + "_with_patcher"
                    )
            else:
                if inference_full_image is True:
                    save_dir = os.path.join(self.save_dir, self.dataset_name, "training_examples_full/", cubes_to_use[cube_idx][:-4])
                else:
                    save_dir = os.path.join(
                        self.save_dir, self.dataset_name, "training_examples_full/", cubes_to_use[cube_idx][:-4] + "_with_patcher"
                    )

            make_dir(save_dir)

            # save nii of segmentation
            pred_mask_full_cube = pred_mask_full_cube.cpu()  # logits mask
            pred_mask_full_cube_binary = self._make_pred_mask_from_pred(pred_mask_full_cube)  # binary mask

            nifty_img = nibabel.Nifti1Image(np.array(pred_mask_full_cube).astype(np.float32), np.eye(4))
            nibabel.save(nifty_img, os.path.join(save_dir, cubes_to_use[cube_idx][:-4] + "_logits_mask.nii.gz"))

            nifty_img = nibabel.Nifti1Image(np.array(pred_mask_full_cube_binary).astype(np.float32), np.eye(4))
            nibabel.save(nifty_img, os.path.join(save_dir, cubes_to_use[cube_idx][:-4] + "_binary_mask.nii.gz"))

            # save .nii.gz of cube if is npy original full cube file
            if ".npy" in cube_path:
                nifty_img = nibabel.Nifti1Image(np_array.astype(np.float32), np.eye(4))
                nibabel.save(nifty_img, os.path.join(save_dir, cubes_to_use[cube_idx][:-4] + "_cube.nii.gz"))

            # self.save_3d_plot(np.array(pred_mask_full_cube), os.path.join(save_dir, "{}_plt3d.png".format(cubes_to_use[idx])))

            label_tensor_of_cube = torch.Tensor(self._load_cube_to_np_array(label_cubes_of_cubes_to_use_path[cube_idx]))
            label_tensor_of_cube = self.adjust_label_cube_acording_to_dataset(label_tensor_of_cube)
            label_tensor_of_cube_masked = np.array(label_tensor_of_cube)
            label_tensor_of_cube_masked = np.ma.masked_where(
                label_tensor_of_cube_masked < 0.5, label_tensor_of_cube_masked
            )  # it's binary anyway

            pred_mask_full_cube_binary_masked = np.array(pred_mask_full_cube_binary)
            pred_mask_full_cube_binary_masked = np.ma.masked_where(
                pred_mask_full_cube_binary_masked < 0.5, pred_mask_full_cube_binary_masked
            )  # it's binary anyway

            pred_mask_full_cube_logits_masked = np.array(pred_mask_full_cube)
            pred_mask_full_cube_logits_masked = np.ma.masked_where(
                pred_mask_full_cube_logits_masked < 0.3, pred_mask_full_cube_logits_masked
            )  # logits, so mask low values only for the overlay

            make_dir(os.path.join(save_dir, "slices/"))

            for z_idx in range(pred_mask_full_cube.shape[-1]):

                # binary
                fig = plt.figure(figsize=(10, 5))
                plt.imshow(np_array[:, :, z_idx], cmap=cm.Greys_r)
                plt.imshow(pred_mask_full_cube_binary_masked[:, :, z_idx], cmap="Accent")
                plt.axis("off")
                fig.savefig(
                    os.path.join(save_dir, "slices/", "slice_{}_binary.jpg".format(z_idx + 1)),
                    bbox_inches="tight",
                    dpi=150,
                )
                plt.close(fig=fig)

                # logits
                fig = plt.figure(figsize=(10, 5))
                plt.imshow(np_array[:, :, z_idx], cmap=cm.Greys_r)
                plt.imshow(pred_mask_full_cube_logits_masked[:, :, z_idx], cmap="Blues", alpha=0.5)
                plt.axis("off")
                fig.savefig(
                    os.path.join(save_dir, "slices/", "slice_{}_logits.jpg".format(z_idx + 1)),
                    bbox_inches="tight",
                    dpi=150,
                )
                plt.close(fig=fig)

                # dist of logits histogram
                distribution_logits = np.array(pred_mask_full_cube[:, :, z_idx].contiguous().view(-1))
                fig = plt.figure(figsize=(10, 5))
                plt.hist(distribution_logits, bins=np.arange(min(distribution_logits), max(distribution_logits) + 0.05, 0.05))
                fig.savefig(
                    os.path.join(save_dir, "slices/", "slice_{}_logits_histogram.jpg".format(z_idx + 1)),
                    bbox_inches="tight",
                    dpi=150,
                )
                plt.close(fig=fig)

                # save ground truth as well, overlaid on the original
                fig = plt.figure(figsize=(10, 5))
                plt.imshow(np_array[:, :, z_idx], cmap=cm.Greys_r)
                plt.imshow(label_tensor_of_cube_masked[:, :, z_idx], cmap="jet")
                plt.axis("off")
                fig.savefig(
                    os.path.join(save_dir, "slices/", "slice_{}_gt.jpg".format(z_idx + 1)),
                    bbox_inches="tight",
                    dpi=150,
                )
                plt.close(fig=fig)

            dice_score_soft = float(DiceLoss.dice_loss(pred_mask_full_cube, label_tensor_of_cube, return_loss=False))
            dice_score_binary = float(DiceLoss.dice_loss(pred_mask_full_cube_binary, label_tensor_of_cube, return_loss=False))
            x_flat = pred_mask_full_cube_binary.contiguous().view(-1)
            y_flat = label_tensor_of_cube.contiguous().view(-1)
            x_flat = x_flat.cpu()
            y_flat = y_flat.cpu()
            jaccard_scr = jaccard_score(y_flat, x_flat)
            metrics = {"dice_logits": dice_score_soft, "dice_binary": dice_score_binary, "jaccard": jaccard_scr}
            # print(dice)
            with open(os.path.join(save_dir, "dice.json"), "w") as f:
                json.dump(metrics, f)

            dump_tensors()
            torch.cuda.ipc_collect()
            torch.cuda.empty_cache()
            dump_tensors()
            dump_tensors()
            torch.cuda.ipc_collect()
            torch.cuda.empty_cache()
            dump_tensors()
            sleep(10)
Example #9
    def compute_metrics_for_all_cubes(self, inference_full_image=True):

        cubes_to_use = []

        dump_tensors()
        torch.cuda.ipc_collect()
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        torch.cuda.empty_cache()
        dump_tensors()
        torch.cuda.empty_cache()

        if "lidc" in self.dataset_name:
            return

        if hasattr(self.trainer, "model"):
            del self.trainer.model
            del self.trainer
            sleep(20)
            self.trainer = Trainer(config=self.config, dataset=None)
            dump_tensors()
            torch.cuda.ipc_collect()
            torch.cuda.empty_cache()
            dump_tensors()

        dump_tensors()
        torch.cuda.ipc_collect()
        torch.cuda.empty_cache()
        dump_tensors()

        self.trainer.load_model(from_path=True, path=self.model_path, phase="sup", ensure_sup_is_completed=True)

        if inference_full_image is False:
            print("PATCHING Will be Done")

        full_cubes_used_for_testing = self.get_all_cubes_which_were_used_for_testing()
        full_cubes_used_for_training = self.get_all_cubes_which_were_used_for_training()
        cubes_to_use.extend(full_cubes_used_for_testing)
        cubes_to_use.extend(full_cubes_used_for_training)

        cubes_to_use_path = [os.path.join(self.dataset_dir, i) for i in cubes_to_use]
        label_cubes_of_cubes_to_use_path = [os.path.join(self.dataset_labels_dir, i) for i in cubes_to_use]

        metric_dict = dict()

        (
            dice_logits_test,
            dice_logits_train,
            dice_binary_test,
            dice_binary_train,
            jaccard_test,
            jaccard_train,
            hausdorff_test,
            hausdorff_train,
        ) = ([], [], [], [], [], [], [], [])

        for idx, cube_path in enumerate(cubes_to_use_path):
            np_array = self._load_cube_to_np_array(cube_path)  # (x,y,z)
            self.original_cube_dimensions = np_array.shape
            if sum(np_array.shape) > 550 and self.two_dim is False:
                inference_full_image = False

            if self.dataset_name.lower() in ("task04_sup", "task01_sup", "cellari_heart_sup_10_192", "cellari_heart_sup"):
                if self.tried is False:
                    inference_full_image = True
                else:
                    inference_full_image = False

            if inference_full_image is False:
                print("CUBE TOO BIG, PATCHING")

                patcher = Patcher(np_array, two_dim=self.two_dim)

                with torch.no_grad():
                    self.trainer.model.eval()
                    for patch_idx, patch in patcher:

                        patch = torch.unsqueeze(patch, 0)  # (1,C,H,W or 1) -> (1,1,C,H,W or 1)
                        if self.config.model.lower() in (
                            "vnet_mg",
                            "unet_3d",
                            "unet_acs",
                            "unet_acs_axis_aware_decoder",
                            "unet_acs_with_cls",
                        ):
                            patch, pad_tuple = pad_if_necessary_one_array(patch, return_pad_tuple=True)

                        pred = self.trainer.model(patch)
                        assert pred.shape == patch.shape, "{} vs {}".format(pred.shape, patch.shape)
                        # need to then unpad to reconstruct
                        if self.two_dim is True:
                            raise RuntimeError("SHOULD  NOT BE USED HERE")

                        pred = self._unpad_3d_array(pred, pad_tuple)
                        pred = torch.squeeze(pred, dim=0)  # (1, 1, C,H,W) -> (1,C,H,W)
                        # pred_mask = self._make_pred_mask_from_pred(pred)
                        patcher.predicitons_to_reconstruct_from[
                            :, patch_idx
                        ] = pred  # update array in patcher that will construct full cube predicted mask
                        del pred
                        dump_tensors()
                        torch.cuda.ipc_collect()
                        torch.cuda.empty_cache()
                        dump_tensors()

                pred_mask_full_cube = patcher.get_pred_mask_full_cube()

            else:

                full_cube_tensor = torch.Tensor(np_array)
                full_cube_tensor = torch.unsqueeze(full_cube_tensor, 0)  # (C,H,W) -> (1,C,H,W)
                full_cube_tensor = torch.unsqueeze(full_cube_tensor, 0)  # (1,C,H,W) -> (1,1,C,H,W)

                with torch.no_grad():
                    self.trainer.model.eval()
                    if self.two_dim is False:
                        if self.config.model.lower() in (
                            "vnet_mg",
                            "unet_3d",
                            "unet_acs",
                            "unet_acs_axis_aware_decoder",
                            "unet_acs_with_cls",
                        ):
                            full_cube_tensor, pad_tuple = pad_if_necessary_one_array(full_cube_tensor, return_pad_tuple=True)
                            try:
                                p = self.trainer.model(full_cube_tensor)
                                p.to("cpu")
                                pred = p
                                del p
                                dump_tensors()
                                torch.cuda.ipc_collect()
                                torch.cuda.empty_cache()
                                dump_tensors()
                                torch.cuda.empty_cache()
                                pred = self._unpad_3d_array(pred, pad_tuple)
                                pred = torch.squeeze(pred, dim=0)  # (1, 1, C,H,W) -> (1,C,H,W)
                                pred = torch.squeeze(pred, dim=0)
                                pred_mask_full_cube = pred  # self._make_pred_mask_from_pred(pred)
                                torch.cuda.ipc_collect()
                                torch.cuda.empty_cache()
                                del pred

                            except RuntimeError as e:
                                if "out of memory" in str(e) or "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED" in str(e):
                                    print("TOO BIG FOR MEMORY, DEFAULTING TO PATCHING")
                                    # exit(0)
                                    dump_tensors()
                                    torch.cuda.ipc_collect()
                                    torch.cuda.empty_cache()
                                    dump_tensors()
                                    self.tried = True
                                    res = self.compute_metrics_for_all_cubes(inference_full_image=False)
                                    return res

                    else:
                        pred_mask_full_cube = torch.zeros(self.original_cube_dimensions)
                        for z_idx in range(full_cube_tensor.size()[-1]):
                            tensor_slice = full_cube_tensor[..., z_idx]  # SLICE : (1,1,C,H,W) -> (1,1,C,H)
                            assert tensor_slice.shape == (1, 1, self.original_cube_dimensions[0], self.original_cube_dimensions[1])
                            pred = self.trainer.model(tensor_slice)
                            pred = torch.squeeze(pred, dim=0)  # (1, 1, C,H) -> (1,C,H)
                            pred = torch.squeeze(pred, dim=0)  # (1,C,H) -> (C,H)
                            pred_mask_slice = pred  # self._make_pred_mask_from_pred(pred)
                            pred_mask_full_cube[..., z_idx] = pred_mask_slice

            full_cube_label_tensor = torch.Tensor(self._load_cube_to_np_array(label_cubes_of_cubes_to_use_path[idx]))
            full_cube_label_tensor = self.adjust_label_cube_acording_to_dataset(full_cube_label_tensor)

            pred_mask_full_cube = pred_mask_full_cube.to("cpu")
            threshold = self._set_threshold(pred_mask_full_cube, full_cube_label_tensor)
            pred_mask_full_cube_binary = self._make_pred_mask_from_pred(pred_mask_full_cube, threshold=threshold)

            dice_score_soft = float(DiceLoss.dice_loss(pred_mask_full_cube, full_cube_label_tensor, return_loss=False))
            dice_score_binary = float(DiceLoss.dice_loss(pred_mask_full_cube_binary, full_cube_label_tensor, return_loss=False))
            hausdorff = hausdorff_distance(np.array(pred_mask_full_cube_binary), np.array(full_cube_label_tensor))
            x_flat = pred_mask_full_cube_binary.contiguous().view(-1)
            y_flat = full_cube_label_tensor.contiguous().view(-1)
            x_flat = x_flat.cpu()
            y_flat = y_flat.cpu()
            jac_score = jaccard_score(y_flat, x_flat)

            if idx < len(full_cubes_used_for_testing):
                dice_logits_test.append(dice_score_soft)
                dice_binary_test.append(dice_score_binary)
                jaccard_test.append(jac_score)
                hausdorff_test.append(hausdorff)
            else:
                dice_logits_train.append(dice_score_soft)
                dice_binary_train.append(dice_score_binary)
                jaccard_train.append(jac_score)
                hausdorff_train.append(hausdorff)

            dump_tensors()
            torch.cuda.ipc_collect()
            torch.cuda.empty_cache()
            dump_tensors()
            sleep(10)
            print(idx)

        avg_jaccard_test = sum(jaccard_test) / len(jaccard_test)
        avg_jaccard_train = sum(jaccard_train) / len(jaccard_train)

        avg_dice_test_soft = sum(dice_logits_test) / len(dice_logits_test)
        avg_dice_test_binary = sum(dice_binary_test) / len(dice_binary_test)

        avg_dice_train_soft = sum(dice_logits_train) / len(dice_logits_train)
        avg_dice_train_binary = sum(dice_binary_train) / len(dice_binary_train)

        avg_hausdorff_train = sum(hausdorff_train) / len(hausdorff_train)
        avg_hausdorff_test = sum(hausdorff_test) / len(hausdorff_test)

        metric_dict["dice_test_soft"] = avg_dice_test_soft
        metric_dict["dice_test_binary"] = avg_dice_test_binary
        metric_dict["dice_train_soft"] = avg_dice_train_soft
        metric_dict["dice_train_binary"] = avg_dice_train_binary
        metric_dict["jaccard_test"] = avg_jaccard_test
        metric_dict["jaccard_train"] = avg_jaccard_train
        metric_dict["hausdorff_test"] = avg_hausdorff_test
        metric_dict["hausdorff_train"] = avg_hausdorff_train

        return metric_dict
Example #10
    def patch_code(self, enable_trampoline=False):
        patcher = Patcher(self.filename)
        assert len(self.paths.items()) > 1
        self.logger.info("-" * 30 + 'begin patch' + "-" * 30)
        # nop irrelevant nodes
        for node in self.deflat_analyzer.irrelevant_nodes:
            if self.arch_type == QL_ARCH.ARM64:
                start_addr = node.addr  # irrelevant nodes keep their zero base
                self.logger.debug(
                    f"nop at {hex(start_addr)}, size {node.size}")
                assert node.size % 4 == 0
                nop_count = int(node.size / 4)
                instruction_value = arm64_util.assemble_nop_instruction(
                ) * nop_count
                patcher.patch(start_addr, node.size, instruction_value)
            else:
                raise Exception("Unsupported Arch")

        max_trampoline_pool_cnt = 2 * len(self.paths.keys())
        trampoline_pool: List[int] = []  # only for arm64.
        used_trampoline_pool: List[int] = []  # block addr, target_addr
        if enable_trampoline:
            if self.arch_type == QL_ARCH.ARM64:
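                # harvest 4-byte slots from the nop'd irrelevant nodes to serve as forward trampolines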
                for node in self.deflat_analyzer.irrelevant_nodes:
                    if node.size >= 4:
                        for cur_addr_offset in range(0, node.size, 4):
                            trampoline_pool.append(node.addr + cur_addr_offset)
                            if len(trampoline_pool) >= max_trampoline_pool_cnt:
                                break
                    if len(trampoline_pool) >= max_trampoline_pool_cnt:
                        break
            trampoline_pool.extend(
                self.deflat_analyzer.manual_trampoline_addr_list)

        # handle control flow
        for block_id, successors in self.paths.items():
            block = self.block_container.get_block_from_id(block_id)
            start_addr = block.start_addr - self.base_addr
            self.logger.debug(
                f"patch working on {hex(start_addr)}, {successors}")
            instructions = [
                ins for ins in self.md.disasm(block.code, start_addr)
            ]
            if self.arch_type == QL_ARCH.ARM64:
                # ARM64 patch
                if len(successors) == 2:
                    # real branch

                    true_branch = self.block_container.get_block_from_id(
                        successors[0]).start_addr - self.base_addr
                    false_branch = self.block_container.get_block_from_id(
                        successors[1]).start_addr - self.base_addr
                    self.logger.debug(
                        f"true {hex(true_branch)}, false {hex(false_branch)}")
                    should_trampoline = False
                    current_trampoline_addr = -1
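                    # backward conditional branches get routed through a forward trampoline slot when one is available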
                    if true_branch < start_addr and enable_trampoline:
                        should_trampoline = True
                        for i in range(len(trampoline_pool)):
                            if trampoline_pool[i] > start_addr:
                                current_trampoline_addr = trampoline_pool[i]
                                trampoline_pool.pop(i)
                                break
                        if current_trampoline_addr == -1:
                            self.logger.error(
                                f"Fail to find the suitable trampoline at {hex(start_addr)} with branch {hex(true_branch)}"
                            )
                            should_trampoline = False

                    if instructions[
                            -2].id in arm64_util.get_branch_instruction_types(
                            ):
                        self.logger.debug(f"at -2")
                        instruction_address = instructions[-2].address
                        if should_trampoline:
                            self.logger.debug(
                                f"trampoline to {hex(current_trampoline_addr)}"
                            )

                            assert current_trampoline_addr != -1
                            patched_code = arm64_util.assemble_branch_instruction(
                                instruction_address,
                                current_trampoline_addr - instruction_address,
                                false_branch,
                                instructions[-2].op_str.split(',')[-1].strip())
                            assert len(patched_code) == 8

                            # patch in trampoline
                            used_trampoline_pool.append(
                                current_trampoline_addr)
                            b_instruction = arm64_util.assemble_no_branch_instruction(
                                current_trampoline_addr, true_branch)
                            patcher.patch(current_trampoline_addr, 4,
                                          b_instruction)
                        else:
                            # recalculate the offset due to the keystone bug, maybe.
                            patched_code = arm64_util.assemble_branch_instruction(
                                instruction_address,
                                true_branch - instruction_address,
                                false_branch,
                                instructions[-2].op_str.split(',')[-1].strip())

                            assert len(patched_code) == 8
                        patcher.patch(instruction_address, 8, patched_code)

                    elif instructions[
                            -3].id in arm64_util.get_branch_instruction_types(
                            ):
                        self.logger.debug(f"at -3")
                        instruction_address = instructions[-3].address
                        branch_offset = instruction_address + 4

                        if should_trampoline:
                            self.logger.debug(
                                f"trampoline to {hex(current_trampoline_addr)}"
                            )
                            assert current_trampoline_addr != -1
                            patched_code = arm64_util.assemble_branch_instruction(
                                branch_offset,
                                current_trampoline_addr - branch_offset,
                                false_branch,
                                instructions[-3].op_str.split(',')[-1].strip())
                            assert len(patched_code) == 8

                            # patch in trampoline
                            used_trampoline_pool.append(
                                current_trampoline_addr)
                            b_instruction = arm64_util.assemble_no_branch_instruction(
                                current_trampoline_addr, true_branch)
                            patcher.patch(current_trampoline_addr, 4,
                                          b_instruction)
                        else:
                            patched_code = arm64_util.assemble_branch_instruction(
                                branch_offset, true_branch - branch_offset,
                                false_branch,
                                instructions[-3].op_str.split(',')[-1].strip())

                            assert len(patched_code) == 8
                        patcher.copy_to_patch(instruction_address,
                                              branch_offset, 4)
                        patcher.patch(branch_offset, 8, patched_code)
                    else:
                        assert len(instructions) > 4
                        self.logger.warning(
                            "may encounter special csel instruction with larger than 2 offset, this is an experimental patch."
                        )
                        target_branch_instruction = None
                        # instructions[-3].id in arm64_util.get_branch_instruction_types()
                        for cursor_offset in range(
                                len(instructions) - 1, -1, -1):
                            if instructions[
                                    cursor_offset].id in arm64_util.get_branch_instruction_types(
                                    ):
                                target_branch_instruction = instructions[
                                    cursor_offset]
                                break
                        if target_branch_instruction is None:
                            raise Exception("Unhandled Branch Block")
                        target_branch_instruction_address = target_branch_instruction.address
                        self.logger.debug(
                            f"target_branch_instruction at {hex(target_branch_instruction_address)}, {target_branch_instruction.op_str}"
                        )
                        # Disable trampoline here!
                        branch_offset = start_addr + block.size - 8  # instructions[-2].address; we assume the last two instructions form the branch
                        self.logger.debug(
                            f"last two instruction at {hex(branch_offset)}")
                        assert isinstance(branch_offset, int)
                        patched_code = arm64_util.assemble_branch_instruction(
                            branch_offset, true_branch - branch_offset,
                            false_branch,
                            target_branch_instruction.op_str.split(
                                ',')[-1].strip())

                        assert len(patched_code) == 8
                        cnt = branch_offset - target_branch_instruction_address
                        patcher.copy_to_patch(
                            target_branch_instruction_address,
                            target_branch_instruction_address + 4, cnt)
                        patcher.patch(branch_offset, 8, patched_code)
                elif len(successors) == 1:
                    # force jump
                    instruction_address = instructions[-1].address

                    next_block = self.block_container.get_block_from_id(
                        successors[0]).start_addr - self.base_addr

                    self.logger.debug(f"next_block {hex(next_block)}")

                    patched_code = arm64_util.assemble_no_branch_instruction(
                        instruction_address, next_block)
                    patcher.patch(instruction_address, 4, patched_code)
                else:
                    assert len(successors) == 0
                    # return block
                    continue

            else:
                raise Exception("Unsupported Arch")

        patcher.write_patch_to_file()
        self.logger.info("Patch code finish.")