def makePredictions(self):
        """Predict segmentations for the challenge validation set and save
        them to disk as NIfTI files.

        The model is already loaded from disk by the constructor.  Each
        network output is padded back into the full 240x240x155 BraTS
        volume, the three channels (WT/TC/ET) are thresholded at 0.5 and
        merged into one uint8 label map (2 = whole tumor, 1 = tumor core,
        4 = enhancing tumor; later assignments overwrite earlier ones).
        """
        expConfig = self.expConfig
        assert hasattr(expConfig, "RESTORE_ID")
        assert hasattr(expConfig, "RESTORE_EPOCH")
        # names chosen to avoid shadowing the builtin `id`
        restore_id = expConfig.RESTORE_ID
        restore_epoch = expConfig.RESTORE_EPOCH

        print('============ PREDICTING ============')
        print(self.expConfig.EXPERIMENT_NAME)
        print("ID: {}".format(expConfig.id))
        print("RESTORE ID {}".format(expConfig.RESTORE_ID))
        print("RESTORE EPOCH {}".format(expConfig.RESTORE_EPOCH))
        print('====================================')

        basePath = os.path.join(self.predictionsBasePath,
                                "{}_e{}".format(restore_id, restore_epoch))
        # exist_ok avoids the race between an exists() check and makedirs()
        os.makedirs(basePath, exist_ok=True)

        with torch.no_grad():
            for data in self.challengeValDataLoader:
                inputs, pids, xOffset, yOffset, zOffset = data
                print("processing {}".format(pids[0]))
                inputs = inputs.to(self.device)

                # predict labels and bring into required shape
                outputs = expConfig.net(inputs)
                outputs = outputs[:, :, :, :, :155]
                s = outputs.shape
                fullsize = outputs.new_zeros((s[0], s[1], 240, 240, 155))
                # clip the patch where it overhangs the volume; the target
                # slice below clamps at the border, so shapes still match
                if xOffset + s[2] > 240:
                    outputs = outputs[:, :, :240 - xOffset, :, :]
                if yOffset + s[3] > 240:
                    outputs = outputs[:, :, :, :240 - yOffset, :]
                if zOffset + s[4] > 155:
                    outputs = outputs[:, :, :, :, :155 - zOffset]
                fullsize[:, :, xOffset:xOffset + s[2], yOffset:yOffset + s[3],
                         zOffset:zOffset + s[4]] = outputs

                # binarize output: one boolean mask per tumor sub-region
                wt, tc, et = fullsize.chunk(3, dim=1)
                s = fullsize.shape
                wt = (wt > 0.5).view(s[2], s[3], s[4])
                tc = (tc > 0.5).view(s[2], s[3], s[4])
                et = (et > 0.5).view(s[2], s[3], s[4])

                result = fullsize.new_zeros((s[2], s[3], s[4]),
                                            dtype=torch.uint8)
                # ordering matters: TC overwrites WT, ET overwrites TC
                result[wt] = 2
                result[tc] = 1
                result[et] = 4

                npResult = result.cpu().numpy()
                path = os.path.join(basePath, "{}.nii.gz".format(pids[0]))
                utils.save_nii(path, npResult, None, None)

        print("Done :)")
# ---- Esempio n. 2 (scraped snippet separator) ----
    def visualize_prob_maps(self):
        """Write the raw per-channel probability maps of the restored model
        as NIfTI files, one folder per patient.

        For each patient in the challenge validation loader the network
        output is padded back into the full 240x240x155 BraTS volume and
        the three channels (WT/TC/ET) are saved unthresholded as
        ``wt.nii.gz`` / ``tc.nii.gz`` / ``et.nii.gz``.
        """
        expConfig = self.expConfig
        assert hasattr(expConfig, "RESTORE_ID")
        assert hasattr(expConfig, "RESTORE_EPOCH")
        # names chosen to avoid shadowing the builtin `id`
        restore_id = expConfig.RESTORE_ID
        restore_epoch = expConfig.RESTORE_EPOCH

        print("==============VISUALIZE FEATURE MAP================")
        print(self.expConfig.EXPERIMENT_NAME)
        print("ID: {}".format(expConfig.id))
        print("RESTORE ID: {}".format(expConfig.RESTORE_ID))
        print("RESTORE EPOCH: {}".format(expConfig.RESTORE_EPOCH))
        print("===================================================")

        basePath = os.path.join(self.predictionsBasePath,
                                "{}_e{}".format(restore_id, restore_epoch))
        # exist_ok avoids the race between an exists() check and makedirs()
        os.makedirs(basePath, exist_ok=True)

        with torch.no_grad():
            for data in self.challengeValDataLoader:
                inputs, pids, xOffset, yOffset, zOffset = data
                print("processing {}".format(pids[0]))
                patient_path = os.path.join(basePath, "{}".format(pids[0]))
                # makedirs(exist_ok=True) so a rerun does not crash on an
                # already-existing patient folder (os.mkdir would raise)
                os.makedirs(patient_path, exist_ok=True)

                inputs = inputs.to(self.device)

                # predict labels and bring into required shape
                outputs = expConfig.net(inputs)
                outputs = outputs[:, :, :, :, :155]
                s = outputs.shape
                fullsize = outputs.new_zeros((s[0], s[1], 240, 240, 155))
                # clip the patch where it overhangs the volume; the target
                # slice below clamps at the border, so shapes still match
                if xOffset + s[2] > 240:
                    outputs = outputs[:, :, :240 - xOffset, :, :]
                if yOffset + s[3] > 240:
                    outputs = outputs[:, :, :, :240 - yOffset, :]
                if zOffset + s[4] > 155:
                    outputs = outputs[:, :, :, :, :155 - zOffset]
                fullsize[:, :, xOffset:xOffset + s[2], yOffset:yOffset + s[3],
                         zOffset:zOffset + s[4]] = outputs

                # split into the three sub-region channels (kept as raw
                # probabilities; no thresholding here)
                wt, tc, et = fullsize.chunk(3, dim=1)

                wt_np = wt.data.cpu().numpy()
                tc_np = tc.data.cpu().numpy()
                et_np = et.data.cpu().numpy()

                wt_path = os.path.join(patient_path, "wt.nii.gz")
                tc_path = os.path.join(patient_path, "tc.nii.gz")
                et_path = os.path.join(patient_path, "et.nii.gz")
                utils.save_nii(wt_path, wt_np[0, 0, :, :, :], None, None)
                utils.save_nii(tc_path, tc_np[0, 0, :, :, :], None, None)
                utils.save_nii(et_path, et_np[0, 0, :, :, :], None, None)
        print("Done :)")
# ---- Esempio n. 3 (scraped snippet separator) ----
    # NOTE(review): this is a fragment of an ensembling routine — its
    # enclosing function header, the initialization of `wt_total`, and the
    # definitions of `filePathList`, `ensembledPath` and `file` all lie
    # before this excerpt and are not visible here.
    # Accumulators for averaging probability maps over the ensemble;
    # shape (1, 1, 240, 240, 155) presumably matches one BraTS channel
    # after the chunk below — TODO confirm against load_nii's output shape.
    tc_total = torch.zeros(1, 1, 240, 240, 155)
    et_total = torch.zeros(1, 1, 240, 240, 155)
    for filePath in filePathList:
        # each file holds one model's full-size prediction (WT/TC/ET
        # stacked along dim 1 — assumed; verify against the saving code)
        fullsize, _, _ = utils.load_nii(filePath)
        fullsize = torch.from_numpy(fullsize)
        wt, tc, et = fullsize.chunk(3, dim=1)
        wt_total += wt
        tc_total += tc
        et_total += et

    # average the ensemble members, then threshold at 0.5 per channel
    file_number = len(filePathList)
    wt = wt_total / file_number
    tc = tc_total / file_number
    et = et_total / file_number
    wt = (wt > 0.5).view(240, 240, 155)
    tc = (tc > 0.5).view(240, 240, 155)
    et = (et > 0.5).view(240, 240, 155)

    # merge masks into a single uint8 BraTS label map; ordering matters:
    # TC (1) overwrites WT (2), ET (4) overwrites TC
    result = torch.zeros_like(wt, dtype=torch.uint8)
    result[wt] = 2
    result[tc] = 1
    result[et] = 4

    npResult = result.cpu().numpy()
    # common BraTS post-processing heuristic: if fewer than 500 enhancing-
    # tumor voxels survive, relabel them as tumor core
    ET_voxels = (npResult == 4).sum()
    if ET_voxels < 500:
        npResult[np.where(npResult == 4)] = 1

    path = os.path.join(ensembledPath, file)
    utils.save_nii(path, npResult, None, None)
# ---- Esempio n. 4 (scraped snippet separator) ----
    def makePredictions(self):
        """Predict segmentations for the challenge validation set with
        8-fold test-time augmentation (TTA) and save them to disk.

        The model is already loaded from disk by the constructor.  The
        network is run on the original input plus all 7 non-trivial flips
        over the spatial axes (2, 3, 4); each flipped prediction is flipped
        back and the 8 outputs are averaged.  The averaged prediction is
        padded back into the full 240x240x155 BraTS volume, saved once as a
        raw probability volume, then thresholded at 0.5 per channel and
        merged into a uint8 label map (2 = WT, 1 = TC, 4 = ET).  If fewer
        than 500 ET voxels remain they are relabeled as TC (standard BraTS
        post-processing heuristic).
        """
        expConfig = self.expConfig
        assert hasattr(expConfig, "RESTORE_ID")
        assert hasattr(expConfig, "RESTORE_EPOCH")
        # names chosen to avoid shadowing the builtin `id`
        restore_id = expConfig.RESTORE_ID
        restore_epoch = expConfig.RESTORE_EPOCH

        print('============ PREDICTING ============')
        print(self.expConfig.EXPERIMENT_NAME)
        print("ID: {}".format(expConfig.id))
        print("RESTORE ID {}".format(expConfig.RESTORE_ID))
        print("RESTORE EPOCH {}".format(expConfig.RESTORE_EPOCH))
        print('====================================')

        basePath = os.path.join(self.predictionsBasePath,
                                "{}_e{}".format(restore_id, restore_epoch))
        # exist_ok avoids the race between an exists() check and makedirs()
        os.makedirs(basePath, exist_ok=True)

        # all non-empty combinations of the three spatial axes
        tta_flip_dims = ((2,), (3,), (4,), (2, 3), (2, 4), (3, 4), (2, 3, 4))

        with torch.no_grad():
            for i, data in enumerate(self.challengeValDataLoader):

                if expConfig.AVERAGE_DATA:
                    # loader additionally yields a cohort-average volume;
                    # the network sees the residual against that average
                    inputs, pids, average_inputs, xOffset, yOffset, zOffset = data
                    inputs = inputs - average_inputs
                else:
                    inputs, pids, xOffset, yOffset, zOffset = data

                print("No.{} processing {}".format(i, pids[0]))
                inputs = inputs.to(self.device)

                # TTA: base prediction + 7 flip-predict-unflip passes
                outputs = expConfig.net(inputs)
                for dims in tta_flip_dims:
                    outputs += expConfig.net(
                        inputs.flip(dims=dims)).flip(dims=dims)
                outputs = outputs / 8.0  # mean over the 8 TTA passes

                # bring into required shape
                outputs = outputs[:, :, :, :, :155]
                s = outputs.shape
                fullsize = outputs.new_zeros((s[0], s[1], 240, 240, 155))
                # clip the patch where it overhangs the volume; the target
                # slice below clamps at the border, so shapes still match
                if xOffset + s[2] > 240:
                    outputs = outputs[:, :, :240 - xOffset, :, :]
                if yOffset + s[3] > 240:
                    outputs = outputs[:, :, :, :240 - yOffset, :]
                if zOffset + s[4] > 155:
                    outputs = outputs[:, :, :, :, :155 - zOffset]
                fullsize[:, :, xOffset:xOffset + s[2], yOffset:yOffset + s[3],
                         zOffset:zOffset + s[4]] = outputs

                # also persist the raw probability volume for later ensembling
                npFullsize = fullsize.cpu().numpy()
                path = basePath + "_fullsize"
                os.makedirs(path, exist_ok=True)
                path = os.path.join(path, "{}.nii.gz".format(pids[0]))
                utils.save_nii(path, npFullsize, None, None)

                # binarize output: one boolean mask per tumor sub-region
                wt, tc, et = fullsize.chunk(3, dim=1)
                s = fullsize.shape
                wt = (wt > 0.5).view(s[2], s[3], s[4])
                tc = (tc > 0.5).view(s[2], s[3], s[4])
                et = (et > 0.5).view(s[2], s[3], s[4])

                result = fullsize.new_zeros((s[2], s[3], s[4]),
                                            dtype=torch.uint8)
                # ordering matters: TC overwrites WT, ET overwrites TC
                result[wt] = 2
                result[tc] = 1
                result[et] = 4

                npResult = result.cpu().numpy()
                # relabel a tiny ET region as TC (reduces false-positive ET)
                ET_voxels = (npResult == 4).sum()
                if ET_voxels < 500:
                    npResult[np.where(npResult == 4)] = 1

                path = os.path.join(basePath, "{}.nii.gz".format(pids[0]))
                utils.save_nii(path, npResult, None, None)

        print("Done :)")