Example #1
    def __init__(self, hparam, loss, data, sp):
        self.hparam, self.data, self.loss, self.sp =  \
             hparam,      data,      loss,      sp

        self.optimizer = Optimization(hparam, loss, data)

        # record all segments and left/right leaf positions
        self.dict_segments = OrderedBunch()
        self.dict_lrs = OrderedBunch()
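
OrderedBunch is used throughout these examples as an ordered dict with attribute access. A minimal stand-in, assuming munch/bunch-style semantics (this sketch is inferred, not pyRad's actual implementation):

class OrderedBunch(dict):
    '''Sketch of the assumed semantics: dicts preserve insertion order in
    Python 3.7+, and attribute access aliases item access.'''
    __getattr__ = dict.__getitem__   # b.x1 reads b['x1']
    __setattr__ = dict.__setitem__   # b.x1 = v writes b['x1']

b = OrderedBunch()
b.x1, b.x2 = -5.0, 5.0
assert b['x1'] == -5.0               # both spellings hit the same entry
assert list(b) == ['x1', 'x2']       # iteration preserves insertion order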
Example #2
 def set_isocenter_and_beam_angle(self, rtplan_file):
     ds = pydicom.dcmread(rtplan_file, force=True)  # dcmread replaces the deprecated read_file
     self.beam_info = OrderedBunch()
     for i, beam in enumerate(ds.BeamSequence):
         self.beam_info[i + 1] = OrderedBunch()
         cp0 = beam.ControlPointSequence[0]
         self.beam_info[i + 1].SSD = float(cp0.SourceToSurfaceDistance) / 10  # DICOM stores mm; keep cm
         self.beam_info[i + 1].GantryAngle = float(cp0.GantryAngle)
         self.beam_info[i + 1].IsoCenter = np.array(
             [float(x) for x in cp0.IsocenterPosition])
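
DICOM stores distances in mm, hence the /10 above to get the SSD in cm. A quick sanity check of the parsed plan might look like this (the RTPLAN path is a placeholder):

import pydicom

ds = pydicom.dcmread('RTPLAN.dcm', force=True)   # placeholder path
for i, beam in enumerate(ds.BeamSequence, start=1):
    cp0 = beam.ControlPointSequence[0]
    print(f'beam {i}: gantry={float(cp0.GantryAngle)} deg, '
          f'SSD={float(cp0.SourceToSurfaceDistance) / 10} cm')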
Example #3
    def _get_leafInJawField(self):
        '''
        get y axis leaf positions by finding the leaves in jaw field 

        Return: self.dict_jawsPos {beam_id: [x1,x2,y1,y2]}, self.dict_inJaw {beam_id: (51,)}
        '''
        self.dict_jawsPos = OrderedBunch()  # jaw positions
        self.dict_inJaw = OrderedBunch()  # bool vector indicating which leaves lie in the jaw field
        ## get jaw positions from seg*.txt file
        seg_files = glob.glob(
            os.path.join(self.hparam.winServer_MonteCarloDir, 'templates',
                         'Seg_beamID*.txt'))
        seg_files.sort()  # sort to be consistent with beam_id

        for beam_id, seg in enumerate(seg_files):
            beam_id += 1
            H, W = self.data.dict_rayBoolMat[beam_id].shape
            #  print(f'beam_ID:{beam_id}; file_name:{seg}')
            with open(seg, 'r') as f:
                lines = f.readlines()

            ## get jaw positions
            is_jaw_line = False
            jaw = OrderedBunch()
            for line in lines:
                if 'MU_CollimatorJawX1' in line:
                    is_jaw_line = True
                    continue
                if is_jaw_line:
                    position = line.split(' ')[1:5]
                    position = [float(p) for p in position]
                    jaw.x1, jaw.x2, jaw.y1, jaw.y2 = position
                    print(f'jaw position: {jaw.x1, jaw.x2, jaw.y1, jaw.y2}')
                    break
            self.dict_jawsPos[beam_id] = jaw

            ## Is a leaf in jaws' open field?
            # for upper half leaves: if (leaf bottom edge > jaw_y1) {this leaf in valid field}
            # for lower half leaves: if (leaf upper  edge < jaw_y2) {this leaf in valid field}
            self.dict_inJaw[beam_id] = np.empty((self.nb_leafPairs, ), dtype=bool)  # np.bool was removed from NumPy
            for i, c in enumerate(self.coords):
                in_field = False
                if (c < jaw.y2 and c > jaw.y1):
                    in_field = True
                if (c < jaw.y2 and self.coords[i - 1] > jaw.y1):  # consider upper edge
                    in_field = True
                self.dict_inJaw[beam_id][i] = in_field
                #  print(f'{in_field}---{i}: {c}')
            #  print(f'{self.dict_inJaw[beam_id].sum()}')
            assert self.dict_inJaw[beam_id].sum() == H, f'H={H}, inJaw={self.dict_inJaw[beam_id].sum()}'
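
The two conditions cover leaves whose bottom edge lies in the field and leaves that only overlap it with their upper edge (the previous leaf's bottom edge). A toy run with made-up coordinates:

# Made-up leaf edge coordinates and jaw limits, top to bottom.
coords = [1.0, 0.5, 0.0, -0.5, -1.0]   # leaf bottom edges
jaw_y1, jaw_y2 = -0.7, 0.7

for i, c in enumerate(coords):
    in_field = jaw_y1 < c < jaw_y2               # bottom edge inside the field
    if c < jaw_y2 and coords[i - 1] > jaw_y1:    # upper edge inside (wraps at i=0, as above)
        in_field = True
    print(i, c, in_field)
# leaf 0 (bottom edge 1.0) is outside; leaf 4 (bottom edge -1.0) is inside
# because its upper edge (-0.5) still lies above jaw_y1.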
Example #4
File: data.py Project: lc82111/pyRad
    def _set_beamID_rayBeginNum_dict(self):
        self.dict_beamID_ValidRayBeginNum = OrderedBunch()
        begin = 0
        for beam_id, mask in self.dict_rayBoolMat.items():
            self.dict_beamID_ValidRayBeginNum[beam_id] = [begin, mask.sum()]  # [start index, #valid rays]
            begin += mask.sum()

        num_bixel = 0
        for beam_id, (_, num) in self.dict_beamID_ValidRayBeginNum.items():
            num_bixel += num
        assert num_bixel == self.deposition.shape[1]
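
A toy run with made-up masks shows the bookkeeping: each beam gets [start index, #valid rays] into the flat deposition column space:

import numpy as np

dict_rayBoolMat = {1: np.array([[1, 0], [1, 1]]),   # made-up validity masks
                   2: np.array([[0, 1], [1, 0]])}
begin, table = 0, {}
for beam_id, mask in dict_rayBoolMat.items():
    table[beam_id] = [begin, int(mask.sum())]
    begin += int(mask.sum())
print(table)   # {1: [0, 3], 2: [3, 2]} -> beam 2's rays occupy columns 3..4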
Example #5
    def _set_skin_coords(self):
        '''get the corners of the maximal skin rectangle'''
        dose_grid = self.dose_grid[:, 0:2]
        dose_grid = dose_grid.round().astype(np.uint)

        self.skin_lefttop = OrderedBunch({
            'x': dose_grid[:, 0].min(),
            'y': dose_grid[:, 1].min()
        })
        self.skin_rightbot = OrderedBunch({
            'x': dose_grid[:, 0].max(),
            'y': dose_grid[:, 1].max()
        })
Example #6
    def _save_best_state(self, loss, grad, dict_MUs, dict_segments, dict_lrs,
                         dict_partialExp):
        self.best_loss = loss

        # grad of fluence
        self.best_grads = grad  # (#valid_bixels,)

        self.best_dict_MUs = OrderedBunch()
        self.best_dict_segments = OrderedBunch()
        self.best_dict_lrs = OrderedBunch()
        self.best_dict_partialExp = OrderedBunch()
        for beam_id, lrs in dict_lrs.items():
            self.best_dict_MUs[beam_id] = to_np(dict_MUs[beam_id])
            self.best_dict_segments[beam_id] = to_np(dict_segments[beam_id])
            self.best_dict_partialExp[beam_id] = to_np(dict_partialExp[beam_id])
            self.best_dict_lrs[beam_id] = lrs
Example #7
    def computer_fluence(self, dict_segments, dict_partialExp, dict_lrs,
                         dict_MUs):
        '''Compute fluence from segments and MUs.
        Arguments:
            dict_segments: {beam_id: tensor consists of segment columns (hxw, #aperture)}
            dict_partialExp: {beam_id: tensor (#aperture, h, 2)}
            dict_lrs: {beam_id: ndarray (#aperture, h, 2)}
            dict_MUs: {beam_id: tensor (#aperture,)}
        Return: 
            fluence (#valid_bixels, )
        For retain_grad() see : https://discuss.pytorch.org/t/how-do-i-calculate-the-gradients-of-a-non-leaf-variable-w-r-t-to-a-loss-function/5112
        '''
        dict_fluences = OrderedBunch()
        for beam_id in range(1, len(dict_segments) + 1):
            # 0. modulate segment with partial exposure
            pe = torch.sigmoid(dict_partialExp[beam_id])  # [0,1] constraint
            seg = self._modulate_segment_with_partialExposure(
                dict_segments[beam_id], pe, dict_lrs[beam_id],
                self.data.dict_rayBoolMat[beam_id])

            # 1. modulated_segment * MU
            MU = torch.abs(dict_MUs[beam_id])  # nonnegative constraint
            dict_fluences[beam_id] = torch.matmul(seg, MU)  # {beam_id: vector}

        fluence, dict_fluenceMaps = self.data.project_to_validRays_torch(
            dict_fluences)  # (#valid_bixels,), {beam_id: matrix}
        fluence.retain_grad()
        return fluence, dict_fluenceMaps
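
fluence is a non-leaf tensor (it is computed from the MU leaves), so without the retain_grad() call its .grad would be None after backward(). A self-contained illustration:

import torch

MU = torch.tensor([2.0, 3.0], requires_grad=True)   # leaf tensor
seg = torch.tensor([[1.0, 0.0],
                    [1.0, 1.0]])                    # (bixels, #aperture)
fluence = torch.matmul(seg, torch.abs(MU))          # non-leaf tensor
fluence.retain_grad()
fluence.sum().backward()
print(fluence.grad)   # tensor([1., 1.]) -- would be None without retain_grad()
print(MU.grad)        # tensor([2., 1.]) -- gradients still reach the leaf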
Example #8
File: data.py Project: lc82111/pyRad
 def set_doseGrid(self, data):
     gs = data.organ_info['ITV_skin']['Grid Size']
     doseGrid_spacing = np.array([gs, gs, 2.5])  # z spacing of 2.5 mm given by Juyao
     cprint('using z spacing 2.5 given by Juyao!', 'red')
     self.doseGrid = OrderedBunch({'spacing': doseGrid_spacing,
                                   'size': (self.CT.size * self.CT.spacing / doseGrid_spacing).astype(int),  # np.int was removed from NumPy
                                   })
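
The size formula keeps the CT's physical extent while resampling to the dose-grid spacing. With the CT geometry quoted in Example #26's comments (spacing [1.171875, 1.171875, 2.5] mm at 512x512x126) and a hypothetical in-plane 'Grid Size' of 3 mm:

import numpy as np

CT_size    = np.array([512, 512, 126])            # from Example #26's comments
CT_spacing = np.array([1.171875, 1.171875, 2.5])  # mm
gs = 3.0                                          # hypothetical 'Grid Size' in mm
doseGrid_spacing = np.array([gs, gs, 2.5])
print((CT_size * CT_spacing / doseGrid_spacing).astype(int))  # [200 200 126]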
Example #9
File: evaluation.py Project: lc82111/pyRad
 def load_MonteCarlo_OrganDose(self, MUs, name, scale=1):
     MUs = np.abs(MUs) / self.hparam.dose_scale  # x1000
     MCdoses = self.unitMUDose * MUs * scale
     MCdoses = MCdoses.sum(axis=0, keepdims=False)  #  (#slice, H, W) 
     MCdoses = torch.tensor(MCdoses, dtype=torch.float32, device=self.hparam.device)
     dict_organ_doses = parse_MonteCarlo_dose(MCdoses, self.data)  # parse organ_doses to obtain individual organ doses
     return OrderedBunch({'dose':dict_organ_doses, 'name':name})
Example #10
def get_segment_grad(dict_segments, dict_rayBoolMat):
    dict_gradMaps = OrderedBunch()
    for beam_id, mask in dict_rayBoolMat.items():  # for each beam
        grad = dict_segments[beam_id].grad.detach().cpu().numpy()  # (h*w, #aperture=1)
        grad = grad.sum(axis=-1)  # (h*w,)
        dict_gradMaps[beam_id] = grad.reshape(*mask.shape) * mask
    return dict_gradMaps
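
The reshape-and-mask step projects the flat per-bixel gradient back onto the beam's (H, W) grid and zeroes blocked bixels. A toy run with made-up numbers:

import numpy as np

mask = np.array([[1, 0],
                 [1, 1]])                 # 1 = valid bixel
grad = np.array([0.2, -0.5, 0.1, 0.4])   # flat (h*w,) gradient, made up
print(grad.reshape(*mask.shape) * mask)
# [[ 0.2 -0. ]
#  [ 0.1  0.4]]  -> the blocked bixel's gradient is masked out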
Example #11
    def get_random_apertures(self):
        '''
        return: self.dict_randomApertures {beam_id: ndarray(nb_apertures, H, W)}
        '''
        def get_random_shape(H, W):
            if np.random.randint(0, 2):
                img = random_shapes((H, W),
                                    max_shapes=3,
                                    multichannel=False,  # newer scikit-image uses channel_axis=None instead
                                    min_size=min(H, W) // 3,
                                    allow_overlap=True,
                                    intensity_range=(1, 1))[0]
                img = np.where(img == 255, 0, img)
            else:
                img = np.zeros((H, W), dtype=np.uint8)
                for i in range(len(img)):  # for each row
                    l, r = np.random.randint(0, W + 1, (2, ))
                    if l == r: continue
                    if l > r: l, r = r, l
                    img[i, l:r] = 1
            return img

        save_path = Path(self.hparam.patient_ID).joinpath('dataset/dict_randomApertures.pickle')
        if os.path.isfile(save_path):
            self.dict_randomApertures = unpickle_object(save_path)
            return

        self.dict_randomApertures = OrderedBunch()
        for beam_id in range(1, self.nb_beams + 1):  # for each beam
            H, W = self.data.dict_rayBoolMat[beam_id].shape
            self.dict_randomApertures[beam_id] = np.zeros((self.nb_apertures, H, W), np.uint8)  # apertures default to closed
            for i, apt in enumerate(self.dict_randomApertures[beam_id]):  # for each aperture
                if i == 0:  # make the first aperture of each beam fully open (all leaves open)
                    self.dict_randomApertures[beam_id][i] = np.ones((H, W), np.uint8)
                else:
                    self.dict_randomApertures[beam_id][i] = get_random_shape(H, W)
        pickle_object(save_path, self.dict_randomApertures)
Example #12
 def solve(self, dict_gradMaps):
     ''' Find aperture shapes with the minimum gradient based on gradMaps.
        Arguments:
              dict_gradMaps: {beam_id: gradient maps; ndarray; matrix}
        Return: 
              dict_segments: {beam_id: aperture shapes; bool ndarray; vector}
        '''
     cprint('solving SubProblem .............', 'yellow')
     dict_segments, dict_lrs = OrderedBunch(), OrderedBunch()
     for beam_id, grad_map in dict_gradMaps.items():
          blocked_bixels = ~self.data.dict_rayBoolMat[beam_id]  # True marks a non-valid/blocked bixel
          grad_map[blocked_bixels] = 10000.  # large cost so _smallest_contiguous_sum_2d() avoids blocked regions
          grad_map = grad_map.astype(np.float64)
          dict_segments[beam_id], dict_lrs[beam_id] = _smallest_contiguous_sum_2d(grad_map)
     cprint('done', 'green')
     return dict_segments, dict_lrs
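
_smallest_contiguous_sum_2d itself is not shown in these examples; its 1D core is Kadane's algorithm for the minimum-sum contiguous interval (the open leaf range with the most negative gradient). A hypothetical per-row sketch:

import numpy as np

def smallest_contiguous_sum_1d(row):
    '''Return (l, r) so that row[l:r] has minimal sum; the empty interval
    (closed row) is allowed. Hypothetical 1D analogue of the 2D solver.'''
    best, best_lr = 0.0, (0, 0)
    cur, cur_l = 0.0, 0
    for i, g in enumerate(row):
        if cur > 0:                  # a positive prefix never helps; restart
            cur, cur_l = 0.0, i
        cur += g
        if cur < best:
            best, best_lr = cur, (cur_l, i + 1)
    return best_lr

print(smallest_contiguous_sum_1d(np.array([1., -2., 3., -4., -1., 5.])))  # (3, 5)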
Example #13
File: data.py Project: lc82111/pyRad
    def _set_paramters_from_csv_table(self):
        df = pd.read_csv(self.hparam.csv_file, skiprows=1, index_col=0, skip_blank_lines=True)  # duplicated column will be renamed automatically

        # drop nan columns
        cols = [c for c in df.columns if 'Unnamed' not in c] 
        df = df[cols]

        # drop organ with 0 point num
        organ_names = []
        for name, pointNum in df.loc['Points Number'].items():
            if pointNum == '0':
                organ_names.append(name)
        df = df.drop(organ_names, axis='columns')

        # drop the other organs if skin is present
        is_skin = False
        nonskin_names, skin_names = [], []
        for name in df.columns:
            if 'skin' in name:
                is_skin = True
                skin_names.append(name)
            else:
                nonskin_names.append(name)
        if is_skin:
            self.csv_loss_table = df.drop(skin_names, axis='columns')  # used later in loss.py, so the duplicated columns are kept there
            df = df.drop(nonskin_names, axis='columns')

        # drop duplicated columns
        df = df.loc[:, ~df.columns.str.replace(r"(\.\d+)$", "", regex=True).duplicated()]

        # set up dict of organ info
        self.organ_info = OrderedBunch(df.loc[['Grid Size', 'Points Number']].astype(float).to_dict())
        for organ_name, v in self.organ_info.copy().items():
            self.organ_info[organ_name]['Grid Size'] = v['Grid Size']*10.  # cm to mm
            self.organ_info[organ_name]['Points Number'] = int(v['Points Number'])
        cprint('the following csv info will be used to parse the deposition matrix', 'green')
        pp.pprint(dict(self.organ_info))

        tmp = self.csv_loss_table.loc[['Grid Size', 'Points Number', 'Hard/Soft', 'Constraint Type', 'Min Dose', 'Max Dose', 'DVH Volume', 'Priority']]
        cprint('the following csv info will be used in the loss function', 'green')
        with pd.option_context('display.max_rows', None, 'display.max_columns', None):
            print(self.csv_loss_table.head(10))
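
The duplicated-column drop leans on pandas renaming repeated CSV headers to 'name.1', 'name.2', ...; stripping that suffix and keeping the first occurrence deduplicates. A toy demonstration:

import pandas as pd

df = pd.DataFrame([[1, 2, 3]], columns=['PTV', 'PTV.1', 'skin'])
kept = df.loc[:, ~df.columns.str.replace(r"(\.\d+)$", "", regex=True).duplicated()]
print(list(kept.columns))   # ['PTV', 'skin']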
Example #14
    def _test(is_seg_modulate):
        def _modulate_segment_with_partialExposure(seg, lrs, pes):
            '''
            Impose the partialExp effect at the leaf endpoints.
            lrs: (#aperture, H, 2); seg:(HxW, #aperture); pes:(#aperture, H, 2)
            '''
            for i, aperture in enumerate(lrs):  # for each aperture
                for j, lr in enumerate(aperture):  # for each row
                    [l, r] = lr
                    l_pe, r_pe = sigmoid(pes[i, j])
                    # close hopeless bixel?
                    if l_pe < 0.6:
                        #  seg[j*W:j*W+W, i] [l] = 0
                        seg[j * W + l, i] = 0
                    if r_pe < 0.6:
                        #  seg[j*W:j*W+W, i] [r-1] = 0
                        seg[j * W + (r - 1), i] = 0
            return seg

        dict_segments, dict_MUs = OrderedBunch(), OrderedBunch()
        for (beam_id, MU), (_, seg) in zip(mp.dict_MUs.items(),
                                           mp.dict_segments.items()):
            H, W = mp.data.dict_rayBoolMat[beam_id].shape

            validRay = mp.data.dict_rayBoolMat[beam_id].flatten().reshape((-1, 1))  # 1 indicates a valid bixel
            validRay = np.tile(validRay, (1, seg.shape[1]))  # (HxW, #aperture)
            seg = seg * validRay  # partialExp may open bixels in non-valid regions.

            lrs = mp.dict_lrs[beam_id]  # (#aperture, H, 2)
            pes = mp.dict_partialExp[beam_id]  # (#aperture, H, 2)
            if is_seg_modulate:
                seg = _modulate_segment_with_partialExposure(seg, lrs, pes)

            dict_segments[beam_id] = torch.tensor(seg,
                                                  dtype=torch.float32,
                                                  device='cpu')
            dict_MUs[beam_id] = torch.tensor(MU,
                                             dtype=torch.float32,
                                             device='cpu',
                                             requires_grad=True)
Example #15
    def _get_x_axis_position(self):
        '''
         get x axis position from self.dict_randomApertures 
         Return: 
            self.dict_lrs {beam_id: strings (#aperture, 51)}, NOTE: 51 leaf pairs in reversed order.
            self.nb_beams
            self.nb_apertures
        '''
        self.dict_lrs = OrderedBunch()  # {beam_id: (#aperture, H)}

        def get_leafPos_for_a_row(row):
            '''
            [0.0] 0 [0.5] 0 [1.0] 1 [1.5] 1 [2.0] 0 [2.5] 0 [3.0]
            '''
            jaw_x1 = self.dict_jawsPos[beam_id].x1
            if (row == 0).all():  # closed row
                lr = default_lr
                first, last = 0, 0
            else:  # opened row
                first, last = np.nonzero(row)[0][[0, -1]]  # indices of the first and last open bixels
                #  last += 1 # block the left bixel of first 1, and right bixel of last 1; TODO +1?
                l = jaw_x1 + first * self.x_spacing  # offset in steps of x_spacing (0.5)
                r = jaw_x1 + last * self.x_spacing
                lr = '{:.2f} {:.2f}\n'.format(l, r)
            #  cprint(f'row:{row_idx}; {first}  {last};  {lr}', 'green')
            return lr

        for beam_id, apts in self.dict_randomApertures.items():  # 0. for each beam
            #  print(f'\n beam_id:{beam_id}')
            H, W = self.data.dict_rayBoolMat[beam_id].shape
            #  print(f'height:{H}; width:{W}')

            pos = self.dict_jawsPos[beam_id].x1 - self.x_spacing  # a leaf parks at jaw_x1 - x_spacing by default
            default_lr = '{:.2f} {:.2f}\n'.format(pos, pos)  # by default, leaves closed
            self.dict_lrs[beam_id] = np.full((self.nb_apertures, self.nb_leafPairs), default_lr, dtype=object)  # (#aperture, 51)
            for a in range(self.nb_apertures):  # 1. for each aperture
                row_idx = 0
                for i in range(self.nb_leafPairs):  # 2. for each row
                    if self.dict_inJaw[beam_id][i]:
                        lr = get_leafPos_for_a_row(apts[a, row_idx])
                        self.dict_lrs[beam_id][a, i] = lr
                        row_idx += 1
                self.dict_lrs[beam_id][a] = self.dict_lrs[beam_id][a, ::-1]  # NOTE: in the TPS, the 51 leaf pairs are in reversed order.
Example #16
    def load_fluence_dose_from_TPS(self, tps_ray_inten_file='./data/TPSray.txt'):
        # intensity
        fluence = np.loadtxt(tps_ray_inten_file)
        fluence = torch.tensor(fluence, dtype=torch.float32, device=self.hparam.device)
        dict_FMs = self.data.project_to_fluenceMaps(to_np(fluence))

        # dose
        doses = torch.matmul(self.deposition, fluence) # cal dose
        # split organ_doses to obtain individual organ doses
        dict_organ_doses = split_doses(doses, self.data.organ_inf)

        return OrderedBunch({'fluence':to_np(fluence), 'dose':dict_organ_doses, 'fluenceMaps': dict_FMs, 'name': 'TPS'})
Example #17
File: evaluation.py Project: lc82111/pyRad
    def load_TPS_OrganDose(self, name='TPSOptimResult'):
        # intensity
        fluence = np.loadtxt(self.hparam.tps_ray_inten_file)
        fluence = torch.tensor(fluence, dtype=torch.float32, device=self.hparam.device)
        dict_FMs = self.data.project_to_fluenceMaps(to_np(fluence))

        # dose
        doses = cal_dose(self.deposition, fluence)
        # split organ_doses to obtain individual organ doses
        dict_organ_doses = split_doses(doses, self.data.organName_ptsNum)

        return OrderedBunch({'fluence':to_np(fluence), 'dose':dict_organ_doses, 'fluenceMaps': dict_FMs, 'name': name})
Example #18
    def _get_x_axis_position(self):
        '''
         get x axis position from optimized_segments_MUs_file
         Return: 
            self.dict_lrs {beam_id: strings (#aperture, 51)}, NOTE: 51 leaf pairs in reversed order.
            self.nb_beams
            self.nb_apertures
        '''
        self.dict_lrs = OrderedBunch()  # {beam_id: (#aperture, H)}

        def get_leafPos_for_a_row(row):
            '''
            [0.0] 0 [0.5] 0 [1.0] 1 [1.5] 1 [2.0] 0 [2.5] 0 [3.0]
            '''
            jaw_x1 = self.dict_jawsPos[beam_id].x1
            if (row == 0).all():  # closed row
                lr = default_lr
                first, last = 0, 0
            else: # opened row
                first, last = np.nonzero(row)[0][[0,-1]]  # get first 1 and last 1 positions
                #  last += 1 # block the left bixel of first 1, and right bixel of last 1; TODO +1?
                l = jaw_x1 + first * self.x_spacing  # offset in steps of x_spacing (0.5)
                r = jaw_x1 + last * self.x_spacing
                lr = '{:.2f} {:.2f}\n'.format(l, r)
            #  cprint(f'row:{row_idx}; {first}  {last};  {lr}', 'green')
            return lr
        file_name = os.path.join(self.hparam.optimized_segments_MUs_file, 'optimized_segments_MUs.pickle')
        with open(file_name, 'rb') as f:
            self.segs_mus = pickle.load(f)
        self.nb_apertures = len(self.segs_mus[1]['MU'])
        self.nb_beams     = len(self.segs_mus)
        self.old_MUs      = np.empty((self.nb_beams*self.nb_apertures, 1, 1, 1), dtype=np.float32)

        for beam_id, seg_mu in self.segs_mus.items():  # 0. for each beam
            #  print(f'\n beam_id:{beam_id}')
            H, W = self.data.dict_rayBoolMat[beam_id].shape
            #  print(f'height:{H}; width:{W}')
            segs, mus = seg_mu['Seg'], seg_mu['MU']
            self.old_MUs[(beam_id-1)*self.nb_apertures: (beam_id-1)*self.nb_apertures+self.nb_apertures] = mus.reshape((self.nb_apertures,1,1,1)) 

            pos = self.dict_jawsPos[beam_id].x1-self.x_spacing  # leaf closed at jaw_x1-0.5 by default 
            default_lr = '{:.2f} {:.2f}\n'.format(pos, pos)  # by default, leaves closed 
            self.dict_lrs[beam_id] = np.full((self.nb_apertures, self.nb_leafPairs), default_lr, dtype=object)  # (#aperture, 51), 
            for aperture in range(self.nb_apertures):   # 1. for each aperture
                seg = segs[:, aperture]
                seg = seg.reshape(H,W)
                row_idx = 0
                for i in range(self.nb_leafPairs): # 2. for each row
                    if self.dict_inJaw[beam_id][i]:
                        lr = get_leafPos_for_a_row(seg[row_idx])
                        self.dict_lrs[beam_id][aperture, i] = lr
                        row_idx += 1
                self.dict_lrs[beam_id][aperture] = self.dict_lrs[beam_id][aperture, ::-1]  # NOTE: In TPS, 51 leaf pairs are in reversed order. 
Example #19
    def load_Depos_organ_dose(self, name):
        # get seg and MU
        file_name = self.hparam.optimized_segments_MUs_file+'/optimized_segments_MUs.pickle'
        if not os.path.isfile(file_name): raise ValueError(f'file does not exist: {file_name}')
        cprint(f'load segments and MUs from {file_name}', 'yellow')
        segments_and_MUs = unpickle_object(file_name)
        dict_segments, dict_MUs = OrderedBunch(), OrderedBunch()
        for beam_id, seg_MU in segments_and_MUs.items():
            dict_segments[beam_id] = torch.tensor(seg_MU['Seg'], dtype=torch.float32, device=self.hparam.device)
            dict_MUs[beam_id]      = torch.tensor(seg_MU['MU'],  dtype=torch.float32, device=self.hparam.device, requires_grad=True)

        # compute fluence
        fluence, _ = computer_fluence(self.data, dict_segments, dict_MUs)
        fluence    = fluence / self.hparam.dose_scale # * 1000
        dict_FMs   = self.data.project_to_fluenceMaps(to_np(fluence))

        # compute dose
        doses = torch.matmul(self.deposition, fluence) # cal dose
        # split organ_doses to obtain individual organ doses
        dict_organ_doses = split_doses(doses, self.data.organ_inf)
        
        return OrderedBunch({'fluence':to_np(fluence), 'dose':dict_organ_doses, 'fluenceMaps': dict_FMs, 'name': name})
Example #20
File: data.py Project: lc82111/pyRad
 def project_to_validRays_torch(self, dict_fluences):
     ''' Convert flattened fluence maps to a valid-ray fluence vector
     Arguments: 
         dict_fluences: {beam_id: fluence vector}
     Return:
         valid_rays: (#valid_bixels,)
         dict_fluenceMaps: {beam_id: fluence matrix} '''
     dict_fluenceMaps = OrderedBunch()
     valid_rays = []
     for (beam_id, msk), (_, fluence) in zip(self.dict_rayBoolMat.items(), dict_fluences.items()):
         msk = torch.tensor(msk, dtype=torch.bool, device=fluence.device)
         valid_rays.append(fluence.view(*msk.shape)[msk].flatten()) # select valid rays and back to 1d vector
         dict_fluenceMaps[beam_id] = fluence.detach()
     valid_rays = torch.cat(valid_rays, dim=0)
     return valid_rays, dict_fluenceMaps
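
A toy run of the same masking with one beam and a made-up 2x2 mask:

import torch

msk = torch.tensor([[True, False],
                    [True, True]])            # validity mask, made up
fluence = torch.tensor([10., 20., 30., 40.])  # flat (H*W,) fluence map
print(fluence.view(*msk.shape)[msk].flatten())  # tensor([10., 30., 40.])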
Example #21
File: evaluation.py Project: lc82111/pyRad
    def load_fluenceOptim_OrganDose(self, name):
        # intensity
        fluence = unpickle_object(os.path.join(self.hparam.optimized_fluence_file_path, 'optimized_fluence.pickle'))
        fluence = torch.tensor(fluence, device=self.hparam.device)

        fluence = torch.abs(fluence)
        fluence = torch.clamp(fluence, max=self.hparam.max_fluence)  # cap at max_fluence
        fluence = fluence / self.hparam.dose_scale # * 1000
        dict_FMs = self.data.project_to_fluenceMaps(to_np(fluence))  # fluence maps for the return value

        # compute dose
        doses = cal_dose(self.deposition, fluence)
        # split organ_doses to obtain individual organ doses
        dict_organ_doses = split_doses(doses, self.data.organName_ptsNum)
        
        return OrderedBunch({'fluence':to_np(fluence), 'dose':dict_organ_doses, 'fluenceMaps': dict_FMs, 'name': name})
Example #22
def save_result(mp):
    def _modulate_segment_with_partialExposure(seg, lrs, pes):
        '''
        Impose the partialExp effect at the leaf endpoints.
        lrs: (#aperture, H, 2); seg:(HxW, #aperture); pes:(#aperture, H, 2)
        '''
        for i, aperture in enumerate(lrs):  # for each aperture
            for j, lr in enumerate(aperture):  # for each row
                assert label(seg[j * W:j * W + W, i])[1] <= 1  # at most one connected open run per row
                [l, r] = lr
                l_pe, r_pe = sigmoid(pes[i, j])
                # close hopeless bixel?
                if l_pe < 0.6:
                    seg[j * W:j * W + W, i][l] = 0
                if r_pe < 0.6:
                    seg[j * W:j * W + W, i][r - 1] = 0
        return seg

    results = OrderedBunch()
    for (beam_id, MU), (_, seg) in zip(mp.dict_MUs.items(),
                                       mp.dict_segments.items()):
        H, W = mp.data.dict_rayBoolMat[beam_id].shape

        validRay = mp.data.dict_rayBoolMat[beam_id].flatten().reshape((-1, 1))  # 1 indicates a valid bixel
        validRay = np.tile(validRay, (1, seg.shape[1]))  # (HxW, #aperture)
        seg = seg * validRay  # partialExp may open bixels in non-valid regions.

        lrs = mp.dict_lrs[beam_id]  # (#aperture, H, 2)
        pes = mp.dict_partialExp[beam_id]  # (#aperture, H, 2)
        seg = _modulate_segment_with_partialExposure(seg, lrs, pes)

        results[beam_id] = {
            'MU': np.abs(MU),
            'Seg': seg,
            'lrs': lrs,
            'PEs': pes,
            'global_step': mp.optimizer.global_step
        }

    if not os.path.isdir(hparam.optimized_segments_MUs_file_path):
        os.makedirs(hparam.optimized_segments_MUs_file_path)
    pickle_object(
        os.path.join(hparam.optimized_segments_MUs_file_path,
                     'optimized_segments_MUs.pickle'), results)
Example #23
def computer_fluence(data, dict_segments, dict_MUs):
    '''Compute fluence from segments and MUs.
    data: a data class instance
    dict_segments: {beam_id: matrix consists of segment columns}
    dict_MUs:{beam_id: vector of segment MU}
    return: fluence (#valid_bixels, )
    For retain_grad() see : https://discuss.pytorch.org/t/how-do-i-calculate-the-gradients-of-a-non-leaf-variable-w-r-t-to-a-loss-function/5112
    '''
    dict_fluences = OrderedBunch()
    for beam_id, seg in dict_segments.items():
        MU = torch.abs(dict_MUs[beam_id])  # nonnegative constraint
        dict_fluences[beam_id] = torch.matmul(seg, MU)  # {beam_id: vector}

    fluence, dict_fluenceMaps = data.project_to_validRays_torch(
        dict_fluences)  # (#valid_bixels,), {beam_id: matrix}
    fluence.retain_grad()
    return fluence, dict_fluenceMaps
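
Each column of seg is one flattened aperture, so the matmul is an MU-weighted sum of apertures. A minimal numeric check:

import torch

seg = torch.tensor([[1., 1.],
                    [1., 0.],
                    [0., 1.]])   # (h*w=3, #aperture=2), made up
MU = torch.tensor([2., -5.])     # abs() enforces nonnegative MUs
print(torch.matmul(seg, torch.abs(MU)))   # tensor([7., 2., 5.])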
Example #24
    def get_dose(self, uid):
        ''' return:pbDose (#slice, H, W) '''
        self._load_randApert()

        # get saved fluence of beam_id and aperture_id
        beam_id, apert_id = uid.split('_')
        beam_id, apert_id = int(beam_id), int(apert_id)
        preset_FM = self.dict_randomApertures[beam_id][apert_id]  # (H, W)
        dict_FMs = OrderedBunch()
        for bid, FM in self.data.dict_rayBoolMat.copy().items():
            if bid == beam_id:
                dict_FMs[bid] = FM * preset_FM.copy()  # FM may not all ones
            else:
                dict_FMs[bid] = FM * 0
        fluence_vector = self.data.get_rays_from_fluences(dict_FMs)
        dose = self.data.deposition.dot(fluence_vector)
        vector_dose = dose[0:self.data.get_pointNum_from_organName(self.roi_skinName)]  # only keep the skin dose
        pbDose = self._parse_dose_torch(torch.tensor(vector_dose, dtype=torch.float32)).cpu().numpy()  # 3D dose 63x256x256
        pbDose = np.squeeze(pbDose).astype(np.float32)
        return pbDose
Example #25
def parse_MonteCarlo_dose(MCDose, data):
    ''' Return: dict_organ_dose {organ_name: dose ndarray (#organ_dose, )} '''
    dict_organ_dose = OrderedBunch()
    for organ_name, msk in data.organ_masks.items():
        dict_organ_dose[organ_name] = MCDose[msk]
    return dict_organ_dose
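
Boolean-mask indexing is all this does: each organ mask pulls its voxels out of the dose volume as a flat vector. A toy example:

import numpy as np

MCDose = np.arange(8, dtype=np.float32).reshape(2, 2, 2)  # (#slice, H, W)
organ_masks = {'PTV': MCDose > 5, 'skin': MCDose <= 1}    # made-up masks
for organ_name, msk in organ_masks.items():
    print(organ_name, MCDose[msk])   # PTV: [6. 7.], skin: [0. 1.]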
Example #26
File: data.py Project: lc82111/pyRad
 def set_CT(self, data):
     self.CT = OrderedBunch({'spacing': np.array(data.Dicom_Reader.dicom_handle.GetSpacing(), dtype=np.float32), # [1.171875, 1.171875, 2.5]mm@512x512x126, 
                             'size':    np.array(data.Dicom_Reader.dicom_handle.GetSize(), dtype=int), # (512,512,126); np.int was removed from NumPy
                             'origin':  np.array(data.Dicom_Reader.dicom_handle.GetOrigin()), # [-300, -300, z]mm
                              })
Example #27
class MonteCarlo():
    def __init__(self, hparam, data):
        self.hparam = hparam
        self.data = data

        self.nb_leafPairs = 51  # 51 leaf pairs
        self.x_spacing = 0.5  # cm
        self.nb_apertures = 1000  # number of random apertures to generate
        self.nb_beams = data.num_beams

        self._get_leafBottomEdgePosition()
        self._get_leafInJawField()  # get y-axis leaf positions from jaw_y1, jaw_y2

    def get_random_apertures(self):
        '''
        return: self.dict_randomApertures {beam_id: ndarray(nb_apertures, H, W)}
        '''
        def get_random_shape(H, W):
            if np.random.randint(0, 2):
                img = random_shapes((H, W),
                                    max_shapes=3,
                                    multichannel=False,  # newer scikit-image uses channel_axis=None instead
                                    min_size=min(H, W) // 3,
                                    allow_overlap=True,
                                    intensity_range=(1, 1))[0]
                img = np.where(img == 255, 0, img)
            else:
                img = np.zeros((H, W), dtype=np.uint8)
                for i in range(len(img)):  # for each row
                    l, r = np.random.randint(0, W + 1, (2, ))
                    if l == r: continue
                    if l > r: l, r = r, l
                    img[i, l:r] = 1
            return img

        save_path = Path(self.hparam.patient_ID).joinpath('dataset/dict_randomApertures.pickle')
        if os.path.isfile(save_path):
            self.dict_randomApertures = unpickle_object(save_path)
            return

        self.dict_randomApertures = OrderedBunch()
        for beam_id in range(1, self.nb_beams + 1):  # for each beam
            H, W = self.data.dict_rayBoolMat[beam_id].shape
            self.dict_randomApertures[beam_id] = np.zeros((self.nb_apertures, H, W), np.uint8)  # apertures default to closed
            for i, apt in enumerate(self.dict_randomApertures[beam_id]):  # for each aperture
                if i == 0:  # make the first aperture of each beam fully open (all leaves open)
                    self.dict_randomApertures[beam_id][i] = np.ones((H, W), np.uint8)
                else:
                    self.dict_randomApertures[beam_id][i] = get_random_shape(H, W)
        pickle_object(save_path, self.dict_randomApertures)

    def _get_leafBottomEdgePosition(self):
        '''
        the leaf coords is:     jaw_y2(+)
                            jaw_x1(-)   jaw_x2(+)
                                jaw_y1(-)
        Return: self.coords, list of 51 leaves' bottom edge positions 
        '''
        ## read FM_info file
        FM_info_template = os.path.join(self.hparam.winServer_MonteCarloDir,
                                        'templates', 'FM_info.txt')
        with open(FM_info_template, 'r') as f:
            lines = f.readlines()

        ## 0. get the thickness of the 51 pair leaves
        is_thick_line = False
        thicks = []
        leaf_num = 0
        for line in lines:
            if 'MLC_LeafThickness' in line:
                is_thick_line = True
                continue
            if leaf_num == self.nb_leafPairs:
                break
            if is_thick_line:
                thicks.append(float(line.replace('\n', '')))
                leaf_num += 1
        #print(thicks)
        #print(sum(thicks))
        #print(f'center leaf thickness: {thicks[25]}')

        ## 1. get edge bottom coord of leaves (51 pairs)
        coords = []  # leaves bottom edges

        # upper half leaves: total 25 edge bottom positions
        coord26thLeafUp = thicks[25] / 2.  # 26-th leaf with its center at y=0
        coords.append(coord26thLeafUp)  # +1 position
        for i in range(24, 0, -1):  # i = 24..1, +24 positions
            coord26thLeafUp += thicks[i]
            coords.append(coord26thLeafUp)
        coords = coords[::-1]

        # lower half leaves: total 26 edge bottom positions
        coord26thLeafbot = -thicks[25] / 2.
        coords.append(coord26thLeafbot)  # +1 position
        for i in range(26, self.nb_leafPairs):  # [26, 50], +25 positions
            coord26thLeafbot -= thicks[i]
            coords.append(coord26thLeafbot)

        # round to 2 decimals
        self.coords = [round(c, 2) for c in coords]

    def _get_leafInJawField(self):
        '''
        get y axis leaf positions by finding the leaves in jaw field 

        Return: self.dict_jawsPos {beam_id: [x1,x2,y1,y2]}, self.dict_inJaw {beam_id: (51,)}
        '''
        self.dict_jawsPos = OrderedBunch()  # jaw positions
        self.dict_inJaw = OrderedBunch()  # bool vector indicating which leaves lie in the jaw field
        ## get jaw positions from seg*.txt file
        seg_files = glob.glob(
            os.path.join(self.hparam.winServer_MonteCarloDir, 'templates',
                         'Seg_beamID*.txt'))
        seg_files.sort()  # sort to be consistent with beam_id

        for beam_id, seg in enumerate(seg_files):
            beam_id += 1
            H, W = self.data.dict_rayBoolMat[beam_id].shape
            #  print(f'beam_ID:{beam_id}; file_name:{seg}')
            with open(seg, 'r') as f:
                lines = f.readlines()

            ## get jaw positions
            is_jaw_line = False
            jaw = OrderedBunch()
            for line in lines:
                if 'MU_CollimatorJawX1' in line:
                    is_jaw_line = True
                    continue
                if is_jaw_line:
                    position = line.split(' ')[1:5]
                    position = [float(p) for p in position]
                    jaw.x1, jaw.x2, jaw.y1, jaw.y2 = position
                    print(f'jaw position: {jaw.x1, jaw.x2, jaw.y1, jaw.y2}')
                    break
            self.dict_jawsPos[beam_id] = jaw

            ## Is a leaf in jaws' open field?
            # for upper half leaves: if (leaf bottom edge > jaw_y1) {this leaf in valid field}
            # for lower half leaves: if (leaf upper  edge < jaw_y2) {this leaf in valid field}
            self.dict_inJaw[beam_id] = np.empty((self.nb_leafPairs, ), dtype=bool)  # np.bool was removed from NumPy
            for i, c in enumerate(self.coords):
                in_field = False
                if (c < jaw.y2 and c > jaw.y1):
                    in_field = True
                if (c < jaw.y2 and self.coords[i - 1] > jaw.y1):  # consider upper edge
                    in_field = True
                self.dict_inJaw[beam_id][i] = in_field
                #  print(f'{in_field}---{i}: {c}')
            #  print(f'{self.dict_inJaw[beam_id].sum()}')
            assert self.dict_inJaw[beam_id].sum() == H, f'H={H}, inJaw={self.dict_inJaw[beam_id].sum()}'

    def _get_x_axis_position(self):
        '''
         get x axis position from self.dict_randomApertures 
         Return: 
            self.dict_lrs {beam_id: strings (#aperture, 51)}, NOTE: 51 leaf pairs in reversed order.
            self.nb_beams
            self.nb_apertures
        '''
        self.dict_lrs = OrderedBunch()  # {beam_id: (#aperture, H)}

        def get_leafPos_for_a_row(row):
            '''
            [0.0] 0 [0.5] 0 [1.0] 1 [1.5] 1 [2.0] 0 [2.5] 0 [3.0]
            '''
            jaw_x1 = self.dict_jawsPos[beam_id].x1
            if (row == 0).all():  # closed row
                lr = default_lr
                first, last = 0, 0
            else:  # opened row
                first, last = np.nonzero(row)[0][[0, -1]]  # indices of the first and last open bixels
                #  last += 1 # block the left bixel of first 1, and right bixel of last 1; TODO +1?
                l = jaw_x1 + first * self.x_spacing  # offset in steps of x_spacing (0.5)
                r = jaw_x1 + last * self.x_spacing
                lr = '{:.2f} {:.2f}\n'.format(l, r)
            #  cprint(f'row:{row_idx}; {first}  {last};  {lr}', 'green')
            return lr

        for beam_id, apts in self.dict_randomApertures.items():  # 0. for each beam
            #  print(f'\n beam_id:{beam_id}')
            H, W = self.data.dict_rayBoolMat[beam_id].shape
            #  print(f'height:{H}; width:{W}')

            pos = self.dict_jawsPos[beam_id].x1 - self.x_spacing  # a leaf parks at jaw_x1 - x_spacing by default
            default_lr = '{:.2f} {:.2f}\n'.format(pos, pos)  # by default, leaves closed
            self.dict_lrs[beam_id] = np.full((self.nb_apertures, self.nb_leafPairs), default_lr, dtype=object)  # (#aperture, 51)
            for a in range(self.nb_apertures):  # 1. for each aperture
                row_idx = 0
                for i in range(self.nb_leafPairs):  # 2. for each row
                    if self.dict_inJaw[beam_id][i]:
                        lr = get_leafPos_for_a_row(apts[a, row_idx])
                        self.dict_lrs[beam_id][a, i] = lr
                        row_idx += 1
                self.dict_lrs[beam_id][a] = self.dict_lrs[beam_id][a, ::-1]  # NOTE: in the TPS, the 51 leaf pairs are in reversed order.

    def write_to_seg_txt(self):
        """
        Write seg*.txt to the shared disk of windowsServer
        Args: 
            self.dict_lrs {beam_id: strings (#aperture, 51)}, NOTE: 51 leaf pairs in reversed order.
            self.nb_apertures
            self.nb_beams
        Outputs:
            seg*.txt 
        """
        ## write Seg_{beam_id}_{aperture_id}.txt
        for beam_id in range(1, self.nb_beams + 1):
            seg_template = os.path.join(self.hparam.winServer_MonteCarloDir,
                                        'templates',
                                        f'Seg_beamID{beam_id}.txt')
            with open(seg_template, 'r') as f:
                lines = f.readlines()
            for aperture_id in range(0, self.nb_apertures):
                ap_lines = lines.copy() + [None] * 51
                ap_lines[-51:] = self.dict_lrs[beam_id][
                    aperture_id]  # 51 leaves positions

                # write Seg*.txt
                save_path = os.path.join(self.hparam.winServer_MonteCarloDir,
                                         'Segs',
                                         f'Seg_{beam_id}_{aperture_id}.txt')
                with open(save_path, "w") as f:
                    f.writelines(ap_lines)
                cprint(f'Writing Seg_{beam_id}_{aperture_id}.txt', 'green')

        cprint(f'Done. {self.nb_beams*self.nb_apertures} Seg*.txt files have been written to {self.hparam.winServer_MonteCarloDir}/Segs.', 'green')

    def get_unit_MCdose(self):
        ''' Return: unitMUDose, ndarray (nb_beams*nb_apertures, #slice, H, W)  '''
        self._get_x_axis_position()  # get x-axis positions from the saved randomly generated apertures

        cprint(
            f'compute unit MU Dose on winServer and save results to {self.hparam.winServer_MonteCarloDir}',
            'green')
        pdb.set_trace()
        if not Path(self.hparam.winServer_MonteCarloDir, 'Segs',
                    'Seg_6_999.txt').is_file():
            self.write_to_seg_txt()
        call_FM_gDPM_on_windowsServer(self.hparam.patient_ID, self.nb_beams,
                                      self.nb_apertures, self.hparam.winServer_nb_threads)
        pdb.set_trace()

    def get_dose(self, uid):
        ''' Return: mcDose (#slice, H, W) '''
        dpm_result_dir = Path(self.hparam.winServer_MonteCarloDir,
                              'gDPM_results', f'dpm_result_{uid}Ave.dat')
        with open(dpm_result_dir, 'rb') as f:
            dose = np.fromfile(f, dtype=np.float32)
            dose = dose.reshape(*self.hparam.MCDose_shape)
        mcDose = np.swapaxes(dose, 2, 1)
        return mcDose
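
get_dose() expects raw float32 samples in the gDPM .dat layout, reshaped to MCDose_shape with axes 1 and 2 swapped. A round-trip with a made-up shape and a temp file shows the parsing:

import numpy as np

MCDose_shape = (2, 3, 4)   # hypothetical (#slice, W, H)
dose = np.arange(np.prod(MCDose_shape), dtype=np.float32)
dose.tofile('/tmp/dpm_result_demo.dat')   # stand-in for dpm_result_*Ave.dat

raw = np.fromfile('/tmp/dpm_result_demo.dat', dtype=np.float32)
mcDose = np.swapaxes(raw.reshape(*MCDose_shape), 2, 1)
print(mcDose.shape)   # (2, 4, 3) -> (#slice, H, W)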
Example #28
File: data.py Project: lc82111/pyRad
 def set_plan(self, data):
     beam_info = data.Dicom_Reader.beam_info # isocenter: [-18.09999, -1.599998, 246.3]mm
     self.plan = OrderedBunch({'isocenter': beam_info[1].IsoCenter, 
                               'beam_numbers': len(beam_info),
                               'beam_info': beam_info,
                              })
Example #29
File: evaluation.py Project: lc82111/pyRad
 def load_JYMonteCarlo_OrganDose(self, name, dosefilepath, scale=1):
     MCdoses = self.mc.get_JY_MCdose(dosefilepath) * scale
     MCdoses = torch.tensor(MCdoses, dtype=torch.float32, device=self.hparam.device)
     dict_organ_doses = parse_MonteCarlo_dose(MCdoses, self.data)  # parse organ_doses to obtain individual organ doses
     return OrderedBunch({'dose':dict_organ_doses, 'name':name})
Example #30
def test_save_result(mp):
    def display():
        doses = cal_dose(mp.optimizer.deposition,
                         fluence)  # cal dose (#voxels, )
        dict_organ_doses = split_doses(
            doses, mp.data.organName_ptsNum
        )  # split organ_doses to obtain individual organ doses
        loss, breaking_points_nums = mp.optimizer.loss.loss_func(
            dict_organ_doses)
        print('breaking points #: ', end='')
        for organ_name, breaking_points_num in breaking_points_nums.items():
            print(f'{organ_name}: {breaking_points_num}   ', end='')
        print(f'loss={to_np(loss)}\n\n')

    # ndarray to tensor
    dict_segments, dict_MUs, dict_partialExp = OrderedBunch(), OrderedBunch(), OrderedBunch()
    for (beam_id, MU), (_, seg), (_, pe) in zip(mp.dict_MUs.items(),
                                                mp.dict_segments.items(),
                                                mp.dict_partialExp.items()):
        dict_segments[beam_id] = torch.tensor(seg,
                                              dtype=torch.float32,
                                              device='cpu')
        dict_MUs[beam_id] = torch.tensor(MU,
                                         dtype=torch.float32,
                                         device='cpu',
                                         requires_grad=True)
        dict_partialExp[beam_id] = torch.tensor(pe,
                                                dtype=torch.float32,
                                                device='cpu',
                                                requires_grad=True)

    fluence = mp.optimizer.computer_fluence(dict_segments, dict_partialExp,
                                            mp.dict_lrs,
                                            dict_MUs)[0]  # (#valid_bixels,)
    display()

    def _test(is_seg_modulate):
        def _modulate_segment_with_partialExposure(seg, lrs, pes):
            '''
            Impose the partialExp effect at the leaf endpoints.
            lrs: (#aperture, H, 2); seg:(HxW, #aperture); pes:(#aperture, H, 2)
            '''
            for i, aperture in enumerate(lrs):  # for each aperture
                for j, lr in enumerate(aperture):  # for each row
                    [l, r] = lr
                    l_pe, r_pe = sigmoid(pes[i, j])
                    # close hopeless bixel?
                    if l_pe < 0.6:
                        #  seg[j*W:j*W+W, i] [l] = 0
                        seg[j * W + l, i] = 0
                    if r_pe < 0.6:
                        #  seg[j*W:j*W+W, i] [r-1] = 0
                        seg[j * W + (r - 1), i] = 0
            return seg

        dict_segments, dict_MUs = OrderedBunch(), OrderedBunch()
        for (beam_id, MU), (_, seg) in zip(mp.dict_MUs.items(),
                                           mp.dict_segments.items()):
            H, W = mp.data.dict_rayBoolMat[beam_id].shape

            validRay = mp.data.dict_rayBoolMat[beam_id].flatten().reshape((-1, 1))  # 1 indicates a valid bixel
            validRay = np.tile(validRay, (1, seg.shape[1]))  # (HxW, #aperture)
            seg = seg * validRay  # partialExp may open bixels in non-valid regions.

            lrs = mp.dict_lrs[beam_id]  # (#aperture, H, 2)
            pes = dict_partialExp[beam_id]  # (#aperture, H, 2)
            if is_seg_modulate:
                seg = _modulate_segment_with_partialExposure(seg, lrs, pes)

            dict_segments[beam_id] = torch.tensor(seg,
                                                  dtype=torch.float32,
                                                  device='cpu')
            dict_MUs[beam_id] = torch.tensor(MU,
                                             dtype=torch.float32,
                                             device='cpu',
                                             requires_grad=True)

    _test(is_seg_modulate=False)
    fluence = computer_fluence(mp.data, dict_segments, dict_MUs)[0]
    display()

    _test(is_seg_modulate=True)
    fluence = computer_fluence(mp.data, dict_segments, dict_MUs)[0]
    display()