def get_outside_index(length, level, offset_cache=None, cuda=False):
    """Build flat (parent, sibling) index tensors for the outside pass at `level`.

    Args:
        length: sequence length used to size the offset table.
        level: chart level whose parent/sibling pairs are enumerated.
        offset_cache: optional mapping level -> flat base offset; computed
            via ``get_offset_cache(length)`` when not supplied.
        cuda: when True, place the tensors on the current CUDA device.

    Returns:
        Tuple ``(par_index, sis_index)`` of int64 tensors, one flat index per
        pair returned by ``OutsideIndex.get_all_pairs``.
    """
    if offset_cache is None:
        offset_cache = get_offset_cache(length)
    pairs = OutsideIndex().get_all_pairs(level, length)
    device = torch.cuda.current_device() if cuda else None

    def _flat_index(components):
        # Each component is (lvl, start, end-ish) — flat index is the level's
        # base offset plus the position within the level (second - first).
        return torch.tensor(
            [offset_cache[c[0]] + (c[1] - c[0]) for c in components],
            dtype=torch.int64,
            device=device,
        )

    # Previously the tensor-building loop was written out twice (once for
    # parents, once for siblings); both now share _flat_index.
    par_index = _flat_index([par for par, _ in pairs])
    sis_index = _flat_index([sis for _, sis in pairs])
    return par_index, sis_index
def get_inside_index(length, level, offset_cache=None, cuda=False):
    """Build flat (left-child, right-child) index tensors for the inside pass.

    Args:
        length: sequence length used to size the offset table.
        level: chart level whose constituent pairs are enumerated.
        offset_cache: optional mapping level -> flat base offset; computed
            via ``get_offset_cache(length)`` when not supplied.
        cuda: when True, place the tensors on the current CUDA device.

    Returns:
        Tuple ``(idx_l, idx_r)`` of flat int64 tensors, interleaved so that
        all constituents for one span position are contiguous.
    """
    if offset_cache is None:
        offset_cache = get_offset_cache(length)
    pairs = InsideIndex().get_all_pairs(level, length)
    width = length - level              # number of spans at this level
    n_constituents = len(pairs) // width
    device = torch.cuda.current_device() if cuda else None

    left_rows, right_rows = [], []
    for k in range(n_constituents):
        lvl_left = k
        lvl_right = level - k - 1
        # NOTE: the right-hand start position is computed from the *raw*
        # (possibly negative) right level, before any wrap-around below.
        right_start = length - width - lvl_right
        # Negative levels wrap around for the offset-table lookup.
        if lvl_left < 0:
            lvl_left += length
        if lvl_right < 0:
            lvl_right += length
        base_left = offset_cache[lvl_left]
        base_right = offset_cache[lvl_right]
        left_rows.append([base_left + p for p in range(width)])
        right_rows.append(
            [base_right + p for p in range(right_start, right_start + width)])

    def _interleave(rows):
        # (n_constituents, width) -> transpose -> flatten, so span position
        # becomes the outer (slow) dimension.
        grid = torch.tensor(rows, dtype=torch.int64, device=device)
        return grid.transpose(0, 1).contiguous().flatten()

    return _interleave(left_rows), _interleave(right_rows)
def get_offset(self, length):
    """Return the offset table for `length`, computing and caching it on first use."""
    try:
        return self.offset_cache[length]
    except KeyError:
        table = get_offset_cache(length)
        self.offset_cache[length] = table
        return table