def ghost_to_view(self, idxs):
    """ Maps an index from the current ghost shape of the view to the view shape.

    :param list idxs: list of indices to be transformed

    .. note:: no slicing is admitted here. Preprocess ``idxs`` with
       :py:meth:`expand_idxs` if slicing is required.
    """
    if self.maps[self.view]['Q'] is not None:
        # Refold into the extended (padded) shape, then clamp each index
        # onto the valid range of the view shape.
        return tuple([(i if i < N else N - 1)
                      for i, N in zip(self.ghost_to_extended(idxs),
                                      self.get_view_shape())])
    else:
        idxs = idxfold(self.get_view_shape(),
                       idxunfold(self.get_ghost_shape(), idxs))
        return tuple(idxs)
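# Illustrative sketch (not part of the class): the non-quantics branch of
# ghost_to_view re-expresses a multi-index by flattening it against one shape
# and re-folding it against another. Assuming idxfold/idxunfold use C
# (row-major) ordering, NumPy's helpers can stand in for them; the shapes
# below are hypothetical.
def _demo_ghost_to_view():
    import numpy as np
    ghost_shape, view_shape = (4, 6), (3, 8)  # both address 24 entries
    flat = np.ravel_multi_index((2, 5), ghost_shape)  # unfold -> 17
    return np.unravel_index(flat, view_shape)         # fold   -> (2, 1)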
def __getitem__(self, idxs):
    """ Get item function.

    :param tuple,int idxs: ((i_1,..,i_d),(j_1,..,j_d)) with respect to the
       unfolded mode sizes
    :returns: item at that position
    """
    if not self.init:
        raise NameError(
            "TensorToolbox.QTTmat.__getitem__: QTT not initialized correctly")

    # Check for out-of-bounds indices
    if any(map(operator.ge, idxs[0], self.get_full_nrows())) or \
       any(map(operator.ge, idxs[1], self.get_full_ncols())):
        raise NameError(
            "TensorToolbox.QTTmat.__getitem__: Index out of bounds")

    # Compute the index of the folded representation from the unfolded index
    return TTmat.__getitem__(
        self,
        (idxfold(self.get_nrows(), idxunfold(self.get_full_nrows(), idxs[0])),
         idxfold(self.get_ncols(), idxunfold(self.get_full_ncols(), idxs[1]))))
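# Illustrative sketch (hypothetical shapes): a QTT matrix with a single
# unfolded mode of 16 rows stored as four quantics modes of size 2. The
# unfolded row index is flattened and re-folded into the quantics multi-index
# before TTmat.__getitem__ is called, mirroring the method above; NumPy's
# C-ordered helpers stand in for idxfold/idxunfold.
def _demo_qttmat_index_map():
    import numpy as np
    full_nrows, nrows = (16,), (2, 2, 2, 2)
    flat = np.ravel_multi_index((13,), full_nrows)  # unfold -> 13
    return np.unravel_index(flat, nrows)            # fold   -> (1, 1, 0, 1)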
def view_to_ghost(self, idxs):
    """ Maps an index from the view to the ghost shape.

    :param list idxs: list of indices to be transformed

    .. note:: no slicing is admitted here. Preprocess ``idxs`` with
       :py:meth:`expand_idxs` if slicing is required.

    .. note:: this raises an error if the ghost shape is obtained by quantics
       folding, because one view index can point to many indices in the folding.
    """
    if self.maps[self.view]['Q'] is not None:
        raise NotImplementedError(
            "This operation is undefined because one view idx can point "
            "to many q indices")
    else:
        return idxfold(self.get_ghost_shape(),
                       idxunfold(self.get_view_shape(), idxs))
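# Illustrative sketch of why the quantics case is rejected above: with a view
# of 10 points extended to 16 = 2**4 for quantics folding, ghost_to_view
# clamps extended indices 10..15 onto view index 9, so a single view index
# corresponds to several folded indices and no unique inverse exists.
def _demo_view_to_ghost_ambiguity():
    view_n, extended_n = 10, 16
    clamped = [(i if i < view_n else view_n - 1) for i in range(extended_n)]
    return clamped.count(9)  # -> 7 extended indices collapse onto index 9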
def __getitem__(self, idxs):
    """ Get item function: indices are entered with respect to the unfolded
    mode sizes.
    """
    if not self.init:
        raise NameError(
            "TensorToolbox.QTTvec.__getitem__: QTT not initialized correctly")

    # Check whether the index is out of bounds
    if any(map(operator.ge, idxs, self.get_global_shape())):
        raise NameError(
            "TensorToolbox.QTTvec.__getitem__: Index out of bounds")

    # Compute the index of the folded representation from the unfolded one
    return TTvec.__getitem__(
        self,
        idxfold(self.shape(), idxunfold(self.get_global_shape(), idxs)))
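# Illustrative sketch (hypothetical shapes): a QTT vector whose single global
# mode of size 8 is stored as three quantics modes of size 2. __getitem__
# above performs exactly this unfold/fold step before delegating to
# TTvec.__getitem__; NumPy's C-ordered helpers stand in for idxfold/idxunfold.
def _demo_qttvec_index_map():
    import numpy as np
    global_shape, folded_shape = (8,), (2, 2, 2)
    flat = np.ravel_multi_index((5,), global_shape)  # unfold -> 5
    return np.unravel_index(flat, folded_shape)      # fold   -> (1, 0, 1)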
def full_to_q(self, idxs):
    """ Maps an index from the full shape to the quantics folded shape. """
    return idxfold(self.get_q_shape(), idxunfold(self.shape(), idxs))
def q_to_full(self, idxs):
    """ Maps an index from the quantics folded shape to the full shape. """
    return idxfold(self.shape(), idxunfold(self.get_q_shape(), idxs))
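# Illustrative sketch (hypothetical shapes): full_to_q and q_to_full are
# inverse fold/unfold maps between the full mode sizes and the quantics mode
# sizes. Assuming C-ordered folding, a full index survives the round trip
# unchanged; NumPy's helpers stand in for idxfold/idxunfold.
def _demo_full_q_round_trip():
    import numpy as np
    full_shape, q_shape = (4, 4), (2, 2, 2, 2)
    q_idx = np.unravel_index(
        np.ravel_multi_index((3, 2), full_shape), q_shape)   # -> (1, 1, 1, 0)
    full_idx = np.unravel_index(
        np.ravel_multi_index(q_idx, q_shape), full_shape)    # -> (3, 2)
    return q_idx, full_idx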
def __getitem__(self, idxs_in):
    """ Get item function. Expands ``idxs_in`` (which may contain slices)
    through :py:meth:`expand_idxs`, returns cached values where available and
    evaluates the wrapped function only for the missing entries.
    """
    (lidxs, out_shape, transpose_list_shape) = expand_idxs(
        idxs_in, self.shape, self.get_ghost_shape(),
        self.maps[self.view]['fix_dims'], self.maps[self.view]['fix_idxs'])

    if len(out_shape) > 0:
        # Allocate output array
        out = np.empty(out_shape, dtype=self.dtype)
        if self.active_weights:
            out_weights = np.empty(out_shape, dtype=self.dtype)

        # MPI code
        eval_is = []
        eval_idxs = []
        eval_xx = []
        # End MPI code

        for i, idxs in enumerate(lidxs):
            # Map ghost indices to global indices
            idxs = self.ghost_to_global(idxs)

            # Compute the weight corresponding to idxs
            if self.active_weights:
                out_weights[idxfold(out_shape, i)] = np.prod(
                    [self.W[j][jj] for j, jj in enumerate(idxs)])

            # Separate field idxs from parameter idxs
            if self.twtype == 'array':
                # Check whether the value has already been computed
                try:
                    out[idxfold(out_shape, i)] = self.data[idxs]
                except KeyError:
                    if idxs not in eval_idxs:
                        # Evaluate function
                        xx = np.array([self.X[ii][idx]
                                       for ii, idx in enumerate(idxs)])
                        # MPI code
                        eval_is.append([i])
                        eval_idxs.append(idxs)
                        eval_xx.append(xx)
                        # End MPI code
                    else:
                        pos = eval_idxs.index(idxs)
                        eval_is[pos].append(i)
            else:
                # Evaluate function
                xx = np.array([self.X[ii][idx]
                               for ii, idx in enumerate(idxs)])
                out[idxfold(out_shape, i)] = self.f(xx, self.params)

        # Evaluate missing values
        if len(eval_xx) > 0:
            self.logger.debug(" [START] Num. of func. eval.: %d " %
                              len(eval_xx))
            start_eval = time.time()
            if self.__maxprocs is None or not MPI_SUPPORT:
                if self.ftype == 'serial':
                    # Serial evaluation
                    for (ii, idxs, xx) in zip(eval_is, eval_idxs, eval_xx):
                        self.data[idxs] = self.f(xx, self.params)
                        self.store()
                        for i in ii:
                            out[idxfold(out_shape, i)] = self.data[idxs]
                elif self.ftype == 'vector':
                    # Vectorized evaluation
                    eval_xx_mat = np.vstack(eval_xx)
                    data_mat = self.f(eval_xx_mat, self.params)
                    for j, (ii, idxs) in enumerate(zip(eval_is, eval_idxs)):
                        self.data[idxs] = data_mat[j]
                        for i in ii:
                            out[idxfold(out_shape, i)] = self.data[idxs]
                    self.store()
            else:
                # MPI code
                eval_res = mpi_map.mpi_map_code(self.f_code, eval_xx,
                                                self.params, self.__maxprocs)
                for (ii, idxs, res) in zip(eval_is, eval_idxs, eval_res):
                    self.data[idxs] = res
                    for i in ii:
                        out[idxfold(out_shape, i)] = self.data[idxs]
                self.store()
                # End MPI code
            stop_eval = time.time()
            avg_time = ((stop_eval - start_eval) / len(eval_xx) *
                        (min(self.__maxprocs, len(eval_xx))
                         if self.__maxprocs is not None else 1))
            self.logger.debug(
                " [DONE] Num. of func. eval.: %d - "
                "Avg. time of func. eval.: %fs - Tot. time: %s" %
                (len(eval_xx), avg_time,
                 str(datetime.timedelta(seconds=(stop_eval - start_eval)))))

        # Apply weights if needed
        if self.active_weights:
            out *= out_weights

        if transpose_list_shape:
            out = np.transpose(out,
                               tuple(list(range(1, len(out_shape))) + [0]))

    else:
        idxs = tuple(itertools.chain(*lidxs))
        # Map ghost indices to global indices
        idxs = self.ghost_to_global(idxs)

        # Compute weight if necessary
        if self.active_weights:
            w = np.prod([self.W[j][jj] for j, jj in enumerate(idxs)])

        if self.twtype == 'array':
            # Check whether the value has already been computed
            try:
                out = self.data[idxs]
            except KeyError:
                # Evaluate function
                xx = np.array([self.X[ii][idx]
                               for ii, idx in enumerate(idxs)])
                self.data[idxs] = self.f(xx, self.params)
                self.store()
                out = self.data[idxs]
        else:
            out = self.f(
                np.array([self.X[ii][idx] for ii, idx in enumerate(idxs)]),
                self.params)

        # Apply the weight if necessary
        if self.active_weights:
            out *= w

    return out
def ghost_to_extended(self, idxs):
    """ Maps an index from the ghost shape to the extended shape. """
    return idxfold(self.get_extended_shape(),
                   idxunfold(self.get_ghost_shape(), idxs))
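# Illustrative sketch (hypothetical shapes): ghost_to_extended refolds a ghost
# multi-index into the extended (power-of-two padded) shape; the quantics
# branch of ghost_to_view then clamps the result onto the view shape. NumPy's
# C-ordered helpers stand in for idxfold/idxunfold.
def _demo_ghost_to_extended():
    import numpy as np
    ghost_shape, extended_shape = (2, 2, 2, 2), (16,)
    ext = np.unravel_index(
        np.ravel_multi_index((1, 1, 0, 1), ghost_shape),
        extended_shape)     # -> (13,)
    view_shape = (10,)      # view of 10 points padded to 16
    return tuple(i if i < n else n - 1
                 for i, n in zip(ext, view_shape))  # -> (9,)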