def _compute_columns(self):
    """
    Materialize the result columns for this node's elements.

    Two evaluation paths:
      * If the engine is an ``LlvmEvaluationEngine``, a compiled function
        pointer is obtained from ``self._mapnode`` and all elements are
        evaluated in one call to ``core.columns_from_mixed``.
      * Otherwise each element is evaluated eagerly.  Elements that are
        plain ``int``s are treated as column indices into the engine's
        frame; anything else is assumed to be an expression object with
        ``evaluate_eager`` / ``is_reduce_expr`` methods.

    Side effect: in the groupby branch this method prepends the groupby
    columns' names to ``self._names``.
    """
    if isinstance(self._engine, LlvmEvaluationEngine):
        fnptr = self._mapnode.get_result()
        rowindex = self._engine.rowindex
        # A rowindex restricts the view; without one, use the full frame.
        if rowindex:
            nrows = rowindex.nrows
        else:
            nrows = self.dt.nrows
        return core.columns_from_mixed(self._elems, self.dt.internal, nrows, fnptr)
    else:
        ee = self._engine
        _dt = ee.dt.internal
        _ri = ee.rowindex
        ncols = len(self._elems)
        if ee.groupby:
            opfirst = reduce_opcodes["first"]
            # Count elements that reduce to one value per group: reduce
            # expressions, plus int indices that are themselves groupby
            # columns (bool counts as 0/1 in the += below).
            n_reduce_cols = 0
            for elem in self._elems:
                if isinstance(elem, int):
                    is_groupby_col = elem in ee.groupby_cols
                    n_reduce_cols += is_groupby_col
                else:
                    n_reduce_cols += elem.is_reduce_expr(ee)
            # If any element is NOT group-reducible, the result keeps one
            # row per source row and reduced values must be expanded
            # ("ungrouped") back to full length.  Otherwise the result has
            # one row per group and non-reduced columns take "first".
            expand_dataset = (n_reduce_cols < ncols)
            # Prepend the groupby key columns to the output.
            # NOTE(review): assumes ee.groupby_cols and self._elems are
            # lists (list concatenation) — confirm with the caller.
            columns = ee.groupby_cols + self._elems
            self._names = ([ee.dt.names[i] for i in ee.groupby_cols] + self._names)
            for i, elem in enumerate(columns):
                if isinstance(elem, int):
                    col = core.expr_column(_dt, elem, _ri)
                    if not expand_dataset:
                        # One row per group: collapse plain columns to the
                        # first value within each group.
                        col = core.expr_reduceop(opfirst, col, ee.groupby)
                else:
                    col = elem.evaluate_eager(ee)
                    if expand_dataset and elem.is_reduce_expr(ee):
                        # Full-length result: repeat each group's reduced
                        # value across the group's rows.
                        col = col.ungroup(ee.groupby)
                columns[i] = col
        else:
            # No groupby: evaluate each element directly.
            columns = [
                core.expr_column(_dt, e, _ri) if isinstance(e, int)
                else e.evaluate_eager(ee)
                for e in self._elems
            ]
        return core.columns_from_columns(columns)
def _compute_columns(self):
    """
    Build the result columns for this node's elements.

    When the engine is an ``LlvmEvaluationEngine``, the elements are
    evaluated in a single call through a compiled function pointer
    obtained from ``self._mapnode``.  Otherwise each element is evaluated
    eagerly: an ``int`` element is interpreted as a column index into the
    engine's frame, and any other element is assumed to expose an
    ``evaluate_eager(engine)`` method.
    """
    engine = self._engine
    if isinstance(engine, LlvmEvaluationEngine):
        fnptr = self._mapnode.get_result()
        ri = engine.rowindex
        # A rowindex restricts the view; without one use the full frame.
        nrows = ri.nrows if ri else self.dt.nrows
        return core.columns_from_mixed(self._elems, self.dt.internal,
                                       nrows, fnptr)
    frame = engine.dt.internal
    rowindex = engine.rowindex
    columns = []
    for elem in self._elems:
        if isinstance(elem, int):
            columns.append(core.expr_column(frame, elem, rowindex))
        else:
            columns.append(elem.evaluate_eager(engine))
    return core.columns_from_columns(columns)
def evaluate_eager(self, ee):
    """
    Eagerly evaluate this column expression.

    Resolves the expression first, then constructs the column from the
    underlying datatable and rowindex of ``self._dtexpr``.  The ``ee``
    engine argument is accepted for interface compatibility but not used
    here.
    """
    self.resolve()
    source = self._dtexpr
    frame = source.get_datatable()
    rowindex = source.get_rowindex()
    return core.expr_column(frame.internal, self._colid, rowindex)