Example No. 1
def test_rowcount():
    # The method form expr.rowcount() must build the same expression
    # (identical repr) as the functional form dt.rowcount(expr).
    for expr in (f.A, f[:]):
        assert str(dt.rowcount(expr)) == str(expr.rowcount())
    frame = dt.Frame({"C": [2, 5, 30, 20, 10],
                      "D": [10, 8, 20, 20, 1]})

    # Both spellings must also evaluate to the same result frame.
    assert_equals(frame[:, f[:].rowcount()], frame[:, dt.rowcount(f[:])])
Example No. 2
def test_rowcount_different_types():
    # rowcount over a mix of int, bool, float and string columns; the
    # expected per-row counts are pinned in the int32 frame below.
    source = [[1, 4, None, 7, 0, None],
              [True, None, None, False, False, False],
              [7.4, math.nan, None, math.inf, -math.inf, 1.6e300],
              ["A", "", None, None, "NaN", "None"]]
    frame = dt.Frame(source)
    result = frame[:, rowcount(f[:])]
    expected = dt.Frame([4, 2, 0, 3, 4, 3], stype=dt.int32)
    assert_equals(result, expected)
Example No. 3
def test_reprs():
    # Check that row-expressions can be repr'd without errors
    # (and that each repr is non-empty / truthy).
    row_functions = (rowall, rowany, rowsum, rowcount, rowmin,
                     rowmax, rowfirst, rowlast, rowmean, rowsd)
    for make_expr in row_functions:
        assert repr(make_expr())
Example No. 4
    # NOTE(review): this fragment appears to be an excerpt of the body of
    # create_data() defined further down in this file; its indentation is
    # inconsistent (it dedents to module level mid-way and ends with a bare
    # `return`), so it is not valid Python on its own. Kept verbatim.
    # remove black listed columns or column groups that smaller than minimal size
    col_groups = {
        key: val
        for key, val in all_col_groups.items()
        # NOTE(review): `not key in b or len(val) >= m` keeps every
        # non-blacklisted group regardless of size; the comment above
        # suggests `and` was intended — confirm against the full version.
        if not key in black_listed_columns or len(val) >= min_col_group_size
    }

    # list of column prefixes
    columns = list(col_groups.keys())
    # list of column ranges
    ranges = [(min(idx), max(idx)) for idx in col_groups.values()]

# produce tuple for column slices
# ("desde"/"hasta" are Spanish for "from"/"to"): builds the first and last
# column name of each group, e.g. ("PAY_2", "PAY_6").
col_slices = [((col + "%d") % (desde), (col + "%d") % (hasta))
              for (col, (desde, hasta)) in zip(columns, ranges)]

# For each group, append row-wise aggregate columns computed over the
# name-slice f[first:last] of its member columns.
for c, r, s in zip(columns, ranges, col_slices):
    update_map = {
        c + "_sum": rowsum(f[s[0]:s[1]]),
        c + "_mean": rowmean(f[s[0]:s[1]]),
        c + "_sd": rowsd(f[s[0]:s[1]]),
        c + "_max": rowmax(f[s[0]:s[1]]),
        c + "_min": rowmin(f[s[0]:s[1]]),
        c + "_range": rowmax(f[s[0]:s[1]]) - rowmin(f[s[0]:s[1]]),
        c + "_first": rowfirst(f[s[0]:s[1]]),
        c + "_last": rowlast(f[s[0]:s[1]]),
        # group size (inclusive index range) minus the per-row count —
        # presumably the number of missing cells in the group; verify.
        c + "_missing": (r[1] - r[0] + 1) - rowcount(f[s[0]:s[1]])
    }
    # update(...) adds the new columns to X in place.
    X[:, update(**update_map)]

return {"CreditCard-train-aug.csv": X}
    def create_data(
        X: dt.Frame = None
    ) -> Union[str, List[str], dt.Frame, List[dt.Frame], np.ndarray,
               List[np.ndarray], pd.DataFrame, List[pd.DataFrame], Dict[
                   str, str],  # {data set names : paths}
               Dict[str, dt.Frame],  # {data set names : dt frames}
               Dict[str, np.ndarray],  # {data set names : np arrays}
               Dict[str, pd.DataFrame],  # {data set names : pd frames}
               ]:
        """Augment ``X`` with row-wise aggregates over groups of numbered columns.

        Column groups are either configured explicitly via ``columns``/``ranges``
        or auto-detected by matching names of the form ``<alpha_prefix><int_index>``
        (e.g. ``PAY_AMT1`` .. ``PAY_AMT6``). For every group, sum / mean / sd /
        max / min / range / first / last / missing-count columns are added to
        ``X`` in place, and the augmented frame is returned.

        Returns an empty list when no input frame is supplied.
        """
        if X is None:
            return []

        # Explicit configuration; leave as None to auto-detect groups below.
        columns = None  # e.g. ["PAY_AMT", "BILL_AMT", "PAY_"]
        ranges = None  # e.g. [(1, 6), (1, 6), (2, 6)]
        black_listed_columns = []  # column-name prefixes to exclude
        min_col_group_size = 2  # ignore groups with fewer member columns

        # parse column names for time series column groups
        if not columns or not ranges:
            # match any column name consisting of an alpha prefix followed by
            # an integer index suffix, e.g. "BILL_AMT3" -> ("BILL_AMT", 3)
            p = re.compile(r"^([a-zA-Z_]+)(\d+)$")
            all_col_groups = defaultdict(list)
            for m in (p.match(c) for c in X.names):
                if m is not None:
                    all_col_groups[m.group(1)].append(int(m.group(2)))

            # Keep a group only when it is NOT black-listed AND large enough.
            # BUGFIX: was `not key in black_listed_columns or len(val) >= ...`,
            # which kept every non-blacklisted group regardless of size and
            # made min_col_group_size a no-op (contradicting the stated intent
            # of removing groups smaller than the minimal size).
            col_groups = {
                key: val
                for key, val in all_col_groups.items()
                if key not in black_listed_columns
                and len(val) >= min_col_group_size
            }

            # list of column prefixes
            columns = list(col_groups.keys())
            # inclusive (min index, max index) per group
            ranges = [(min(idx), max(idx)) for idx in col_groups.values()]

        # (first, last) column-name pair delimiting each group's slice
        col_slices = [(f"{col}{lo}", f"{col}{hi}")
                      for col, (lo, hi) in zip(columns, ranges)]

        for prefix, (lo, hi), (first, last) in zip(columns, ranges, col_slices):
            group = f[first:last]  # f-expression over the group's columns
            update_map = {
                prefix + "_sum": rowsum(group),
                prefix + "_mean": rowmean(group),
                prefix + "_sd": rowsd(group),
                prefix + "_max": rowmax(group),
                prefix + "_min": rowmin(group),
                prefix + "_range": rowmax(group) - rowmin(group),
                prefix + "_first": rowfirst(group),
                prefix + "_last": rowlast(group),
                # group size minus per-row non-missing count
                prefix + "_missing": (hi - lo + 1) - rowcount(group)
            }
            # update(...) adds the new columns to X in place
            X[:, update(**update_map)]

        return X