Example #1
def test__NumFactorEvaluator():
    from nose.tools import assert_raises
    naa = NAAction()
    f = _MockFactor()
    nf1 = _NumFactorEvaluator(f, {}, 1)
    assert nf1.factor is f
    eval123, is_NA = nf1.eval({"mock": [1, 2, 3]}, naa)
    assert eval123.shape == (3, 1)
    assert np.all(eval123 == [[1], [2], [3]])
    assert is_NA.shape == (3, )
    assert np.all(~is_NA)
    assert_raises(PatsyError, nf1.eval, {"mock": [[[1]]]}, naa)
    assert_raises(PatsyError, nf1.eval, {"mock": [[1, 2]]}, naa)
    assert_raises(PatsyError, nf1.eval, {"mock": ["a", "b"]}, naa)
    assert_raises(PatsyError, nf1.eval, {"mock": [True, False]}, naa)
    nf2 = _NumFactorEvaluator(_MockFactor(), {}, 2)
    eval123321, is_NA = nf2.eval({"mock": [[1, 3], [2, 2], [3, 1]]}, naa)
    assert eval123321.shape == (3, 2)
    assert np.all(eval123321 == [[1, 3], [2, 2], [3, 1]])
    assert is_NA.shape == (3, )
    assert np.all(~is_NA)
    assert_raises(PatsyError, nf2.eval, {"mock": [1, 2, 3]}, naa)
    assert_raises(PatsyError, nf2.eval, {"mock": [[1, 2, 3]]}, naa)

    ev_nan, is_NA = nf1.eval({"mock": [1, 2, np.nan]},
                             NAAction(NA_types=["NaN"]))
    assert np.array_equal(is_NA, [False, False, True])
    ev_nan, is_NA = nf1.eval({"mock": [1, 2, np.nan]}, NAAction(NA_types=[]))
    assert np.array_equal(is_NA, [False, False, False])

    if have_pandas:
        eval_ser, _ = nf1.eval(
            {"mock": pandas.Series([1, 2, 3], index=[10, 20, 30])}, naa)
        assert isinstance(eval_ser, pandas.DataFrame)
        assert np.array_equal(eval_ser, [[1], [2], [3]])
        assert np.array_equal(eval_ser.index, [10, 20, 30])
        eval_df1, _ = nf1.eval(
            {"mock": pandas.DataFrame([[2], [1], [3]], index=[20, 10, 30])},
            naa)
        assert isinstance(eval_df1, pandas.DataFrame)
        assert np.array_equal(eval_df1, [[2], [1], [3]])
        assert np.array_equal(eval_df1.index, [20, 10, 30])
        eval_df2, _ = nf2.eval(
            {
                "mock":
                pandas.DataFrame([[2, 3], [1, 4], [3, -1]], index=[20, 30, 10])
            }, naa)
        assert isinstance(eval_df2, pandas.DataFrame)
        assert np.array_equal(eval_df2, [[2, 3], [1, 4], [3, -1]])
        assert np.array_equal(eval_df2.index, [20, 30, 10])

        assert_raises(PatsyError, nf2.eval,
                      {"mock": pandas.Series([1, 2, 3], index=[10, 20, 30])},
                      naa)
        assert_raises(
            PatsyError, nf1.eval, {
                "mock":
                pandas.DataFrame([[2, 3], [1, 4], [3, -1]], index=[20, 30, 10])
            }, naa)
Example #2
def test_CategoricalSniffer():
    from patsy.missing import NAAction

    def t(NA_types, datas, exp_finish_fast, exp_levels, exp_contrast=None):
        sniffer = CategoricalSniffer(NAAction(NA_types=NA_types))
        for data in datas:
            done = sniffer.sniff(data)
            if done:
                assert exp_finish_fast
                break
            else:
                assert not exp_finish_fast
        assert sniffer.levels_contrast() == (exp_levels, exp_contrast)

    if have_pandas_categorical:
        t([], [pandas.Categorical.from_array([1, 2, None])], True, (1, 2))
        # check order preservation
        t([], [pandas.Categorical([1, 0], ["a", "b"])], True, ("a", "b"))
        t([], [pandas.Categorical([1, 0], ["b", "a"])], True, ("b", "a"))
        # check that if someone sticks a .contrast field onto a Categorical
        # object, we pick it up:
        c = pandas.Categorical.from_array(["a", "b"])
        c.contrast = "CONTRAST"
        t([], [c], True, ("a", "b"), "CONTRAST")

    t([], [C([1, 2]), C([3, 2])], False, (1, 2, 3))
    # check order preservation
    t([], [C([1, 2], levels=[1, 2, 3]), C([4, 2])], True, (1, 2, 3))
    t([], [C([1, 2], levels=[3, 2, 1]), C([4, 2])], True, (3, 2, 1))

    # do some actual sniffing with NAs in
    t(["None", "NaN"], [C([1, np.nan]), C([10, None])], False, (1, 10))
    # But 'None' can be a type if we don't make it represent NA:
    sniffer = CategoricalSniffer(NAAction(NA_types=["NaN"]))
    sniffer.sniff(C([1, np.nan, None]))
    # The level order here is different on py2 and py3 :-( Because there's no
    # consistent way to sort mixed-type values on both py2 and py3. Honestly
    # people probably shouldn't use this, but I don't know how to give a
    # sensible error.
    levels, _ = sniffer.levels_contrast()
    assert set(levels) == set([None, 1])

    # bool special case
    t(["None", "NaN"], [C([True, np.nan, None])], True, (False, True))
    t([], [C([10, 20]), C([False]), C([30, 40])], False,
      (False, True, 10, 20, 30, 40))

    # check tuples too
    t(["None", "NaN"], [C([("b", 2), None, ("a", 1), np.nan, ("c", None)])],
      False, (("a", 1), ("b", 2), ("c", None)))

    # contrasts
    t([], [C([10, 20], contrast="FOO")], False, (10, 20), "FOO")

    # unhashable level error:
    from nose.tools import assert_raises
    sniffer = CategoricalSniffer(NAAction())
    assert_raises(PatsyError, sniffer.sniff, [{}])
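A minimal usage sketch of the sniffer the test above drives, assuming the same CategoricalSniffer and NAAction API:

from patsy.categorical import C, CategoricalSniffer
from patsy.missing import NAAction

sniffer = CategoricalSniffer(NAAction(NA_types=["None", "NaN"]))
done = sniffer.sniff(C(["a", "b", None]))  # False: without explicit levels it must see all the data
levels, contrast = sniffer.levels_contrast()
print(levels, contrast)  # ('a', 'b') None -- None is excluded because it counts as NA here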
Example #3
def test__eval_factor_categorical():
    from pytest import raises
    from patsy.categorical import C
    naa = NAAction()
    f = _MockFactor()
    fi1 = FactorInfo(f,
                     "categorical", {},
                     num_columns=None,
                     categories=("a", "b"))
    assert fi1.factor is f
    cat1, _ = _eval_factor(fi1, {"mock": ["b", "a", "b"]}, naa)
    assert cat1.shape == (3, )
    assert np.all(cat1 == [1, 0, 1])
    raises(PatsyError, _eval_factor, fi1, {"mock": ["c"]}, naa)
    raises(PatsyError, _eval_factor, fi1, {"mock": C(["a", "c"])}, naa)
    raises(PatsyError, _eval_factor, fi1,
           {"mock": C(["a", "b"], levels=["b", "a"])}, naa)
    raises(PatsyError, _eval_factor, fi1, {"mock": [1, 0, 1]}, naa)
    bad_cat = np.asarray(["b", "a", "a", "b"])
    bad_cat.resize((2, 2))
    raises(PatsyError, _eval_factor, fi1, {"mock": bad_cat}, naa)

    cat1_NA, is_NA = _eval_factor(fi1, {"mock": ["a", None, "b"]},
                                  NAAction(NA_types=["None"]))
    assert np.array_equal(is_NA, [False, True, False])
    assert np.array_equal(cat1_NA, [0, -1, 1])
    raises(PatsyError, _eval_factor, fi1, {"mock": ["a", None, "b"]},
           NAAction(NA_types=[]))

    fi2 = FactorInfo(_MockFactor(),
                     "categorical", {},
                     num_columns=None,
                     categories=[False, True])
    cat2, _ = _eval_factor(fi2, {"mock": [True, False, False, True]}, naa)
    assert cat2.shape == (4, )
    assert np.all(cat2 == [1, 0, 0, 1])

    if have_pandas:
        s = pandas.Series(["b", "a"], index=[10, 20])
        cat_s, _ = _eval_factor(fi1, {"mock": s}, naa)
        assert isinstance(cat_s, pandas.Series)
        assert np.array_equal(cat_s, [1, 0])
        assert np.array_equal(cat_s.index, [10, 20])
        sbool = pandas.Series([True, False], index=[11, 21])
        cat_sbool, _ = _eval_factor(fi2, {"mock": sbool}, naa)
        assert isinstance(cat_sbool, pandas.Series)
        assert np.array_equal(cat_sbool, [1, 0])
        assert np.array_equal(cat_sbool.index, [11, 21])
Example #4
 def __init__(self, formula: str, data: DataFrame, eval_env: int = 2):
     self._formula = formula
     self._data = data
     self._na_action = NAAction(on_NA="raise", NA_types=[])
     self._eval_env = eval_env
     self._components: Dict[str, str] = {}
     self._parse()
Example #5
 def __init__(self, formula, data, eval_env=2):
     self._formula = formula
     self._data = data
     self._na_action = NAAction(on_NA='raise', NA_types=[])
     self._eval_env = eval_env
     self._components = {}
     self._parse()
Example #6
    def _prepare_data_from_formula(
            formula: str, data: DataFrame,
            portfolios: DataFrame) -> Tuple[DataFrame, DataFrame, str]:
        na_action = NAAction(on_NA="raise", NA_types=[])
        orig_formula = formula
        if portfolios is not None:
            factors = dmatrix(formula + " + 0",
                              data,
                              return_type="dataframe",
                              NA_action=na_action)
        else:
            formula_components = formula.split("~")
            portfolios = dmatrix(
                formula_components[0].strip() + " + 0",
                data,
                return_type="dataframe",
                NA_action=na_action,
            )
            factors = dmatrix(
                formula_components[1].strip() + " + 0",
                data,
                return_type="dataframe",
                NA_action=na_action,
            )

        return factors, portfolios, orig_formula
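The same pattern sketched at the patsy level (the column data below is illustrative): "+ 0" suppresses the intercept, and NAAction(on_NA="raise", NA_types=[]) means patsy neither drops nor flags NaN rows, leaving missing-data handling to the caller:

import pandas as pd
from patsy import NAAction, dmatrix

data = pd.DataFrame({"MktRF": [0.1, -0.2, 0.3], "SMB": [0.0, 0.1, -0.1]})
factors = dmatrix("MktRF + SMB + 0", data,
                  return_type="dataframe",
                  NA_action=NAAction(on_NA="raise", NA_types=[]))
print(factors.columns.tolist())  # ['MktRF', 'SMB'] -- no Intercept column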
Example #7
    def from_formula(cls, formula, data, *, portfolios=None):
        """
        Parameters
        ----------
        formula : str
            Patsy formula modified for the syntax described in the notes
        data : DataFrame
            DataFrame containing the variables used in the formula
        portfolios : array-like, optional
            Portfolios to be used in the model

        Returns
        -------
        model : TradedFactorModel
            Model instance

        Notes
        -----
        The formula can be used in one of two ways.  The first specifies only the
        factors and uses the data provided in ``portfolios`` as the test portfolios.
        The second specifies the test portfolios, using ``+`` to separate the test
        portfolios and ``~`` to separate the test portfolios from the factors.

        Examples
        --------
        >>> from linearmodels.datasets import french
        >>> from linearmodels.asset_pricing import TradedFactorModel
        >>> data = french.load()
        >>> formula = 'S1M1 + S1M5 + S3M3 + S5M1 + S5M5 ~ MktRF + SMB + HML'
        >>> mod = TradedFactorModel.from_formula(formula, data)

        Using only factors

        >>> portfolios = data[['S1M1', 'S1M5', 'S3M1', 'S3M5', 'S5M1', 'S5M5']]
        >>> formula = 'MktRF + SMB + HML'
        >>> mod = TradedFactorModel.from_formula(formula, data, portfolios=portfolios)
        """
        na_action = NAAction(on_NA='raise', NA_types=[])
        orig_formula = formula
        if portfolios is not None:
            factors = dmatrix(formula + ' + 0',
                              data,
                              return_type='dataframe',
                              NA_action=na_action)
        else:
            formula = formula.split('~')
            portfolios = dmatrix(formula[0].strip() + ' + 0',
                                 data,
                                 return_type='dataframe',
                                 NA_action=na_action)
            factors = dmatrix(formula[1].strip() + ' + 0',
                              data,
                              return_type='dataframe',
                              NA_action=na_action)
        mod = cls(portfolios, factors)
        mod.formula = orig_formula
        return mod
Example #8
 def t(NA_types, datas, exp_finish_fast, exp_levels, exp_contrast=None):
     sniffer = CategoricalSniffer(NAAction(NA_types=NA_types))
     for data in datas:
         done = sniffer.sniff(data)
         if done:
             assert exp_finish_fast
             break
         else:
             assert not exp_finish_fast
     assert sniffer.levels_contrast() == (exp_levels, exp_contrast)
Example #9
def test__CatFactorEvaluator():
    from nose.tools import assert_raises
    from patsy.categorical import C
    naa = NAAction()
    f = _MockFactor()
    cf1 = _CatFactorEvaluator(f, {}, ["a", "b"])
    assert cf1.factor is f
    cat1, _ = cf1.eval({"mock": ["b", "a", "b"]}, naa)
    assert cat1.shape == (3, )
    assert np.all(cat1 == [1, 0, 1])
    assert_raises(PatsyError, cf1.eval, {"mock": ["c"]}, naa)
    assert_raises(PatsyError, cf1.eval, {"mock": C(["a", "c"])}, naa)
    assert_raises(PatsyError, cf1.eval,
                  {"mock": C(["a", "b"], levels=["b", "a"])}, naa)
    assert_raises(PatsyError, cf1.eval, {"mock": [1, 0, 1]}, naa)
    bad_cat = np.asarray(["b", "a", "a", "b"])
    bad_cat.resize((2, 2))
    assert_raises(PatsyError, cf1.eval, {"mock": bad_cat}, naa)

    cat1_NA, is_NA = cf1.eval({"mock": ["a", None, "b"]},
                              NAAction(NA_types=["None"]))
    assert np.array_equal(is_NA, [False, True, False])
    assert np.array_equal(cat1_NA, [0, -1, 1])
    assert_raises(PatsyError, cf1.eval, {"mock": ["a", None, "b"]},
                  NAAction(NA_types=[]))

    cf2 = _CatFactorEvaluator(_MockFactor(), {}, [False, True])
    cat2, _ = cf2.eval({"mock": [True, False, False, True]}, naa)
    assert cat2.shape == (4, )
    assert np.all(cat2 == [1, 0, 0, 1])

    if have_pandas:
        s = pandas.Series(["b", "a"], index=[10, 20])
        cat_s, _ = cf1.eval({"mock": s}, naa)
        assert isinstance(cat_s, pandas.Series)
        assert np.array_equal(cat_s, [1, 0])
        assert np.array_equal(cat_s.index, [10, 20])
        sbool = pandas.Series([True, False], index=[11, 21])
        cat_sbool, _ = cf2.eval({"mock": sbool}, naa)
        assert isinstance(cat_sbool, pandas.Series)
        assert np.array_equal(cat_sbool, [1, 0])
        assert np.array_equal(cat_sbool.index, [11, 21])
Example #10
    def _prepare_data_from_formula(formula, data, portfolios):
        na_action = NAAction(on_NA='raise', NA_types=[])
        orig_formula = formula
        if portfolios is not None:
            factors = dmatrix(formula + ' + 0', data, return_type='dataframe', NA_action=na_action)
        else:
            formula = formula.split('~')
            portfolios = dmatrix(formula[0].strip() + ' + 0', data,
                                 return_type='dataframe', NA_action=na_action)
            factors = dmatrix(formula[1].strip() + ' + 0', data,
                              return_type='dataframe', NA_action=na_action)

        return factors, portfolios, orig_formula
Example #11
def test_NA_action():
    initial_data = {"x": [1, 2, 3], "c": ["c1", "c2", "c1"]}

    def iter_maker():
        yield initial_data

    builder = design_matrix_builders([make_termlist("x", "c")], iter_maker,
                                     0)[0]

    # By default drops rows containing either NaN or None
    mat = build_design_matrices(
        [builder], {
            "x": [10.0, np.nan, 20.0],
            "c": np.asarray(["c1", "c2", None], dtype=object)
        })[0]
    assert mat.shape == (1, 3)
    assert np.array_equal(mat, [[1.0, 0.0, 10.0]])

    # NA_action="a string" also accepted:
    mat = build_design_matrices(
        [builder], {
            "x": [10.0, np.nan, 20.0],
            "c": np.asarray(["c1", "c2", None], dtype=object)
        },
        NA_action="drop")[0]
    assert mat.shape == (1, 3)
    assert np.array_equal(mat, [[1.0, 0.0, 10.0]])

    # And objects
    from patsy.missing import NAAction
    # allows NaN's to pass through
    NA_action = NAAction(NA_types=[])
    mat = build_design_matrices([builder], {
        "x": [10.0, np.nan],
        "c": np.asarray(["c1", "c2"], dtype=object)
    },
                                NA_action=NA_action)[0]
    assert mat.shape == (2, 3)
    # According to this (and only this) function, NaN == NaN.
    np.testing.assert_array_equal(mat, [[1.0, 0.0, 10.0], [0.0, 1.0, np.nan]])

    # NA_action="raise"
    pytest.raises(PatsyError,
                  build_design_matrices, [builder], {
                      "x": [10.0, np.nan, 20.0],
                      "c": np.asarray(["c1", "c2", None], dtype=object)
                  },
                  NA_action="raise")
Example #12
    def from_formula(cls, formula, data, *, portfolios=None, risk_free=False,
                     sigma=None):
        """
        Parameters
        ----------
        formula : str
            Patsy formula modified for the syntax described in the notes
        data : DataFrame
            DataFrame containing the variables used in the formula
        portfolios : array-like, optional
            Portfolios to be used in the model. If provided, must use formula
            syntax containing only factors.
        risk_free : bool, optional
            Flag indicating whether the risk-free rate should be estimated
            from returns along other risk premia.  If False, the returns are
            assumed to be excess returns using the correct risk-free rate.
        sigma : array-like, optional
            Positive definite residual covariance (nportfolio by nportfolio)

        Returns
        -------
        model : LinearFactorModel
            Model instance

        Notes
        -----
        The formula can be used in one of two ways.  The first specifies only the
        factors and uses the data provided in ``portfolios`` as the test portfolios.
        The second specifies the test portfolios, using ``+`` to separate the test
        portfolios and ``~`` to separate the test portfolios from the factors.

        Examples
        --------
        >>> from linearmodels.datasets import french
        >>> from linearmodels.asset_pricing import LinearFactorModel
        >>> data = french.load()
        >>> formula = 'S1M1 + S1M5 + S3M3 + S5M1 + S5M5 ~ MktRF + SMB + HML'
        >>> mod = LinearFactorModel.from_formula(formula, data)

        Using only factors

        >>> portfolios = data[['S1M1', 'S1M5', 'S3M1', 'S3M5', 'S5M1', 'S5M5']]
        >>> formula = 'MktRF + SMB + HML'
        >>> mod = LinearFactorModel.from_formula(formula, data, portfolios=portfolios)
        """
        na_action = NAAction(on_NA='raise', NA_types=[])
        orig_formula = formula
        if portfolios is not None:
            factors = dmatrix(formula + ' + 0', data, return_type='dataframe', NA_action=na_action)
        else:
            formula = formula.split('~')
            portfolios = dmatrix(formula[0].strip() + ' + 0', data,
                                 return_type='dataframe', NA_action=na_action)
            factors = dmatrix(formula[1].strip() + ' + 0', data,
                              return_type='dataframe', NA_action=na_action)
        if sigma is not None:
            mod = cls(portfolios, factors, risk_free=risk_free, sigma=sigma)
        else:
            mod = cls(portfolios, factors, risk_free=risk_free)
        mod.formula = orig_formula
        return mod
Example #13
def test_CategoricalSniffer():
    from patsy.missing import NAAction

    def t(NA_types, datas, exp_finish_fast, exp_levels, exp_contrast=None):
        sniffer = CategoricalSniffer(NAAction(NA_types=NA_types))
        for data in datas:
            done = sniffer.sniff(data)
            if done:
                assert exp_finish_fast
                break
            else:
                assert not exp_finish_fast
        assert sniffer.levels_contrast() == (exp_levels, exp_contrast)

    if have_pandas_categorical:
        # We make sure to test with both boxed and unboxed pandas objects,
        # because we used to have a bug where boxed pandas objects would be
        # treated as categorical, but their levels would be lost...
        preps = [lambda x: x, C]
        if have_pandas_categorical_dtype:
            preps += [pandas.Series, lambda x: C(pandas.Series(x))]
        for prep in preps:
            t([], [prep(pandas.Categorical([1, 2, None]))], True, (1, 2))
            # check order preservation
            t([], [prep(pandas_Categorical_from_codes([1, 0], ["a", "b"]))],
              True, ("a", "b"))
            t([], [prep(pandas_Categorical_from_codes([1, 0], ["b", "a"]))],
              True, ("b", "a"))
            # check that if someone sticks a .contrast field onto our object
            obj = prep(pandas.Categorical(["a", "b"]))
            obj.contrast = "CONTRAST"
            t([], [obj], True, ("a", "b"), "CONTRAST")

    t([], [C([1, 2]), C([3, 2])], False, (1, 2, 3))
    # check order preservation
    t([], [C([1, 2], levels=[1, 2, 3]), C([4, 2])], True, (1, 2, 3))
    t([], [C([1, 2], levels=[3, 2, 1]), C([4, 2])], True, (3, 2, 1))

    # do some actual sniffing with NAs in
    t(["None", "NaN"], [C([1, np.nan]), C([10, None])], False, (1, 10))
    # But 'None' can be a type if we don't make it represent NA:
    sniffer = CategoricalSniffer(NAAction(NA_types=["NaN"]))
    sniffer.sniff(C([1, np.nan, None]))
    # The level order here is different on py2 and py3 :-( Because there's no
    # consistent way to sort mixed-type values on both py2 and py3. Honestly
    # people probably shouldn't use this, but I don't know how to give a
    # sensible error.
    levels, _ = sniffer.levels_contrast()
    assert set(levels) == set([None, 1])

    # bool special cases
    t(["None", "NaN"], [C([True, np.nan, None])], True, (False, True))
    t([], [C([10, 20]), C([False]), C([30, 40])], False,
      (False, True, 10, 20, 30, 40))
    # exercise the fast-path
    t([], [np.asarray([True, False]), ["foo"]], True, (False, True))

    # check tuples too
    t(["None", "NaN"], [C([("b", 2), None, ("a", 1), np.nan, ("c", None)])],
      False, (("a", 1), ("b", 2), ("c", None)))

    # contrasts
    t([], [C([10, 20], contrast="FOO")], False, (10, 20), "FOO")

    # no box
    t([], [[10, 30], [20]], False, (10, 20, 30))
    t([], [["b", "a"], ["a"]], False, ("a", "b"))

    # 0d
    t([], ["b"], False, ("b", ))

    from nose.tools import assert_raises

    # unhashable level error:
    sniffer = CategoricalSniffer(NAAction())
    assert_raises(PatsyError, sniffer.sniff, [{}])

    # >1d is illegal
    assert_raises(PatsyError, sniffer.sniff, np.asarray([["b"]]))
Example #14
def build_design_matrices(design_infos, data,
                          NA_action="drop",
                          return_type="matrix",
                          dtype=np.dtype(float)):
    """Construct several design matrices from :class:`DesignMatrixBuilder`
    objects.

    This is one of Patsy's fundamental functions. This function and
    :func:`design_matrix_builders` together form the API to the core formula
    interpretation machinery.

    :arg design_infos: A list of :class:`DesignInfo` objects describing the
      design matrices to be built.
    :arg data: A dict-like object which will be used to look up data.
    :arg NA_action: What to do with rows that contain missing values. You can
      ``"drop"`` them, ``"raise"`` an error, or for customization, pass an
      :class:`NAAction` object. See :class:`NAAction` for details on what
      values count as 'missing' (and how to alter this).
    :arg return_type: Either ``"matrix"`` or ``"dataframe"``. See below.
    :arg dtype: The dtype of the returned matrix. Useful if you want to use
      single-precision or extended-precision.

    This function returns either a list of :class:`DesignMatrix` objects (for
    ``return_type="matrix"``) or a list of :class:`pandas.DataFrame` objects
    (for ``return_type="dataframe"``). In both cases, all returned design
    matrices will have ``.design_info`` attributes containing the appropriate
    :class:`DesignInfo` objects.

    Note that unlike :func:`design_matrix_builders`, this function takes only
    a simple data argument, not any kind of iterator. That's because this
    function doesn't need a global view of the data -- everything that depends
    on the whole data set is already encapsulated in the ``design_infos``. If
    you are incrementally processing a large data set, simply call this
    function for each chunk.

    Index handling: This function always checks for indexes in the following
    places:

    * If ``data`` is a :class:`pandas.DataFrame`, its ``.index`` attribute.
    * If any factors evaluate to a :class:`pandas.Series` or
      :class:`pandas.DataFrame`, then their ``.index`` attributes.

    If multiple indexes are found, they must be identical (same values in the
    same order). If no indexes are found, then a default index is generated
    using ``np.arange(num_rows)``. One way or another, we end up with a single
    index for all the data. If ``return_type="dataframe"``, then this index is
    used as the index of the returned DataFrame objects. Examining this index
    makes it possible to determine which rows were removed due to NAs.

    Determining the number of rows in design matrices: This is not as obvious
    as it might seem, because it's possible to have a formula like "~ 1" that
    doesn't depend on the data (it has no factors). For this formula, it's
    obvious what every row in the design matrix should look like (just the
    value ``1``); but, how many rows like this should there be? To determine
    the number of rows in a design matrix, this function always checks in the
    following places:

    * If ``data`` is a :class:`pandas.DataFrame`, then its number of rows.
    * The number of entries in any factors present in any of the design
      matrices being built.

    All these values must match. In particular, if this function is called to
    generate multiple design matrices at once, then they must all have the
    same number of rows.

    .. versionadded:: 0.2.0
       The ``NA_action`` argument.

    """
    if isinstance(NA_action, str):
        NA_action = NAAction(NA_action)
    if return_type == "dataframe" and not have_pandas:
        raise PatsyError("pandas.DataFrame was requested, but pandas "
                            "is not installed")
    if return_type not in ("matrix", "dataframe"):
        raise PatsyError("unrecognized output type %r, should be "
                            "'matrix' or 'dataframe'" % (return_type,))
    # Evaluate factors
    factor_info_to_values = {}
    factor_info_to_isNAs = {}
    rows_checker = _CheckMatch("Number of rows", lambda a, b: a == b)
    index_checker = _CheckMatch("Index", lambda a, b: a.equals(b))
    if have_pandas and isinstance(data, pandas.DataFrame):
        index_checker.check(data.index, "data.index", None)
        rows_checker.check(data.shape[0], "data argument", None)
    for design_info in design_infos:
        # We look at evaluators rather than factors here, because it might
        # happen that we have the same factor twice, but with different
        # memorized state.
        for factor_info in six.itervalues(design_info.factor_infos):
            if factor_info not in factor_info_to_values:
                value, is_NA = _eval_factor(factor_info, data, NA_action)
                factor_info_to_isNAs[factor_info] = is_NA
                # value may now be a Series, DataFrame, or ndarray
                name = factor_info.factor.name()
                origin = factor_info.factor.origin
                rows_checker.check(value.shape[0], name, origin)
                if (have_pandas
                    and isinstance(value, (pandas.Series, pandas.DataFrame))):
                    index_checker.check(value.index, name, origin)
                # Strategy: we work with raw ndarrays for doing the actual
                # combining; DesignMatrixBuilder objects never see pandas
                # objects. Then at the end, if a DataFrame was requested, we
                # convert. So every entry in this dict is either a 2-d array
                # of floats, or a 1-d array of integers (representing
                # categories).
                value = np.asarray(value)
                factor_info_to_values[factor_info] = value
    # Handle NAs
    values = list(factor_info_to_values.values())
    is_NAs = list(factor_info_to_isNAs.values())
    origins = [factor_info.factor.origin
               for factor_info in factor_info_to_values]
    pandas_index = index_checker.value
    num_rows = rows_checker.value
    # num_rows is None iff factor_info_to_values (and associated sets like
    # 'values') are empty, i.e., we have no actual evaluators involved
    # (formulas like "~ 1").
    if return_type == "dataframe" and num_rows is not None:
        if pandas_index is None:
            pandas_index = np.arange(num_rows)
        values.append(pandas_index)
        is_NAs.append(np.zeros(len(pandas_index), dtype=bool))
        origins.append(None)
    new_values = NA_action.handle_NA(values, is_NAs, origins)
    # NA_action may have changed the number of rows.
    if new_values:
        num_rows = new_values[0].shape[0]
    if return_type == "dataframe" and num_rows is not None:
        pandas_index = new_values.pop()
    factor_info_to_values = dict(zip(factor_info_to_values, new_values))
    # Build factor values into matrices
    results = []
    for design_info in design_infos:
        results.append(_build_design_matrix(design_info,
                                            factor_info_to_values,
                                            dtype))
    matrices = []
    for need_reshape, matrix in results:
        if need_reshape:
            # There is no data-dependence, at all -- a formula like "1 ~ 1".
            # In this case the builder just returns a single-row matrix, and
            # we have to broadcast it vertically to the appropriate size. If
            # we can figure out what that is...
            assert matrix.shape[0] == 1
            if num_rows is not None:
                matrix = DesignMatrix(np.repeat(matrix, num_rows, axis=0),
                                      matrix.design_info)
            else:
                raise PatsyError(
                    "No design matrix has any non-trivial factors, "
                    "the data object is not a DataFrame. "
                    "I can't tell how many rows the design matrix should "
                    "have!"
                    )
        matrices.append(matrix)
    if return_type == "dataframe":
        assert have_pandas
        for i, matrix in enumerate(matrices):
            di = matrix.design_info
            matrices[i] = pandas.DataFrame(matrix,
                                           columns=di.column_names,
                                           index=pandas_index)
            matrices[i].design_info = di
    return matrices
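A rough end-to-end sketch of the two-step workflow this function supports, using the incr_dbuilder helper and illustrative chunked data:

import numpy as np
from patsy import incr_dbuilder, build_design_matrices

chunks = [{"x": [1.0, 2.0]}, {"x": [3.0, np.nan]}]
design_info = incr_dbuilder("x", lambda: iter(chunks))  # one pass to sniff factor types
for chunk in chunks:
    (mat,) = build_design_matrices([design_info], chunk, NA_action="drop")
    print(mat.shape)  # (2, 2) then (1, 2) -- the NaN row is dropped chunk by chunk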
Example #15
def test__eval_factor_numerical():
    import pytest
    naa = NAAction()
    f = _MockFactor()

    fi1 = FactorInfo(f, "numerical", {}, num_columns=1, categories=None)

    assert fi1.factor is f
    eval123, is_NA = _eval_factor(fi1, {"mock": [1, 2, 3]}, naa)
    assert eval123.shape == (3, 1)
    assert np.all(eval123 == [[1], [2], [3]])
    assert is_NA.shape == (3,)
    assert np.all(~is_NA)
    pytest.raises(PatsyError, _eval_factor, fi1, {"mock": [[[1]]]}, naa)
    pytest.raises(PatsyError, _eval_factor, fi1, {"mock": [[1, 2]]}, naa)
    pytest.raises(PatsyError, _eval_factor, fi1, {"mock": ["a", "b"]}, naa)
    pytest.raises(PatsyError, _eval_factor, fi1, {"mock": [True, False]}, naa)
    fi2 = FactorInfo(_MockFactor(), "numerical",
                     {}, num_columns=2, categories=None)
    eval123321, is_NA = _eval_factor(fi2,
                                     {"mock": [[1, 3], [2, 2], [3, 1]]},
                                     naa)
    assert eval123321.shape == (3, 2)
    assert np.all(eval123321 == [[1, 3], [2, 2], [3, 1]])
    assert is_NA.shape == (3,)
    assert np.all(~is_NA)
    pytest.raises(PatsyError, _eval_factor, fi2, {"mock": [1, 2, 3]}, naa)
    pytest.raises(PatsyError, _eval_factor, fi2, {"mock": [[1, 2, 3]]}, naa)

    ev_nan, is_NA = _eval_factor(fi1, {"mock": [1, 2, np.nan]},
                                 NAAction(NA_types=["NaN"]))
    assert np.array_equal(is_NA, [False, False, True])
    ev_nan, is_NA = _eval_factor(fi1, {"mock": [1, 2, np.nan]},
                                 NAAction(NA_types=[]))
    assert np.array_equal(is_NA, [False, False, False])

    if have_pandas:
        eval_ser, _ = _eval_factor(fi1,
                                   {"mock":
                                    pandas.Series([1, 2, 3],
                                                  index=[10, 20, 30])},
                                   naa)
        assert isinstance(eval_ser, pandas.DataFrame)
        assert np.array_equal(eval_ser, [[1], [2], [3]])
        assert np.array_equal(eval_ser.index, [10, 20, 30])
        eval_df1, _ = _eval_factor(fi1,
                                   {"mock":
                                    pandas.DataFrame([[2], [1], [3]],
                                                     index=[20, 10, 30])},
                                   naa)
        assert isinstance(eval_df1, pandas.DataFrame)
        assert np.array_equal(eval_df1, [[2], [1], [3]])
        assert np.array_equal(eval_df1.index, [20, 10, 30])
        eval_df2, _ = _eval_factor(fi2,
                                   {"mock":
                                    pandas.DataFrame([[2, 3], [1, 4], [3, -1]],
                                                     index=[20, 30, 10])},
                                   naa)
        assert isinstance(eval_df2, pandas.DataFrame)
        assert np.array_equal(eval_df2, [[2, 3], [1, 4], [3, -1]])
        assert np.array_equal(eval_df2.index, [20, 30, 10])

        pytest.raises(PatsyError,
                      _eval_factor, fi2,
                      {"mock": pandas.Series([1, 2, 3], index=[10, 20, 30])},
                      naa)
        pytest.raises(PatsyError,
                      _eval_factor, fi1,
                      {"mock":
                       pandas.DataFrame([[2, 3], [1, 4], [3, -1]],
                                        index=[20, 30, 10])},
                      naa)
Example #16
def test__examine_factor_types():
    from patsy.categorical import C
    class MockFactor(object):
        def __init__(self):
            # You should check this using 'is', not '=='
            from patsy.origin import Origin
            self.origin = Origin("MOCK", 1, 2)

        def eval(self, state, data):
            return state[data]

        def name(self):
            return "MOCK MOCK"

    # This hacky class can only be iterated over once, but it keeps track of
    # how far it got.
    class DataIterMaker(object):
        def __init__(self):
            self.i = -1

        def __call__(self):
            return self

        def __iter__(self):
            return self

        def next(self):
            self.i += 1
            if self.i > 1:
                raise StopIteration
            return self.i
        __next__ = next

    num_1dim = MockFactor()
    num_1col = MockFactor()
    num_4col = MockFactor()
    categ_1col = MockFactor()
    bool_1col = MockFactor()
    string_1col = MockFactor()
    object_1col = MockFactor()
    object_levels = (object(), object(), object())
    factor_states = {
        num_1dim: ([1, 2, 3], [4, 5, 6]),
        num_1col: ([[1], [2], [3]], [[4], [5], [6]]),
        num_4col: (np.zeros((3, 4)), np.ones((3, 4))),
        categ_1col: (C(["a", "b", "c"], levels=("a", "b", "c"),
                       contrast="MOCK CONTRAST"),
                     C(["c", "b", "a"], levels=("a", "b", "c"),
                       contrast="MOCK CONTRAST")),
        bool_1col: ([True, True, False], [False, True, True]),
        # It has to read through all the data to see all the possible levels:
        string_1col: (["a", "a", "a"], ["c", "b", "a"]),
        object_1col: ([object_levels[0]] * 3, object_levels),
        }

    it = DataIterMaker()
    (num_column_counts, cat_levels_contrasts,
     ) = _examine_factor_types(factor_states.keys(), factor_states, it,
                               NAAction())
    assert it.i == 2
    iterations = 0
    assert num_column_counts == {num_1dim: 1, num_1col: 1, num_4col: 4}
    assert cat_levels_contrasts == {
        categ_1col: (("a", "b", "c"), "MOCK CONTRAST"),
        bool_1col: ((False, True), None),
        string_1col: (("a", "b", "c"), None),
        object_1col: (tuple(sorted(object_levels, key=id)), None),
        }

    # Check that it doesn't read through all the data if that's not necessary:
    it = DataIterMaker()
    no_read_necessary = [num_1dim, num_1col, num_4col, categ_1col, bool_1col]
    (num_column_counts, cat_levels_contrasts,
     ) = _examine_factor_types(no_read_necessary, factor_states, it,
                               NAAction())
    assert it.i == 0
    assert num_column_counts == {num_1dim: 1, num_1col: 1, num_4col: 4}
    assert cat_levels_contrasts == {
        categ_1col: (("a", "b", "c"), "MOCK CONTRAST"),
        bool_1col: ((False, True), None),
        }

    # Illegal inputs:
    bool_3col = MockFactor()
    num_3dim = MockFactor()
    # no such thing as a multi-dimensional Categorical
    # categ_3dim = MockFactor()
    string_3col = MockFactor()
    object_3col = MockFactor()
    illegal_factor_states = {
        num_3dim: (np.zeros((3, 3, 3)), np.ones((3, 3, 3))),
        string_3col: ([["a", "b", "c"]], [["b", "c", "a"]]),
        object_3col: ([[[object()]]], [[[object()]]]),
        }
    import pytest
    for illegal_factor in illegal_factor_states:
        it = DataIterMaker()
        try:
            _examine_factor_types([illegal_factor], illegal_factor_states, it,
                                  NAAction())
        except PatsyError as e:
            assert e.origin is illegal_factor.origin
        else:
            assert False
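A small illustrative sketch of how the sniffed factor types surface in the final column layout (the variable names are made up):

import numpy as np
from patsy import dmatrix

data = {"num4": np.zeros((3, 4)), "cat": ["a", "b", "c"]}
mat = dmatrix("num4 + cat + 0", data, return_type="dataframe")
print(mat.columns.tolist())
# roughly: ['num4[0]', 'num4[1]', 'num4[2]', 'num4[3]', 'cat[a]', 'cat[b]', 'cat[c]']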
Example #17
def design_matrix_builders(termlists, data_iter_maker, eval_env,
                           NA_action="drop"):
    """Construct several :class:`DesignInfo` objects from termlists.

    This is one of Patsy's fundamental functions. This function and
    :func:`build_design_matrices` together form the API to the core formula
    interpretation machinery.

    :arg termlists: A list of termlists, where each termlist is a list of
      :class:`Term` objects which together specify a design matrix.
    :arg data_iter_maker: A zero-argument callable which returns an iterator
      over dict-like data objects. This must be a callable rather than a
      simple iterator because sufficiently complex formulas may require
      multiple passes over the data (e.g. if there are nested stateful
      transforms).
    :arg eval_env: Either a :class:`EvalEnvironment` which will be used to
      look up any variables referenced in `termlists` that cannot be
      found in `data_iter_maker`, or else a depth represented as an
      integer which will be passed to :meth:`EvalEnvironment.capture`.
      ``eval_env=0`` means to use the context of the function calling
      :func:`design_matrix_builders` for lookups. If calling this function
      from a library, you probably want ``eval_env=1``, which means that
      variables should be resolved in *your* caller's namespace.
    :arg NA_action: An :class:`NAAction` object or string, used to determine
      what values count as 'missing' for purposes of determining the levels of
      categorical factors.
    :returns: A list of :class:`DesignInfo` objects, one for each
      termlist passed in.

    This function performs zero or more iterations over the data in order to
    sniff out any necessary information about factor types, set up stateful
    transforms, pick column names, etc.

    See :ref:`formulas` for details.

    .. versionadded:: 0.2.0
       The ``NA_action`` argument.
    .. versionadded:: 0.4.0
       The ``eval_env`` argument.
    """
    # People upgrading from versions prior to 0.4.0 could potentially have
    # passed NA_action as the 3rd positional argument. Fortunately
    # EvalEnvironment.capture only accepts int and EvalEnvironment objects,
    # and we improved its error messages to make this clear.
    eval_env = EvalEnvironment.capture(eval_env, reference=1)
    if isinstance(NA_action, str):
        NA_action = NAAction(NA_action)
    all_factors = set()
    for termlist in termlists:
        for term in termlist:
            all_factors.update(term.factors)
    factor_states = _factors_memorize(all_factors, data_iter_maker, eval_env)
    # Now all the factors have working eval methods, so we can evaluate them
    # on some data to find out what type of data they return.
    (num_column_counts,
     cat_levels_contrasts) = _examine_factor_types(all_factors,
                                                   factor_states,
                                                   data_iter_maker,
                                                   NA_action)
    # Now we need the factor infos, which encapsulate the knowledge of
    # how to turn any given factor into a chunk of data:
    factor_infos = {}
    for factor in all_factors:
        if factor in num_column_counts:
            fi = FactorInfo(factor,
                            "numerical",
                            factor_states[factor],
                            num_columns=num_column_counts[factor],
                            categories=None)
        else:
            assert factor in cat_levels_contrasts
            categories = cat_levels_contrasts[factor][0]
            fi = FactorInfo(factor,
                            "categorical",
                            factor_states[factor],
                            num_columns=None,
                            categories=categories)
        factor_infos[factor] = fi
    # And now we can construct the DesignInfo for each termlist:
    design_infos = []
    for termlist in termlists:
        term_to_subterm_infos = _make_subterm_infos(termlist,
                                                    num_column_counts,
                                                    cat_levels_contrasts)
        assert isinstance(term_to_subterm_infos, OrderedDict)
        assert frozenset(term_to_subterm_infos) == frozenset(termlist)
        this_design_factor_infos = {}
        for term in termlist:
            for factor in term.factors:
                this_design_factor_infos[factor] = factor_infos[factor]
        column_names = []
        for subterms in six.itervalues(term_to_subterm_infos):
            for subterm in subterms:
                for column_name in _subterm_column_names_iter(
                        factor_infos, subterm):
                    column_names.append(column_name)
        design_infos.append(DesignInfo(column_names,
                                       factor_infos=this_design_factor_infos,
                                       term_codings=term_to_subterm_infos))
    return design_infos
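A rough sketch of the memorization pass this function performs, assuming ModelDesc is used to build the termlist; stateful transforms such as center() are why data_iter_maker must be a re-callable rather than a plain iterator:

from patsy import ModelDesc, design_matrix_builders, build_design_matrices

chunks = [{"x": [1.0, 2.0]}, {"x": [3.0, 4.0]}]
termlist = ModelDesc.from_formula("~ center(x)").rhs_termlist
(design_info,) = design_matrix_builders([termlist], lambda: iter(chunks), eval_env=0)
(mat,) = build_design_matrices([design_info], {"x": [1.0, 2.0]})
print(mat)  # x is centered around the mean of all four memorized values (2.5)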
Example #18
def test_categorical_to_int():
    from nose.tools import assert_raises
    from patsy.missing import NAAction
    if have_pandas:
        s = pandas.Series(["a", "b", "c"], index=[10, 20, 30])
        c_pandas = categorical_to_int(s, ("a", "b", "c"), NAAction())
        assert np.all(c_pandas == [0, 1, 2])
        assert np.all(c_pandas.index == [10, 20, 30])
        # Input must be 1-dimensional
        assert_raises(PatsyError, categorical_to_int,
                      pandas.DataFrame({10: s}), ("a", "b", "c"), NAAction())
    if have_pandas_categorical:
        cat = pandas.Categorical([1, 0, -1], ("a", "b"))
        conv = categorical_to_int(cat, ("a", "b"), NAAction())
        assert np.all(conv == [1, 0, -1])
        # Trust pandas NA marking
        cat2 = pandas.Categorical([1, 0, -1], ("a", "None"))
        conv2 = categorical_to_int(cat, ("a", "b"),
                                   NAAction(NA_types=["None"]))
        assert np.all(conv2 == [1, 0, -1])
        # But levels must match
        assert_raises(PatsyError, categorical_to_int,
                      pandas.Categorical([1, 0], ("a", "b")), ("a", "c"),
                      NAAction())
        assert_raises(PatsyError, categorical_to_int,
                      pandas.Categorical([1, 0], ("a", "b")), ("b", "a"),
                      NAAction())

    def t(data, levels, expected, NA_action=NAAction()):
        got = categorical_to_int(data, levels, NA_action)
        assert np.array_equal(got, expected)

    t(["a", "b", "a"], ("a", "b"), [0, 1, 0])
    t(np.asarray(["a", "b", "a"]), ("a", "b"), [0, 1, 0])
    t(np.asarray(["a", "b", "a"], dtype=object), ("a", "b"), [0, 1, 0])
    t([0, 1, 2], (1, 2, 0), [2, 0, 1])
    t(np.asarray([0, 1, 2]), (1, 2, 0), [2, 0, 1])
    t(np.asarray([0, 1, 2], dtype=float), (1, 2, 0), [2, 0, 1])
    t(np.asarray([0, 1, 2], dtype=object), (1, 2, 0), [2, 0, 1])
    t(["a", "b", "a"], ("a", "d", "z", "b"), [0, 3, 0])
    t([("a", 1), ("b", 0), ("a", 1)], (("a", 1), ("b", 0)), [0, 1, 0])

    assert_raises(PatsyError, categorical_to_int, ["a", "b", "a"], ("a", "c"),
                  NAAction())

    t(C(["a", "b", "a"]), ("a", "b"), [0, 1, 0])
    t(C(["a", "b", "a"]), ("b", "a"), [1, 0, 1])
    t(C(["a", "b", "a"], levels=["b", "a"]), ("b", "a"), [1, 0, 1])
    # Mismatch between C() levels and expected levels
    assert_raises(PatsyError, categorical_to_int,
                  C(["a", "b", "a"], levels=["a", "b"]), ("b", "a"),
                  NAAction())

    # ndim == 2 is disallowed
    assert_raises(PatsyError, categorical_to_int,
                  np.asarray([["a", "b"], ["b", "a"]]), ("a", "b"), NAAction())
    # ndim == 0 is disallowed likewise
    assert_raises(PatsyError, categorical_to_int, "a", ("a", "b"), NAAction())

    # levels must be hashable
    assert_raises(PatsyError, categorical_to_int, ["a", "b"], ("a", "b", {}),
                  NAAction())
    assert_raises(PatsyError, categorical_to_int, ["a", "b", {}], ("a", "b"),
                  NAAction())

    t(["b", None, np.nan, "a"], ("a", "b"), [1, -1, -1, 0],
      NAAction(NA_types=["None", "NaN"]))
    t(["b", None, np.nan, "a"], ("a", "b", None), [1, -1, -1, 0],
      NAAction(NA_types=["None", "NaN"]))
    t(["b", None, np.nan, "a"], ("a", "b", None), [1, 2, -1, 0],
      NAAction(NA_types=["NaN"]))

    # Smoke test for the branch that formats the ellipsized list of levels in
    # the error message:
    assert_raises(PatsyError, categorical_to_int, ["a", "b", "q"],
                  ("a", "b", "c", "d", "e", "f", "g", "h"), NAAction())
Example #19
 def t(data, levels, expected, NA_action=NAAction()):
     got = categorical_to_int(data, levels, NA_action)
     assert np.array_equal(got, expected)
Example #20
def design_matrix_builders(termlists, data_iter_maker, NA_action="drop"):
    """Construct several :class:`DesignMatrixBuilders` from termlists.

    This is one of Patsy's fundamental functions. This function and
    :func:`build_design_matrices` together form the API to the core formula
    interpretation machinery.

    :arg termlists: A list of termlists, where each termlist is a list of
      :class:`Term` objects which together specify a design matrix.
    :arg data_iter_maker: A zero-argument callable which returns an iterator
      over dict-like data objects. This must be a callable rather than a
      simple iterator because sufficiently complex formulas may require
      multiple passes over the data (e.g. if there are nested stateful
      transforms).
    :arg NA_action: An :class:`NAAction` object or string, used to determine
      what values count as 'missing' for purposes of determining the levels of
      categorical factors.
    :returns: A list of :class:`DesignMatrixBuilder` objects, one for each
      termlist passed in.

    This function performs zero or more iterations over the data in order to
    sniff out any necessary information about factor types, set up stateful
    transforms, pick column names, etc.

    See :ref:`formulas` for details.

    .. versionadded:: 0.2.0
       The ``NA_action`` argument.
    """
    if isinstance(NA_action, basestring):
        NA_action = NAAction(NA_action)
    all_factors = set()
    for termlist in termlists:
        for term in termlist:
            all_factors.update(term.factors)
    factor_states = _factors_memorize(all_factors, data_iter_maker)
    # Now all the factors have working eval methods, so we can evaluate them
    # on some data to find out what type of data they return.
    (num_column_counts,
     cat_levels_contrasts) = _examine_factor_types(all_factors, factor_states,
                                                   data_iter_maker, NA_action)
    # Now we need the factor evaluators, which encapsulate the knowledge of
    # how to turn any given factor into a chunk of data:
    factor_evaluators = {}
    for factor in all_factors:
        if factor in num_column_counts:
            evaluator = _NumFactorEvaluator(factor, factor_states[factor],
                                            num_column_counts[factor])
        else:
            assert factor in cat_levels_contrasts
            levels = cat_levels_contrasts[factor][0]
            evaluator = _CatFactorEvaluator(factor, factor_states[factor],
                                            levels)
        factor_evaluators[factor] = evaluator
    # And now we can construct the DesignMatrixBuilder for each termlist:
    builders = []
    for termlist in termlists:
        result = _make_term_column_builders(termlist, num_column_counts,
                                            cat_levels_contrasts)
        new_term_order, term_to_column_builders = result
        assert frozenset(new_term_order) == frozenset(termlist)
        term_evaluators = set()
        for term in termlist:
            for factor in term.factors:
                term_evaluators.add(factor_evaluators[factor])
        builders.append(
            DesignMatrixBuilder(new_term_order, term_evaluators,
                                term_to_column_builders))
    return builders
Example #21
def build_design_matrices(builders, data,
                          NA_action="drop",
                          return_type="matrix",
                          dtype=np.dtype(float)):
    """Construct several design matrices from :class:`DesignMatrixBuilder`
    objects.

    This is one of Patsy's fundamental functions. This function and
    :func:`design_matrix_builders` together form the API to the core formula
    interpretation machinery.

    :arg builders: A list of :class:`DesignMatrixBuilders` specifying the
      design matrices to be built.
    :arg data: A dict-like object which will be used to look up data.
    :arg NA_action: What to do with rows that contain missing values. You can
      ``"drop"`` them, ``"raise"`` an error, or for customization, pass an
      :class:`NAAction` object. See :class:`NAAction` for details on what
      values count as 'missing' (and how to alter this).
    :arg return_type: Either ``"matrix"`` or ``"dataframe"``. See below.
    :arg dtype: The dtype of the returned matrix. Useful if you want to use
      single-precision or extended-precision.

    This function returns either a list of :class:`DesignMatrix` objects (for
    ``return_type="matrix"``) or a list of :class:`pandas.DataFrame` objects
    (for ``return_type="dataframe"``). In the latter case, the DataFrames will
    preserve any (row) indexes that were present in the input, which may be
    useful for time-series models etc. In any case, all returned design
    matrices will have ``.design_info`` attributes containing the appropriate
    :class:`DesignInfo` objects.

    Unlike :func:`design_matrix_builders`, this function takes only a simple
    data argument, not any kind of iterator. That's because this function
    doesn't need a global view of the data -- everything that depends on the
    whole data set is already encapsulated in the `builders`. If you are
    incrementally processing a large data set, simply call this function for
    each chunk.
    """
    if isinstance(NA_action, basestring):
        NA_action = NAAction(NA_action)
    if return_type == "dataframe" and not have_pandas:
        raise PatsyError("pandas.DataFrame was requested, but pandas "
                            "is not installed")
    if return_type not in ("matrix", "dataframe"):
        raise PatsyError("unrecognized output type %r, should be "
                            "'matrix' or 'dataframe'" % (return_type,))
    # Evaluate factors
    evaluator_to_values = {}
    evaluator_to_isNAs = {}
    num_rows = None
    pandas_index = None
    for builder in builders:
        # We look at evaluators rather than factors here, because it might
        # happen that we have the same factor twice, but with different
        # memorized state.
        for evaluator in builder._evaluators:
            if evaluator not in evaluator_to_values:
                value, is_NA = evaluator.eval(data, NA_action)
                evaluator_to_isNAs[evaluator] = is_NA
                # value may now be a Series, DataFrame, or ndarray
                if num_rows is None:
                    num_rows = value.shape[0]
                else:
                    if num_rows != value.shape[0]:
                        msg = ("Row mismatch: factor %s had %s rows, when "
                               "previous factors had %s rows"
                               % (evaluator.factor.name(), value.shape[0],
                                  num_rows))
                        raise PatsyError(msg, evaluator.factor)
                if (have_pandas
                    and isinstance(value, (pandas.Series, pandas.DataFrame))):
                    if pandas_index is None:
                        pandas_index = value.index
                    else:
                        if not pandas_index.equals(value.index):
                            msg = ("Index mismatch: pandas objects must "
                                   "have aligned indexes")
                            raise PatsyError(msg, evaluator.factor)
                # Strategy: we work with raw ndarrays for doing the actual
                # combining; DesignMatrixBuilder objects never see pandas
                # objects. Then at the end, if a DataFrame was requested, we
                # convert. So every entry in this dict is either a 2-d array
                # of floats, or a 1-d array of integers (representing
                # categories).
                value = np.asarray(value)
                evaluator_to_values[evaluator] = value
    # Handle NAs
    values = evaluator_to_values.values()
    is_NAs = evaluator_to_isNAs.values()
    if return_type == "dataframe" and num_rows is not None:
        if pandas_index is None:
            pandas_index = np.arange(num_rows)
        values.append(pandas_index)
        is_NAs.append(np.zeros(len(pandas_index), dtype=bool))
    origins = [evaluator.factor.origin for evaluator in evaluator_to_values]
    new_values = NA_action.handle_NA(values, is_NAs, origins)
    if return_type == "dataframe" and num_rows is not None:
        pandas_index = new_values.pop()
    evaluator_to_values = dict(zip(evaluator_to_values, new_values))
    # Build factor values into matrices
    results = []
    for builder in builders:
        results.append(builder._build(evaluator_to_values, dtype))
    matrices = []
    for need_reshape, matrix in results:
        if need_reshape and num_rows is not None:
            assert matrix.shape[0] == 1
            matrices.append(DesignMatrix(np.repeat(matrix, num_rows, axis=0),
                                         matrix.design_info))
        else:
            # There is no data-dependence, at all -- a formula like "1 ~ 1". I
            # guess we'll just return some single-row matrices. Perhaps it
            # would be better to figure out how many rows are in the input
            # data and broadcast to that size, but eh. Input data is optional
            # in the first place, so even that would be no guarantee... let's
            # wait until someone actually has a relevant use case before we
            # worry about it.
            matrices.append(matrix)
    if return_type == "dataframe":
        assert have_pandas
        for i, matrix in enumerate(matrices):
            di = matrix.design_info
            matrices[i] = pandas.DataFrame(matrix,
                                           columns=di.column_names,
                                           index=pandas_index)
            matrices[i].design_info = di
    return matrices
Example #22
def build_design_matrices(builders, data,
                          NA_action="drop",
                          return_type="matrix",
                          dtype=np.dtype(float)):
    """Construct several design matrices from :class:`DesignMatrixBuilder`
    objects.

    This is one of Patsy's fundamental functions. This function and
    :func:`design_matrix_builders` together form the API to the core formula
    interpretation machinery.

    :arg builders: A list of :class:`DesignMatrixBuilders` specifying the
      design matrices to be built.
    :arg data: A dict-like object which will be used to look up data.
    :arg NA_action: What to do with rows that contain missing values. You can
      ``"drop"`` them, ``"raise"`` an error, or for customization, pass an
      :class:`NAAction` object. See :class:`NAAction` for details on what
      values count as 'missing' (and how to alter this).
    :arg return_type: Either ``"matrix"`` or ``"dataframe"``. See below.
    :arg dtype: The dtype of the returned matrix. Useful if you want to use
      single-precision or extended-precision.

    This function returns either a list of :class:`DesignMatrix` objects (for
    ``return_type="matrix"``) or a list of :class:`pandas.DataFrame` objects
    (for ``return_type="dataframe"``). In the latter case, the DataFrames will
    preserve any (row) indexes that were present in the input, which may be
    useful for time-series models etc. In any case, all returned design
    matrices will have ``.design_info`` attributes containing the appropriate
    :class:`DesignInfo` objects.

    Unlike :func:`design_matrix_builders`, this function takes only a simple
    data argument, not any kind of iterator. That's because this function
    doesn't need a global view of the data -- everything that depends on the
    whole data set is already encapsulated in the `builders`. If you are
    incrementally processing a large data set, simply call this function for
    each chunk.

    .. versionadded:: 0.2.0
       The ``NA_action`` argument.
    """
    if isinstance(NA_action, str):
        NA_action = NAAction(NA_action)
    if return_type == "dataframe" and not have_pandas:
        raise PatsyError("pandas.DataFrame was requested, but pandas "
                            "is not installed")
    if return_type not in ("matrix", "dataframe"):
        raise PatsyError("unrecognized output type %r, should be "
                            "'matrix' or 'dataframe'" % (return_type,))
    # Evaluate factors
    evaluator_to_values = {}
    evaluator_to_isNAs = {}
    num_rows = None
    pandas_index = None
    for builder in builders:
        # We look at evaluators rather than factors here, because it might
        # happen that we have the same factor twice, but with different
        # memorized state.
        for evaluator in builder._evaluators:
            if evaluator not in evaluator_to_values:
                value, is_NA = evaluator.eval(data, NA_action)
                evaluator_to_isNAs[evaluator] = is_NA
                # value may now be a Series, DataFrame, or ndarray
                if num_rows is None:
                    num_rows = value.shape[0]
                else:
                    if num_rows != value.shape[0]:
                        msg = ("Row mismatch: factor %s had %s rows, when "
                               "previous factors had %s rows"
                               % (evaluator.factor.name(), value.shape[0],
                                  num_rows))
                        raise PatsyError(msg, evaluator.factor)
                if (have_pandas
                    and isinstance(value, (pandas.Series, pandas.DataFrame))):
                    if pandas_index is None:
                        pandas_index = value.index
                    else:
                        if not pandas_index.equals(value.index):
                            msg = ("Index mismatch: pandas objects must "
                                   "have aligned indexes")
                            raise PatsyError(msg, evaluator.factor)
                # Strategy: we work with raw ndarrays for doing the actual
                # combining; DesignMatrixBuilder objects never see pandas
                # objects. Then at the end, if a DataFrame was requested, we
                # convert. So every entry in this dict is either a 2-d array
                # of floats, or a 1-d array of integers (representing
                # categories).
                value = np.asarray(value)
                evaluator_to_values[evaluator] = value
    # Handle NAs
    values = list(evaluator_to_values.values())
    is_NAs = list(evaluator_to_isNAs.values())
    # num_rows is None iff evaluator_to_values (and associated sets like
    # 'values') are empty, i.e., we have no actual evaluators involved
    # (formulas like "~ 1").
    if return_type == "dataframe" and num_rows is not None:
        if pandas_index is None:
            pandas_index = np.arange(num_rows)
        values.append(pandas_index)
        is_NAs.append(np.zeros(len(pandas_index), dtype=bool))
    origins = [evaluator.factor.origin for evaluator in evaluator_to_values]
    new_values = NA_action.handle_NA(values, is_NAs, origins)
    # NA_action may have changed the number of rows.
    if num_rows is not None:
        num_rows = new_values[0].shape[0]
    if return_type == "dataframe" and num_rows is not None:
        pandas_index = new_values.pop()
    evaluator_to_values = dict(zip(evaluator_to_values, new_values))
    # Build factor values into matrices
    results = []
    for builder in builders:
        results.append(builder._build(evaluator_to_values, dtype))
    matrices = []
    for need_reshape, matrix in results:
        if need_reshape and num_rows is not None:
            assert matrix.shape[0] == 1
            matrices.append(DesignMatrix(np.repeat(matrix, num_rows, axis=0),
                                         matrix.design_info))
        else:
            # There is no data-dependence, at all -- a formula like "1 ~ 1". I
            # guess we'll just return some single-row matrices. Perhaps it
            # would be better to figure out how many rows are in the input
            # data and broadcast to that size, but eh. Input data is optional
            # in the first place, so even that would be no guarantee... let's
            # wait until someone actually has a relevant use case before we
            # worry about it.
            matrices.append(matrix)
    if return_type == "dataframe":
        assert have_pandas
        for i, matrix in enumerate(matrices):
            di = matrix.design_info
            matrices[i] = pandas.DataFrame(matrix,
                                           columns=di.column_names,
                                           index=pandas_index)
            matrices[i].design_info = di
    return matrices
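In practice these builders are usually driven through patsy's high-level wrappers. The sketch below is my own illustration (the data and column names are made up) of `patsy.dmatrices`, which constructs the builders and then calls `build_design_matrices` on them internally, with the default NA_action="drop" behaviour described in the docstring.

import numpy as np
import pandas as pd
from patsy import dmatrices

df = pd.DataFrame({"y": [1.0, 2.0, np.nan, 4.0],
                   "x": [10.0, 20.0, 30.0, 40.0]})

# The default NA_action="drop" removes the row containing NaN from both
# matrices, keeping them row-aligned.
y, X = dmatrices("y ~ x", df, return_type="dataframe")
print(X.design_info.column_names)   # ['Intercept', 'x']
print(list(X.index))                # [0, 1, 3]; the NaN row was dropped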
Exemplo n.º 23
0
def build_design_matrices(builders, data,
                          NA_action="drop",
                          return_type="matrix",
                          dtype=np.dtype(float)):
    """Construct several design matrices from :class:`DesignMatrixBuilder`
    objects.

    This is one of Patsy's fundamental functions. This function and
    :func:`design_matrix_builders` together form the API to the core formula
    interpretation machinery.

    :arg builders: A list of :class:`DesignMatrixBuilder` objects specifying
      the design matrices to be built.
    :arg data: A dict-like object which will be used to look up data.
    :arg NA_action: What to do with rows that contain missing values. You can
      ``"drop"`` them, ``"raise"`` an error, or for customization, pass an
      :class:`NAAction` object. See :class:`NAAction` for details on what
      values count as 'missing' (and how to alter this).
    :arg return_type: Either ``"matrix"`` or ``"dataframe"``. See below.
    :arg dtype: The dtype of the returned matrix. Useful if you want to use
      single-precision or extended-precision.

    This function returns either a list of :class:`DesignMatrix` objects (for
    ``return_type="matrix"``) or a list of :class:`pandas.DataFrame` objects
    (for ``return_type="dataframe"``). In both cases, all returned design
    matrices will have ``.design_info`` attributes containing the appropriate
    :class:`DesignInfo` objects.

    Note that unlike :func:`design_matrix_builders`, this function takes only
    a simple data argument, not any kind of iterator. That's because this
    function doesn't need a global view of the data -- everything that depends
    on the whole data set is already encapsulated in the `builders`. If you
    are incrementally processing a large data set, simply call this function
    for each chunk.

    Index handling: This function always checks for indexes in the following
    places:

    * If ``data`` is a :class:`pandas.DataFrame`, its ``.index`` attribute.
    * If any factors evaluate to a :class:`pandas.Series` or
      :class:`pandas.DataFrame`, then their ``.index`` attributes.

    If multiple indexes are found, they must be identical (same values in the
    same order). If no indexes are found, then a default index is generated
    using ``np.arange(num_rows)``. One way or another, we end up with a single
    index for all the data. If ``return_type="dataframe"``, then this index is
    used as the index of the returned DataFrame objects. Examining this index
    makes it possible to determine which rows were removed due to NAs.

    Determining the number of rows in design matrices: This is not as obvious
    as it might seem, because it's possible to have a formula like "~ 1" that
    doesn't depend on the data (it has no factors). For this formula, it's
    obvious what every row in the design matrix should look like (just the
    value ``1``); but, how many rows like this should there be? To determine
    the number of rows in a design matrix, this function always checks in the
    following places:

    * If ``data`` is a :class:`pandas.DataFrame`, then its number of rows.
    * The number of entries in any factors present in any of the design
      matrices being built.

    All these values must match. In particular, if this function is called to
    generate multiple design matrices at once, then they must all have the
    same number of rows.

    .. versionadded:: 0.2.0
       The ``NA_action`` argument.
    """
    if isinstance(NA_action, str):
        NA_action = NAAction(NA_action)
    if return_type == "dataframe" and not have_pandas:
        raise PatsyError("pandas.DataFrame was requested, but pandas "
                            "is not installed")
    if return_type not in ("matrix", "dataframe"):
        raise PatsyError("unrecognized output type %r, should be "
                            "'matrix' or 'dataframe'" % (return_type,))
    # Evaluate factors
    evaluator_to_values = {}
    evaluator_to_isNAs = {}
    rows_checker = _CheckMatch("Number of rows", lambda a, b: a == b)
    index_checker = _CheckMatch("Index", lambda a, b: a.equals(b))
    if have_pandas and isinstance(data, pandas.DataFrame):
        index_checker.check(data.index, "data.index", None)
        rows_checker.check(data.shape[0], "data argument", None)
    for builder in builders:
        # We look at evaluators rather than factors here, because it might
        # happen that we have the same factor twice, but with different
        # memorized state.
        for evaluator in builder._evaluators:
            if evaluator not in evaluator_to_values:
                value, is_NA = evaluator.eval(data, NA_action)
                evaluator_to_isNAs[evaluator] = is_NA
                # value may now be a Series, DataFrame, or ndarray
                name = evaluator.factor.name()
                origin = evaluator.factor.origin
                rows_checker.check(value.shape[0], name, origin)
                if (have_pandas
                    and isinstance(value, (pandas.Series, pandas.DataFrame))):
                    index_checker.check(value.index, name, origin)
                # Strategy: we work with raw ndarrays for doing the actual
                # combining; DesignMatrixBuilder objects never see pandas
                # objects. Then at the end, if a DataFrame was requested, we
                # convert. So every entry in this dict is either a 2-d array
                # of floats, or a 1-d array of integers (representing
                # categories).
                value = np.asarray(value)
                evaluator_to_values[evaluator] = value
    # Handle NAs
    values = list(evaluator_to_values.values())
    is_NAs = list(evaluator_to_isNAs.values())
    origins = [evaluator.factor.origin for evaluator in evaluator_to_values]
    pandas_index = index_checker.value
    num_rows = rows_checker.value
    # num_rows is None iff evaluator_to_values (and associated sets like
    # 'values') are empty, i.e., we have no actual evaluators involved
    # (formulas like "~ 1").
    if return_type == "dataframe" and num_rows is not None:
        if pandas_index is None:
            pandas_index = np.arange(num_rows)
        values.append(pandas_index)
        is_NAs.append(np.zeros(len(pandas_index), dtype=bool))
        origins.append(None)
    new_values = NA_action.handle_NA(values, is_NAs, origins)
    # NA_action may have changed the number of rows.
    if new_values:
        num_rows = new_values[0].shape[0]
    if return_type == "dataframe" and num_rows is not None:
        pandas_index = new_values.pop()
    evaluator_to_values = dict(zip(evaluator_to_values, new_values))
    # Build factor values into matrices
    results = []
    for builder in builders:
        results.append(builder._build(evaluator_to_values, dtype))
    matrices = []
    for need_reshape, matrix in results:
        if need_reshape:
            # There is no data-dependence, at all -- a formula like "1 ~ 1".
            # In this case the builder just returns a single-row matrix, and
            # we have to broadcast it vertically to the appropriate size. If
            # we can figure out what that is...
            assert matrix.shape[0] == 1
            if num_rows is not None:
                matrix = DesignMatrix(np.repeat(matrix, num_rows, axis=0),
                                      matrix.design_info)
            else:
                raise PatsyError(
                    "No design matrix has any non-trivial factors, and the "
                    "data object is not a DataFrame. I can't tell how many "
                    "rows the design matrix should have!"
                    )
        matrices.append(matrix)
    if return_type == "dataframe":
        assert have_pandas
        for i, matrix in enumerate(matrices):
            di = matrix.design_info
            matrices[i] = pandas.DataFrame(matrix,
                                           columns=di.column_names,
                                           index=pandas_index)
            matrices[i].design_info = di
    return matrices
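The index handling described in the docstring can be seen directly through the public `dmatrix` wrapper; below is a small sketch (my own, with made-up labels) of recovering which rows were dropped for missing values.

import numpy as np
import pandas as pd
from patsy import dmatrix

df = pd.DataFrame({"x": [1.0, np.nan, 3.0]}, index=["a", "b", "c"])

# The returned DataFrame keeps the surviving part of df's index, so the rows
# removed by NA_action="drop" fall out of a set difference.
mat = dmatrix("x", df, return_type="dataframe")
print(list(mat.index))                        # ['a', 'c']
print(list(df.index.difference(mat.index)))   # ['b']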
Exemplo n.º 24
0
def parse_formula(formula, data):
    na_action = NAAction(on_NA='raise', NA_types=[])
    if formula.count('~') == 1:
        dep, exog = dmatrices(formula,
                              data,
                              return_type='dataframe',
                              NA_action=na_action)
        endog = instr = None
        return dep, exog, endog, instr

    elif formula.count('~') != 2:
        raise ValueError('formula not understood. Must have 1 or 2 '
                         'occurrences of ~')

    blocks = [bl.strip() for bl in formula.strip().split('~')]
    if '[' not in blocks[1] or ']' not in blocks[2]:
        raise ValueError('formula not understood. Endogenous variables and '
                         'instruments must be segregated in a block that '
                         'starts with [ and ends with ].')

    dep = blocks[0].strip()
    exog, endog = [bl.strip() for bl in blocks[1].split('[')]
    instr, exog2 = [bl.strip() for bl in blocks[2].split(']')]
    if endog[0] == '+' or endog[-1] == '+':
        raise ValueError(
            'endogenous block must not start or end with +. This block was: {0}'
            .format(endog))
    if instr[0] == '+' or instr[-1] == '+':
        raise ValueError(
            'instrument block must not start or end with +. This block was: {0}'
            .format(instr))
    if exog2:
        exog += exog2
    exog = exog[:-1].strip() if exog[-1] == '+' else exog

    try:
        dep = dmatrix('0 + ' + dep,
                      data,
                      eval_env=2,
                      return_type='dataframe',
                      NA_action=na_action)
        exog = dmatrix('0 + ' + exog,
                       data,
                       eval_env=2,
                       return_type='dataframe',
                       NA_action=na_action)
        endog = dmatrix('0 + ' + endog,
                        data,
                        eval_env=2,
                        return_type='dataframe',
                        NA_action=na_action)
        instr = dmatrix('0 + ' + instr,
                        data,
                        eval_env=2,
                        return_type='dataframe',
                        NA_action=na_action)
    except Exception as e:
        raise type(e)(PARSING_ERROR.format(dep, exog, endog, instr) + str(e))

    return dep, exog, endog, instr
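A hedged usage sketch for the parser above (the column names are invented, and it assumes parse_formula and its module-level dependencies such as dmatrix, NAAction and PARSING_ERROR are available). The wrapper function exists only because parse_formula resolves names two frames up (eval_env=2), i.e. it expects to be called from inside a model constructor.

import numpy as np
import pandas as pd

data = pd.DataFrame(np.random.randn(100, 5),
                    columns=["y", "x1", "endog", "z1", "z2"])

def build_matrices():
    # Expected syntax: dependent ~ exogenous + [endogenous ~ instruments]
    return parse_formula("y ~ 1 + x1 + [endog ~ z1 + z2]", data)

# dep and exog are the dependent and exogenous design matrices; endog and
# instr hold the bracketed endogenous block and its instruments.
dep, exog, endog, instr = build_matrices()
print(list(endog.columns))   # ['endog']
print(list(instr.columns))   # ['z1', 'z2']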
Exemplo n.º 25
0
def test_categorical_to_int():
    from pytest import raises
    from patsy.missing import NAAction
    if have_pandas:
        s = pandas.Series(["a", "b", "c"], index=[10, 20, 30])
        c_pandas = categorical_to_int(s, ("a", "b", "c"), NAAction())
        assert np.all(c_pandas == [0, 1, 2])
        assert np.all(c_pandas.index == [10, 20, 30])
        # Input must be 1-dimensional
        raises(PatsyError, categorical_to_int, pandas.DataFrame({10: s}),
               ("a", "b", "c"), NAAction())
    if have_pandas_categorical:
        constructors = [pandas_Categorical_from_codes]
        if have_pandas_categorical_dtype:

            def Series_from_codes(codes, categories):
                c = pandas_Categorical_from_codes(codes, categories)
                return pandas.Series(c)

            constructors.append(Series_from_codes)
        for con in constructors:
            cat = con([1, 0, -1], ("a", "b"))
            conv = categorical_to_int(cat, ("a", "b"), NAAction())
            assert np.all(conv == [1, 0, -1])
            # Trust pandas NA marking
            cat2 = con([1, 0, -1], ("a", "None"))
            conv2 = categorical_to_int(cat2, ("a", "None"),
                                       NAAction(NA_types=["None"]))
            assert np.all(conv2 == [1, 0, -1])
            # But levels must match
            raises(PatsyError, categorical_to_int, con([1, 0], ("a", "b")),
                   ("a", "c"), NAAction())
            raises(PatsyError, categorical_to_int, con([1, 0], ("a", "b")),
                   ("b", "a"), NAAction())

    def t(data, levels, expected, NA_action=NAAction()):
        got = categorical_to_int(data, levels, NA_action)
        assert np.array_equal(got, expected)

    t(["a", "b", "a"], ("a", "b"), [0, 1, 0])
    t(np.asarray(["a", "b", "a"]), ("a", "b"), [0, 1, 0])
    t(np.asarray(["a", "b", "a"], dtype=object), ("a", "b"), [0, 1, 0])
    t([0, 1, 2], (1, 2, 0), [2, 0, 1])
    t(np.asarray([0, 1, 2]), (1, 2, 0), [2, 0, 1])
    t(np.asarray([0, 1, 2], dtype=float), (1, 2, 0), [2, 0, 1])
    t(np.asarray([0, 1, 2], dtype=object), (1, 2, 0), [2, 0, 1])
    t(["a", "b", "a"], ("a", "d", "z", "b"), [0, 3, 0])
    t([("a", 1), ("b", 0), ("a", 1)], (("a", 1), ("b", 0)), [0, 1, 0])

    raises(PatsyError, categorical_to_int, ["a", "b", "a"], ("a", "c"),
           NAAction())

    t(C(["a", "b", "a"]), ("a", "b"), [0, 1, 0])
    t(C(["a", "b", "a"]), ("b", "a"), [1, 0, 1])
    t(C(["a", "b", "a"], levels=["b", "a"]), ("b", "a"), [1, 0, 1])
    # Mismatch between C() levels and expected levels
    raises(PatsyError, categorical_to_int, C(["a", "b", "a"],
                                             levels=["a", "b"]), ("b", "a"),
           NAAction())

    # ndim == 0 is okay
    t("a", ("a", "b"), [0])
    t("b", ("a", "b"), [1])
    t(True, (False, True), [1])

    # ndim == 2 is disallowed
    raises(PatsyError, categorical_to_int,
           np.asarray([["a", "b"], ["b", "a"]]), ("a", "b"), NAAction())

    # levels must be hashable
    raises(PatsyError, categorical_to_int, ["a", "b"], ("a", "b", {}),
           NAAction())
    raises(PatsyError, categorical_to_int, ["a", "b", {}], ("a", "b"),
           NAAction())

    t(["b", None, np.nan, "a"], ("a", "b"), [1, -1, -1, 0],
      NAAction(NA_types=["None", "NaN"]))
    t(["b", None, np.nan, "a"], ("a", "b", None), [1, -1, -1, 0],
      NAAction(NA_types=["None", "NaN"]))
    t(["b", None, np.nan, "a"], ("a", "b", None), [1, 2, -1, 0],
      NAAction(NA_types=["NaN"]))

    # Smoke test for the branch that formats the ellipsized list of levels in
    # the error message:
    raises(PatsyError, categorical_to_int, ["a", "b", "q"],
           ("a", "b", "c", "d", "e", "f", "g", "h"), NAAction())
Exemplo n.º 26
0
    def from_formula(cls, formula, data, *, sigma=None, weights=None):
        """
        Parameters
        ----------
        formula : {str, dict-like}
            Either a string or a dictionary of strings where each value in
            the dictionary represents a single equation. See Notes for a
            description of the accepted syntax
        data : DataFrame
            Frame containing named variables
        sigma : array-like
            Pre-specified residual covariance to use in GLS estimation. If
            not provided, FGLS is implemented based on an estimate of sigma.
        weights : dict-like
            Dictionary-like object (e.g. a DataFrame) containing variable
            weights.  Each entry must have the same number of observations as
            data.  If an equation label is not a key in weights, the weights
            for that equation are set to unity.

        Returns
        -------
        model : SUR
            Model instance

        Notes
        -----
        Models can be specified in one of two ways. The first uses curly
        braces to encapsulate equations.  The second uses a dictionary
        where each key is an equation name.

        Examples
        --------
        The simplest format uses standard Patsy formulas for each equation
        in a dictionary.  Best practice is to use an Ordered Dictionary

        >>> import pandas as pd
        >>> import numpy as np
        >>> data = pd.DataFrame(np.random.randn(500, 4), columns=['y1', 'x1_1', 'y2', 'x2_1'])
        >>> from linearmodels.system import SUR
        >>> formula = {'eq1': 'y1 ~ 1 + x1_1', 'eq2': 'y2 ~ 1 + x2_1'}
        >>> mod = SUR.from_formula(formula, data)

        The second format uses curly braces {} to surround distinct equations

        >>> formula = '{y1 ~ 1 + x1_1} {y2 ~ 1 + x2_1}'
        >>> mod = SUR.from_formula(formula, data)

        It is also possible to include equation labels when using curly braces

        >>> formula = '{eq1: y1 ~ 1 + x1_1} {eq2: y2 ~ 1 + x2_1}'
        >>> mod = SUR.from_formula(formula, data)
        """
        na_action = NAAction(on_NA='raise', NA_types=[])
        if not isinstance(formula, (Mapping, str)):
            raise TypeError('formula must be a string or dictionary-like')

        missing_weight_keys = []
        eqns = OrderedDict()
        if isinstance(formula, Mapping):
            for key in formula:
                f = formula[key]
                f = '~ 0 +'.join(f.split('~'))
                dep, exog = dmatrices(f, data, return_type='dataframe',
                                      NA_action=na_action)
                eqns[key] = {'dependent': dep, 'exog': exog}
                if weights is not None:
                    if key in weights:
                        eqns[key]['weights'] = weights[key]
                    else:
                        missing_weight_keys.append(key)
            _missing_weights(missing_weight_keys)
            return SUR(eqns, sigma=sigma)

        formula = formula.replace('\n', ' ').strip()
        parts = formula.split('}')
        for i, part in enumerate(parts):
            base_key = None
            part = part.strip()
            if part == '':
                continue
            part = part.replace('{', '')
            if ':' in part.split('~')[0]:
                base_key, part = part.split(':', 1)
                key = base_key = base_key.strip()
                part = part.strip()
            f = '~ 0 +'.join(part.split('~'))
            dep, exog = dmatrices(f, data, return_type='dataframe',
                                  NA_action=na_action)
            if base_key is None:
                base_key = key = f.split('~')[0].strip()
            count = 0
            while key in eqns:
                key = base_key + '.{0}'.format(count)
                count += 1
            eqns[key] = {'dependent': dep, 'exog': exog}
            if weights is not None:
                if key in weights:
                    eqns[key]['weights'] = weights[key]
                else:
                    missing_weight_keys.append(key)

        _missing_weights(missing_weight_keys)

        return SUR(eqns, sigma=sigma)
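A sketch of the weights handling described in the docstring (random data and invented labels; only the dictionary form of the formula is shown).

import numpy as np
import pandas as pd
from collections import OrderedDict
from linearmodels.system import SUR

data = pd.DataFrame(np.random.randn(500, 4),
                    columns=["y1", "x1_1", "y2", "x2_1"])
formula = OrderedDict([("eq1", "y1 ~ 1 + x1_1"),
                       ("eq2", "y2 ~ 1 + x2_1")])

# Weights are matched to equations by label; "eq2" has no entry here, so per
# the docstring its weights are set to unity.
weights = {"eq1": pd.Series(np.ones(500))}
mod = SUR.from_formula(formula, data, weights=weights)
res = mod.fit()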