import warnings

import pandas as pd
import pytest

# NOTE: the import path below is an assumption; point it at wherever this
# project actually defines cast_to_series (and format_data, used in the
# get_parameters excerpt further down).
from risk_distributions import cast_to_series, format_data


def test_cast_to_series_indexed(reference, other):
    out_mean, out_sd = cast_to_series(reference, other)
    assert reference.equals(out_mean)
    assert reference.equals(out_sd)

    out_mean, out_sd = cast_to_series(other, reference)
    assert reference.equals(out_mean)
    assert reference.equals(out_sd)
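
# The fixtures used by these tests (`reference`, `other`, and the `mean`,
# `sd`, `val`, `null` arguments below) are not shown in this excerpt; the
# differing expectations across tests suggest per-test parametrization.
# A hypothetical sketch for the indexed case (illustrative values only):
#
#     @pytest.mark.parametrize('reference, other', [
#         (pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
#          pd.Series([1., 2., 3.], index=['a', 'b', 'c'])),
#     ])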


def test_cast_to_series_mismatched_index():
    reference = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
    other = pd.Series([1, 2, 3])

    with pytest.raises(ValueError, match='identically indexed'):
        cast_to_series(reference, other)

    with pytest.raises(ValueError, match='identically indexed'):
        cast_to_series(other, reference)


# NOTE: the following method is an excerpt from a distribution class;
# the `Parameters` and `Parameter` type aliases are defined elsewhere
# in the project.

    @classmethod
    def get_parameters(cls,
                       parameters: Parameters = None,
                       mean: Parameter = None,
                       sd: Parameter = None) -> pd.DataFrame:
        required_parameters = list(cls.expected_parameters +
                                   ('x_min', 'x_max'))
        if parameters is not None:
            if not (mean is None and sd is None):
                raise ValueError(
                    "You may supply either pre-calculated parameters or"
                    " mean and standard deviation but not both.")
            parameters = format_data(parameters, required_parameters,
                                     'parameters')

        else:
            if mean is None or sd is None:
                raise ValueError(
                    "You must supply either pre-calculated parameters or"
                    " both mean and standard deviation.")

            mean, sd = cast_to_series(mean, sd)

            # Fill with floats so the later float assignments don't fight
            # an integer column dtype.
            parameters = pd.DataFrame(0.,
                                      columns=required_parameters,
                                      index=mean.index)

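            # computable_parameter_index presumably picks out the rows whose
            # mean/sd can actually be fit (an assumption based on the name);
            # the remaining rows keep the 0.0 fill value.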
            computable = cls.computable_parameter_index(mean, sd)
            min_max = cls.compute_min_max(mean.loc[computable],
                                          sd.loc[computable])
            parameters.loc[computable, ['x_min', 'x_max']] = min_max
            # The scipy.stats distributions run optimization routines that
            # handle FloatingPointErrors, transforming them into
            # RuntimeWarnings. This gets noisy in our logs, so silence them
            # here.
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', RuntimeWarning)
                fit = cls._get_parameters(mean.loc[computable],
                                          sd.loc[computable],
                                          parameters.loc[computable, 'x_min'],
                                          parameters.loc[computable, 'x_max'])
                parameters.loc[computable, list(cls.expected_parameters)] = fit

        return parameters
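
        # Usage sketch (hypothetical: `SomeDistribution` stands in for any
        # concrete subclass defining expected_parameters, compute_min_max,
        # computable_parameter_index, and _get_parameters):
        #
        #     params = SomeDistribution.get_parameters(
        #         mean=pd.Series([2., 3.]), sd=pd.Series([.5, .75]))
        #     list(params.columns)  # expected_parameters + ['x_min', 'x_max']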


def test_cast_to_series_mismatched_length(reference, other):
    with pytest.raises(ValueError, match='same number of values'):
        cast_to_series(reference, other)

    with pytest.raises(ValueError, match='same number of values'):
        cast_to_series(other, reference)


def test_cast_to_series_nulls(val, null):
    with pytest.raises(ValueError, match='Empty data structure'):
        cast_to_series(val, null)

    with pytest.raises(ValueError, match='Empty data structure'):
        cast_to_series(null, val)
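
# `val` and `null` are presumably parametrized over a valid value paired with
# empty containers (e.g. [], {}, pd.Series(dtype=float)); the exact cases are
# not shown in this excerpt.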


def test_cast_to_series_array_like(mean, sd):
    expected_mean, expected_sd = pd.Series([1, 2, 3]), pd.Series([1, 2, 3])
    out_mean, out_sd = cast_to_series(mean, sd)
    assert expected_mean.equals(out_mean)
    assert expected_sd.equals(out_sd)


def test_cast_to_series_single_floats(mean, sd):
    expected_mean, expected_sd = pd.Series([1.]), pd.Series([1.])
    out_mean, out_sd = cast_to_series(mean, sd)
    assert expected_mean.equals(out_mean)
    assert expected_sd.equals(out_sd)
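

# For orientation, a minimal sketch of the contract the tests above pin down.
# This is a hypothetical implementation, NOT the project's actual
# cast_to_series:
def _cast_to_series_sketch(mean, sd):
    """Return (mean, sd) as two identically indexed pandas Series."""
    if mean is None or sd is None:
        raise ValueError('Empty data structure provided.')
    # Scalars broadcast to length-1 sequences.
    mean = [mean] if isinstance(mean, (int, float)) else mean
    sd = [sd] if isinstance(sd, (int, float)) else sd
    if len(mean) == 0 or len(sd) == 0:
        raise ValueError('Empty data structure provided.')
    if len(mean) != len(sd):
        raise ValueError('Inputs must have the same number of values.')
    if (isinstance(mean, pd.Series) and isinstance(sd, pd.Series)
            and not mean.index.equals(sd.index)):
        raise ValueError('Series must be identically indexed.')
    # If either argument carries an index, both outputs share it.
    index = (mean.index if isinstance(mean, pd.Series)
             else sd.index if isinstance(sd, pd.Series) else None)
    return pd.Series(list(mean), index=index), pd.Series(list(sd), index=index)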