def test_marshall_pyarrow_table_data(self):
        """Test that an error is raised when called with `pyarrow.Table` data."""
        df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
        proto = DataFrame()

        with self.assertRaises(StreamlitAPIException):
            data_frame.marshall_data_frame(pa.Table.from_pandas(df), proto)
Example #2
def _create_dataframe_msg(df, id=1) -> ForwardMsg:
    msg = ForwardMsg()
    msg.metadata.delta_path[:] = make_delta_path(RootContainer.SIDEBAR, (), id)
    data_frame.marshall_data_frame(df, msg.delta.new_element.data_frame)
    return msg
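
# Hedged usage sketch (not part of the original snippet): it assumes pandas is
# available as `pd` and that ForwardMsg, RootContainer, make_delta_path and
# data_frame are imported exactly as the helper above expects. The `id`
# argument becomes the last entry of the message's delta_path.
example_df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
example_msg = _create_dataframe_msg(example_df, id=7)
assert list(example_msg.metadata.delta_path) == list(
    make_delta_path(RootContainer.SIDEBAR, (), 7))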
Example #3
TEXT_DELTA_MSG1.delta.new_element.text.body = "text1"
TEXT_DELTA_MSG1.metadata.delta_path[:] = make_delta_path(
    RootContainer.MAIN, (), 0)

TEXT_DELTA_MSG2 = ForwardMsg()
TEXT_DELTA_MSG2.delta.new_element.text.body = "text2"
TEXT_DELTA_MSG2.metadata.delta_path[:] = make_delta_path(
    RootContainer.MAIN, (), 0)

ADD_BLOCK_MSG = ForwardMsg()
ADD_BLOCK_MSG.metadata.delta_path[:] = make_delta_path(RootContainer.MAIN, (),
                                                       0)

DF_DELTA_MSG = ForwardMsg()
legacy_data_frame.marshall_data_frame({
    "col1": [0, 1, 2],
    "col2": [10, 11, 12]
}, DF_DELTA_MSG.delta.new_element.data_frame)
DF_DELTA_MSG.metadata.delta_path[:] = make_delta_path(RootContainer.MAIN, (),
                                                      0)

ADD_ROWS_MSG = ForwardMsg()
legacy_data_frame.marshall_data_frame({
    "col1": [3, 4, 5],
    "col2": [13, 14, 15]
}, ADD_ROWS_MSG.delta.add_rows.data)
ADD_ROWS_MSG.metadata.delta_path[:] = make_delta_path(RootContainer.MAIN, (),
                                                      0)


class ReportQueueTest(unittest.TestCase):
    def test_simple_enqueue(self):
Example #4
    def _legacy_add_rows(self, data=None, **kwargs):
        """Concatenate a dataframe to the bottom of the current one.

        Parameters
        ----------
        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
        or None
            Table to concat. Optional.

        **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
            The named dataset to concat. Optional. You can only pass in 1
            dataset (including the one in the data parameter).

        Example
        -------
        >>> df1 = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table = st._legacy_table(df1)
        >>>
        >>> df2 = pd.DataFrame(
        ...    np.random.randn(50, 20),
        ...    columns=('col %d' % i for i in range(20)))
        ...
        >>> my_table._legacy_add_rows(df2)
        >>> # Now the table shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        You can do the same thing with plots. For example, if you want to add
        more data to a line chart:

        >>> # Assuming df1 and df2 from the example above still exist...
        >>> my_chart = st._legacy_line_chart(df1)
        >>> my_chart._legacy_add_rows(df2)
        >>> # Now the chart shown in the Streamlit app contains the data for
        >>> # df1 followed by the data for df2.

        And for plots whose datasets are named, you can pass the data with a
        keyword argument where the key is the name:

        >>> my_chart = st._legacy_vega_lite_chart({
        ...     'mark': 'line',
        ...     'encoding': {'x': 'a', 'y': 'b'},
        ...     'datasets': {
        ...       'some_fancy_name': df1,  # <-- named dataset
        ...      },
        ...     'data': {'name': 'some_fancy_name'},
        ... })
        >>> my_chart._legacy_add_rows(some_fancy_name=df2)  # <-- name used as keyword

        """
        if self._root_container is None or self._cursor is None:
            return self

        if not self._cursor.is_locked:
            raise StreamlitAPIException(
                "Only existing elements can `add_rows`.")

        # Accept syntax st._legacy_add_rows(df).
        if data is not None and len(kwargs) == 0:
            name = ""
        # Accept syntax st._legacy_add_rows(foo=df).
        elif len(kwargs) == 1:
            name, data = kwargs.popitem()
        # Raise error otherwise.
        else:
            raise StreamlitAPIException(
                "Wrong number of arguments to add_rows()."
                "Command requires exactly one dataset")

        # When doing _legacy_add_rows on an element that does not already have data
        # (for example, st._legacy_line_chart() without any args), call the original
        # st._legacy_foo() element with new data instead of doing a _legacy_add_rows().
        if (self._cursor.props["delta_type"]
                in DELTA_TYPES_THAT_MELT_DATAFRAMES
                and self._cursor.props["last_index"] is None):
            # IMPORTANT: This assumes delta types and st method names always
            # match!
            # delta_type doesn't have any prefix, but st_method_name starts with "_legacy_".
            st_method_name = "_legacy_" + self._cursor.props["delta_type"]
            st_method = getattr(self, st_method_name)
            st_method(data, **kwargs)
            return

        data, self._cursor.props["last_index"] = _maybe_melt_data_for_add_rows(
            data, self._cursor.props["delta_type"],
            self._cursor.props["last_index"])

        msg = ForwardMsg_pb2.ForwardMsg()
        msg.metadata.delta_path[:] = self._cursor.delta_path

        import streamlit.elements.legacy_data_frame as data_frame

        data_frame.marshall_data_frame(data, msg.delta.add_rows.data)

        if name:
            msg.delta.add_rows.name = name
            msg.delta.add_rows.has_name = True

        _enqueue_message(msg)

        return self
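
# Hedged illustration (not from the original file): the argument resolution
# used at the top of _legacy_add_rows, pulled out as a standalone helper so
# the accepted call shapes are easy to see. Assumes StreamlitAPIException is
# imported as in the method above; the helper name is made up for this sketch.
def _resolve_add_rows_args(data=None, **kwargs):
    if data is not None and len(kwargs) == 0:
        return "", data            # st._legacy_add_rows(df)      -> unnamed dataset
    elif len(kwargs) == 1:
        return kwargs.popitem()    # st._legacy_add_rows(foo=df)  -> dataset named "foo"
    else:
        raise StreamlitAPIException(
            "Wrong number of arguments to add_rows(). "
            "Command requires exactly one dataset")
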
def marshall(proto, data=None, spec=None, use_container_width=False, **kwargs):
    """Construct a Vega-Lite chart object.

    See DeltaGenerator._legacy_vega_lite_chart for docs.
    """
    # Support passing data inside spec['datasets'] and spec['data'].
    # (The data gets pulled out of the spec dict later on.)
    if isinstance(data, dict) and spec is None:
        spec = data
        data = None

    # Support passing no spec arg, but filling it with kwargs.
    # Example:
    #   marshall(proto, baz='boz')
    if spec is None:
        spec = dict()
    else:
        # Clone the spec dict, since we may be mutating it.
        spec = dict(spec)

    # Support passing in kwargs. Example:
    #   marshall(proto, {foo: 'bar'}, baz='boz')
    if len(kwargs):
        # Merge spec with unflattened kwargs, where kwargs take precedence.
        # This only works for string keys, but kwarg keys are strings anyways.
        spec = dict(spec, **dicttools.unflatten(kwargs, _CHANNELS))

    if len(spec) == 0:
        raise ValueError("Vega-Lite charts require a non-empty spec dict.")

    if "autosize" not in spec:
        spec["autosize"] = {"type": "fit", "contains": "padding"}

    # Pull data out of spec dict when it's in a 'dataset' key:
    #   marshall(proto, {datasets: {foo: df1, bar: df2}, ...})
    if "datasets" in spec:
        for k, v in spec["datasets"].items():
            dataset = proto.datasets.add()
            dataset.name = str(k)
            dataset.has_name = True
            data_frame.marshall_data_frame(v, dataset.data)
        del spec["datasets"]

    # Pull data out of spec dict when it's in a top-level 'data' key:
    #   marshall(proto, {data: df})
    #   marshall(proto, {data: {values: df, ...}})
    #   marshall(proto, {data: {url: 'url'}})
    #   marshall(proto, {data: {name: 'foo'}})
    if "data" in spec:
        data_spec = spec["data"]

        if isinstance(data_spec, dict):
            if "values" in data_spec:
                data = data_spec["values"]
                del data_spec["values"]
        else:
            data = data_spec
            del spec["data"]

    proto.spec = json.dumps(spec)
    proto.use_container_width = use_container_width

    if data is not None:
        data_frame.marshall_data_frame(data, proto.data)
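
# Hedged usage sketch (not part of the original snippet): assumes pandas is
# available as `pd` and that the proto being filled is Streamlit's
# VegaLiteChart message (the import path below is an assumption based on the
# fields the function sets). It exercises two of the call shapes the comments
# above describe: data passed separately, and a named dataset inside the spec.
from streamlit.proto.VegaLiteChart_pb2 import VegaLiteChart

sketch_df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# Spec plus separate data: the rows are marshalled into proto.data.
chart_proto = VegaLiteChart()
marshall(chart_proto, sketch_df, {"mark": "line", "encoding": {"x": "a", "y": "b"}})

# Named dataset embedded in the spec: each entry lands in proto.datasets and
# the spec keeps only the {"name": ...} reference.
named_proto = VegaLiteChart()
marshall(named_proto, spec={
    "mark": "line",
    "encoding": {"x": "a", "y": "b"},
    "datasets": {"my_data": sketch_df},
    "data": {"name": "my_data"},
})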