Example 1
    def build(self, parent):
        geo = parent.data[[self.lat_col, self.lon_col]].dropna()
        geo.columns = ["lat", "lon"]
        col_types = grid_columns(geo)
        f = grid_formatter(col_types, nan_display=None)
        return_data = f.format_lists(geo)
        return return_data, self._build_code()
Example 2
def _build_timeseries_chart_data(name,
                                 df,
                                 cols,
                                 min=None,
                                 max=None,
                                 sub_group=None):
    base_cols = ['date']
    if sub_group in df:
        dfs = df.groupby(sub_group)
        base_cols.append(sub_group)
    else:
        dfs = [('', df)]

    for sub_group_val, grp in dfs:
        for col in cols:
            key = '{0}:{1}:{2}'.format(
                sub_group_val if isinstance(sub_group_val, string_types) else
                '{0:.0f}'.format(sub_group_val), name, col)
            data = grp[base_cols + [col]].dropna(subset=[col])
            f = grid_formatter(grid_columns(data),
                               overrides={
                                   'D': lambda f, i, c: f.add_timestamp(i, c)
                               })
            data = f.format_dicts(data.itertuples())
            data = dict(data=data,
                        min=min or grp[col].min(),
                        max=max or grp[col].max())
            yield key, data
Example 3
def load_describe(column_series, additional_aggs=None):
    """
    Helper function for grabbing the output from :meth:`pandas:pandas.Series.describe` in a JSON serializable format

    :param column_series: data to describe
    :type column_series: :class:`pandas:pandas.Series`
    :param additional_aggs: optional list of extra aggregation names (e.g. 'sum', 'mode') to append to the output
    :return: JSON serializable dictionary of the output from calling :meth:`pandas:pandas.Series.describe`
    """
    desc = column_series.describe().to_frame().T
    if additional_aggs:
        for agg in additional_aggs:
            if agg == 'mode':
                mode = column_series.mode().values
                desc['mode'] = np.nan if len(mode) > 1 else mode[0]
                continue
            desc[agg] = getattr(column_series, agg)()
    desc_f_overrides = {
        'I': lambda f, i, c: f.add_int(i, c, as_string=True),
        'F': lambda f, i, c: f.add_float(i, c, precision=4, as_string=True),
    }
    desc_f = grid_formatter(grid_columns(desc), nan_display='nan', overrides=desc_f_overrides)
    desc = desc_f.format_dict(next(desc.itertuples(), None))
    if 'count' in desc:
        # pandas always returns 'count' as a float and it adds useless decimal points
        desc['count'] = desc['count'].split('.')[0]
    return desc
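
A minimal usage sketch for the helper above (the series values are made up, and it assumes load_describe plus the grid helpers it calls are in scope):

import pandas as pd

s = pd.Series([1, 2, 2, 4], name='vals')
desc = load_describe(s, additional_aggs=['sum'])
# keys mirror pandas describe(), rendered as strings by the 'I'/'F' overrides,
# e.g. desc['count'] == '4' and desc['mean'] is the string form of 2.25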
Example 4
    def build(self, parent):
        s = parent.data[parent.selected_col]
        if parent.classifier == "D":
            s = apply(s, json_timestamp)

        qq_x, qq_y = sts.probplot(s, dist="norm", fit=False)
        qq = pd.DataFrame(dict(x=qq_x, y=qq_y))
        f = grid_formatter(grid_columns(qq), nan_display=None)
        return_data = f.format_lists(qq)

        trend_line = px.scatter(x=qq_x, y=qq_y, trendline="ols").data[1]
        trend_line = pd.DataFrame(dict(x=trend_line["x"], y=trend_line["y"]))
        f = grid_formatter(grid_columns(trend_line), nan_display=None)
        trend_line = f.format_lists(trend_line)
        return_data["x2"] = trend_line["x"]
        return_data["y2"] = trend_line["y"]
        return return_data, self._build_code(parent)
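
The trendline extraction above relies on plotly express appending the fitted OLS line as a second trace; a standalone sketch of that trick (trendline="ols" requires statsmodels):

import plotly.express as px

fig = px.scatter(x=[1, 2, 3, 4], y=[1.1, 2.3, 2.9, 4.2], trendline="ols")
trend = fig.data[1]  # data[0] is the scatter points, data[1] the fitted line
print(trend["x"], trend["y"])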
Example 5
def get_correlations(data_id):
    """
    :class:`flask:flask.Flask` route which gathers Pearson correlations against all combinations of columns with
    numeric data using :meth:`pandas:pandas.DataFrame.corr`

    On large datasets with no :attr:`numpy:numpy.nan` data this code will use :meth:`numpy:numpy.corrcoef`
    for speed purposes

    :param data_id: integer string identifier for a D-Tale process's data
    :type data_id: str
    :param query: string from flask.request.args['query'] which is applied to DATA using the query() function
    :returns: JSON {
        data: [{column: col1, col1: 1.0, col2: 0.99, colN: 0.45},...,{column: colN, col1: 0.34, col2: 0.88, colN: 1.0}],
    } or {error: 'Exception message', traceback: 'Exception stacktrace'}
    """
    try:
        query = get_str_arg(request, 'query')
        data = DATA[data_id]
        data = data.query(query) if query is not None else data

        valid_corr_cols = []
        valid_date_cols = []
        rolling = False
        for col_info in DTYPES[data_id]:
            name, dtype = map(col_info.get, ['name', 'dtype'])
            dtype = classify_type(dtype)
            if dtype in ['I', 'F']:
                valid_corr_cols.append(name)
            elif dtype == 'D':
                # even if a datetime column exists, we need to make sure that there is enough data for a date
                # to warrant a correlation, https://github.com/man-group/dtale/issues/43
                date_counts = data[name].dropna().value_counts()
                if len(date_counts[date_counts > 1]) > 1:
                    valid_date_cols.append(name)
                elif date_counts.eq(1).all():
                    valid_date_cols.append(name)
                    rolling = True

        if data[valid_corr_cols].isnull().values.any():
            data = data.corr(method='pearson')
        else:
            # using pandas.corr proved to be quite slow on large datasets so I moved to numpy:
            # https://stackoverflow.com/questions/48270953/pandas-corr-and-corrwith-very-slow
            data = np.corrcoef(data[valid_corr_cols].values, rowvar=False)
            data = pd.DataFrame(data,
                                columns=valid_corr_cols,
                                index=valid_corr_cols)

        data.index.name = str('column')
        data = data.reset_index()
        col_types = grid_columns(data)
        f = grid_formatter(col_types, nan_display=None)
        return jsonify(data=f.format_dicts(data.itertuples()),
                       dates=valid_date_cols,
                       rolling=rolling)
    except BaseException as e:
        return jsonify(
            dict(error=str(e), traceback=str(traceback.format_exc())))
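
A quick check of the numpy fallback used above: when no NaNs are present, numpy.corrcoef matches DataFrame.corr(method='pearson') while being much faster on large frames (random data, for illustration only):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.rand(10000, 4), columns=list('abcd'))
fast = pd.DataFrame(np.corrcoef(df.values, rowvar=False),
                    columns=df.columns, index=df.columns)
assert np.allclose(fast, df.corr(method='pearson'))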
Example 6
def get_scatter():
    """
    Flask route which returns data used in correlation of two columns for scatter chart

    :param query: string from flask.request.args['query'] which is applied to DATA using the query() function
    :param cols: comma-separated string from flask.request.args['cols'] containing names of two columns in dataframe
    :param dateCol: string from flask.request.args['dateCol'] with name of date-type column in dataframe for timeseries
    :param date: string from flask.request.args['date'] date value in dateCol to filter dataframe to
    :returns: JSON {
        data: [{col1: 0.123, col2: 0.123, index: 1},...,{col1: 0.123, col2: 0.123, index: N}],
        stats: {
            correlated: 50,
            only_in_s0: 1,
            only_in_s1: 2,
            pearson: 0.987,
            spearman: 0.879,
        },
        x: col1,
        y: col2
    } or {error: 'Exception message', traceback: 'Exception stacktrace'}
    """
    cols = get_str_arg(request, 'cols')
    cols = cols.split(',')
    query = get_str_arg(request, 'query')
    date = get_str_arg(request, 'date')
    date_col = get_str_arg(request, 'dateCol')
    try:
        data = DATA[get_port()]
        data = data[data[date_col] == date] if date else data
        if query:
            data = data.query(query)

        data = data[list(set(cols))]
        # count rows populated in only one of the two columns *before* dropping
        # NaNs, otherwise these counts would always come back as zero
        only_in_s0 = len(data[data[cols[0]].notnull() & data[cols[1]].isnull()])
        only_in_s1 = len(data[data[cols[1]].notnull() & data[cols[0]].isnull()])
        data = data.dropna(how='any')
        data[str('index')] = data.index
        s0 = data[cols[0]]
        s1 = data[cols[1]]
        pearson = s0.corr(s1, method='pearson')
        spearman = s0.corr(s1, method='spearman')
        stats = dict(pearson='N/A' if pd.isnull(pearson) else pearson,
                     spearman='N/A' if pd.isnull(spearman) else spearman,
                     correlated=len(data),
                     only_in_s0=only_in_s0,
                     only_in_s1=only_in_s1)

        if len(data) > 15000:
            return jsonify(
                stats=stats,
                error='Dataset exceeds 15,000 records, cannot render scatter. Please apply filter...'
            )
        f = grid_formatter(grid_columns(data))
        data = f.format_dicts(data.itertuples())
        return jsonify(data=data, x=cols[0], y=cols[1], stats=stats)
    except BaseException as e:
        return jsonify(
            dict(error=str(e), traceback=str(traceback.format_exc())))
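
The stats block boils down to two Series.corr calls; a minimal illustration with invented values:

import pandas as pd

s0 = pd.Series([1, 2, 3, 4, 5])
s1 = pd.Series([1.1, 1.9, 3.2, 3.8, 5.4])
print(s0.corr(s1, method='pearson'))   # linear correlation
print(s0.corr(s1, method='spearman'))  # rank correlation, 1.0 here since ranks agree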
Example 7
    def build(self, parent):
        code = [
            "s = df[~pd.isnull(df['{col}'])]['{col}']".format(col=parent.selected_col)
        ]
        s, cleaner_code = handle_cleaners(
            parent.data[parent.selected_col], self.cleaners
        )
        code += cleaner_code
        hist = self.build_hist(s, code)

        if self.ordinal_col is not None:
            ordinal_data, ordinal_code = self.setup_ordinal_data(parent)
            code += ordinal_code
            hist["ordinal"] = ordinal_data
            hist.index.name = "labels"
            hist = hist.reset_index().sort_values("ordinal")
            code += [
                "chart['ordinal'] = ordinal_data",
                "chart.index.name = 'labels'",
                "chart = chart.reset_index().sort_values('ordinal')",
            ]
        else:
            hist.index.name = "labels"
            hist = hist.reset_index().sort_values(
                ["data", "labels"], ascending=[False, True]
            )
            code += [
                "chart.index.name = 'labels'",
                "chart = chart.reset_index().sort_values(['data', 'labels'], ascending=[False, True])",
            ]
        hist, top, top_code = handle_top(hist, self.top)
        code += top_code
        col_types = grid_columns(hist)
        f = grid_formatter(col_types, nan_display=None)
        return_data = f.format_lists(hist)
        return_data["top"] = top

        layout = self.setup_chart_layout(parent)
        code.append(
            "charts = [go.Bar(x=chart['labels'].values, y=chart['data'].values, name='Frequency')]"
        )
        if self.ordinal_col:
            code.append(
                (
                    "charts.append(go.Scatter(\n"
                    "\tx=chart['labels'].values, y=chart['ordinal'].values, yaxis='y2',\n"
                    "\tname='{} ({})', {}\n"
                    "))"
                ).format(self.ordinal_col, self.ordinal_agg, LINE_CFG)
            )
        code.append(
            "figure = go.Figure(data=charts, layout=go.{layout})".format(layout=layout)
        )
        return return_data, code
Example 8
    def build(self, parent):
        s = parent.data[parent.selected_col]
        if parent.classifier == "D":
            s = apply(s, json_timestamp)

        qq_x, qq_y = sts.probplot(s, dist="norm", fit=False)
        qq = pd.DataFrame(dict(x=qq_x, y=qq_y))
        f = grid_formatter(grid_columns(qq), nan_display=None)
        return_data = dict(data=f.format_dicts(qq.itertuples()))
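        # each f.fmts entry appears to end with the raw value-formatting callable
        # for its column; the first column's formatter is reused for min/max below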
        return_data["min"] = f.fmts[0][-1](qq.min()[0].min(), None)
        return_data["max"] = f.fmts[0][-1](qq.max()[0].max(), None)
        return return_data, self._build_code(parent)
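
For reference, scipy's probplot with fit=False returns just the theoretical quantiles and ordered sample values used as x/y above (sample data invented):

import pandas as pd
import scipy.stats as sts

s = pd.Series([0.2, 1.4, 0.9, 2.1, 1.7])
qq_x, qq_y = sts.probplot(s, dist="norm", fit=False)
# qq_x: theoretical normal quantiles, qq_y: the sorted sample values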
Example 9
    def build(self, parent):
        hist = parent.data.groupby(self.category_col)[[parent.selected_col]].agg(
            self.aggs
        )
        hist.columns = hist.columns.droplevel(0)
        hist.columns = ["count", "data"]
        if self.category_agg == "pctsum":
            hist["data"] = hist["data"] / hist["data"].sum()
        hist.index.name = "labels"
        hist = hist.reset_index()
        hist, top, top_code = handle_top(hist, self.top)
        f = grid_formatter(grid_columns(hist), nan_display=None)
        return_data = f.format_lists(hist)
        return_data["top"] = top
        return return_data, self._build_code(parent, top_code)
Example 10
    def check(self, df):
        group = self.cfg.get("group")
        # count rows per group key; any count > 1 flags that key as duplicated
        duplicates = df[group].reset_index().groupby(group).count()
        duplicates = duplicates.iloc[:, 0]
        duplicates = duplicates[duplicates > 1]
        duplicate_counts = duplicates.values
        duplicates = duplicates.reset_index()[group]
        duplicates = grid_formatter(
            grid_columns(duplicates), as_string=True
        ).format_lists(duplicates)
        check_data = {
            ", ".join([duplicates[col][i] for col in group]): dict(
                count=int(ct), filter=[duplicates[col][i] for col in group]
            )
            for i, ct in enumerate(duplicate_counts)
        }
        return check_data
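
The core of check() is a groupby/count over the configured columns; a toy illustration of the idiom:

import pandas as pd

df = pd.DataFrame(dict(a=[1, 1, 2], b=['x', 'x', 'y']))
counts = df[['a', 'b']].reset_index().groupby(['a', 'b']).count().iloc[:, 0]
print(counts[counts > 1])  # only the (1, 'x') key occurs more than once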
Example 11
def build_formatters(df):
    """
    Helper around :meth:`dtale.utils.grid_formatter` that will build a formatter for the data being fed into a chart
    as well as a formatter for the min/max values for each column used in the chart data.

    :param df: dataframe which contains column names and data types for formatters
    :type df: :class:`pandas:pandas.DataFrame`
    :return: json formatters for chart data and min/max values for each column used in the chart
    :rtype: (:class:`dtale.utils.JSONFormatter`, :class:`dtale.utils.JSONFormatter`)
    """
    cols = grid_columns(df)
    data_f = grid_formatter(cols, nan_display=None)
    overrides = {"F": lambda f, i, c: f.add_float(i, c, precision=2)}
    range_f = grid_formatter(cols, overrides=overrides, nan_display=None)
    return data_f, range_f
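
A sketch of one way these two formatters might be applied (the frame is hypothetical; it mirrors the format_dicts/format_dict patterns from the other examples):

import pandas as pd

df = pd.DataFrame(dict(x=[1.23456, 2.34567], y=[10, 20]))
data_f, range_f = build_formatters(df)
chart = data_f.format_dicts(df.itertuples())  # full-precision records
ranges = range_f.format_dict(next(df.min().to_frame().T.itertuples(), None))  # floats trimmed to 2 decimals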
Example 12
def load_describe(column_series, additional_aggs=None):
    """
    Helper function for grabbing the output from :meth:`pandas:pandas.Series.describe` in a JSON serializable format

    :param column_series: data to describe
    :type column_series: :class:`pandas:pandas.Series`
    :param additional_aggs: optional list of extra aggregation names (e.g. 'sum', 'mode') to append to the output
    :return: tuple of the JSON serializable dictionary of the output from calling
             :meth:`pandas:pandas.Series.describe` and the list of pandas code strings used to build it
    """
    desc = column_series.describe().to_frame().T
    code = [
        "# main statistics",
        "stats = df['{col}'].describe().to_frame().T".format(col=column_series.name),
    ]
    if additional_aggs:
        for agg in additional_aggs:
            if agg == "mode":
                mode = column_series.mode().values
                desc["mode"] = np.nan if len(mode) > 1 else mode[0]
                code.append(
                    (
                        "# mode\n"
                        "mode = df['{col}'].mode().values\n"
                        "stats['mode'] = np.nan if len(mode) > 1 else mode[0]"
                    ).format(col=column_series.name)
                )
                continue
            desc[agg] = getattr(column_series, agg)()
            code.append(
                "# {agg}\nstats['{agg}'] = df['{col}'].{agg}()".format(
                    col=column_series.name, agg=agg
                )
            )
    desc_f_overrides = {
        "I": lambda f, i, c: f.add_int(i, c, as_string=True),
        "F": lambda f, i, c: f.add_float(i, c, precision=4, as_string=True),
    }
    desc_f = grid_formatter(
        grid_columns(desc), nan_display="nan", overrides=desc_f_overrides
    )
    desc = desc_f.format_dict(next(desc.itertuples(), None))
    if "count" in desc:
        # pandas always returns 'count' as a float and it adds useless decimal points
        desc["count"] = desc["count"].split(".")[0]
    desc["total_count"] = json_int(len(column_series), as_string=True)
    missing_ct = column_series.isnull().sum()
    desc["missing_pct"] = json_float((missing_ct / len(column_series) * 100).round(2))
    desc["missing_ct"] = json_int(missing_ct, as_string=True)
    return desc, code
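
Compared with the earlier variants, this version also returns the pandas code used to build each statistic, which a caller can surface for reproducibility ('col' below is a hypothetical column name):

desc, code = load_describe(df['col'], additional_aggs=['sum'])
print('\n'.join(code))
# prints '# main statistics', the describe() snippet and
# "# sum" / "stats['sum'] = df['col'].sum()"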
Example 13
def get_correlations():
    """
    Flask route which gathers Pearson correlations against all combinations of columns with numeric data
    using :meth:`pandas:pandas.DataFrame.corr`

    On large datasets with no :attr:`numpy:numpy.nan` data this code will use :meth:`numpy:numpy.corrcoef`
    for speed purposes

    :param query: string from flask.request.args['query'] which is applied to DATA using the query() function
    :returns: JSON {
        data: [{column: col1, col1: 1.0, col2: 0.99, colN: 0.45},...,{column: colN, col1: 0.34, col2: 0.88, colN: 1.0}],
    } or {error: 'Exception message', traceback: 'Exception stacktrace'}
    """
    try:
        query = get_str_arg(request, 'query')
        port = get_port()
        data = DATA[port]
        data = data.query(query) if query is not None else data

        valid_corr_cols = []
        valid_date_cols = []
        for col_info in DTYPES[port]:
            name, dtype = map(col_info.get, ['name', 'dtype'])
            dtype = classify_type(dtype)
            if dtype in ['I', 'F']:
                valid_corr_cols.append(name)
            elif dtype == 'D' and len(data[name].dropna().unique()) > 1:
                valid_date_cols.append(name)

        if data[valid_corr_cols].isnull().values.any():
            data = data.corr(method='pearson')
        else:
            # using pandas.corr proved to be quite slow on large datasets so I moved to numpy:
            # https://stackoverflow.com/questions/48270953/pandas-corr-and-corrwith-very-slow
            data = np.corrcoef(data[valid_corr_cols].values, rowvar=False)
            data = pd.DataFrame(data,
                                columns=valid_corr_cols,
                                index=valid_corr_cols)

        data.index.name = str('column')
        data = data.reset_index()
        col_types = grid_columns(data)
        f = grid_formatter(col_types, nan_display=None)
        return jsonify(data=f.format_dicts(data.itertuples()),
                       dates=valid_date_cols)
    except BaseException as e:
        return jsonify(
            dict(error=str(e), traceback=str(traceback.format_exc())))
Example 14
def load_describe(column_series):
    """
    Helper function for grabbing the output from :meth:`pandas:pandas.Series.describe` in a JSON serializable format

    :param column_series: data to describe
    :type column_series: :class:`pandas:pandas.Series`
    :return: JSON serializable dictionary of the output from calling :meth:`pandas:pandas.Series.describe`
    """
    desc = column_series.describe().to_frame().T
    desc_f_overrides = {
        'I': lambda f, i, c: f.add_int(i, c, as_string=True),
        'F': lambda f, i, c: f.add_float(i, c, precision=4, as_string=True),
    }
    desc_f = grid_formatter(grid_columns(desc),
                            nan_display='N/A',
                            overrides=desc_f_overrides)
    desc = desc_f.format_dict(next(desc.itertuples(), None))
    if 'count' in desc:
        # pandas always returns 'count' as a float and it adds useless decimal points
        desc['count'] = desc['count'].split('.')[0]
    return desc
Example 15
def get_correlations():
    """
    Flask route which gathers Pearson correlations against all combinations of columns with numeric data
    using pandas.DataFrame.corr

    :param query: string from flask.request.args['query'] which is applied to DATA using the query() function
    :returns: JSON {
        data: [{column: col1, col1: 1.0, col2: 0.99, colN: 0.45},...,{column: colN, col1: 0.34, col2: 0.88, colN: 1.0}],
    } or {error: 'Exception message', traceback: 'Exception stacktrace'}
    """
    try:
        query = get_str_arg(request, 'query')
        data = DATA.query(query) if query is not None else DATA
        data = data.corr(method='pearson')
        data.index.name = 'column'
        data = data.reset_index()
        col_types = grid_columns(data)
        f = grid_formatter(col_types, nan_display=None)
        return jsonify(data=f.format_dicts(data.itertuples()))
    except BaseException as e:
        return jsonify(
            dict(error=str(e), traceback=str(traceback.format_exc())))
Example 16
def _build_timeseries_chart_data(name,
                                 df,
                                 cols,
                                 min=None,
                                 max=None,
                                 sub_group=None):
    """
    Helper function for grabbing JSON serialized data for one or many date groupings

    :param name: base name of series in chart
    :param df: data frame to be grouped
    :param cols: columns whose data is to be returned
    :param min: optional hardcoded minimum to be returned for all series
    :param max: optional hardcoded maximum to be returned for all series
    :param sub_group: optional sub group to be used in addition to date
    :return: generator of string keys and JSON serialized dictionaries
    """
    base_cols = ['date']
    if sub_group in df:
        dfs = df.groupby(sub_group)
        base_cols.append(sub_group)
    else:
        dfs = [('', df)]

    for sub_group_val, grp in dfs:
        for col in cols:
            key = '{0}:{1}:{2}'.format(
                sub_group_val if isinstance(sub_group_val, string_types) else
                '{0:.0f}'.format(sub_group_val), name, col)
            data = grp[base_cols + [col]].dropna(subset=[col])
            f = grid_formatter(grid_columns(data),
                               overrides={
                                   'D': lambda f, i, c: f.add_timestamp(i, c)
                               })
            data = f.format_dicts(data.itertuples())
            data = dict(data=data,
                        min=min or grp[col].min(),
                        max=max or grp[col].max())
            yield key, data
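
Since the helper is a generator of (key, payload) pairs, callers can collect its output into a dict; a minimal sketch with an invented frame (a 'date' column is required):

import pandas as pd

df = pd.DataFrame(dict(date=pd.date_range('2020-01-01', periods=5),
                       price=[1.0, 2.0, 1.5, 3.0, 2.5]))
series = dict(_build_timeseries_chart_data('stock', df, ['price']))
# one entry per (sub_group, col) combination; each payload carries data/min/max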
Example 17
def build_formatters(df):
    cols = grid_columns(df)
    data_f = grid_formatter(cols, nan_display=None)
    overrides = {'F': lambda f, i, c: f.add_float(i, c, precision=2)}
    range_f = grid_formatter(cols, overrides=overrides, nan_display=None)
    return data_f, range_f
Example 18
def find_coverage():
    """
    Flask route which returns coverage information (counts) for a column grouped by other column(s)

    :param query: string from flask.request.args['query'] which is applied to DATA using the query() function
    :param col: string from flask.request.args['col'] containing name of a column in your dataframe
    :param filters: (deprecated) JSON string from flask.request.args['filters'] with filtering information from group
           drilldown [
        {name: col1, prevFreq: Y, freq: Q, date: YYYY-MM-DD},
        ...
        {name: col1, prevFreq: D, freq: W, date: YYYY-MM-DD},
    ]
    :param group: JSON string from flask.request.args['group'] containing grouping logic in this structure [
        {name: col1} or {name: date_col1, freq: [D,W,M,Q,Y]}
    ]
    :returns: JSON {
        data: {[col]: [count1,count2,...,countN]},
        labels: [{group_col1: gc1_v1, group_col2: gc2_v1},...,{group_col1: gc1_vN, group_col2: gc2_vN}],
        success: True
    } or {error: 'Exception message', traceback: 'Exception stacktrace', success: False}
    """
    def filter_data(df, req, groups, query=None):
        filters = get_str_arg(req, 'filters')
        if not filters:
            return df.query(query or 'index == index'), groups, ''
        filters = json.loads(filters)
        col, prev_freq, freq, end = map(filters[-1].get,
                                        ['name', 'prevFreq', 'freq', 'date'])
        start = DATE_RANGES[prev_freq](pd.Timestamp(end)).strftime('%Y%m%d')
        range_query = "{col} >= '{start}' and {col} <= '{end}'".format(
            col=col, start=start, end=end)
        logger.info('filtered coverage data to slice: {}'.format(range_query))
        updated_groups = [
            dict(name=col, freq=freq) if g['name'] == col else g
            for g in groups
        ]
        return df.query(
            query or
            'index == index').query(range_query), updated_groups, range_query

    try:
        col = get_str_arg(request, 'col')
        groups = get_str_arg(request, 'group')
        if groups:
            groups = json.loads(groups)
        data = DATA[get_port()]
        data, groups, query = filter_data(data,
                                          request,
                                          groups,
                                          query=get_str_arg(request, 'query'))
        grouper = []
        for g_cfg in groups:
            if 'freq' in g_cfg:
                freq_grp = data.set_index([g_cfg['name']]).index.to_period(
                    g_cfg['freq']).to_timestamp(how='end')
                freq_grp.name = g_cfg['name']
                grouper.append(freq_grp)
            else:
                grouper.append(data[g_cfg['name']])

        data_groups = data.groupby(grouper)
        group_data = data_groups[col].count()
        if len(groups) > 1:
            unstack_order = enumerate(
                zip(group_data.index.names, group_data.index.levels))
            unstack_order = sorted([(uo[0], uo[1][0], len(uo[1][1]))
                                    for uo in unstack_order],
                                   key=lambda k: k[2])
            for i, n, l in unstack_order[:-1]:
                group_data = group_data.unstack(i)
            group_data = group_data.fillna(0)
            if len(unstack_order[:-1]) > 1:
                group_data.columns = [
                    ', '.join([
                        str(group_data.columns.levels[c2[0]][c2[1]])
                        for c2 in enumerate(c)
                    ]) for c in zip(*group_data.columns.labels)
                ]
            else:
                group_data.columns = map(str, group_data.columns.values)

        if len(group_data) > 15000:
            return jsonify(
                dict(error=(
                    'Your grouping created {} groups, chart will not render. '
                    'Try making date columns a higher frequency (W, M, Q, Y)'
                ).format(len(data_groups))))
        if len(groups) == 1:
            data = {col: [json_int(v) for v in group_data.values]}
        else:
            data = dict([(c, [json_int(v) for v in group_data[c].values])
                         for c in group_data.columns])
        labels = pd.DataFrame(group_data.index.values,
                              columns=group_data.index.names)
        labels_f_overrides = {
            'D': lambda f, i, c: f.add_date(i, c, fmt='%Y-%m-%d'),
        }
        labels_f = grid_formatter(grid_columns(labels),
                                  overrides=labels_f_overrides)
        labels = labels_f.format_dicts(labels.itertuples())
        return jsonify(data=data, labels=labels, success=True)
    except BaseException as e:
        return jsonify(
            dict(error=str(e), traceback=str(traceback.format_exc())))
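
The date re-bucketing inside the grouper loop relies on to_period/to_timestamp; in isolation the trick looks like this (dates invented):

import pandas as pd

df = pd.DataFrame(dict(d=pd.to_datetime(['2020-01-05', '2020-01-20', '2020-02-03']),
                       v=[1, 2, 3]))
freq_grp = df.set_index('d').index.to_period('M').to_timestamp(how='end')
freq_grp.name = 'd'
print(df.groupby(freq_grp)['v'].count())  # two January rows, one February row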
Example 19
def get_data():
    """
    Flask route which returns current rows from DATA (based on scrollbar specs and saved settings) to front-end as
    JSON

    :param ids: required dash-separated string "START-END" stating a range of row indexes to be returned to the screen
    :param query: string from flask.request.args['query'] which is applied to DATA using the query() function
    :param sort: JSON string from flask.request.args['sort'] which is applied to DATA using the sort_values() or
                 sort_index() function.  Here is the JSON structure: [col1,dir1],[col2,dir2],...,[colN,dirN]
    :param port: number string from flask.request.environ['SERVER_PORT'] for retrieving saved settings
    :return: JSON {
        results: [
            {dtale_index: 1, col1: val1_1, ...,colN: valN_1},
            ...,
            {dtale_index: N2, col1: val1_N2, ...,colN: valN_N2}
        ],
        columns: [{name: col1, dtype: 'int64'},...,{name: colN, dtype: 'datetime'}],
        total: N2,
        success: True/False
    }
    """
    try:
        global SETTINGS, DATA

        params = retrieve_grid_params(request)
        ids = get_str_arg(request, 'ids')
        if ids:
            ids = json.loads(ids)
        else:
            return jsonify({})
        col_types = grid_columns(DATA)

        f = grid_formatter(col_types)
        curr_settings = SETTINGS.get(
            request.environ.get('SERVER_PORT', 'curr'), {})
        if curr_settings.get('sort') != params.get('sort'):
            DATA = sort_df_for_grid(DATA, params)
        df = DATA
        if params.get('sort') is not None:
            curr_settings = dict_merge(curr_settings,
                                       dict(sort=params['sort']))
        else:
            curr_settings = {
                k: v
                for k, v in curr_settings.items() if k != 'sort'
            }
        df = filter_df_for_grid(df, params)
        if params.get('query') is not None:
            curr_settings = dict_merge(curr_settings,
                                       dict(query=params['query']))
        else:
            curr_settings = {
                k: v
                for k, v in curr_settings.items() if k != 'query'
            }
        SETTINGS[request.environ.get('SERVER_PORT', 'curr')] = curr_settings

        total = len(df)
        results = {}
        for sub_range in ids:
            sub_range = list(map(int, sub_range.split('-')))
            if len(sub_range) == 1:
                sub_df = df.iloc[sub_range[0]:sub_range[0] + 1]
                sub_df = f.format_dicts(sub_df.itertuples())
                results[sub_range[0]] = dict_merge(
                    dict(dtale_index=sub_range[0]), sub_df[0])
            else:
                [start, end] = sub_range
                sub_df = (df.iloc[start:] if end >= len(df) - 1
                          else df.iloc[start:end + 1])
                sub_df = f.format_dicts(sub_df.itertuples())
                for i, d in zip(range(start, end + 1), sub_df):
                    results[i] = dict_merge(dict(dtale_index=i), d)
        return_data = dict(results=results,
                           columns=[dict(name='dtale_index', dtype='int64')] +
                           col_types,
                           total=total)
        return jsonify(return_data)
    except BaseException as e:
        return jsonify(
            dict(error=str(e), traceback=str(traceback.format_exc())))
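
The ids parameter arrives as a JSON list of "START-END" (or single "N") strings; the slicing above reduces to a loop like this sketch:

import json

ids = json.loads('["0-9", "50"]')
for sub_range in ids:
    bounds = list(map(int, sub_range.split('-')))
    if len(bounds) == 1:
        print('single row', bounds[0])
    else:
        start, end = bounds
        print('rows', start, 'through', end)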