def _VSCODE_getDataFrameRows(df, start, end):
    df = _VSCODE_convertToDataFrame(df)
    # Turn into JSON using pandas. We use pandas because it's about 3 orders of magnitude faster to turn into JSON
    rows = df.iloc[start:end]
    return _VSCODE_pd_json.to_json(None, rows, orient="table", date_format="iso")
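# Illustrative sketch (not part of the helper above): the same "table" JSON
# can be produced with the public DataFrame.to_json API, which the internal
# _VSCODE_pd_json.to_json(path_or_buf, obj, ...) call wraps. orient="table"
# embeds a JSON Table Schema ("schema") alongside the row data ("data").
import pandas as pd

_example_df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
print(_example_df.iloc[0:2].to_json(orient="table", date_format="iso"))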
def to_json(self, path_or_buf=None, orient=None, date_format='epoch',
            double_precision=10, force_ascii=True, date_unit='ms'):
    """
    Convert the object to a JSON string.

    Note NaN's and None will be converted to null and datetime objects
    will be converted to UNIX timestamps.

    Parameters
    ----------
    path_or_buf : the path or buffer to write the result string
        if this is None, return a StringIO of the converted string
    orient : string

        * Series
          - default is 'index'
          - allowed values are: {'split','records','index'}
        * DataFrame
          - default is 'columns'
          - allowed values are:
            {'split','records','index','columns','values'}
        * The format of the JSON string
          - split : dict like
            {index -> [index], columns -> [columns], data -> [values]}
          - records : list like [{column -> value}, ... , {column -> value}]
          - index : dict like {index -> {column -> value}}
          - columns : dict like {column -> {index -> value}}
          - values : just the values array

    date_format : string, default 'epoch'
        type of date conversion, 'epoch' for timestamp, 'iso' for ISO8601
    double_precision : The number of decimal places to use when encoding
        floating point values, default 10.
    force_ascii : force encoded string to be ASCII, default True.
    date_unit : string, default 'ms' (milliseconds)
        The time unit to encode to, governs timestamp and ISO8601
        precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
        microsecond, and nanosecond respectively.

    Returns
    -------
    result : a JSON compatible string written to the path_or_buf;
        if the path_or_buf is none, return a StringIO of the result
    """
    from pandas.io import json
    return json.to_json(
        path_or_buf=path_or_buf, obj=self, orient=orient,
        date_format=date_format, double_precision=double_precision,
        force_ascii=force_ascii, date_unit=date_unit)
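# A small usage sketch for the parameters documented above (illustrative,
# using the public Series.to_json, which shares this signature): 'epoch'
# emits UNIX timestamps in the requested date_unit, while 'iso' emits
# ISO8601 strings.
import pandas as pd

_s = pd.Series(pd.to_datetime(["2000-01-01", "2000-01-02"]))
print(_s.to_json(date_format="epoch", date_unit="s"))  # e.g. seconds since epoch
print(_s.to_json(date_format="iso"))                   # e.g. ISO8601 strings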
def to_json(self, path_or_buf=None, orient=None, date_format='epoch',
            double_precision=10, force_ascii=True):
    """
    Convert the object to a JSON string.

    Note NaN's and None will be converted to null and datetime objects
    will be converted to UNIX timestamps.

    Parameters
    ----------
    path_or_buf : the path or buffer to write the result string
        if this is None, return a StringIO of the converted string
    orient : string

        * Series
          - default is 'index'
          - allowed values are: {'split','records','index'}
        * DataFrame
          - default is 'columns'
          - allowed values are:
            {'split','records','index','columns','values'}
        * The format of the JSON string
          - split : dict like
            {index -> [index], columns -> [columns], data -> [values]}
          - records : list like [{column -> value}, ... , {column -> value}]
          - index : dict like {index -> {column -> value}}
          - columns : dict like {column -> {index -> value}}
          - values : just the values array

    date_format : type of date conversion (epoch = epoch milliseconds,
        iso = ISO8601), default is epoch
    double_precision : The number of decimal places to use when encoding
        floating point values, default 10.
    force_ascii : force encoded string to be ASCII, default True.

    Returns
    -------
    result : a JSON compatible string written to the path_or_buf;
        if the path_or_buf is none, return a StringIO of the result
    """
    from pandas.io import json
    return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,
                        date_format=date_format,
                        double_precision=double_precision,
                        force_ascii=force_ascii)
def _VSCODE_getDataFrameRows(df, start, end):
    df = _VSCODE_convertToDataFrame(df, start, end)
    # Turn into JSON using pandas. We use pandas because it's about 3 orders of magnitude faster to turn into JSON
    try:
        df = df.replace(
            {
                _VSCODE_np.inf: "inf",
                -_VSCODE_np.inf: "-inf",
                _VSCODE_np.nan: "nan",
            }
        )
    except Exception:
        pass
    return _VSCODE_pd_json.to_json(None, df, orient="split", date_format="iso")
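# Why the replace above matters (illustrative sketch): strict JSON has no
# NaN/Infinity literals, so non-finite values cannot round-trip as numbers;
# mapping them to the strings "inf"/"-inf"/"nan" keeps them displayable.
# Compare the two outputs below.
import numpy as np
import pandas as pd

_d = pd.DataFrame({"v": [1.0, np.inf, -np.inf, np.nan]})
print(_d.to_json(orient="split"))  # non-finite values typically serialize as null
print(_d.replace({np.inf: "inf", -np.inf: "-inf", np.nan: "nan"})
        .to_json(orient="split"))  # values survive as strings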
def _VSCODE_getDataFrameInfo(df):
    df = _VSCODE_convertToDataFrame(df)
    rowCount = _VSCODE_getRowCount(df)

    # If any rows, use pandas json to convert a single row to json. Extract
    # the column names and types from the json so we match what we'll fetch when
    # we ask for all of the rows
    if rowCount:
        try:
            row = df.iloc[0:1]
            json_row = _VSCODE_pd_json.to_json(None, row, date_format="iso")
            columnNames = list(_VSCODE_json.loads(json_row))
        except Exception:
            columnNames = list(df)
    else:
        columnNames = list(df)

    columnTypes = _VSCODE_builtins.list(df.dtypes)

    # Compute the index column. It may have been renamed
    try:
        indexColumn = df.index.name if df.index.name else "index"
    except AttributeError:
        indexColumn = "index"

    # Make sure the index column exists
    if indexColumn not in columnNames:
        columnNames.insert(0, indexColumn)
        columnTypes.insert(0, "int64")

    # Then loop and generate our output json
    columns = []
    for n in _VSCODE_builtins.range(0, _VSCODE_builtins.len(columnNames)):
        column_type = columnTypes[n]
        column_name = str(columnNames[n])
        colobj = {}
        colobj["key"] = column_name
        colobj["name"] = column_name
        colobj["type"] = str(column_type)
        columns.append(colobj)

    # Save this in our target
    target = {}
    target["columns"] = columns
    target["indexColumn"] = indexColumn
    target["rowCount"] = rowCount

    # return our json object as a string
    return _VSCODE_json.dumps(target)
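# Illustrative sketch of the metadata shape the helper above returns,
# rebuilt with public pandas/json APIs for a small frame (the real helper
# also prepends the index column to "columns" when it is missing):
import json
import pandas as pd

_info_df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
_info = {
    "columns": [{"key": str(c), "name": str(c), "type": str(t)}
                for c, t in zip(_info_df.columns, _info_df.dtypes)],
    "indexColumn": _info_df.index.name or "index",
    "rowCount": len(_info_df.index),
}
print(json.dumps(_info))  # e.g. {"columns": [...], "indexColumn": "index", "rowCount": 2}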
def to_json(self, path_or_buf=None, orient=None, date_format='epoch',
            double_precision=10, force_ascii=True):
    """
    Convert the object to a JSON string.

    Note NaN's and None will be converted to null and datetime objects
    will be converted to UNIX timestamps.

    Parameters
    ----------
    path_or_buf : the path or buffer to write the result string
        if this is None, return a StringIO of the converted string
    orient :
        Series : default is 'index'
            allowed values are: {'split','records','index'}
        DataFrame : default is 'columns'
            allowed values are:
            {'split','records','index','columns','values'}
        The format of the JSON string
            split : dict like
                {index -> [index], columns -> [columns], data -> [values]}
            records : list like [{column -> value}, ... , {column -> value}]
            index : dict like {index -> {column -> value}}
            columns : dict like {column -> {index -> value}}
            values : just the values array
    date_format : type of date conversion (epoch = epoch milliseconds,
        iso = ISO8601), default is epoch
    double_precision : The number of decimal places to use when encoding
        floating point values, default 10.
    force_ascii : force encoded string to be ASCII, default True.

    Returns
    -------
    result : a JSON compatible string written to the path_or_buf;
        if the path_or_buf is none, return a StringIO of the result
    """
    from pandas.io import json
    return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,
                        date_format=date_format,
                        double_precision=double_precision,
                        force_ascii=force_ascii)
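# Quick comparison of the orient values documented above (illustrative):
import pandas as pd

_o = pd.DataFrame({"a": [1, 2]})
print(_o.to_json(orient="split"))    # e.g. {"columns":["a"],"index":[0,1],"data":[[1],[2]]}
print(_o.to_json(orient="records"))  # e.g. [{"a":1},{"a":2}]
print(_o.to_json(orient="index"))    # e.g. {"0":{"a":1},"1":{"a":2}}
print(_o.to_json(orient="columns"))  # e.g. {"a":{"0":1,"1":2}}
print(_o.to_json(orient="values"))   # e.g. [[1],[2]]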
def _update_table(self,
                  update_columns=False,
                  triggered_by=None,
                  scroll_to_row=None,
                  fire_data_change_event=True):
    df = self._df.copy()

    from_index = max(self._viewport_range[0] - PAGE_SIZE, 0)
    to_index = max(self._viewport_range[0] + PAGE_SIZE, 0)
    new_df_range = (from_index, to_index)

    if triggered_by == 'viewport_changed' and \
            self._df_range == new_df_range:
        return

    self._df_range = new_df_range

    df = df.iloc[from_index:to_index]

    self._row_count = len(self._df.index)

    self._multi_index = type(df.index) == pd.core.index.MultiIndex

    if update_columns:
        self._string_columns = list(df.select_dtypes(
            include=[np.dtype('O'), 'category']
        ).columns.values)

    # call map(str) for all columns identified as string columns, in
    # case any are not strings already
    for col_name in self._string_columns:
        sort_column_name = self._sort_helper_columns.get(col_name)
        if sort_column_name:
            series_to_set = df[sort_column_name]
        else:
            series_to_set = \
                self._get_col_series_from_df(col_name, df).map(str)
        self._set_col_series_on_df(col_name, df, series_to_set)

    df_json = pd_json.to_json(None, df,
                              orient='table',
                              date_format='iso',
                              double_precision=self.precision)

    if update_columns:
        self._interval_columns = []
        self._sort_helper_columns = {}
        self._period_columns = []

        # parse the schema that we just exported in order to get the
        # column metadata that was generated by 'to_json'
        parsed_json = json.loads(df_json)
        df_schema = parsed_json['schema']
        if 'primaryKey' in df_schema:
            self._primary_key = df_schema['primaryKey']
        else:
            # for some reason, 'primaryKey' isn't set when the index is
            # a single interval column. that's why this case is here.
            self._primary_key = [df.index.name]

        columns = {}
        for i, cur_column in enumerate(df_schema['fields']):
            col_name = cur_column['name']
            if 'constraints' in cur_column and \
                    isinstance(cur_column['constraints']['enum'][0], dict):
                cur_column['type'] = 'interval'
                self._interval_columns.append(col_name)

            if 'freq' in cur_column:
                self._period_columns.append(col_name)

            if col_name in self._primary_key:
                cur_column['is_index'] = True

            cur_column['position'] = i
            columns[col_name] = cur_column

        self._columns = columns

    # special handling for interval columns: convert to a string column
    # so the values can be displayed as text
    if len(self._interval_columns) > 0:
        for col_name in self._interval_columns:
            col_series = self._get_col_series_from_df(col_name, df)
            col_series_as_strings = col_series.map(lambda x: str(x))
            self._set_col_series_on_df(col_name, df, col_series_as_strings)

    # special handling for period index columns: call to_timestamp to
    # convert the series to a datetime series before displaying
    if len(self._period_columns) > 0:
        for col_name in self._period_columns:
            sort_column_name = self._sort_helper_columns.get(col_name)
            if sort_column_name:
                series_to_set = df[sort_column_name]
            else:
                series_to_set = self._get_col_series_from_df(
                    col_name, df).to_timestamp()
            self._set_col_series_on_df(col_name, df, series_to_set)

    # if any interval or period columns were converted, call 'to_json'
    # again to get a new version of the table json that reflects the
    # replacement columns
    if len(self._interval_columns) > 0 or len(self._period_columns) > 0:
        df_json = pd_json.to_json(None, df,
                                  orient='table',
                                  date_format='iso',
                                  double_precision=self.precision)

    self._df_json = df_json

    if fire_data_change_event:
        data_to_send = {
            'type': 'update_data_view',
            'columns': self._columns,
            'triggered_by': triggered_by
        }
        if scroll_to_row:
            data_to_send['scroll_to_row'] = scroll_to_row
        self.send(data_to_send)
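# Minimal, self-contained sketch (assuming only pandas and json) of the
# 'table' schema parsing that _update_table relies on above:
import json
import pandas as pd

_t = pd.DataFrame({"price": [1.5, 2.5]},
                  index=pd.Index([10, 20], name="id"))
_schema = json.loads(_t.to_json(orient="table", date_format="iso"))["schema"]
print(_schema.get("primaryKey"))           # typically ["id"]
for _field in _schema["fields"]:
    print(_field["name"], _field["type"])  # column metadata generated by to_json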
    _VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
elif _VSCODE_targetVariable["type"] == "ndarray":
    _VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
elif hasattr(_VSCODE_df, "toPandas"):
    _VSCODE_df = _VSCODE_df.toPandas()

_VSCODE_targetVariable["rowCount"] = _VSCODE_getRowCount(_VSCODE_df)

# If any rows, use pandas json to convert a single row to json. Extract
# the column names and types from the json so we match what we'll fetch when
# we ask for all of the rows
if "rowCount" in _VSCODE_targetVariable and _VSCODE_targetVariable["rowCount"]:
    try:
        _VSCODE_row = _VSCODE_df.iloc[0:1]
        _VSCODE_json_row = _VSCODE_pd_json.to_json(None, _VSCODE_row, date_format="iso")
        _VSCODE_columnNames = list(_VSCODE_json.loads(_VSCODE_json_row))
        del _VSCODE_row
        del _VSCODE_json_row
    except Exception:
        _VSCODE_columnNames = list(_VSCODE_df)
else:
    _VSCODE_columnNames = list(_VSCODE_df)

# Compute the index column. It may have been renamed
_VSCODE_indexColumn = _VSCODE_df.index.name if _VSCODE_df.index.name else "index"
_VSCODE_columnTypes = _VSCODE_builtins.list(_VSCODE_df.dtypes)
del _VSCODE_df

# Make sure the index column exists
_VSCODE_df = _VSCODE_evalResult
if isinstance(_VSCODE_evalResult, list):
    _VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
elif isinstance(_VSCODE_evalResult, _VSCODE_pd.Series):
    _VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
elif isinstance(_VSCODE_evalResult, dict):
    _VSCODE_evalResult = _VSCODE_pd.Series(_VSCODE_evalResult)
    _VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
elif _VSCODE_targetVariable['type'] == 'ndarray':
    _VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
# If not a known type, then just let pandas handle it.
elif not (hasattr(_VSCODE_df, 'iloc')):
    _VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)

# Turn into JSON using pandas. We use pandas because it's about 3 orders of magnitude faster to turn into JSON
_VSCODE_rows = _VSCODE_df.iloc[_VSCODE_startRow:_VSCODE_endRow]
_VSCODE_result = _VSCODE_pd_json.to_json(None, _VSCODE_rows, orient='table', date_format='iso')
print(_VSCODE_result)

# Cleanup our variables
del _VSCODE_df
del _VSCODE_endRow
del _VSCODE_startRow
del _VSCODE_rows
del _VSCODE_result
del _VSCODE_json
del _VSCODE_pd
del _VSCODE_pd_json
elif isinstance(_VSCODE_evalResult, _VSCODE_pd.Series):
    _VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
elif isinstance(_VSCODE_evalResult, dict):
    _VSCODE_evalResult = _VSCODE_pd.Series(_VSCODE_evalResult)
    _VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
elif _VSCODE_targetVariable["type"] == "ndarray":
    _VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
elif hasattr(_VSCODE_df, "toPandas"):
    _VSCODE_df = _VSCODE_df.toPandas()
# If not a known type, then just let pandas handle it.
elif not (hasattr(_VSCODE_df, "iloc")):
    _VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)

# Turn into JSON using pandas. We use pandas because it's about 3 orders of magnitude faster to turn into JSON
_VSCODE_rows = _VSCODE_df.iloc[_VSCODE_startRow:_VSCODE_endRow]
_VSCODE_result = _VSCODE_pd_json.to_json(None, _VSCODE_rows, orient="table", date_format="iso")
print(_VSCODE_result)

# Cleanup our variables
del _VSCODE_df
del _VSCODE_endRow
del _VSCODE_startRow
del _VSCODE_rows
del _VSCODE_result
del _VSCODE_json
del _VSCODE_pd
del _VSCODE_pd_json
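# Standalone sketch of the type dispatch above, written with plain imports
# (illustrative; covers list/Series/dict and defers to pandas otherwise):
import pandas as pd

def _to_frame(result):
    if isinstance(result, list):
        return pd.DataFrame(result)
    if isinstance(result, pd.Series):
        return result.to_frame()
    if isinstance(result, dict):
        return pd.Series(result).to_frame()
    if not hasattr(result, "iloc"):
        return pd.DataFrame(result)
    return result

print(_to_frame({"a": 1, "b": 2}).to_json())  # dict -> Series -> single-column frame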