def test__stream_good_dict_of_index_and_series_data_transformed(self):
    """Verify ds._stream converts dict values that are a DatetimeIndex /
    Series into plain datetime arrays before delegating to ds.data._stream."""
    df = pd.DataFrame(
        index=pd.date_range('now', periods=30, freq='T'),
        columns=['A'],
        data=np.cumsum(np.random.standard_normal(30), axis=0)
    )
    ds = ColumnDataSource(data={'index': convert_datetime_array(df.index.values), 'A': df.A})
    ds._document = "doc"  # fake document so the document-aware stream path runs
    stuff = {}
    mock_setter = object()
    def mock(*args, **kw):
        # capture exactly what reaches the underlying data._stream
        stuff['args'] = args
        stuff['kw'] = kw
    ds.data._stream = mock
    new_df = pd.DataFrame(
        index=df.index + pd.to_timedelta('30m'),
        columns=df.columns,
        data=np.random.standard_normal(30)
    )
    ds._stream({'index': new_df.index, 'A': new_df.A}, "foo", mock_setter)
    # streamed 'index' must be the converted datetime array, 'A' the raw values
    self.assertTrue(np.array_equal(stuff['args'][2]['index'], convert_datetime_array(new_df.index.values)))
    self.assertTrue(np.array_equal(stuff['args'][2]['A'], new_df.A.values))
def test_remove_exists2(self):
    """Removing a nonexistent column is a no-op that emits a UserWarning."""
    with warnings.catch_warnings(record=True) as caught:
        ds = ColumnDataSource()
        ds.remove("foo")
    assert ds.column_names == []
    assert len(caught) == 1
    assert caught[0].category == UserWarning
    assert str(caught[0].message) == "Unable to find column 'foo' in data source"
def test_patch_bad_columns(self):
    """patch() must reject any column that is not present in the source."""
    ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
    # a single unknown column is reported
    with self.assertRaises(ValueError) as raised:
        ds.patch(dict(c=[(0, 100)]))
    self.assertEqual(str(raised.exception), "Can only patch existing columns (extra: c)")
    # mixing known and unknown columns reports every extra
    with self.assertRaises(ValueError) as raised:
        ds.patch(dict(a=[(0, 100)], c=[(0, 100)], d=[(0, 100)]))
    self.assertEqual(str(raised.exception), "Can only patch existing columns (extra: c, d)")
def test_init_propertyvaluecolumndata_copy(self):
    # Initializing a new CDS from another CDS's .data must make a *shallow*
    # copy: the dict objects differ, but the inner column lists are shared.
    data = dict(a=[1], b=[2])
    cd = ColumnDataSource(data).data
    ds = ColumnDataSource(data=cd)
    assert ds.data == cd
    assert id(ds.data) != id(cd)  # distinct dict objects
    ds.data['a'][0] = 2
    assert cd['a'][0] == 2  # mutation is visible through both -> lists shared
def test_remove_exists2(self):
    """Removing a nonexistent column warns (UserWarning) instead of raising.

    FIX: ``assertEquals`` is a deprecated alias of ``assertEqual`` (it emits
    a DeprecationWarning and was removed in Python 3.12); use ``assertEqual``.
    """
    with warnings.catch_warnings(record=True) as w:
        ds = ColumnDataSource()
        ds.remove("foo")
    self.assertEqual(ds.column_names, [])
    self.assertEqual(len(w), 1)
    self.assertEqual(w[0].category, UserWarning)
    self.assertEqual(str(w[0].message), "Unable to find column 'foo' in data source")
def test_stream_df_to_ds_created_from_df_default_index(self):
    """Streaming a DataFrame into a CDS built from a DataFrame with the
    default RangeIndex: a generated 'index' column is streamed as well."""
    data = pd.DataFrame(dict(a=[10], b=[20], c=[30]))
    ds = ColumnDataSource(data)
    ds._document = "doc"  # fake document so the stream path runs
    notify_owners_stuff = {}
    def notify_owners_mock(*args, **kw):
        # capture the "old data" dict passed to _notify_owners
        notify_owners_stuff['args'] = args
        notify_owners_stuff['kw'] = kw
    ds.data._notify_owners = notify_owners_mock
    stream_stuff = {}
    data_stream = ds.data._stream
    def stream_wrapper(*args, **kwargs):
        # record the call, then delegate so the data is really appended
        stream_stuff['args'] = args
        stream_stuff['kwargs'] = kwargs
        data_stream(*args, **kwargs)
    ds.data._stream = stream_wrapper
    ds._stream(pd.DataFrame(dict(a=[11, 12], b=[21, 22], c=[31, 32])), 7)
    self.assertEqual(len(stream_stuff['args']), 5)
    expected_df = pd.DataFrame(dict(a=np.array([11, 12]), b=np.array([21, 22]), c=np.array([31, 32])))
    expected_stream_data = expected_df.to_dict('series')
    expected_stream_data['index'] = expected_df.index.values
    expected_args = ("doc", ds, expected_stream_data, 7, None)
    for i, (arg, ex_arg) in enumerate(zip(stream_stuff['args'], expected_args)):
        if i == 2:
            # the streamed-data dict: compare column arrays element-wise
            for k, v in arg.items():
                self.assertTrue(np.array_equal(v, ex_arg[k]))
        else:
            self.assertEqual(stream_stuff['args'][i], expected_args[i])
    self.assertEqual(stream_stuff['kwargs'], {})
    self.assertEqual(len(notify_owners_stuff['args']), 1)
    self._assert_equal_dicts_of_arrays(notify_owners_stuff['args'][0],
                                       dict(a=np.array([10]), b=np.array([20]), c=np.array([30]), index=np.array([0])))
    # note the generated index restarts at 0 for the streamed rows: [0, 0, 1]
    self._assert_equal_dicts_of_arrays(dict(ds.data),
                                       dict(a=np.array([10, 11, 12]), b=np.array([20, 21, 22]), c=np.array([30, 31, 32]), index=np.array([0, 0, 1])))
def test_patch_good_data(self):
    """A valid patch is forwarded verbatim to ds.data._patch."""
    ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
    ds._document = "doc"
    captured = {}
    def fake_patch(*args, **kw):
        captured['args'], captured['kw'] = args, kw
    ds.data._patch = fake_patch
    ds.patch(dict(a=[(0, 100), (1, 101)], b=[(0, 200)]))
    self.assertEqual(captured['args'], ("doc", ds, dict(a=[(0, 100), (1, 101)], b=[(0, 200)])))
    self.assertEqual(captured['kw'], {})
def test_stream_good_data(self):
    """stream() forwards its data and rollover to ds.data._stream."""
    ds = ColumnDataSource(data=dict(a=[10], b=[20]))
    ds._document = "doc"
    captured = {}
    def fake_stream(*args, **kw):
        captured['args'], captured['kw'] = args, kw
    ds.data._stream = fake_stream
    ds.stream(dict(a=[11, 12], b=[21, 22]), "foo")
    self.assertEqual(captured['args'], ("doc", ds, dict(a=[11, 12], b=[21, 22]), "foo"))
    self.assertEqual(captured['kw'], {})
def test_patch_good_simple_indices(self):
    """Integer-index patches are forwarded untouched, including the setter."""
    ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
    ds._document = "doc"
    captured = {}
    setter_sentinel = object()
    def fake_patch(*args, **kw):
        captured['args'], captured['kw'] = args, kw
    ds.data._patch = fake_patch
    ds.patch(dict(a=[(0, 100), (1, 101)], b=[(0, 200)]), setter_sentinel)
    assert captured['args'] == ("doc", ds, dict(a=[(0, 100), (1, 101)], b=[(0, 200)]), setter_sentinel)
    assert captured['kw'] == {}
def test_patch_good_slice_indices(self):
    # Slice-style patch indices (short form, bounded, and open-ended with a
    # step) must be forwarded unchanged to ds.data._patch.
    ds = ColumnDataSource(data=dict(a=[10, 11, 12, 13, 14, 15], b=[20, 21, 22, 23, 24, 25]))
    ds._document = "doc"  # fake document required by the patch path
    stuff = {}
    mock_setter = object()
    def mock(*args, **kw):
        # capture whatever reaches the underlying _patch
        stuff['args'] = args
        stuff['kw'] = kw
    ds.data._patch = mock
    ds.patch(dict(a=[(slice(2), [100, 101]), (slice(3, 5), [100, 101])],
                  b=[(slice(0, None, 2), [100, 101, 102])]), mock_setter)
    assert stuff['args'] == ("doc", ds,
                             dict(a=[(slice(2), [100, 101]), (slice(3, 5), [100, 101])],
                                  b=[(slice(0, None, 2), [100, 101, 102])]),
                             mock_setter)
    assert stuff['kw'] == {}
def test_stream_df_to_ds_created_from_df_named_index(self, pd):
    """Streaming a DataFrame with a *named* index ('c'): the index becomes a
    regular column and no synthetic 'index' column is generated."""
    data = pd.DataFrame(dict(a=[10], b=[20], c=[30])).set_index('c')
    ds = ColumnDataSource(data)
    ds._document = "doc"  # fake document so the stream path runs
    notify_owners_stuff = {}
    def notify_owners_mock(*args, **kw):
        # capture the "old data" dict handed to _notify_owners
        notify_owners_stuff['args'] = args
        notify_owners_stuff['kw'] = kw
    ds.data._notify_owners = notify_owners_mock
    stream_stuff = {}
    data_stream = ds.data._stream
    def stream_wrapper(*args, **kwargs):
        # record the call, then delegate so the data is really appended
        stream_stuff['args'] = args
        stream_stuff['kwargs'] = kwargs
        data_stream(*args, **kwargs)
    ds.data._stream = stream_wrapper
    ds._stream(pd.DataFrame(dict(a=[11, 12], b=[21, 22], c=[31, 32])).set_index('c'), 7)
    assert len(stream_stuff['args']) == 5
    expected_steam_data = dict(a=np.array([11, 12]), b=np.array([21, 22]), c=np.array([31, 32]))
    expected_args = ("doc", ds, expected_steam_data, 7, None)
    for i, (arg, ex_arg) in enumerate(zip(stream_stuff['args'], expected_args)):
        if i == 2:
            # the streamed-data dict: same keys, element-wise equal arrays
            assert arg.keys() == ex_arg.keys()
            for k, v in arg.items():
                assert np.array_equal(v, ex_arg[k])
        else:
            assert stream_stuff['args'][i] == expected_args[i]
    assert stream_stuff['kwargs'] == {}
    assert len(notify_owners_stuff['args']) == 1
    # no 'index' key anywhere -- the named index 'c' is a normal column
    self._assert_equal_dicts_of_arrays(notify_owners_stuff['args'][0],
                                       dict(a=np.array([10]), b=np.array([20]), c=np.array([30])))
    self._assert_equal_dicts_of_arrays(dict(ds.data),
                                       dict(a=np.array([10, 11, 12]), b=np.array([20, 21, 22]), c=np.array([30, 31, 32])))
def test_stream_series_to_ds_created_from_df(self, pd):
    """Streaming a Series into a CDS created from a DataFrame: the Series is
    treated as one row and a default 'index' value is generated for it."""
    data = pd.DataFrame(dict(a=[10], b=[20], c=[30]))
    ds = ColumnDataSource(data)
    ds._document = "doc"  # fake document so the stream path runs
    notify_owners_stuff = {}
    def notify_owners_mock(*args, **kw):
        # capture the "old data" dict handed to _notify_owners
        notify_owners_stuff['args'] = args
        notify_owners_stuff['kw'] = kw
    ds.data._notify_owners = notify_owners_mock
    stream_stuff = {}
    data_stream = ds.data._stream
    def stream_wrapper(*args, **kwargs):
        # record the call, then delegate so the data is really appended
        stream_stuff['args'] = args
        stream_stuff['kwargs'] = kwargs
        data_stream(*args, **kwargs)
    ds.data._stream = stream_wrapper
    ds._stream(pd.Series([11, 21, 31], index=list('abc')), 7)
    assert len(stream_stuff['args']) == 5
    expected_df = pd.DataFrame(dict(a=np.array([11]), b=np.array([21]), c=np.array([31])))
    expected_stream_data = expected_df.to_dict('series')
    expected_stream_data['index'] = expected_df.index.values
    expected_args = ("doc", ds, expected_stream_data, 7, None)
    for i, (arg, ex_arg) in enumerate(zip(stream_stuff['args'], expected_args)):
        if i == 2:
            # the streamed-data dict: compare all columns element-wise
            self._assert_equal_dicts_of_arrays(arg, ex_arg)
        else:
            assert arg == ex_arg
    assert stream_stuff['kwargs'] == {}
    assert len(notify_owners_stuff['args']) == 1
    self._assert_equal_dicts_of_arrays(notify_owners_stuff['args'][0],
                                       dict(a=np.array([10]), b=np.array([20]), c=np.array([30]), index=np.array([0])))
    # generated index restarts at 0 for the streamed row: [0, 0]
    self._assert_equal_dicts_of_arrays(dict(ds.data),
                                       dict(a=np.array([10, 11]), b=np.array([20, 21]), c=np.array([30, 31]), index=np.array([0, 0])))
def test_stream_good_data(self):
    """_stream() without a setter appends a trailing None to the call."""
    ds = ColumnDataSource(data=dict(a=[10], b=[20]))
    ds._document = "doc"
    captured = {}
    def fake_stream(*args, **kw):
        captured['args'], captured['kw'] = args, kw
    ds.data._stream = fake_stream
    # exercise the public implementation of stream via the private entry point
    ds._stream(dict(a=[11, 12], b=[21, 22]), "foo")
    assert captured['args'] == ("doc", ds, dict(a=[11, 12], b=[21, 22]), "foo", None)
    assert captured['kw'] == {}
def test_stream_dict_to_ds_created_from_df(self):
    """Streaming a plain dict that mixes list, ndarray and Series values into
    a CDS created from a DataFrame with a named index."""
    data = pd.DataFrame(dict(a=[10], b=[20], c=[30])).set_index('c')
    ds = ColumnDataSource(data)
    ds._document = "doc"  # fake document so the stream path runs
    notify_owners_stuff = {}
    def notify_owners_mock(*args, **kw):
        # capture the "old data" dict handed to _notify_owners
        notify_owners_stuff['args'] = args
        notify_owners_stuff['kw'] = kw
    ds.data._notify_owners = notify_owners_mock
    stream_stuff = {}
    data_stream = ds.data._stream
    def stream_wrapper(*args, **kwargs):
        # record the call, then delegate so the data is really appended
        stream_stuff['args'] = args
        stream_stuff['kwargs'] = kwargs
        data_stream(*args, **kwargs)
    ds.data._stream = stream_wrapper
    ds._stream(dict(a=[11, 12], b=np.array([21, 22]), c=pd.Series([31, 32])), 7)
    self.assertEqual(len(stream_stuff['args']), 5)
    expected_stream_args = ("doc", ds, dict(a=[11, 12], b=np.array([21, 22]), c=pd.Series([31, 32])), 7, None)
    for i, (arg, ex_arg) in enumerate(zip(stream_stuff['args'], expected_stream_args)):
        if i == 2:
            # 'a' is a plain list, compare directly; then compare the
            # remaining array-like columns element-wise
            self.assertEqual(arg['a'], ex_arg['a'])
            del arg['a'], ex_arg['a']
            self._assert_equal_dicts_of_arrays(arg, ex_arg)
        else:
            self.assertEqual(arg, ex_arg)
    self.assertEqual(stream_stuff['kwargs'], {})
    self.assertEqual(len(notify_owners_stuff['args']), 1)
    self._assert_equal_dicts_of_arrays(notify_owners_stuff['args'][0],
                                       dict(a=np.array([10]), b=np.array([20]), c=np.array([30])))
    self._assert_equal_dicts_of_arrays(dict(ds.data),
                                       dict(a=np.array([10, 11, 12]), b=np.array([20, 21, 22]), c=np.array([30, 31, 32])))
def test__stream_good_data(self):
    """_stream() forwards data, rollover and the setter to ds.data._stream."""
    ds = ColumnDataSource(data=dict(a=[10], b=[20]))
    ds._document = "doc"
    captured = {}
    setter_sentinel = object()
    def fake_stream(*args, **kw):
        captured['args'], captured['kw'] = args, kw
    ds.data._stream = fake_stream
    # internal implementation of stream
    ds._stream(dict(a=[11, 12], b=[21, 22]), "foo", setter_sentinel)
    self.assertEqual(captured['args'], ("doc", ds, dict(a=[11, 12], b=[21, 22]), "foo", setter_sentinel))
    self.assertEqual(captured['kw'], {})
def test__stream_good_datetime64_data(self):
    """datetime64 columns must pass through _stream unmodified."""
    now = dt.datetime.now()
    dates = np.array([now + dt.timedelta(i) for i in range(1, 10)], dtype='datetime64')
    ds = ColumnDataSource(data=dict(index=dates, b=list(range(1, 10))))
    ds._document = "doc"
    captured = {}
    setter_sentinel = object()
    def fake_stream(*args, **kw):
        captured['args'], captured['kw'] = args, kw
    ds.data._stream = fake_stream
    # stream one more datetime64 value and check it arrives untouched
    new_date = np.array([now + dt.timedelta(10)], dtype='datetime64')
    ds._stream(dict(index=new_date, b=[10]), "foo", setter_sentinel)
    self.assertTrue(np.array_equal(captured['args'][2]['index'], new_date))
def test__df_index_name_with_named_multi_index(self):
    """A fully named MultiIndex is flattened to an underscore-joined name."""
    csv_text = u'''
Fruit,Color,Count,Price
Apple,Red,3,$1.29
Apple,Green,9,$0.99
Pear,Red,25,$2.59
Pear,Green,26,$2.79
Lime,Green,99,$0.39
'''
    df = pd.read_csv(io.StringIO(csv_text)).set_index(['Fruit', 'Color'])
    assert df.index.names == ['Fruit', 'Color']
    assert ColumnDataSource._df_index_name(df) == "Fruit_Color"
def test_data_column_lengths(self):
    """Mismatched column lengths currently only warn (soft validation).

    FIX: ``assertEquals`` is a deprecated alias of ``assertEqual`` (removed
    in Python 3.12); use ``assertEqual`` throughout.
    """
    # TODO: use this when soft=False
    #
    #with self.assertRaises(ValueError):
    #    ColumnDataSource(data=dict(a=[10, 11], b=[20, 21, 22]))
    #
    #ds = ColumnDataSource()
    #with self.assertRaises(ValueError):
    #    ds.data = dict(a=[10, 11], b=[20, 21, 22])
    #
    #ds = ColumnDataSource(data=dict(a=[10, 11]))
    #with self.assertRaises(ValueError):
    #    ds.data["b"] = [20, 21, 22]
    #
    #ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
    #with self.assertRaises(ValueError):
    #    ds.data.update(dict(a=[10, 11, 12]))

    # mismatch at construction time
    with warnings.catch_warnings(record=True) as warns:
        ColumnDataSource(data=dict(a=[10, 11], b=[20, 21, 22]))
        self.assertEqual(len(warns), 1)
        self.assertEqual(str(warns[0].message), "ColumnDataSource's columns must be of the same length")

    # mismatch when assigning .data wholesale
    ds = ColumnDataSource()
    with warnings.catch_warnings(record=True) as warns:
        ds.data = dict(a=[10, 11], b=[20, 21, 22])
        self.assertEqual(len(warns), 1)
        self.assertEqual(str(warns[0].message), "ColumnDataSource's columns must be of the same length")

    # mismatch when adding a single column
    ds = ColumnDataSource(data=dict(a=[10, 11]))
    with warnings.catch_warnings(record=True) as warns:
        ds.data["b"] = [20, 21, 22]
        self.assertEqual(len(warns), 1)
        self.assertEqual(str(warns[0].message), "ColumnDataSource's columns must be of the same length")

    # mismatch when updating an existing column
    ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
    with warnings.catch_warnings(record=True) as warns:
        ds.data.update(dict(a=[10, 11, 12]))
        self.assertEqual(len(warns), 1)
        self.assertEqual(str(warns[0].message), "ColumnDataSource's columns must be of the same length")
def test_data_column_lengths(self):
    """Mismatched column lengths currently only warn (soft validation); the
    warning message reports the offending per-column lengths."""
    # TODO: use this when soft=False
    #
    #with pytest.raises(ValueError):
    #    ColumnDataSource(data=dict(a=[10, 11], b=[20, 21, 22]))
    #
    #ds = ColumnDataSource()
    #with pytest.raises(ValueError):
    #    ds.data = dict(a=[10, 11], b=[20, 21, 22])
    #
    #ds = ColumnDataSource(data=dict(a=[10, 11]))
    #with pytest.raises(ValueError):
    #    ds.data["b"] = [20, 21, 22]
    #
    #ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
    #with pytest.raises(ValueError):
    #    ds.data.update(dict(a=[10, 11, 12]))

    # mismatch at construction time
    with warnings.catch_warnings(record=True) as warns:
        ColumnDataSource(data=dict(a=[10, 11], b=[20, 21, 22]))
        assert len(warns) == 1
        assert str(warns[0].message) == "ColumnDataSource's columns must be of the same length. Current lengths: ('a', 2), ('b', 3)"

    # mismatch when assigning .data wholesale
    ds = ColumnDataSource()
    with warnings.catch_warnings(record=True) as warns:
        ds.data = dict(a=[10, 11], b=[20, 21, 22])
        assert len(warns) == 1
        assert str(warns[0].message) == "ColumnDataSource's columns must be of the same length. Current lengths: ('a', 2), ('b', 3)"

    # mismatch when adding a single column
    ds = ColumnDataSource(data=dict(a=[10, 11]))
    with warnings.catch_warnings(record=True) as warns:
        ds.data["b"] = [20, 21, 22]
        assert len(warns) == 1
        assert str(warns[0].message) == "ColumnDataSource's columns must be of the same length. Current lengths: ('a', 2), ('b', 3)"

    # mismatch when updating an existing column
    ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
    with warnings.catch_warnings(record=True) as warns:
        ds.data.update(dict(a=[10, 11, 12]))
        assert len(warns) == 1
        assert str(warns[0].message) == "ColumnDataSource's columns must be of the same length. Current lengths: ('a', 3), ('b', 2)"
def test__stream_good_df_with_date_index_data(self, pd):
    """Streaming a DataFrame whose index is a DatetimeIndex: the index must
    reach data._stream as the raw datetime64 values."""
    df = pd.DataFrame(
        index=pd.date_range('now', periods=30, freq='T'),
        columns=['A'],
        data=np.cumsum(np.random.standard_normal(30), axis=0)
    )
    ds = ColumnDataSource(data=df)
    ds._document = "doc"  # fake document so the stream path runs
    stuff = {}
    mock_setter = object()
    def mock(*args, **kw):
        # capture what reaches the underlying data._stream
        stuff['args'] = args
        stuff['kw'] = kw
    ds.data._stream = mock
    new_df = pd.DataFrame(
        index=df.index + pd.to_timedelta('30m'),
        columns=df.columns,
        data=np.random.standard_normal(30)
    )
    ds._stream(new_df, "foo", mock_setter)
    assert np.array_equal(stuff['args'][2]['index'], new_df.index.values)
    assert np.array_equal(stuff['args'][2]['A'], new_df.A.values)
def _init_bridge_plot(self):
    # Tiny plot used only as a JS bridge: the CustomJS body is replaced by
    # run_js_code() (per the JS comment below), and the callback fires on
    # changes to the glyph's 'size' property -- presumably run_js_code()
    # pokes 'size' to trigger it (TODO confirm against run_js_code()).
    lg.info('-- INIT BRIDGE PLOT')
    self.bridge_plot = figure(
        plot_width=200,
        plot_height=200,
    )
    self.bridge_plot.css_classes = ['bridge_plot']
    source = ColumnDataSource({
        'x': [1, 2, 3],
        'y': [4, 5, 6],
    })
    self.bridge_trigger = self.bridge_plot.scatter(
        x='x', y='y', source=source, size=1, color="navy", alpha=0.5
    )
    self.bridge_plot_callback = CustomJS(code="""
        // This content will be overwritten by the method run_js_code()
    """)
    self.bridge_trigger.glyph.js_on_change('size', self.bridge_plot_callback)
def _update_prof_circle_sources(self, df_fs=None, stt_order=None):
    ''' Update the self.env.pc_source in order to mark the selected samples
        on all the plots.

        @df_fs: DF with data only with the current stations to show
        @stt_order: selected stations, red color at the end of the list
    '''
    lg.info('-- UPDATE PROFILE CIRCLE SOURCES')
    # FIX: stt_order previously defaulted to a mutable list ([]), which is
    # created once and shared across every call in Python; default to None
    # and build a fresh list per call instead.
    if stt_order is None:
        stt_order = []
    current_plotted_cols, prof_df = self._get_empty_prof_df()

    # TODO: stt_order should have more than one station if they are actually selected in the map
    # TODO: Improve the performance of this algorithm, maybe using groupby instead of loops

    # BUILDING THE NEW PROF_DF: reversed so the last station in stt_order
    # lands in the highest-priority profile slot (NPROF - 1 counts down)
    stt_order_reversed = list(reversed(stt_order))
    # lg.warning('>> STT ORDER REVERSED: {}'.format(stt_order_reversed))
    for tab in self.env.f_handler.tab_list:
        for col in current_plotted_cols:
            i = NPROF - 1
            for stt in stt_order_reversed:
                df_aux = df_fs[(df_fs[STNNBR] == stt) & df_fs[col].notnull()]
                if self.env.plot_prof_invsbl_points is False:
                    # drop rows whose flag is not currently visible
                    flag = self.env.tabs_flags_plots[tab]['flag']
                    df_aux = df_aux[df_aux[flag].isin(self.env.visible_flags)]
                prof_df.loc[df_aux.index.values, '{}_{}_{}'.format(tab, col, i)] = df_aux[col]
                # lg.warning('>> STT: {} | COL: {} | I: {}'.format(stt, col, i))
                i -= 1
    prof_df.dropna(how='all', inplace=True)
    prof_cds = ColumnDataSource(prof_df)
    self.env.pc_source.data = prof_cds.data

    # NOTE: this translates the selection indices into positional indices
    #       bokeh with each ColumnDataSource uses a new index with consecutive
    #       integers [0, 1, 2, 3, ...] -- it doesn't matter if you have a
    #       different index in the DF that you use to create the CDS
    sel = []
    for i in self.env.selection:
        # TODO: only selected points within profiles
        if i in prof_df.index:
            sel.append(prof_df.index.get_loc(i))
    self.env.pc_source.selected.indices = sel
    self._update_asterisk_source(current_plotted_cols)
def generate_rdf_column_data_source(self, files, column_title="ID"):
    """
    Generate data table based on files list with ``id``.

    :param List files: List of Dictionaries that contain the key ``id``.
    :param string column_title: Title of the data table column.
    :return: Data table containing the file IDs.
    """
    # one 'id' column fed straight from the input dicts
    table_source = ColumnDataSource(dict(id=[entry['id'] for entry in files]))
    table_columns = [TableColumn(field="id", title=column_title)]
    return DataTable(source=table_source, columns=table_columns,
                     width=500, height=400, selectable=True)
def timePlot(request, xkey, ykey, coin=False, begin=False, end=False):
    """Render a Bokeh time-series line plot for the requested metric pair and
    return it embedded in the ajaxGraph template.

    NOTE(review): coin/begin/end are appended to the API route only when
    truthy, so the route shape depends on which filters were supplied.
    """
    baseURL = "http://18.220.161.116/ajax/time/"
    baseName = "Bitcoin"  # default coin name for the title
    ajaxRoute = baseURL + str(xkey) + "/" + str(ykey) + "/"
    if(coin):
        ajaxRoute += (str(coin) + "/")
        baseName = str(coin).capitalize()
    if(begin):
        ajaxRoute += (str(begin) + "/")
    if(end):
        ajaxRoute += str(end)
    #originally ajaxDataSource, now is a get route into a column data source. ajaxDataSource is good for real time data
    jsonDict = AWSsign.amazonCall(ajaxRoute).json()
    #print("timePlot data from api:",jsonDict["x"][0],jsonDict["y"][0])
    nDays = 2  # keep every 2nd sample to thin the series
    df = pd.DataFrame({'x': pd.to_datetime(jsonDict["x"][::nDays], yearfirst=True),
                       'y': jsonDict["y"][::nDays],
                       'date': jsonDict["x"][::nDays]})
    #df = df.fillna(0)
    #print("dataframe:\n",df)
    # print("dataframe x:\n",df['x'])
    # print("dataframe y:\n",df['y'])
    titleStr = baseName + " " + str(xkey) + " vs " + str(ykey)
    TOOLTIPS = [
        ("Date", "@date"),
        ("Price", "$@y{0,0.00}")
    ]
    FORMAT = {
        "Date": "datetime"
    }
    plot = figure(x_axis_type="datetime", plot_width=1000, plot_height=700,
                  x_axis_label=str(xkey), y_axis_label=str(ykey), title=titleStr)
    plot.toolbar.logo = None
    plot.toolbar_location = None
    hover = HoverTool(tooltips=TOOLTIPS, mode='vline', formatters=FORMAT)
    plot.add_tools(hover)
    source = ColumnDataSource(df)
    #print("CDSx::",source.data['x'],"\nCDSy::",source.data['y'],"\nCDScols::",source.column_names)
    plot.line(x='x', y='y', source=source)
    #print("plot complete:",plot.select(dict(type=HoverTool))[0].tooltips)
    script, div = components(plot)
    context = {
        "script": script,
        "div": div
    }
    #print("context:", context)
    template = loader.get_template("bokehGraphs/ajaxGraph.html")
    return HttpResponse(template.render(context=context, request=request))
def create_bar_chart(data, title, x_name, y_name, hover_tool=None,
                     width=1200, height=300):
    """Creates a bar chart plot with the exact styling for the centcom
    dashboard. Pass in data as a dictionary, desired plot title, name of x
    axis, y axis and the hover tool HTML.
    """
    source = ColumnDataSource(data)
    xdr = FactorRange(factors=data[x_name])
    # 50% headroom above the tallest bar
    ydr = Range1d(start=0, end=max(data[y_name]) * 1.5)
    tools = []
    if hover_tool:
        tools = [hover_tool, ]
    plot = figure(title=title, x_range=xdr, y_range=ydr, plot_width=width,
                  x_axis_type="datetime",
                  plot_height=height, min_border=0, toolbar_location="above",
                  tools=tools, sizing_mode='fixed',
                  outline_line_color="#666666")
    glyph = VBar(x=x_name, top=y_name, bottom=0, width=.8,
                 fill_color="#e12127")
    plot.add_glyph(source, glyph)
    plot.xaxis.ticker = DaysTicker(days=np.arange(1, 32))
    # NOTE(review): these LinearAxis objects are created only to supply
    # tickers to the grids below; they are never added to the plot as axes.
    xaxis = LinearAxis()
    yaxis = LinearAxis()
    plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
    plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
    plot.toolbar.logo = None
    plot.min_border_top = 0
    plot.xgrid.grid_line_color = None
    plot.ygrid.grid_line_color = "#999999"
    plot.yaxis.axis_label = "Bugs found"
    plot.ygrid.grid_line_alpha = 0.1
    plot.xaxis.axis_label = "Days after app deployment"
    plot.xaxis.major_label_orientation = 1
    return plot
def _upd_astk_src(self):
    ''' Creates a new CDS with the new asterisk source data (selected sample)
        If nothing is selected the CDS is reset.

        NOTE: Be careful with this method because the lists orders is very
              important and dificult to follow
    '''
    lg.info('-- UPDATE ASTERISK SOURCE')
    if self.env.sample_to_select is not None:
        # one NaN slot per (tab, column) pair
        values = [np.nan] * (
            len(self.env.cur_plotted_cols) * len(self.env.f_handler.tab_list)
        )  # values should have the same order than the CDS columns
        columns = []
        pos = 0
        for tab in self.env.f_handler.tab_list:
            for col in self.env.cur_plotted_cols:
                columns.append('{}_{}'.format(tab, col))
                if self.env.plot_prof_invsbl_points:
                    # then always visible
                    values[pos] = self.env.cds_df.loc[
                        self.env.sample_to_select, col]
                else:
                    # only fill the slot when the sample's flag is visible
                    flag = self.env.tabs_flags_plots[tab]['flag']
                    if self.env.cds_df.loc[self.env.sample_to_select, flag] in self.env.visible_flags:
                        values[pos] = self.env.cds_df.loc[
                            self.env.sample_to_select, col]
                pos += 1
        # lg.info('>> COLUMNS: {}'.format(columns))
        # lg.info('>> VALUES: {}'.format(values))
        df = pd.DataFrame(columns=columns)
        # only add the row when at least one value is actually visible
        if any(not np.isnan(x) for x in values):
            df.loc[self.env.sample_to_select] = values
    else:
        # posibbly reset: empty frame with the previous columns (minus 'index')
        lg.info('>> RESETTING ASTERISK')
        column_names = list(self.env.astk_src.data.keys())
        if 'index' in column_names:
            column_names.remove('index')
        df = pd.DataFrame(columns=column_names)
    astk_cds = ColumnDataSource(df)
    return astk_cds
def get(self, request):
    """Render the diabetes dataset as an editable Bokeh DataTable."""
    diabetes_logic = DiabetesLogic()
    dataset_file_path = "dataset_management/diabetes.txt"
    df = diabetes_logic.read_dataset(dataset_file_path)
    table_source = ColumnDataSource(df)
    # one table column per DataFrame column, titled with the column name
    table_columns = [TableColumn(field=name, title=name) for name in df.columns]
    data_table = DataTable(source=table_source, columns=table_columns,
                           width=1000, editable=True, fit_columns=True)
    script, div = components(widgetbox(data_table))
    context = {
        'script': script,
        'div': div,
        'dataset_file_path': dataset_file_path,
    }
    return render(request, 'vis_diabetes.html', context)
def selected_values(self) -> Union[pd.DataFrame, ColumnDataSource, None]:
    """Return the currently selected rows, typed like ``self.value``.

    ``self.selection`` is a list of positional row indices, e.g. ``[0, 2]``.

    Raises:
        ValueError: If ``self.value`` is not of a supported type.

    Returns:
        Union[pd.DataFrame, ColumnDataSource, None]: The selected rows as a
        DataFrame or ColumnDataSource matching the type of ``self.value``,
        or None when there is no value.
    """
    if self.value is None:
        return None
    if isinstance(self.value, pd.DataFrame):
        return self.value.iloc[self.selection, ]
    if isinstance(self.value, ColumnDataSource):
        # No direct "selected CDS" accessor: round-trip through a DataFrame.
        subset = self.value.to_df().iloc[self.selection, ]
        return ColumnDataSource(subset)
    raise ValueError("The value is not of a supported type!")
def plot_3(data, ss, *args):
    """t-SNE embedding of the parameters, colored by score
    """
    if len(data) <= 1:
        # t-SNE needs at least two points
        warnings.warn("Only one datapoint. Could not compute t-SNE embedding.")
        return None

    scores = np.array([d['mean_test_score'] for d in data])
    # maps each parameters to a vector of floats
    warped = np.array([ss.point_to_gp(d['parameters']) for d in data])

    # Embed into 2 dimensions with t-SNE
    X = TSNE(n_components=2).fit_transform(warped)

    # map exp(score), rescaled to [0, 1], through the RdBu_r colormap
    e_scores = np.exp(scores)
    mine, maxe = np.min(e_scores), np.max(e_scores)
    color = (e_scores - mine) / (maxe - mine)
    mapped_colors = list(map(rgb2hex, cm.get_cmap('RdBu_r')(color)))

    p = bk.figure(title='t-SNE (unsupervised)', tools=TOOLS)

    df_params = nonconstant_parameters(data)
    df_params['score'] = scores
    df_params['x'] = X[:, 0]
    df_params['y'] = X[:, 1]
    df_params['color'] = mapped_colors
    df_params['size'] = 10
    p.circle(
        x='x', y='y', color='color', size='size',
        source=ColumnDataSource(data=df_params), fill_alpha=0.6,
        line_color=None)
    # hover shows the row index plus every parameter column
    cp = p
    hover = cp.select(dict(type=HoverTool))
    format_tt = [(s, '@%s' % s) for s in df_params.columns]
    hover.tooltips = OrderedDict([("index", "$index")] + format_tt)

    xax, yax = p.axis
    xax.axis_label = 't-SNE coord 1'
    yax.axis_label = 't-SNE coord 2'
    return p
def make_flight_progress_bar_plot(include_greenland=False):
    """Build a stacked horizontal bar chart of verified/unverified film
    segments per flight and return the Bokeh <script>/<div> embed markup."""
    if include_greenland:
        # merge antarctica and greenland stats key-by-key
        stats = {}
        for k in flight_progress_stats['antarctica']:
            stats[k] = flight_progress_stats['antarctica'][
                k] + flight_progress_stats['greenland'][k]
    else:
        stats = flight_progress_stats['antarctica']

    fps_df = pd.DataFrame(stats).sort_values(by=['dataset', 'flight_ids'],
                                             ascending=False)

    # 20px per flight row; "@$name" resolves to the hovered stack's column
    p = figure(y_range=fps_df['flights'],
               plot_height=20 * len(stats['flight_ids']),
               toolbar_location=None, tools="hover,tap",
               tooltips="@$name film segments")
    p.hbar_stack(['verified', 'unverified'], y='flights', height=0.8,
                 source=ColumnDataSource(fps_df),
                 color=[app.config['COLOR_SKY'], app.config['COLOR_GRAY']],
                 legend_label=['Verified', 'Unverified'])

    p.y_range.range_padding = 0.1
    p.ygrid.grid_line_color = None
    p.legend.location = "top_right"
    p.axis.minor_tick_line_color = None
    p.outline_line_color = None
    p.min_border_top = 0
    p.min_border_bottom = 0
    p.sizing_mode = 'stretch_width'

    # clicking a bar opens the URL from the row's 'url' column, same tab
    url = "@url"
    taptool = p.select(type=TapTool)
    taptool.callback = OpenURL(url=url, same_tab=True)

    script, div = components(p)
    return f'\n{script}\n\n{div}\n'
def confidence_plot(doc_data, height, width):
    """Scatter of documents colored by prediction confidence.

    Wires JS handlers for tap and selection events and attaches a color bar
    mapping the confidence range.
    """
    conf_values = doc_data['prediction_confidence'].values
    lo, hi = min(conf_values), max(conf_values)
    source = ColumnDataSource(doc_data)

    fig = figure(plot_width=width, plot_height=height,
                 tools="pan,box_zoom,reset,box_select")
    fig.add_tools(HoverTool(tooltips=[("Name", "@documents"),
                                      ("Label", "@label"),
                                      ("Prediction", "@prediction"),
                                      ("Confidence", "@prediction_confidence")]))

    cmap = LinearColorMapper(palette='Magma256', low=lo, high=hi)
    fig.scatter(x='x', y='y',
                fill_color={"field": 'prediction_confidence', 'transform': cmap},
                size=SIZE,
                source=source)

    # browser-side hooks defined elsewhere in the page's JS
    fig.js_on_event(Tap, CustomJS(args=dict(source=source),
                                  code="handle_tap(source,cb_obj)"))
    source.selected.js_on_change('indices',
                                 CustomJS(args=dict(source=source),
                                          code="handle_select(source,cb_obj)"))

    fig.add_layout(ColorBar(color_mapper=cmap, location=(0, 0)), "right")
    return fig
def plot_noshow_by_age(X):
    """Scatter of no-show probability vs age over the most recent rows."""
    # last row per age, drawn from the most recent 5000 records
    latest_per_age = X.tail(5000).groupby('age').apply(lambda grp: grp.tail(1))
    source = ColumnDataSource(latest_per_age)
    hover = HoverTool(tooltips=[
        ("Prob", "@{ages.PERCENT_TRUE(appointments.no_show)}"),
        ("Age", "@age"),
    ])
    fig = figure(title="Probability no-show by Age",
                 x_axis_label='Age',
                 y_axis_label='Probability of no-show',
                 width=400,
                 height=400,
                 tools=[hover, 'box_zoom', 'reset', 'save'])
    fig.scatter('age', 'ages.PERCENT_TRUE(appointments.no_show)',
                alpha=.7, source=source)
    return fig
def generate_data_table_data_source(source_graph: Graph) -> ColumnDataSource:
    """Build the ColumnDataSource backing the PEP data table.

    :param source_graph: graph whose node attributes are tabulated
    :return: CDS with PEP, Title, Status, creation date and status colors
    """
    df = convert_node_attribute2df(source_graph)
    # d == d is False for NaN/NaT, so missing dates render as 'No Data'
    df['Created_str'] = df.Created_dt.apply(
        lambda d: d.strftime('%Y-%m-%d') if d == d else 'No Data')
    df['status_node_color'] = df['Status'].apply(
        lambda s: STATUS_COLOR_MAP_DICT[s])
    df['status_font_color'] = df['Status'].apply(
        lambda s: STATUS_FONT_COLOR_MAP_DICT[s])
    keep = ['PEP', 'Title', 'Status', 'Created_str',
            'status_node_color', 'status_font_color']
    return ColumnDataSource(df[keep])
def process_data(self):
    """Aggregate the chart data into polar wedge ranges and build the CDSs
    for the wedges and their text labels."""
    # produce polar ranges based on aggregation specification
    polar_data = build_wedge_source(
        self._data.df,
        cat_cols=self.attributes['label'].columns,
        agg_col=self.values.selection,
        agg=self.agg,
        level_width=self.level_width,
        level_spacing=self.level_spacing)

    # add placeholder color column that will be assigned colors
    polar_data['color'] = ''

    # set the color based on the assigned color for the group
    for group in self._data.groupby(**self.attributes):
        polar_data.loc[group['stack'], 'color'] = group['color']

    # create the source for the wedges and the text
    self.chart_data = ColumnDataSource(polar_data)
    self.text_data = build_wedge_text_source(polar_data)
def plot_ages(fm):
    """Scatter of appointment counts per age over the most recent rows."""
    # last row per age from the most recent 5000 records, sorted by count;
    # the double reset_index materializes both the rank and 'age' as columns
    counts = fm.tail(5000).groupby('age').apply(lambda grp: grp.tail(1))[[
        'ages.COUNT(appointments)'
    ]].sort_values(by='ages.COUNT(appointments)').reset_index().reset_index()
    hover = HoverTool(tooltips=[
        ("Count", "@{ages.COUNT(appointments)}"),
        ("Age", "@age"),
    ])
    fig = figure(width=400, height=400,
                 tools=[hover, 'box_zoom', 'reset', 'save'])
    fig.scatter('age', 'ages.COUNT(appointments)', alpha=.7,
                source=ColumnDataSource(counts), color='magenta')
    fig.title.text = 'Appointments by Age'
    fig.xaxis.axis_label = 'Age'
    fig.yaxis.axis_label = 'Count'
    return fig
def _generate_items(self, df, columns):
    """Produce list of unique tuples that identify each item."""
    if not self.bin:
        # unbinned: defer to the base implementation
        super(ColorAttr, self)._generate_items(df, columns)
    else:
        # binning only supports a single numerical column; one bin per
        # color in the iterable
        if len(columns) == 1 and ChartDataSource.is_number(df[columns[0]]):
            self.bins = Bins(source=ColumnDataSource(df), column=columns[0],
                             bins=len(self.iterable), aggregate=False)
            if self.sort:
                self.bins.sort(ascending=self.ascending)
            self.items = [bin.label[0] for bin in self.bins]
        else:
            raise ValueError(
                'Binned colors can only be created for one column of \
                numerical data.')
def plot_locations(fm):
    """Scatter of appointment counts per neighborhood (hover shows the name)."""
    # last row per neighborhood, sorted by count; the double reset_index
    # materializes both the rank ('index') and 'neighborhood' as columns
    counts = fm.groupby('neighborhood').apply(
        lambda grp: grp.tail(1))['locations.COUNT(appointments)'].sort_values(
        ).reset_index().reset_index()
    hover = HoverTool(tooltips=[
        ("Count", "@{locations.COUNT(appointments)}"),
        ("Place", "@neighborhood"),
    ])
    fig = figure(width=400, height=400,
                 tools=[hover, 'box_zoom', 'reset', 'save'])
    fig.scatter('index', 'locations.COUNT(appointments)', alpha=.7,
                source=ColumnDataSource(counts), color='teal')
    fig.title.text = 'Appointments by Neighborhood'
    fig.xaxis.axis_label = 'Neighborhoods (hover to view)'
    fig.yaxis.axis_label = 'Count'
    return fig
def _init_bathymetric_map_data(self):
    """Project station lon/lat to Web Mercator and build the de-duplicated
    per-station DataFrame and CDS used by the bathymetric WMTS map."""
    try:
        x_wm, y_wm = self._epsg4326_to_epsg3857(
            self.env.cds_df.LONGITUDE.to_numpy(),
            self.env.cds_df.LATITUDE.to_numpy())
    # FIX: was a bare `except:`, which swallowed every error (including
    # KeyboardInterrupt); only an AttributeError means we are on an old
    # pandas where Series.to_numpy() does not exist yet
    except AttributeError:
        # .as_matrix() is deprecated since pandas 0.23.0
        x_wm, y_wm = self._epsg4326_to_epsg3857(
            self.env.cds_df.LONGITUDE.as_matrix(),
            self.env.cds_df.LATITUDE.as_matrix())
    aux_df = pd.DataFrame(
        dict(X_WMTS=x_wm, Y_WMTS=y_wm, STNNBR=list(self.env.cds_df[STNNBR])))
    # keep a single row per station
    aux_df.drop_duplicates(subset=STNNBR, keep='first', inplace=True)
    lg.info('>> AUX DF LEN: {}'.format(aux_df.index.size))
    # re-index with consecutive integers so the CDS rows are positional
    new_index_column = list(range(aux_df.index.size))
    lg.info('>> AUX DF new_index_column: {}'.format(len(new_index_column)))
    aux_df = aux_df.assign(NEW_INDEX=new_index_column)
    aux_df.set_index(keys='NEW_INDEX', inplace=True)
    self.env.wmts_map_df = aux_df.copy(deep=True)
    self.env.wmts_map_source = ColumnDataSource(self.env.wmts_map_df)
def compressIm(Im, File):
    """Downsample each 651x651 band of Im to 326x326 by averaging 2x2 pixel
    blocks, save the result to File as CSV, and return it as a
    ColumnDataSource.

    Args:
        Im: mapping of band name -> flat pixel array (reshapeable to 651x651)
        File: output CSV path
    """
    newX = list(range(326)) * 326
    newY = np.repeat(list(range(326)), 326)
    newDF = pd.DataFrame(data={"X": newX, "Y": newY})
    for band in possible_bands:
        color_band = np.reshape(Im[band], (651, 651))
        newIm = []
        for row in range(0, 651, 2):
            tmp = []
            for col in range(0, 651, 2):
                # FIX: slices were row:row+1 / col:col+1 (a single pixel),
                # so np.mean never averaged anything; use the 2x2 window
                # the step-2 loops clearly intend. numpy slicing clips the
                # final odd row/column automatically.
                tmp.append(np.mean(color_band[row:row + 2, col:col + 2]))
            newIm.append(tmp)
        newIm = np.array(newIm, dtype=np.uint8)
        # FIX: the original passed -1 as np.reshape's third positional
        # argument, which is the (string) `order` parameter, not a shape;
        # flatten explicitly instead.
        imVec = newIm.T.reshape(-1)
        newDF[band] = imVec
    newCDS = ColumnDataSource(data=newDF)
    # saveName = File[-4:] + ".pickle"
    newDF.to_csv(File)
    print("Saved", File, " as csv.")
    return newCDS
def __init__(self, columns=None, df=None, iterable=None, default=None,
             items=None, **properties):
    """Create a lazily evaluated attribute specification.

    Args:
        columns: a list of column labels
        df(:class:`~pandas.DataFrame`): the data source for the
            attribute spec.
        iterable: an iterable of distinct attribute values
        default: a value to use as the default attribute when no
            columns are passed
        items: the distinct values in columns. If provided, these values
            are used instead of being calculated, which can force a
            specific assignment order.
        **properties: other properties passed to :class:`HasProps`
    """
    properties['columns'] = self._ensure_list(columns)

    if df is not None:
        properties['data'] = ColumnDataSource(df)

    # An explicit default wins; otherwise fall back to the first value
    # of the iterable (copied so the caller's iterator is not consumed).
    if default is not None:
        properties['default'] = default
    elif iterable is not None:
        properties['default'] = next(iter(copy(iterable)))

    if iterable is not None:
        properties['iterable'] = iterable
    if items is not None:
        properties['items'] = items

    super(AttrSpec, self).__init__(**properties)

    if self.default is None and self.iterable is not None:
        self.default = next(iter(copy(self.iterable)))
def to_bokeh(self, columns=None):
    """Convert the dataset to a bokeh ColumnDataSource

    Parameters
    ----------
    columns: list(string or int) (optional)
        The columns to include. (default: All columns)

    Returns
    -------
    bokeh.models.sources.ColumnDataSource
    """
    if columns is None:
        columns = self.columns
    # Iterate over the requested `columns`: the original looped over
    # `self.columns`, silently ignoring the caller's selection.
    return ColumnDataSource({
        column.name: [
            row.get_value(
                column.identifier if column.identifier >= 0 else column.name)
            for row in self.rows
        ]
        for column in columns
    })
def __init__(self, columns=None, df=None, iterable=None, default=None,
             **properties):
    """Create a lazily evaluated attribute specification.

    An explicit ``default`` takes precedence; otherwise the first value
    of ``iterable`` is used (iterated on a copy so the caller's iterator
    is not consumed).
    """
    properties['columns'] = self._ensure_list(columns)

    if df is not None:
        properties['data'] = ColumnDataSource(df)

    if default is not None:
        properties['default'] = default
    elif iterable is not None:
        properties['default'] = next(iter(copy(iterable)))

    if iterable is not None:
        properties['iterable'] = iterable

    super(AttrSpec, self).__init__(**properties)
def draw_line_param_zeroline(df_data, param, colorname):
    """Plot one parameter as a time series with a zero reference line.

    Args:
        df_data: DataFrame with a 'trade_date' column and the ``param``
            column to plot. It is NOT modified.
        param: column name to plot (also used as the legend label).
        colorname: line color for the parameter series.

    Returns:
        bokeh figure with the series, a zero line and value labels.
    """
    data_array = np.array(df_data[param])
    # Parse trade dates on a local Series: the original overwrote
    # df_data['trade_date'] in place, mutating the caller's DataFrame.
    parsed_dates = df_data['trade_date'].apply(str).apply(parse)
    datetime_array = np.array(parsed_dates, dtype=np.datetime64)

    p = figure(width=1800, height=400, x_axis_type="datetime")
    p.line(datetime_array, data_array, color=colorname, legend=param)
    p.line(datetime_array, 0, color='black', legend='0')
    p.legend.location = "top_left"

    source = ColumnDataSource(data=dict(tradedatearry=datetime_array,
                                        paramarry=df_data[param].tolist()))
    labels = LabelSet(x="tradedatearry", y="paramarry", text="paramarry",
                      level="glyph", x_offset=5, y_offset=0,
                      source=source
                      # render_mode="canvas"
                      )
    p.add_layout(labels)
    return p
def _init_prof_sources(self):
    ''' Multiline ColumnDataSource Initialization

    Builds ``self.env.ml_source`` with one color/line-width per profile
    and empty xs/ys columns for every plot.
    '''
    colors = []
    line_width = []
    # VIEWS: from the current profile (thick red) down to the compared
    # background profiles (thinner blues).
    for i in range(NPROF - 1, -1, -1):
        if i == NPROF - 1:
            # TODO: add this to the CDS
            colors.append(Reds3[0])
            line_width.append(3)
        else:
            colors.append(BLUES[i])
            line_width.append(2)

    # ML SOURCE
    init_source_dict = dict(colors=colors, line_width=line_width)
    for i in range(self.env.n_plots):
        # Give every column its own list of empty profiles. The original
        # assigned one shared `init_ml_profs` list object to ALL xs*/ys*
        # keys, so mutating one column's list would mutate them all.
        init_source_dict['xs{}'.format(i)] = [[] for _ in range(NPROF)]
        init_source_dict['ys{}'.format(i)] = [[] for _ in range(NPROF)]
    self.env.ml_source = ColumnDataSource(data=init_source_dict)
def refresh(self):
    """Update the GlyphRenderers.

    .. note: this method would be called after data is added.
    """
    if self.renderers is not None:
        data = self.build_source()
        if data is not None:
            if isinstance(data, dict):
                source = ColumnDataSource(data)
            else:
                # build_source may also return a ready ColumnDataSource.
                # The original never bound `source` on this path and hit
                # a NameError instead of the intended TypeError below.
                source = data
            if not isinstance(source, ColumnDataSource):
                raise TypeError(
                    'build_source must return dict or ColumnDataSource.')
            self.source = self.add_chart_index(source)
            self._set_sources()
def build_source(self):
    """Return a single-row ColumnDataSource describing this bar glyph."""
    # ToDo: Handle rotation
    self.start = self.get_start()
    self.end = self.get_end()
    self.span = self.get_span()

    # x position: dodged label when dodging, otherwise the plain label
    if self.dodge_shift is not None:
        x_val = self.get_dodge_label()
    else:
        x_val = self.label_value or self.label

    # y is the vertical center of the (possibly stacked) bar
    y_val = self.stack_shift + (self.span / 2.0) + self.start

    return ColumnDataSource(dict(
        x=[x_val],
        y=[y_val],
        width=[self.width],
        height=[self.span],
        color=[self.color],
        fill_alpha=[self.fill_alpha],
    ))
def df_to_bar(df):
    """ d is dict form of current_state df. """
    # One shared source over all 'state' columns; one vbar figure per column.
    data = ColumnDataSource(df.filter(like='state'))
    keys = list(data.data.keys())
    plots = []
    for key in keys:
        fig = figure(tools="pan,box_zoom,reset,save",
                     title=key,
                     x_axis_label="time", y_axis_label="",
                     plot_width=400, plot_height=200,
                     x_axis_type="datetime",
                     y_axis_location='right',
                     toolbar_location='above')
        # 30-second-wide bars (width is in ms on a datetime axis)
        fig.vbar('second', 30 * 1000, key, bottom=0, color="black",
                 source=data)
        plots.append(fig)
    script, div = components(plots)
    div = dict(zip(keys, div))
    return script, div
def chart():
    """Render the sentiment-distribution bar chart for the requested brand.

    Reads the brand from the POSTed form, queries the sentiment API and
    returns the rendered chart page.
    """
    brand = request.form['brand']
    response = get_data_from_api(brand=brand)
    # print(response)
    total = response['number_of_tweets']
    positive = response['positive']
    neutral = response['neutral']
    negative = response['negative']

    sentiments = ["Positive", "Neutral", "Negative"]
    distribution = [positive, neutral, negative]
    source = ColumnDataSource(data=dict(sentiments=sentiments,
                                        distribution=distribution))
    p = figure(x_range=sentiments, plot_height=550, plot_width=1000,
               title="Sentiment distribution", x_axis_label='Sentiments')
    p.vbar(x='sentiments', top='distribution', width=0.9, source=source,
           legend_field="sentiments", line_color=None,
           fill_color=factor_cmap('sentiments',
                                  palette=['#39DA00', '#FDB229', '#FF0445'],
                                  factors=sentiments))
    p.xgrid.grid_line_color = None
    p.y_range.start = 0
    p.legend.orientation = "horizontal"
    p.legend.location = "top_right"
    p.background_fill_color = "#0B0022"
    script, div = components(p)

    def _pct(count):
        # Percentage of total, rounded to one decimal. Guards against
        # ZeroDivisionError when the brand has no tweets at all.
        if not total:
            return 0.0
        return round((float(count) / float(total)) * 100, 1)

    pos = _pct(positive)
    neu = _pct(neutral)
    neg = _pct(negative)
    print(pos)
    print(neu)
    print(neg)
    html = render_template("chart.html", the_div=div, the_script=script,
                           positive=pos, neutral=neu, negative=neg)
    return encode_utf8(html)
def get_data():
    """Ingest pickled readings into sqlite, reload the full history and
    return it as a 5-minute-resampled bokeh ColumnDataSource.

    Side effects: appends new rows to the 'raw' table of
    environmentals.db and deletes the ingested pickle files.
    """
    path = '/home/ubuntu/data/'
    # Only the pickle files; skip the sqlite db. endswith() also handles
    # dot-less names, which the original split('.')[1] crashed on.
    pickle_names = [name for name in listdir(path) if name.endswith('.p')]

    df = pd.DataFrame()
    for pickle_name in pickle_names:
        df = pd.concat([df, pd.DataFrame(pd.read_pickle(path + pickle_name))])

    # here we update our sqlite database and then cleanup the datafiles in
    # the directory
    conn = sqlite3.connect(path + 'environmentals.db')
    # write new records to sql table
    df.to_sql('raw', conn, if_exists='append')
    # now read all records from sql table
    df = pd.read_sql_query('select * from raw', conn)
    conn.close()

    # clean up pickle files from data directory
    for name in pickle_names:
        os.remove(path + name)

    # update datatypes (sqlite round-trip loses them)
    for col in ('shTemp', 'shHum', 'shPres',
                'mplTemp', 'mplAltitude', 'mplPressure'):
        df[col] = df[col].astype('float')
    df['Timestamp'] = pd.to_datetime(df['Timestamp'])
    df = df.sort_values('Timestamp', ascending=True)

    # resample to 5 min intervals
    df.index = df['Timestamp']
    df = df.resample('5T').mean()
    df.reset_index(inplace=True)
    source = ColumnDataSource(df)
    return source
def plot(self, output_file="termite.html"):
    """Draw a termite plot (word vs. topic; circle size encodes weight)."""
    table = blz.Data(self.input_file)
    df = pd.read_csv(self.input_file)

    w_max = blz.compute(table.weight.max())
    w_min = blz.compute(table.weight.min())

    # Create a size variable to define the size of the the circle for the plot.
    table = blz.transform(
        table, size=blz.sqrt((table.weight - w_min) / (w_max - w_min)) * 50)

    words = into(list, table['word'].distinct())
    topics = into(list, table['topic'].distinct())
    # Convert topics to strings (categorical axes need string labels)
    topic_labels = [str(t) for t in topics]

    frame = into(pd.DataFrame, table)
    plt.output_file(output_file)
    data_source = ColumnDataSource(frame)

    fig = plt.figure(x_range=topic_labels, y_range=words,
                     plot_width=1000, plot_height=1700,
                     title=self.title)
    fig.circle(x="topic", y="word", size="size", fill_alpha=0.6,
               source=data_source)
    # fig.xaxis().major_label_orientation = np.pi/3
    logging.info("generating termite plot for file %s" % self.input_file)
    plt.show(fig)
def test_remove_exists(self):
    """remove() should drop an existing column from the data source."""
    ds = ColumnDataSource()
    name = ds.add([1, 2, 3], "foo")
    assert name
    ds.remove("foo")
    # assertEqual, not the deprecated assertEquals alias
    # (removed in Python 3.12)
    self.assertEqual(ds.column_names, [])
def test_add_with_and_without_name(self):
    """add() uses the given name, or auto-generates 'Series N'."""
    ds = ColumnDataSource()
    name = ds.add([1, 2, 3], "foo")
    # assertEqual, not the deprecated assertEquals alias
    # (removed in Python 3.12)
    self.assertEqual(name, "foo")
    name = ds.add([4, 5, 6])
    self.assertEqual(name, "Series 1")
def test_add_with_name(self):
    """add() returns the explicitly supplied column name."""
    ds = ColumnDataSource()
    name = ds.add([1, 2, 3], name="foo")
    # assertEqual, not the deprecated assertEquals alias
    # (removed in Python 3.12)
    self.assertEqual(name, "foo")
    name = ds.add([4, 5, 6], name="bar")
    self.assertEqual(name, "bar")
height=500, toolbar_location='right') p.line('Date', 'Price', color='#A6CEE3', source=source, legend='AAPL') style_axis(p) hover =p.select(dict(type=HoverTool)) hover.mode='vline' hover.tooltips = OrderedDict([ ("Date", "@Date"), ("Price", "$ @Price"), ("Date", "@DateFmt"), ]) url = "http://127.0.0.1:5000/alldata" res = requests.get(url, timeout=20) data = res.json() static_source = ColumnDataSource(data) selection_plot = figure( height=100, tools="box_select", x_axis_location="above", x_axis_type="datetime", toolbar_location=None, outline_line_color=None, name="small_plot" ) selection_source = ColumnDataSource() for k in ['end', 'values', 'start', 'bottom']: selection_source.add([], k) selection_plot.quad(top='values', bottom='bottom', left='start', right='end', source=selection_source, color='#c6dbef', fill_alpha=0.5) selection_plot.line('Date', 'Price', color='#A6CEE3', source=static_source) selection_plot.circle('Date', 'Price', color='#A6CEE3', source=static_source, size=1)
def test_stream_bad_data(self):
    """stream() must reject mismatched columns and ragged updates."""
    ds = ColumnDataSource(data=dict(a=[10], b=[20]))

    # (bad update, expected ValueError message) pairs
    bad_updates = [
        (dict(),
         "Must stream updates to all existing columns (missing: a, b)"),
        (dict(a=[10]),
         "Must stream updates to all existing columns (missing: b)"),
        (dict(a=[10], b=[10], x=[10]),
         "Must stream updates to all existing columns (extra: x)"),
        (dict(a=[10], x=[10]),
         "Must stream updates to all existing columns (missing: b, extra: x)"),
        (dict(a=[10], b=[10, 20]),
         "All streaming column updates must be the same length"),
    ]
    for new_data, message in bad_updates:
        with self.assertRaises(ValueError) as cm:
            ds.stream(new_data)
        self.assertEqual(str(cm.exception), message)

    # multi-dimensional arrays are rejected with a size-specific message
    with self.assertRaises(ValueError) as cm:
        ds.stream(dict(a=[10], b=np.ones((1, 1))))
    self.assertTrue(
        str(cm.exception).startswith(
            "stream(...) only supports 1d sequences, got ndarray with size (")
    )
def test_patch_bad_simple_indices(self):
    """Patching past the end of a column must raise ValueError."""
    ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
    with self.assertRaises(ValueError) as cm:
        ds.patch(dict(a=[(3, 100)]))  # columns only have 2 entries
    self.assertEqual(str(cm.exception),
                     "Out-of bounds index (3) in patch for column: a")
def test_patch_bad_slice_indices(self):
    """patch() must reject out-of-bounds, reversed and negative slices."""
    ds = ColumnDataSource(data=dict(a=[10, 11, 12, 13, 14, 15],
                                    b=[20, 21, 22, 23, 24, 25]))
    ten = list(range(10))

    # (bad slice, expected ValueError message) pairs
    cases = [
        (slice(10),
         "Out-of bounds slice index stop (10) in patch for column: a"),
        (slice(10, 1),
         "Patch slices must have start < end, got slice(10, 1, None)"),
        (slice(None, 10, -1),
         "Patch slices must have non-negative (start, stop, step) values, "
         "got slice(None, 10, -1)"),
        (slice(10, 1, 1),
         "Patch slices must have start < end, got slice(10, 1, 1)"),
        (slice(10, 1, -1),
         "Patch slices must have start < end, got slice(10, 1, -1)"),
        (slice(1, 10, -1),
         "Patch slices must have non-negative (start, stop, step) values, "
         "got slice(1, 10, -1)"),
    ]
    for bad_slice, message in cases:
        with self.assertRaises(ValueError) as cm:
            ds.patch(dict(a=[(bad_slice, ten)]))
        self.assertEqual(str(cm.exception), message)
def test_set_data_from_json_list(self):
    """set_from_json() accepts plain-list column data unchanged."""
    ds = ColumnDataSource()
    data = {"foo": [1, 2, 3]}
    ds.set_from_json('data', data)
    # assertEqual, not the deprecated assertEquals alias
    # (removed in Python 3.12)
    self.assertEqual(ds.data, data)
def test_set_data_from_json_base64(self):
    """A numpy column round-trips through the base64 JSON transform."""
    ds = ColumnDataSource()
    data = {"foo": np.arange(3)}
    serialized = transform_column_source_data(data)
    ds.set_from_json('data', serialized)
    self.assertTrue(np.array_equal(ds.data["foo"], data["foo"]))
def test_set_data_from_json_nested_base64_and_list(self):
    """Mixed nested (ndarray + list) columns survive the JSON round-trip."""
    ds = ColumnDataSource()
    data = {"foo": [np.arange(3), [1, 2, 3]]}
    serialized = transform_column_source_data(data)
    ds.set_from_json('data', serialized)
    self.assertTrue(np.array_equal(ds.data["foo"], data["foo"]))