def _varName_toMultiIndex(index_varDictionnary: pd.Index) -> pd.MultiIndex: long_names = index_varDictionnary.tolist() splits = [ long_name.strip('\\').split('\\') for long_name in long_names ] multi_index = pd.MultiIndex.from_tuples(splits) return multi_index
def to_omx(file: str, matrices: Dict[str, MATRIX_TYPES], zone_index: pd.Index = None, title: str = '',
           descriptions: Dict[str, str] = None, attrs: Dict[str, dict] = None,
           mapping: str = 'zone_numbers'):
    """Write a collection of matrices to a new (or overwritten) OMX file.

    Args:
        file: Path of the OMX file to write (Path objects are converted).
        matrices: Dict of matrices to write. Must be a dict (so per-matrix
            metadata can be attached) and homogeneously typed: all Numpy
            arrays, all Series, or all DataFrames. `_prep_matrix_dict`
            validates that shapes and labels agree.
        zone_index: Optional override for the zone labels; mainly useful
            when `matrices` holds raw Numpy arrays.
        title: Title stored in the OMX file.
        descriptions: Optional per-matrix description strings.
        attrs: Optional per-matrix attribute dicts.
        mapping: Name of the zone mapping stored inside the OMX file.
    """
    matrices, zone_index = _prep_matrix_dict(matrices, zone_index)

    if descriptions is None:
        descriptions = dict.fromkeys(matrices, '')
    if attrs is None:
        attrs = dict.fromkeys(matrices, None)

    with omx.open_file(str(file), mode='w', title=title) as omx_file:
        omx_file.create_mapping(mapping, zone_index.tolist())
        for name, array in matrices.items():
            omx_file.create_matrix(name, obj=np.ascontiguousarray(array),
                                   title=descriptions[name], attrs=attrs[name])
def _validate_indexer(indexer: pd.Index) -> Iterable: """Ensure `indexer` can be used as an indexer on another index. https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.html """ if not isinstance(indexer, pd.MultiIndex) and pd.isna(indexer).any(): # isna is not defined for MultiIndex return indexer.tolist() return indexer
def find_overlap_in_dataset(data_in: np.ndarray, list_1: pd.Index, list_2: list):
    """Mask `list_1` by its overlap with `list_2` and slice `data_in` columns.

    Returns a tuple ``(found_bool, subset)`` where `found_bool` is a boolean
    array marking which entries of `list_1` also occur in `list_2`, and
    `subset` is ``data_in.X`` restricted to those columns.

    NOTE(review): the annotation says `data_in` is an ndarray, but the code
    reads ``data_in.X`` — presumably an AnnData-like container; confirm.

    Raises:
        TypeError: if `list_1` is not a ``pd.Index``.
    """
    if not isinstance(list_1, pd.Index):
        raise TypeError("{} is supposed to be of type [pd.Index]. "
                        "Wrong format instead: {}".format(list_1, type(list_1)))
    shared = set(list_1.tolist()) & set(list_2)
    found_bool = list_1.isin(list(shared))
    return found_bool, data_in.X[:, found_bool]
def build_autocomplete_grph_driver(rtplot: Figure, plots: List, ms_plot: Figure,
                                   patchsources: Dict[str, Tuple], source: ColumnDataSource,
                                   default_county: str, counties: pd.Index) -> Tuple[CDSView, AutocompleteInput]:
    """Build the county autocomplete widget and a CDSView filtered by it.

    The widget's value change triggers a CustomJS callback wired to the
    given plots/axes; a CustomJSFilter keyed on the same widget drives the
    returned CDSView over `source`.
    """
    choices = AutocompleteInput(
        completions=counties.tolist(),
        case_sensitive=False,
        value=default_county,
        title='Search for county or select from table:',
        name="county_input",
        width_policy='fit',
        css_classes=['autocomplete_input'],
        min_width=250,
        align="start",
    )

    cb_args = {
        'source': source,
        'rtplot': rtplot,
        'rtxaxis': rtplot.xaxis[0],
        'rtyaxis': rtplot.yaxis[0],
        'ms_plot': ms_plot,
        'ms_plot_xaxis': ms_plot.xaxis[0],
        'ms_plot_yaxis0': ms_plot.yaxis[0],
        'choices': choices,
        'patchsources': patchsources,
        'xaxes': [p['plot'].xaxis[0] for p in plots],
        'yaxes': [p['plot'].yaxis[0] for p in plots],
        'plots': [p['plot'] for p in plots],
    }
    choices.js_on_change('value', CustomJS(args=cb_args, code=constants.autocomplete_input_code))

    js_filter = CustomJSFilter(args=dict(choices=choices), code=constants.cdsview_jsfilter_code)
    view = CDSView(source=source, filters=[js_filter])
    return view, choices
def convert_pandas_index_to_array(value: pd.Index):
    """Return the index's labels as a plain list, or None for non-Index input."""
    return value.tolist() if isinstance(value, pd.Index) else None