def get_tokens(self):
    try:
        # Bare attribute access: raises AttributeError when no GGPK is loaded.
        context.get_context().ggpk
        ggpk = (Token.Warn, "GGPK Loaded")
    except AttributeError:
        ggpk = (Token.Info, "No GGPK loaded")
    return [ggpk]

def newdb(dbfile: str, password="", type=".kdbx"):
    """
    This command is used to create a new database.
    """
    ctx = context.get_context()
    db = ctx.keepass.db
    if ctx.keepass.is_open():
        cprint(
            "Please close the current database {} before you can create a new database."
            .format(db.Name))
    else:
        newfile = dbfile + type
        for file in lsdb():
            if newfile == file:
                print("Data file {} already exists.".format(newfile))
                return
        if not password:
            import getpass
            password = getpass.getpass('Password:')
        # The directory prefix here was redacted ("******") in the source;
        # `db_home` is a hypothetical stand-in for the original variable.
        db_path = db_home + '/' + newfile
        ctx.keepass.new(db_path, password)
        ctx.keepass.db.Name = dbfile
        ctx.keepass.db.DefaultUserName = dbfile
        ctx.keepass.db.Description = db_path

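# Hypothetical usage sketch for newdb (values are illustrative only):
#
#   newdb("personal", password="s3cret")   # creates personal.kdbx
#   newdb("work")                          # prompts interactively for a password
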
async def mark_data_unrecoverable(self, reason: str, user: Optional[str] = ""):
    """
    [DANGER] Marks all the UNAVAILABLE shards (stuck in the DATA_MIGRATION
    storage state) as unrecoverable. This advises readers not to wait for
    data on these shards and to issue data-loss gaps if necessary.
    """
    ctx = context.get_context()
    try:
        async with ctx.get_cluster_admin_client() as client:
            response: MarkAllShardsUnrecoverableResponse
            response = await mark_all_shards_unrecoverable(
                client=client, user=user or getuser(), reason=reason)
            if response.shards_succeeded:
                succeeded_str = ", ".join(
                    [str(shard) for shard in response.shards_succeeded])
                cprint(f"Succeeded: {succeeded_str}", "green")
            if response.shards_failed:
                failed_str = ", ".join(
                    [str(shard) for shard in response.shards_failed])
                cprint(f"Failed: {failed_str}", "red")
            # Both are empty
            if not (response.shards_succeeded or response.shards_failed):
                cprint("No UNAVAILABLE shards to mark unrecoverable!",
                       "yellow")
    except Exception as e:
        print(colored(f"Cannot mark the data unrecoverable: {e}", "red"))
        return

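# Hypothetical invocation sketch: assumes this coroutine is a method of an
# admin-command object `cmd` and that no event loop is already running.
# Purely illustrative; not part of the original module.
#
#   import asyncio
#   asyncio.run(cmd.mark_data_unrecoverable(reason="rack decommissioned"))
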
def set_ctxt(
        pager: str = 'on',
        hostname: typing.List[str] = [],
        start_time: str = "",
        end_time: str = "",
        namespace: typing.List[str] = [],
        engine: str = "",
        datadir: str = "",
):
    """Set certain contexts for subsequent commands. Cmd is additive."""
    plugin_ctx = context.get_context()
    if namespace:
        plugin_ctx.namespace = namespace
    if hostname:
        plugin_ctx.hostname = hostname
    if start_time:
        plugin_ctx.start_time = start_time
    if end_time:
        plugin_ctx.end_time = end_time
    if engine:
        plugin_ctx.change_engine(engine)
    if datadir:
        if os.path.isdir(datadir):
            plugin_ctx.cfg['data-directory'] = datadir
        else:
            print(f'{datadir} is not a valid directory')
    if pager == 'on':
        plugin_ctx.pager = True

def delete(self, modalities=None, cognitive_sources=None, embeddings=None):
    '''
    Remove cognitive sources or experiments (cognitive source - embedding
    combinations) from the specified configuration, or delete the entire
    configuration.
    '''
    ctx = context.get_context()
    configuration = ctx.open_config
    if not configuration:
        cprint('No configuration open, aborting ...', 'red')
        return
    embedding_registry = ctx.embedding_registry
    resources_path = ctx.resources_path
    main_conf_dict = _open_config(configuration, resources_path)
    cog_config_dict = _open_cog_config(resources_path)
    main_conf_dict = commands.config_delete(configuration, main_conf_dict,
                                            cog_config_dict,
                                            embedding_registry, modalities,
                                            cognitive_sources, embeddings)
    _backup_config(configuration, resources_path)
    if main_conf_dict:
        _save_config(main_conf_dict, configuration, resources_path)

def report(run_id=0,
           modalities=['eye-tracking', 'eeg', 'fmri'],
           alpha=0.01,
           test="Wilcoxon",
           precision=3,
           average_multi_hypothesis=True,
           include_history_plots=False,
           include_features=True,
           html=True,
           open_html=False,
           pdf=False,
           open_pdf=False):
    '''
    Compute significance tests, aggregate results and generate a report.
    '''
    ctx = context.get_context()
    configuration = ctx.open_config
    if not configuration:
        cprint('No configuration open, aborting ...', 'red')
        return
    resources_path = ctx.resources_path
    cprint('Computing significance stats ...', 'yellow')
    significance(run_id, modalities, alpha, test, quiet=True)
    cprint('Aggregating ...', 'yellow')
    aggregate(run_id, modalities, test, quiet=True)
    generate_report(configuration, run_id, resources_path, precision,
                    average_multi_hypothesis, include_history_plots,
                    include_features, html, pdf, open_html, open_pdf)

def experiment(self,
               baselines=True,
               modalities=None,
               cognitive_sources=['all'],
               embeddings=['all'],
               single_edit=False,
               edit_cog_source_params=False,
               scope=None):
    '''
    Edit configuration of single, multiple or all combinations of embeddings
    and cognitive sources.
    '''
    ctx = context.get_context()
    configuration = ctx.open_config
    if not configuration:
        cprint('No configuration open, aborting ...', 'red')
        return
    embedding_registry = ctx.embedding_registry
    resources_path = ctx.resources_path
    main_conf_dict = _open_config(configuration, resources_path)
    cog_data_config_dict = _open_cog_config(resources_path)
    try:
        commands.config_experiment(configuration, main_conf_dict,
                                   cog_data_config_dict, embedding_registry,
                                   resources_path, baselines, modalities,
                                   cognitive_sources, embeddings, single_edit,
                                   edit_cog_source_params, scope)
    except NothingToDoException:
        cprint(
            "Nothing to do. If the configuration is populated, pass scope=all "
            "to add embeddings and cognitive-sources in bulk.", "yellow")

def run(embeddings=['all'],
        modalities=None,
        cognitive_sources=['all'],
        cognitive_features=None,
        processes=None,
        n_gpus=None,
        baselines=True):
    '''
    Run parallelized evaluation of single, selected or all combinations of
    embeddings and cognitive sources.
    '''
    ctx = context.get_context()
    resources_path = ctx.resources_path
    configuration = ctx.open_config
    embedding_registry = ctx.embedding_registry
    max_gpus = ctx.max_gpus
    visible_gpu_ids = ctx.visible_gpus
    if not configuration:
        cprint('No configuration open, aborting ...', 'red')
        return
    config_dict = _open_config(configuration, resources_path)
    config_dict = commands.run(configuration, config_dict, resources_path,
                               embedding_registry, embeddings, modalities,
                               cognitive_sources, cognitive_features,
                               processes, n_gpus, max_gpus, visible_gpu_ids,
                               baselines)
    if config_dict:
        _save_config(config_dict, configuration, resources_path)

async def dump(self):
    """
    Prints the server config in JSON format
    """
    ctx = context.get_context()
    async with ctx.get_cluster_admin_client() as client:
        print(await client.dumpServerConfigJson())

def set_ctxt(
        pager: str = 'on',
        hostname: typing.List[str] = [],
        start_time: str = "",
        end_time: str = "",
        namespace: typing.List[str] = [],
        engine: str = "",
):
    """Set certain contexts for subsequent commands. Cmd is additive."""
    plugin_ctx = context.get_context()
    if namespace:
        plugin_ctx.namespace = namespace
    if hostname:
        plugin_ctx.hostname = hostname
    if start_time:
        plugin_ctx.start_time = start_time
    if end_time:
        plugin_ctx.end_time = end_time
    if engine:
        plugin_ctx.change_engine(engine)
    if pager == 'on':
        plugin_ctx.pager = True

def close():
    """
    Close the database
    """
    ctx = context.get_context()
    ctx.keepass.close()
    cprint("Database is closed")

def remove(self, path: str, recursive: bool = False):
    """
    Removes a directory or a log-group under a specific directory path in
    the LogsConfig tree. By default this will NOT delete a non-empty
    directory; pass --recursive (recursive=True) to remove it anyway.
    """
    try:
        c = _get_client()
        if not context.get_context().args.yes and not confirm(
                "Are you sure you want to REMOVE "
                "'{}'? (y/n)".format(path)):
            return
        try:
            version = c.remove_log_group(str(path))
        except LogDeviceError as e:
            if e.args[0] == ErrorStatus.NOTFOUND:
                version = c.remove_directory(str(path), recursive)
        cprint("'{}' has been removed in version {}".format(path, version))
    except LogDeviceError as e:
        cprint("Cannot remove '{}'. Reason: {}".format(path, e.args[2]),
               "red")
        return 1

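# `confirm` is used by the commands above and below but isn't defined in this
# snippet. A minimal sketch of what it presumably does (a blocking y/n
# prompt); the real helper in the source tree may differ:
def confirm(prompt: str) -> bool:
    # Re-ask until the user gives an unambiguous yes/no answer.
    while True:
        answer = input(prompt + " ").strip().lower()
        if answer in ("y", "yes"):
            return True
        if answer in ("n", "no"):
            return False
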
def set_range(self, path: str, from_id: int, to_id: int):
    """
    This updates the log id range for the LogGroup under a specific
    directory path in the LogsConfig tree. This only works if the tier has
    LogsConfigManager enabled.
    """
    try:
        c = _get_client()
        current_log_group = c.get_log_group_by_name(str(path))
        if not context.get_context().args.yes and not confirm(
                "Are you sure you want to set"
                " the log range at "
                '"{}" to be ({}..{}) instead of ({}..{})? (y/n)'.format(
                    path,
                    from_id,
                    to_id,
                    current_log_group.range[0],
                    current_log_group.range[1],
                )):
            return
        version = c.set_log_group_range(str(path), from_id, to_id)
        cprint("Log group '{}' has been updated in version {}!".format(
            path, version))
    except LogDeviceError as e:
        cprint("Cannot update range for '{}': {}".format(path, e), "red")

def print_group(group, detail=False):
    ctx = context.get_context()
    if detail:
        s_table = PrettyTable(["Time", "Type", "Name"])
        for gp in group.Groups:
            s_table.add_row(
                [gp.LastModificationTime.ToString(), "Group", gp.get_Name()])
        for entry in group.Entries:
            s_table.add_row([
                entry.LastModificationTime.ToString(), "Entry",
                entry.Strings.ReadSafe(KeePass.TITLE)
            ])
    else:
        s_table = PrettyTable(["Name"])
        for gp in group.Groups:
            s_table.add_row([gp.get_Name() + "/"])
        for entry in group.Entries:
            s_table.add_row([entry.Strings.ReadSafe(KeePass.TITLE)])
    s_table.align = "l"
    s_table.border = False
    s_table.header = False
    print("Current group is {}.\n".format(group))
    print(s_table)

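# Illustrative output of the non-detail listing above (group and entry names
# are hypothetical): groups are suffixed with "/", entries are bare titles.
#
#   Current group is Root.
#
#   Banking/
#   Email/
#   GitHub
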
def get_token(path):
    "Get the OTP token of an entry."
    ctx = context.get_context()
    if ctx.keepass.is_open():
        entry = ctx.keepass.find_entry_by_path(path)
        if entry:
            print("Token: {}, Progress: {}".format(entry.get_Token(),
                                                   entry.get_Progress()))

def clear_ctxt(
        pager: str = 'off',
        hostname: str = "",
        start_time: str = "",
        end_time: str = "",
        namespace: str = "",
):
    """Clear certain contexts for subsequent commands."""
    plugin_ctx = context.get_context()
    ctxt = plugin_ctx.ctxt
    if namespace:
        ctxt.namespace = []
    if hostname:
        ctxt.hostname = []
    if start_time:
        ctxt.start_time = ""
    if end_time:
        ctxt.end_time = ""
    if pager:
        ctxt.pager = False

def get_tokens(self):
    spacer = (Token.Spacer, " ")
    if context.get_context().verbose:
        is_verbose = (Token.Warn, "ON")
    else:
        is_verbose = (Token.Info, "OFF")
    if context.get_context().pager:
        is_pager = (Token.Warn, "ON")
    else:
        is_pager = (Token.Info, "OFF")
    return [
        (Token.Toolbar, "Suzieq"),
        spacer,
        (Token.Toolbar, "Verbose "),
        spacer,
        is_verbose,
        spacer,
        (Token.Toolbar, "Pager "),
        spacer,
        is_pager,
        spacer,
        (Token.Toolbar, "Namespace "),
        spacer,
        (Token.Info, ", ".join(self.ctx.namespace)),
        spacer,
        (Token.Toolbar, "Hostname "),
        spacer,
        (Token.Info, ", ".join(self.ctx.hostname)),
        spacer,
        (Token.Toolbar, "StartTime "),
        spacer,
        (Token.Info, self.ctx.start_time),
        spacer,
        (Token.Toolbar, "EndTime "),
        spacer,
        (Token.Info, self.ctx.end_time),
        spacer,
        (Token.Toolbar, "Engine "),
        spacer,
        (Token.Info, self.ctx.engine_name),
        spacer,
        (Token.Toolbar, "Query Time "),
        spacer,
        (Token.Info, self.ctx.exec_time),
    ]

def _build_cmd_verb_list(self):
    if not self._allcmds:
        ctx = context.get_context()
        self._allcmds = ctx.registry.get_all_commands_map()
        self._sqcmds = [x for x in sorted(self._allcmds)
                        if not self._allcmds[x].built_in]

def set_ctxt(pager: str = "",
             hostname: typing.List[str] = None,
             start_time: str = "",
             end_time: str = "",
             namespace: typing.List[str] = None,
             engine: str = "",
             datadir: str = "",
             col_width: int = 50,
             rest_server_ip: str = "",
             rest_server_port: str = "",
             rest_api_key: str = "",
             rest_use_https: str = ""):
    """Set certain contexts for subsequent commands. Cmd is additive."""
    plugin_ctx = context.get_context()
    ctxt = plugin_ctx.ctxt
    if namespace:
        ctxt.namespace = namespace
    if hostname:
        ctxt.hostname = hostname
    if start_time:
        ctxt.start_time = start_time
    if end_time:
        ctxt.end_time = end_time
    if engine:
        plugin_ctx.change_engine(engine)
    if datadir:
        if os.path.isdir(datadir):
            ctxt.cfg['data-directory'] = datadir
        else:
            print(f'{datadir} is not a valid directory')
    if col_width:
        ctxt.col_width = int(col_width)
    if pager == 'on':
        ctxt.pager = True
    elif pager == 'off':
        ctxt.pager = False
    if rest_server_ip:
        ctxt.rest_server_ip = rest_server_ip
    if rest_server_port:
        ctxt.rest_server_port = rest_server_port
    if rest_api_key:
        ctxt.rest_api_key = rest_api_key
    if rest_use_https == 'True':
        ctxt.rest_transport = 'https'
    elif rest_use_https == 'False':
        ctxt.rest_transport = 'http'

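# Hypothetical usage sketch (values are illustrative): subsequent show
# commands would then be scoped to this namespace and time window.
#
#   set_ctxt(namespace=['datacenter'],
#            start_time='2021-01-01 00:00:00',
#            end_time='2021-01-02 00:00:00',
#            pager='on')
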
def _test_context_filtering(cmd, options):
    assert len(options) == 1
    ctx = context.get_context()
    k = next(iter(options))
    v = options[k]
    setattr(ctx, k, v)
    s = _test_command(cmd, 'show', None)
    setattr(ctx, k, "")  # reset ctx back to no filtering
    return s

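# Hypothetical usage sketch (command and namespace names are illustrative,
# not from the original test suite):
#
#   result = _test_context_filtering('device', {'namespace': ['dual-bgp']})
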
def get_tokens(self):
    spacer = (Token.Spacer, " ")
    if context.get_context().keepass.is_hidden:
        password_state = (Token.Warn, "Hidden")
    else:
        password_state = (Token.Info, "Show")
    return [
        (Token.Info, context.get_context().db_type),
        spacer,
        (Token.Toolbar, "Password"),
        spacer,
        password_state,
        spacer,
        (Token.Toolbar, "Status"),
        spacer,
        (Token.Info, context.get_context().current_group),
    ]

def renderGGPK(path: str = ""):
    """
    Renders the ggpk tree
    """
    ctx = context.get_context()
    try:
        ctx.ggpk.render(path)
    except AttributeError:
        cprint("No GGPK loaded!", "red")

def update_url(path, url=""):
    "Update the OTP URL of an entry."
    ctx = context.get_context()
    if ctx.keepass.is_open():
        entry = ctx.keepass.find_entry_by_path(path)
        if entry:
            if url:
                ctx.keepass.update_otp_url(entry, url)
            print("Token: {}, Progress: {}".format(entry.get_Token(),
                                                   entry.get_Progress()))

def cat(path: str):
    "Show an entry"
    ctx = context.get_context()
    if ctx.keepass.is_open():
        entry = ctx.keepass.find_entry_by_path(path)
        if entry:
            ctx.keepass.print_entry(entry)
        else:
            print("cannot access {}: No such file or directory".format(path))

def rmdir(group_name: str):
    "Delete a directory (group)"
    ctx = context.get_context()
    if ctx.keepass.is_open():
        group = ctx.keepass.current_group.FindCreateGroup(group_name, False)
        if group:
            ctx.keepass.db.DeleteGroup(group)
            cprint("Removed {}.".format(group))
        else:
            # Report the requested name; `group` is None on this branch.
            cprint("rmdir: failed to remove {}: No such group.".format(
                group_name))

def run_interactive(self, cmd, arg_str, raw):
    if len(arg_str) < 1:
        msg = "Cluster admin server address required"
        cprint(msg, "red")
        return -1
    ctx = context.get_context()
    # Updating the context with the new socket for the admin server
    ctx._set_admin_server_socket_address(parse_socket_address(arg_str))
    return self._run()

def rename_group(src, dst):
    ctx = context.get_context()
    try:
        if ctx.keepass.groups[src]:
            ctx.keepass.groups[src].set_Name(dst)
            return ctx.keepass.groups[dst]
        else:
            return None
    except KeyError:
        return None

def save():
    """
    Save the database to disk
    """
    ctx = context.get_context()
    db = ctx.keepass.db
    if ctx.keepass.is_open():
        logger = KPCLibPyLogger()
        db.Save(logger)

def _test_context_filtering(cmd, filter):
    assert len(filter) == 1
    ctx = context.get_context()
    k = next(iter(filter))
    v = filter[k]
    setattr(ctx, k, v)
    s = _test_command(cmd, 'show', None)
    assert s == 0
    setattr(ctx, k, "")  # reset ctx back to no filtering
    return s

async def run_status(nodes, hostnames, extended, formatter, **kwargs):
    ctx = context.get_context()
    async with ctx.get_cluster_admin_client() as client:
        nodes_state = await get_nodes_state(client)
        if nodes_state is None:
            return
        host_tasks = []
        for node_state in nodes_state.states:
            config = node_state.config
            use_data_address = (config.other_addresses is None
                                or config.other_addresses.admin is None)
            address = (SocketAddress(
                address_family=SocketAddressFamily.INET,
                address=config.data_address.address,
                port=DEFAULT_THRIFT_PORT,
            ) if use_data_address else config.other_addresses.admin)
            host_tasks.append(
                get_host_info(ctx.get_node_admin_client, address=address))
        hosts_info = await asyncio.gather(*host_tasks)

        additional_info_mapping = defaultdict(dict)
        additional_info = await get_additional_info()
        if additional_info:
            # Add the info to the defaultdict mapping.
            for address, values in additional_info.items():
                if not values:
                    continue
                additional_info_mapping[address].update(values)

        # Convert the additional info dict mapping to a list that is aligned
        # with both the node_state.states and hosts_info so they can be
        # easily zipped.
        additional_info = []
        for node_state in nodes_state.states:
            config = node_state.config
            use_data_address = (config.other_addresses is None
                                or config.other_addresses.admin is None)
            socket_address = (config.data_address if use_data_address else
                              config.other_addresses.admin)
            mapping_key = (ip_address(socket_address.address)
                           if socket_address.address_family ==
                           SocketAddressFamily.INET else
                           socket_address.address)
            additional_info.append(additional_info_mapping[mapping_key])

        merged_info = await merge_information(
            nodes_state=nodes_state,
            hosts_info=hosts_info,
            additional_info=additional_info,
        )
        merged_info = filter_merged_information(merged_info, nodes, hostnames)
        await formatter(merged_info, **kwargs)