def load_settings(self):
    """Load pyborg settings from ``pyborg.toml`` in the click app directory.

    If the app directory exists but the file does not, seed it with a
    default config first.  Returns a ``FakeCfg2`` whose ``max_words`` comes
    from the file, falling back to 50000 when the stored value is falsy.
    """
    app_dir = click.get_app_dir("Pyborg")
    toml_path = os.path.join(app_dir, "pyborg.toml")
    if os.path.exists(app_dir) and not os.path.exists(toml_path):
        settings = {'pyborg-core': {"max_words": False}}
        # Use a context manager so the handle is closed; the original
        # leaked the file object returned by open().
        with open(toml_path, "w") as f:
            toml.dump(settings, f)
    d = toml.load(toml_path)['pyborg-core']
    if d['max_words']:
        cfg = FakeCfg2(max_words=d['max_words'])
    else:
        cfg = FakeCfg2(max_words=50000)
    return cfg
def get(ctx, model_id):
    """ Retrieves a model from the repository. """
    # Make sure the application directory exists; EAFP — ignore the error
    # when it is already there.
    try:
        os.makedirs(click.get_app_dir(APP_NAME))
    except OSError:
        pass
    # Download into the app dir, showing a spinner during the transfer.
    repo.get_model(model_id, click.get_app_dir(APP_NAME), partial(spin, 'Retrieving model'))
    # Erase the spinner character and print a green check mark.
    click.secho(u'\b\u2713', fg='green', nl=False)
    # ANSI escape: re-show the terminal cursor before exiting.
    click.echo('\033[?25h\n', nl=False)
    ctx.exit(0)
def ocr(ctx, model, pad, hocr, lines, conv):
    """ Recognizes text in line images. """
    # we do the locating and loading of the model here to spare us the overhead
    # in each worker.
    # first we try to find the model in the absolute path, then ~/.kraken, then
    # LEGACY_MODEL_DIR
    search = [model,
              os.path.join(click.get_app_dir(APP_NAME), model),
              os.path.join(LEGACY_MODEL_DIR, model)]
    # if automatic conversion is enabled we look for a converted model in
    # ~/.kraken
    if conv is True:
        search.insert(0, os.path.join(
            click.get_app_dir(APP_NAME),
            os.path.basename(os.path.splitext(model)[0]) + '.hdf5'))
    location = None
    for loc in search:
        if os.path.isfile(loc):
            location = loc
            break
    if not location:
        raise click.BadParameter('No model found')
    click.echo('Loading RNN\t', nl=False)
    try:
        rnn = models.load_any(location)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # propagate untouched.  The original also had an unreachable
        # ``ctx.exit(1)`` after ``raise``; it has been removed.
        click.secho(u'\u2717', fg='red')
        raise
    click.secho(u'\u2713', fg='green')
    # convert input model to protobuf
    if conv and rnn.kind == 'pyrnn':
        name, _ = os.path.splitext(os.path.basename(model))
        op = os.path.join(click.get_app_dir(APP_NAME), name + '.pronn')
        try:
            os.makedirs(click.get_app_dir(APP_NAME))
        except OSError:
            pass
        models.pyrnn_to_pronn(rnn, op)
    # set output mode
    if hocr:
        ctx.meta['mode'] = 'hocr'
    else:
        ctx.meta['mode'] = 'text'
    return partial(recognizer, model=rnn, pad=pad, lines=lines)
def __write_settings_file(url, username, password, authMethod, json):
    """Persist connection settings and per-project repository ids to YAML.

    ``url`` is expected to end with the project name; everything before the
    last path segment is stored as the server URL.  ``json`` is the parsed
    project-list response whose ``value`` entries carry ``name``/``id``.
    """
    data = {}
    # rsplit strips exactly the *last* path segment; the original
    # str.replace() removed the first occurrence and could corrupt the
    # URL when the project name also appeared earlier in it.
    data['url'] = url.rsplit('/', 1)[0]
    data['project'] = url.split('/')[-1]
    data['authMethod'] = authMethod
    data['username'] = username
    data['password'] = password
    data['repo_id'] = {}
    for project in json['value']:
        data['repo_id'][project['name'].lower()] = project['id']
    app_dir = click.get_app_dir("Codereview")
    if not os.path.exists(app_dir):
        os.makedirs(app_dir)
    # Context manager closes the handle; the original leaked it.
    with open(Configuration.file_path, 'w') as stream:
        yaml.dump(data, stream)
def get_root():
    """ Return the root object of the current database. """
    db_path = click.get_app_dir('katalog', force_posix=True)
    # os.path.join instead of string concatenation keeps separator
    # handling portable and consistent with the rest of the codebase.
    connection = ZODB.connection(os.path.join(db_path, 'data.fs'))
    return connection.root()
def read_config():
    """Populate the module-level credential globals from config.ini in the
    app directory.  Returns a truthy value only when nickname, mirror path
    and API key were all found (a missing password is tolerated)."""
    global NICKNAME, PASSWORD, API_KEY, MIRROR_BASE

    def sane_get(parser, key):
        # Echo a diagnostic and implicitly return None when the section
        # or the key is absent.
        try:
            return parser.get(section, key)
        except ConfigParser.NoSectionError:
            click.echo('Config section "%s" missing' % section)
        except ConfigParser.NoOptionError:
            click.echo('Config section "%s" missing key "%s"' % (section, key))

    section = 'main'
    config = os.path.join(click.get_app_dir(APP_NAME), 'config.ini')
    try:
        with open(config) as f:
            parser = ConfigParser.RawConfigParser()
            parser.readfp(f)
            NICKNAME = sane_get(parser, 'nickname')
            PASSWORD = sane_get(parser, 'password')
            MIRROR_BASE = sane_get(parser, 'mirrorpath')
            API_KEY = sane_get(parser, 'apikey')
    except IOError:
        pass  # It's okay to be missing a password
    return NICKNAME and MIRROR_BASE and API_KEY
def reset():
    """Reset your Twitter auth information. """
    config_path = os.path.join(click.get_app_dir(APP_NAME), 'config.ini')
    # Only attempt removal when the file is actually present.
    if os.path.exists(config_path):
        os.remove(config_path)
    click.echo('Configuration has been reset.')
def get_config_file():
    """Return the config file path, or None when no file exists.

    The POC_CONFIG environment variable takes precedence; otherwise the
    default "config" file inside the application directory is used.
    """
    candidate = os.environ.get("POC_CONFIG") or os.path.join(
        click.get_app_dir(APP_NAME), "config")
    return candidate if os.path.isfile(candidate) else None
def load_from_config_dir(self):
    """
    Load the config file from the application directory (e.g. in the users
    home folder) if it exists.
    """
    conf = os.path.join(click.get_app_dir("temci"), "config.yaml")
    # isfile() already implies existence; the original's separate
    # exists() check was redundant.
    if os.path.isfile(conf):
        self.load_file(conf)
def init(force, *args, **kwargs):
    """Create a fresh config directory and interactively build the first
    environment/user combo.  Refuses to clobber an existing config unless
    ``force`` is given."""
    e("Initializing your config file")
    path = click.get_app_dir(APP_NAME, force_posix=True)
    if os.path.exists(path):
        if force:
            try:
                shutil.rmtree(path)
            except OSError:
                # Narrowed from a bare ``except:``; rmtree failures are
                # OSError subclasses, and a bare except would also have
                # swallowed KeyboardInterrupt.
                e(
                    "There already is a config and the script cannot remove "
                    "it. Please do so manually and rerun this command. You "
                    "need to delete the directory %s" % path
                )
                return
        else:
            e(
                'There already is a config. Please use the "--force" option '
                'to force a new config setup'
            )
            return
    os.mkdir(path)
    config = ConfigParser.RawConfigParser(allow_no_value=True)
    e(
        'Please answer the following question to add your first '
        'environment/user combo'
    )
    config = edit_environment(config, *args, **kwargs)
    save_config(config)
def __init__(self, fname=None):
    """Set up the configuration holder.

    All credential, location and priority fields start unset (None) and
    are expected to be populated by a later load step.
    """
    if fname is None:
        self.conf = py.path.local(click.get_app_dir('tin')).join('config.json')
    else:
        self.conf = py.path.local(fname)
    # access / locations / priorities: initialize in bulk.
    for attr in ('key', 'token',
                 'board', 'inbox', 'today', 'this_week', 'later',
                 'waiting', 'done',
                 'important', 'not_important', 'urgent', 'not_urgent'):
        setattr(self, attr, None)
    # meta
    self._loaded = False
def read_config(config=None):
    """Read the gpd ini configuration into a flat 'section.key' dict.

    Requires a [jgi] section plus username and password keys somewhere in
    the file; exits the process with a critical log message otherwise.
    """
    if config is None:
        path = os.path.join(click.get_app_dir('gpd'), 'config.ini')
    else:
        path = os.path.abspath(config)
    if not os.path.exists(path):
        logging.critical("Config file not present. Checked: %s" % path)
        sys.exit(1)
    parser = configparser.RawConfigParser()
    parser.read([path])
    rv = {}
    found = {'jgi': False, 'username': False, 'password': False}
    for section in parser.sections():
        if section.lower() == "jgi":
            found['jgi'] = True
        for key, value in parser.items(section):
            if key.lower() == "username":
                found['username'] = True
            elif key.lower() == "password":
                found['password'] = True
            rv['%s.%s' % (section.lower(), key.lower())] = value
    if all(found.values()):
        return rv
    logging.critical("The configuration file (%s) is improperly formatted. See --help." % path)
    sys.exit(1)
def __init__(self, api_key=None, library_id=None, library_type='user'):
    """ Service class for communicating with the Zotero API.

    This is mainly a thin wrapper around :py:class:`pyzotero.zotero.Zotero`
    that handles things like transparent HTML<->[edit-format] conversion.

    :param api_key:     API key for the Zotero API, will be loaded from
                        the configuration if not specified
    :param library_id:  Zotero library ID the API key is valid for, will
                        be loaded from the configuration if not specified
    :param library_type: Type of the library, can be 'user' or 'group'
    """
    self._logger = logging.getLogger()
    # Search index database lives alongside the rest of the app data.
    idx_path = os.path.join(click.get_app_dir(APP_NAME), 'index.sqlite')
    self.config = load_config()
    self.note_format = self.config['zotcli.note_format']
    self.storage_dir = self.config.get('zotcli.storage_dir')
    # Explicit arguments win over values from the configuration file.
    api_key = api_key or self.config.get('zotcli.api_key')
    library_id = library_id or self.config.get('zotcli.library_id')
    if not api_key or not library_id:
        raise ValueError(
            "Please set your API key and library ID by running "
            "`zotcli configure` or pass them as command-line options.")
    self._zot = Zotero(library_id=library_id, api_key=api_key,
                       library_type=library_type)
    self._index = SearchIndex(idx_path)
    # Re-synchronize when the index is older than the configured interval
    # (seconds, default 300).
    sync_interval = self.config.get('zotcli.sync_interval', 300)
    since_last_sync = int(time.time()) - self._index.last_modified
    if since_last_sync >= int(sync_interval):
        self._logger.info("{} seconds since last sync, synchronizing."
                          .format(since_last_sync))
        self.synchronize()
def main(
        rpc_provider: HTTPProvider,
        private_key: str,
        private_key_password_file: str,
        state_file: str,
        channel_manager_address: str,
        minimum_amount: int,
        gas_price: int,
):
    """Withdraw all owed tokens of at least ``minimum_amount`` from the
    receiver's channels, using the on-disk channel-manager state file.
    Exits the process with status 1 on any validation failure."""
    if minimum_amount <= 0:
        click.echo('Minimum amount need to be at least 1')
        sys.exit(1)
    private_key = utils.get_private_key(private_key, private_key_password_file)
    if private_key is None:
        sys.exit(1)
    receiver_address = utils.privkey_to_addr(private_key)
    web3 = Web3(HTTPProvider(rpc_provider, request_kwargs={'timeout': 60}))
    # Configure network-dependent defaults from the connected chain's id.
    config.NETWORK_CFG.set_defaults(int(web3.version.network))
    web3.eth.defaultAccount = receiver_address
    channel_manager_address = (
        channel_manager_address or config.NETWORK_CFG.channel_manager_address
    )
    channel_manager_contract = make_channel_manager_contract(web3, channel_manager_address)
    if not state_file:
        # Default state-file name is derived from contract + receiver.
        state_file_name = "%s_%s.db" % (
            channel_manager_address[:10],
            receiver_address[:10]
        )
        app_dir = click.get_app_dir('microraiden')
        if not os.path.exists(app_dir):
            click.echo('No state file or directory found!')
            sys.exit(1)
        state_file = os.path.join(app_dir, state_file_name)
    try:
        click.echo('Loading state file from {}'.format(state_file))
        state = ChannelManagerState.load(state_file)
    except StateFileException:
        click.echo('Error reading state file')
        traceback.print_exc()
        sys.exit(1)
    # Sanity-check that the state file matches the key and the contract
    # we are about to withdraw from.
    if not is_same_address(state.receiver, receiver_address):
        click.echo('Private key does not match receiver address in state file')
        sys.exit(1)
    elif not is_same_address(state.contract_address, channel_manager_address):
        click.echo('Channel manager contract address mismatch')
        sys.exit(1)
    click.echo('Withdrawing all paid tokens with at least {} due from '
               'receiver {}'.format(minimum_amount, receiver_address))
    withdraw_from_channels(
        private_key,
        state,
        channel_manager_contract,
        minimum_amount,
        gas_price * denoms.gwei if gas_price else None,
    )
def __init__(self):
    """Ensure a local nixpkgs git checkout exists under the nox app dir,
    creating and fetching it on first use."""
    nox_dir = Path(click.get_app_dir('nox', force_posix=True))
    if not nox_dir.exists():
        nox_dir.mkdir()
    nixpkgs = nox_dir / 'nixpkgs'
    self.path = str(nixpkgs)
    if not nixpkgs.exists():
        # First run: initialize an empty repo pointed at upstream nixpkgs.
        click.echo('==> Creating nixpkgs repo in {}'.format(nixpkgs))
        self.git(['init', '--quiet', self.path], cwd=False)
        self.git('remote add origin https://github.com/NixOS/nixpkgs.git')
        self.git('config user.email [email protected]')
        self.git('config user.name nox')
    if (Path.cwd() / '.git').exists():
        git_version = self.git('version', output=True).strip()
        # NOTE(review): lexicographic string comparison of the version —
        # would misclassify e.g. "git version 10"; confirm intended.
        if git_version >= 'git version 2':
            click.echo("==> We're in a git repo, trying to fetch it")
            self.git(['fetch', str(Path.cwd()), '--update-shallow', '--quiet'])
        else:
            click.echo("==> Old version of git detected ({}, maybe on travis),"
                       " not trying to fetch from local, fetch 50 commits from master"
                       " instead".format(git_version))
            self.git('fetch origin master --depth 50')
def get_config_file() -> str:
    """
    Get the path to Flintrock's default configuration file.
    """
    return os.path.join(click.get_app_dir(app_name='Flintrock'), 'config.yaml')
def setup_readline(self):
    """Enable readline-based editing with persistent history and tab
    completion.  Returns True when readline support was activated."""
    try:
        import readline
    except ImportError:
        self.has_readline = False
    else:
        # History files are kept per sub-command under the 'repl' app dir.
        if not self.command:
            history_name = 'history'
        else:
            history_name = 'history-' + self.command[0].replace(os.sep, '_')
        app_dir = click.get_app_dir('repl')
        if not os.path.exists(app_dir):
            os.makedirs(app_dir)
        history_file = os.path.join(app_dir, history_name)
        # Load any previous history, ignoring a missing file.
        try:
            readline.read_history_file(history_file)
        except IOError:
            pass
        # Persist the history when the interpreter exits.
        atexit.register(readline.write_history_file, history_file)
        readline.parse_and_bind('tab: complete')
        readline.set_completer_delims(' \t\n')
        readline.set_completer(fcomplete)
        self.has_readline = True
    return self.has_readline
def save_brain(self):
    """ Save brain as 1.4.0 JSON-Unsigned format """
    logger.info("Writing dictionary...")
    saves_version = u"1.4.0"
    folder = click.get_app_dir("Pyborg")
    logger.info("Saving pyborg brain to %s", self.brain_path)
    # Debug aid: tally the Python types stored in the word index.
    cnt = collections.Counter()
    for key, value in self.words.items():
        cnt[type(key)] += 1
        # cnt[type(value)] += 1
        for i in value:
            cnt[type(i)] += 1
    logger.debug("Types: %s", cnt)
    logger.debug("Words: %s", self.words)
    logger.debug("Lines: %s", self.lines)
    brain = {'version': saves_version, 'words': self.words, 'lines': self.lines}
    # Robustness fix: the scratch directory may not exist on a fresh
    # install; create it instead of crashing on open().
    tmp_dir = os.path.join(folder, "tmp")
    os.makedirs(tmp_dir, exist_ok=True)
    tmp_file = os.path.join(tmp_dir, "current.pyborg.json")
    # Write to a scratch file first so a crash mid-dump cannot corrupt the
    # existing brain, then rename over it (atomic on POSIX when both paths
    # are on the same filesystem).
    with open(tmp_file, 'w') as f:  # this can fail half way...
        json.dump(brain, f)
    # if we didn't crash
    os.rename(tmp_file, self.brain_path)
    logger.debug("Successful writing of brain & renaming. Quitting.")
def options(f):
    """ Shared options, used by all bartender commands """
    config_option = click.option(
        '--config',
        envvar='VODKA_HOME',
        default=click.get_app_dir('vodka'),
        help="location of config file",
    )
    return config_option(f)
def get_default_config_paths():
    """Return candidate config locations, most specific first: local
    dotfile, project config dir, then the user's dicto home."""
    cwd = os.getcwd()
    home = os.getenv('DICTO_HOME', click.get_app_dir(u'dicto', force_posix=True))
    return [
        os.path.join(cwd, u'.dicto.yaml'),           # local config
        os.path.join(cwd, u'.dicto', u'config.yaml'),  # project config
        os.path.join(home, u'config.yaml'),          # home config
    ]
def get_default_output_path(self):
    """Return the per-project build cache directory, keyed by an MD5
    digest of the project tree path."""
    tree = self.get_tree()
    # Python 2: md5 needs bytes, so encode unicode trees first.
    if isinstance(tree, unicode):
        tree = tree.encode('utf-8')
    # Renamed from ``hash`` to stop shadowing the builtin.
    tree_hash = hashlib.md5(tree)
    return os.path.join(click.get_app_dir('Lektor'), 'build-cache',
                        tree_hash.hexdigest())
def latest_github_release(force_update=False, timeout=1.0, cache=None):
    """
    Get GitHub data for latest shub release. If it was already requested
    today, return a cached version unless ``force_update`` is set to
    ``True``.
    """
    REQ_URL = "https://api.github.com/repos/scrapinghub/shub/releases/latest"
    cache = cache or os.path.join(click.get_app_dir("scrapinghub"),
                                  "last_release.txt")
    today = datetime.date.today().toordinal()
    if not force_update and os.path.isfile(cache):
        with open(cache, "r") as f:
            try:
                release_data = json.load(f)
            except Exception:
                # Corrupt/unreadable cache: fall through to a fresh request.
                release_data = {}
        # Check for equality (and not smaller or equal) so we don't get thrown
        # off track if the clock was ever misconfigured and a future date was
        # saved
        if release_data.get("_shub_last_update", 0) == today:
            return release_data
    release_data = requests.get(REQ_URL, timeout=timeout).json()
    release_data["_shub_last_update"] = today
    # Best-effort cache write: failing to create the directory or write
    # the file must never break the caller.
    try:
        shubdir = os.path.dirname(cache)
        try:
            os.makedirs(shubdir)
        except OSError:
            if not os.path.isdir(shubdir):
                raise
        with open(cache, "w") as f:
            json.dump(release_data, f)
    except Exception:
        pass
    return release_data
def get_package_cache_path(self):
    """The path where plugin packages are stored."""
    # The cache key mixes the project id with the interpreter identity so
    # different Python installs do not share packages.
    digest = hashlib.md5()
    for part in (self.id, sys.version, sys.prefix):
        digest.update(part)
    return os.path.join(click.get_app_dir('Lektor'), 'package-cache',
                        digest.hexdigest())
def get_database_session():
    """Create (if needed) the app database under the mydeploy app dir and
    return a new session bound to it."""
    app_dir = click.get_app_dir("mydeploy")
    os.makedirs(app_dir, exist_ok=True)
    db_path = os.path.join(app_dir, 'database.db')
    (engine, sessionmaker) = get_engine(db_path)
    Base.metadata.create_all(engine)
    click.echo(db_path)
    return sessionmaker()
def make_conf(self):
    """Create the per-user config directory (mode 0o750) with a
    ``templates.d`` subdirectory, and seed it with the example settings
    file from the first configured cfg dir."""
    appdir = get_app_dir(self.progname, force_posix=True)
    if not os.path.exists(appdir):
        # 0o750 octal literals work on Python 2.6+ and 3; the legacy
        # ``0750`` form of the original is a syntax error on Python 3.
        os.makedirs(appdir, mode=0o750)
        os.makedirs(pathjoin(appdir, 'templates.d'), mode=0o750)
    shutil.copyfile(
        pathjoin(self.cfg_dirs[0], 'examples/settings.local'),
        pathjoin(appdir, 'settings'))
def read_config():
    """Read config.ini from the app dir into a flat 'section.key' dict."""
    cfg = os.path.join(click.get_app_dir(APP_NAME), 'config.ini')
    parser = ConfigParser.RawConfigParser()
    parser.read([cfg])
    return {
        '%s.%s' % (section, key): value
        for section in parser.sections()
        for key, value in parser.items(section)
    }
def get_application_dir():
    """
    Returns
    -------
    Path
        Path representing the application config directory
    """
    app_dir = click.get_app_dir('AWS SAM', force_posix=True)
    return Path(app_dir)
def get(ctx, model_id):
    """
    Retrieves a model from the repository.
    """
    # Imported lazily to keep CLI startup fast when this command is unused.
    from kraken import repo
    # Ensure the application directory exists (EAFP: ignore failure when
    # it is already there).
    try:
        os.makedirs(click.get_app_dir(APP_NAME))
    except OSError:
        pass
    message('Retrieving model ', nl=False)
    # The partial prints one dot per progress callback invocation.
    filename = repo.get_model(model_id, click.get_app_dir(APP_NAME),
                              partial(message, '.', nl=False))
    # Erase the last dot and print a green check mark.
    message('\b\u2713', fg='green', nl=False)
    # ANSI escape: re-show the terminal cursor.
    message('\033[?25h')
    message('Model name: {}'.format(filename))
    ctx.exit(0)
def start_proxy(receiver_privkey: str) -> PaywalledProxy:
    """Build a paywalled proxy backed by 'ticker_proxy.db' inside the
    microraiden app dir (created on demand), start it and return it."""
    state_dir = click.get_app_dir('microraiden')
    if not os.path.exists(state_dir):
        os.makedirs(state_dir)
    state_path = os.path.join(state_dir, 'ticker_proxy.db')
    app = make_paywalled_proxy(receiver_privkey, state_path)
    app.run()
    return app
def main(
        ctx,
        channel_manager_address,
        ssl_key,
        ssl_cert,
        gas_price,
        state_file,
        private_key,
        private_key_password_file,
        paywall_info,
        rpc_provider,
):
    """Start the uRaiden paywalled proxy, retrying until the Ethereum node
    accepts the connection.  The constructed app is stored on ctx.obj."""
    private_key = utils.get_private_key(private_key, private_key_password_file)
    if private_key is None:
        sys.exit(1)
    receiver_address = privkey_to_addr(private_key)
    constants.paywall_html_dir = paywall_info
    # Retry loop: lock contention and connection errors are logged and
    # retried after a sleep; configuration errors abort with a raise.
    while True:
        try:
            web3 = Web3(HTTPProvider(rpc_provider, request_kwargs={'timeout': 60}))
            # Configure network-dependent defaults from the chain id.
            NETWORK_CFG.set_defaults(int(web3.version.network))
            channel_manager_address = to_checksum_address(
                channel_manager_address or NETWORK_CFG.CHANNEL_MANAGER_ADDRESS
            )
            if gas_price is not None:
                NETWORK_CFG.gas_price = gas_price
            if not state_file:
                # Default state-file name derived from contract + receiver.
                state_file_name = "%s_%s.db" % (
                    channel_manager_address[:10],
                    receiver_address[:10]
                )
                app_dir = click.get_app_dir('microraiden')
                if not os.path.exists(app_dir):
                    os.makedirs(app_dir)
                state_file = os.path.join(app_dir, state_file_name)
            app = make_paywalled_proxy(private_key, state_file,
                                       contract_address=channel_manager_address,
                                       web3=web3)
        except StateFileLocked as ex:
            log.warning('Another uRaiden process is already running (%s)!' % str(ex))
        except InsecureStateFile as ex:
            msg = ('The permission bits of the state file (%s) are set incorrectly (others can '
                   'read or write) or you are not the owner. For reasons of security, '
                   'startup is aborted.' % state_file)
            log.fatal(msg)
            raise
        except NetworkIdMismatch as ex:
            log.fatal(str(ex))
            raise
        except requests.exceptions.ConnectionError as ex:
            log.warning("Ethereum node refused connection: %s" % str(ex))
        else:
            break
        sleep(constants.SLEEP_RELOAD)
    ctx.obj = app
def cli(config_file=None):
    """Announce startup and load the configuration, falling back to the
    default PyOTRS app directory when no file is given."""
    click.secho("Starting PyOTRS CLI")
    load_config(config_file or click.get_app_dir('PyOTRS', force_posix=True))
import codecs import os import click from layeredconfig import ( Defaults, Environment, DictSource, LayeredConfig, ) import ruamel.yaml DEFAULT_CONFIG_DIR = click.get_app_dir('farmer', force_posix=True) DEFAULTS = { 'api_url': 'https://my.vmfarms.com/api/v1/', } class RoundTripYAMLFile(DictSource): def __init__(self, yaml_filename=None, writable=True, **kwargs): """ Loads and optionally saves configuration files in YAML format using the ruamel.yaml RoundTripLoader, which preserves comments. Args: yamlfile (str): The name of a YAML file. Nested sections are turned into nested config objects. writable (bool): Whether changes to the LayeredConfig object that has this YAMLFile object amongst its sources should be saved in the YAML file. """
def __init__(
        self,
        privkey: str = None,
        key_path: str = None,
        key_password_path: str = None,
        datadir: str = click.get_app_dir('microraiden'),
        channel_manager_address: str = CHANNEL_MANAGER_ADDRESS,
        web3: Web3 = None,
        channel_manager_proxy: ChannelContractProxy = None,
        token_proxy: ContractProxy = None,
        contract_metadata: dict = CONTRACT_METADATA
) -> None:
    """Initialize the client: resolve the private key, set up web3 and
    the contract proxies (creating any that were not supplied), lock the
    per-account balances file and load/sync the channel list."""
    # Either a raw private key or a path to a key file must be supplied.
    assert privkey or key_path
    assert not privkey or isinstance(privkey, str)

    # Plain copy initializations.
    self.privkey = privkey
    self.datadir = datadir
    self.channel_manager_address = channel_manager_address
    self.web3 = web3
    self.channel_manager_proxy = channel_manager_proxy
    self.token_proxy = token_proxy

    # Load private key from file if none is specified on command line.
    if not privkey:
        self.privkey = get_private_key(key_path, key_password_path)
        assert self.privkey is not None

    os.makedirs(datadir, exist_ok=True)
    assert os.path.isdir(datadir)

    self.account = privkey_to_addr(self.privkey)
    self.channels = []  # type: List[Channel]

    # Create web3 context if none is provided, either by using the proxies'
    # context or creating a new one.
    if not web3:
        if channel_manager_proxy:
            self.web3 = channel_manager_proxy.web3
            self.channel_manager_address = channel_manager_proxy.address
        elif token_proxy:
            self.web3 = token_proxy.web3
        else:
            self.web3 = Web3(RPCProvider())

    # Create missing contract proxies.
    if not channel_manager_proxy:
        channel_manager_abi = contract_metadata[CHANNEL_MANAGER_ABI_NAME]['abi']
        self.channel_manager_proxy = ChannelContractProxy(
            self.web3,
            self.privkey,
            channel_manager_address,
            channel_manager_abi,
            GAS_PRICE,
            GAS_LIMIT
        )

    # The token address is read from the channel manager contract itself.
    token_address = self.channel_manager_proxy.contract.call().token()
    if not token_proxy:
        token_abi = contract_metadata[TOKEN_ABI_NAME]['abi']
        self.token_proxy = ContractProxy(
            self.web3,
            self.privkey,
            token_address,
            token_abi,
            GAS_PRICE,
            GAS_LIMIT
        )
    else:
        assert is_same_address(self.token_proxy.address, token_address)

    assert self.web3
    assert self.channel_manager_proxy
    assert self.token_proxy
    assert self.channel_manager_proxy.web3 == self.web3 == self.token_proxy.web3

    netid = self.web3.version.network
    # One balances file per (network, account-prefix) pair.
    self.balances_filename = 'balances_{}_{}.json'.format(
        NETWORK_NAMES.get(netid, netid), self.account[:10]
    )
    # A file lock guards the balances file against concurrent clients;
    # timeout=0 fails immediately if another process already holds it.
    self.filelock = filelock.FileLock(os.path.join(self.datadir, self.balances_filename))
    self.filelock.acquire(timeout=0)

    self.load_channels()
    self.sync_channels()
import time import tzlocal import yaml from click import ClickException from contextlib import contextmanager from patroni.config import Config from patroni.dcs import get_dcs as _get_dcs from patroni.exceptions import PatroniException from patroni.postgresql import Postgresql from patroni.utils import is_valid_pg_version, patch_config from prettytable import PrettyTable from six.moves.urllib_parse import urlparse from six import text_type CONFIG_DIR_PATH = click.get_app_dir('patroni') CONFIG_FILE_PATH = os.path.join(CONFIG_DIR_PATH, 'patronictl.yaml') DCS_DEFAULTS = { 'zookeeper': { 'port': 2181, 'template': "zookeeper:\n hosts: ['{host}:{port}']" }, 'exhibitor': { 'port': 8181, 'template': "exhibitor:\n hosts: [{host}]\n port: {port}" }, 'consul': { 'port': 8500, 'template': "consul:\n host: '{host}:{port}'" }, 'etcd': {
device_id=self.device_id, device_model_id=self.device_model_id, )) if self.display: config.screen_out_config.screen_mode = PLAYING # Continue current conversation with later requests. self.is_new_conversation = False # The first AssistRequest must contain the AssistConfig # and no audio data. yield embedded_assistant_pb2.AssistRequest(config=config) for data in self.conversation_stream: # Subsequent requests need audio data, but not config. yield embedded_assistant_pb2.AssistRequest(audio_in=data) device_config = os.path.join(click.get_app_dir('googlesamples_assistant'), 'device_config.json') lang = "en-GB" display = True verbose = False input_audio_file = None output_audio_file = None audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE grpc_deadline = DEFAULT_GRPC_DEADLINE once = False
def get_app_dir(*args):
    """Join the given path components onto the cget application directory."""
    base = click.get_app_dir('cget')
    return os.path.join(base, *args)
# TODO: Typically, a lot of this code would be split out into several modules, # but I'm not sure how that will work with Pyinstaller. # Try it out so this code is more maintainable. # TODO: Logging is confusing here. simplify it by moving it into it's own module. os.environ.update( {"LC_CTYPE": "en_US.UTF-8",} ) APP_NAME = "svo-print" AWS_CONFIG_SECTION = "AWS" CRON_CONFIG_SECTION = "CRON" CONFIGURED_PRINTERS_SECTION = "CONFIGURED_PRINTERS" EXECUTABLE_PATH = ensure_str(str(Path(__file__).absolute())) CONFIG_FILE = os.path.join(click.get_app_dir(APP_NAME), "config.json") LOG_FILE = ensure_str( str(Path(click.get_app_dir(APP_NAME), "log/{}.log".format(APP_NAME))) ) LOG_LEVEL_LOOKUP = { "error": logging.ERROR, "info": logging.INFO, "debug": logging.DEBUG, } CLI_WARN = "yellow" CLI_ERROR = "red" CLI_SUCCESS = "green" CLI_INFO = "blue"
self.deadline): assistant_helpers.log_assist_response_without_audio(resp) if resp.screen_out.data: html_response = resp.screen_out.data if resp.dialog_state_out.conversation_state: conversation_state = resp.dialog_state_out.conversation_state self.conversation_state = conversation_state if resp.dialog_state_out.supplemental_display_text: text_response = resp.dialog_state_out.supplemental_display_text if any(p in text_response.lower() for p in ['public ip', 'ip address', '::; 1']): text_response = 'I need permission to display that information' return text_response, html_response try: with open(os.path.join(click.get_app_dir('google-oauthlib-tool'), 'credentials.json'), 'r') as f: credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f)) http_request = google.auth.transport.requests.Request() credentials.refresh(http_request) except Exception as e: # Before cog is loaded so no bot.logger :( print('Failed to connect to Google Assistant. ') credentials = None grpc_channel = google.auth.transport.grpc.secure_authorized_channel( credentials, http_request, ASSISTANT_API_ENDPOINT) gassistant = GoogleAssistant('en-us', 'fire0682-444871677176709141', '287698408855044097', True, grpc_channel, DEFAULT_GRPC_DEADLINE)
@click.group(help="SoftLayer Command-line Client", epilog="""To use most commands your SoftLayer username and api_key need to be configured. The easiest way to do that is to use: 'slcli setup'""", cls=CommandLoader, context_settings={'help_option_names': ['-h', '--help'], 'auto_envvar_prefix': 'SLCLI'}) @click.option('--format', default=DEFAULT_FORMAT, show_default=True, help="Output format", type=click.Choice(VALID_FORMATS)) @click.option('--config', '-C', required=False, default=click.get_app_dir('softlayer', force_posix=True), show_default=True, help="Config file location", type=click.Path(resolve_path=True)) @click.option('--verbose', '-v', help="Sets the debug noise level, specify multiple times " "for more verbosity.", type=click.IntRange(0, 3, clamp=True), count=True) @click.option('--proxy', required=False, help="HTTP[S] proxy to be use to make API calls") @click.option('--really / --not-really', '-y', is_flag=True, required=False, help="Confirm all prompt actions")
) device = click.option( "-d", "--device", type=click.STRING, envvar="EZSP_DEVICE", required=True ) baudrate = click.option( "-b", "--baudrate", type=click.INT, envvar="EZSP_BAUDRATE", default=57600 ) database_file = click.option( "-D", "--database", type=click.Path(exists=True, dir_okay=False, writable=True), required=True, default=os.path.join(click.get_app_dir("bellows"), "app.db"), ) duration_ms = click.option( "-t", "--duration", "duration_ms", type=click.INT, metavar="MILLISECONDS", default=50, show_default=True, ) duration_s = click.option( "-t", "--duration",
import sys import time from pathlib import Path from threading import Thread from urllib.parse import urlparse import click from . import __version__ from .controllers import Cache, CastState, StateFileError, StateMode, get_chromecast, get_chromecasts, setup_cast from .error import CastError, CattUserError, CliError from .http_server import serve_file from .subs_info import SubsInfo from .util import echo_json, human_time, hunt_subtitles, is_ipaddress, warning CONFIG_DIR = Path(click.get_app_dir("catt")) CONFIG_PATH = Path(CONFIG_DIR, "catt.cfg") STATE_PATH = Path(CONFIG_DIR, "state.json") class CattTimeParamType(click.ParamType): def convert(self, value, param, ctx): try: tdesc = [int(x) for x in value.split(":")] tlen = len(tdesc) if (tlen > 1 and any(t > 59 for t in tdesc)) or tlen > 3: raise ValueError except ValueError: self.fail("{} is not a valid time description.".format(value)) tdesc.reverse()
class Config:
    """Wrapper around the twtxt ini configuration file."""
    # Default location of the twtxt config file.
    config_dir = click.get_app_dir("twtxt")
    config_name = "config"

    def __init__(self, config_file, cfg):
        """Initializes new :class:`Config` object.

        :param config_file: full path to the loaded config file.
        :param cfg: a ConfigParser object, with config loaded.
        """
        self.config_file = config_file
        self.cfg = cfg

    @classmethod
    def from_file(cls, file):
        """Try loading given config file."""
        if not os.path.exists(file):
            raise ValueError("Config file not found.")
        cfg = configparser.ConfigParser()
        try:
            cfg.read(file)
            return cls(file, cfg)
        except configparser.Error:
            raise ValueError("Config file is invalid.")

    @classmethod
    def discover(cls):
        """Make a guess about the config file location and try loading it."""
        file = os.path.join(Config.config_dir, Config.config_name)
        return cls.from_file(file)

    @classmethod
    def create_config(cls, nick, twtfile, add_news):
        """Creates a new config file at the default location."""
        if not os.path.exists(Config.config_dir):
            os.makedirs(Config.config_dir)
        file = os.path.join(Config.config_dir, Config.config_name)
        cfg = configparser.ConfigParser()
        cfg.add_section("twtxt")
        cfg.set("twtxt", "nick", nick)
        cfg.set("twtxt", "twtfile", twtfile)
        cfg.add_section("following")
        if add_news:
            cfg.set("following", "twtxt", "https://buckket.org/twtxt_news.txt")
        conf = cls(file, cfg)
        conf.write_config()
        return conf

    def write_config(self):
        """Writes ConfigParser object to file."""
        with open(self.config_file, "w") as config_file:
            self.cfg.write(config_file)

    @property
    def following(self):
        """Returns a list of all source objects."""
        following = []
        try:
            for (nick, url) in self.cfg.items("following"):
                source = Source(nick, url)
                following.append(source)
        except configparser.NoSectionError as e:
            # A missing [following] section simply yields an empty list.
            logger.debug(e)
        return following

    @property
    def options(self):
        """Returns a dict of all config options."""
        try:
            return dict(self.cfg.items("twtxt"))
        except configparser.NoSectionError as e:
            logger.debug(e)
            return {}

    @property
    def nick(self):
        # Falls back to the login name from the environment, lowercased.
        return self.cfg.get("twtxt", "nick",
                            fallback=os.environ.get("USER", "").lower())

    @property
    def twtfile(self):
        return os.path.expanduser(
            self.cfg.get("twtxt", "twtfile", fallback="twtxt.txt"))

    @property
    def twturl(self):
        return self.cfg.get("twtxt", "twturl", fallback=None)

    @property
    def check_following(self):
        return self.cfg.getboolean("twtxt", "check_following", fallback=True)

    @property
    def use_pager(self):
        return self.cfg.getboolean("twtxt", "use_pager", fallback=False)

    @property
    def use_cache(self):
        return self.cfg.getboolean("twtxt", "use_cache", fallback=True)

    @property
    def porcelain(self):
        return self.cfg.getboolean("twtxt", "porcelain", fallback=False)

    @property
    def disclose_identity(self):
        return self.cfg.getboolean("twtxt", "disclose_identity", fallback=False)

    @property
    def limit_timeline(self):
        return self.cfg.getint("twtxt", "limit_timeline", fallback=20)

    @property
    def timeout(self):
        return self.cfg.getfloat("twtxt", "timeout", fallback=5.0)

    @property
    def sorting(self):
        return self.cfg.get("twtxt", "sorting", fallback="descending")

    @property
    def source(self):
        # The user's own feed as a Source object.
        return Source(self.nick, self.twturl)

    @property
    def pre_tweet_hook(self):
        return self.cfg.get("twtxt", "pre_tweet_hook", fallback=None)

    @property
    def post_tweet_hook(self):
        return self.cfg.get("twtxt", "post_tweet_hook", fallback=None)

    def add_source(self, source):
        """Adds a new source to the config’s following section."""
        if not self.cfg.has_section("following"):
            self.cfg.add_section("following")
        self.cfg.set("following", source.nick, source.url)
        self.write_config()

    def get_source_by_nick(self, nick):
        """Returns the source of the given nick."""
        url = self.cfg.get("following", nick, fallback=None)
        return Source(nick, url) if url else None

    def remove_source_by_nick(self, nick):
        """Removes a source from the config’s following section."""
        if not self.cfg.has_section("following"):
            return False
        ret_val = self.cfg.remove_option("following", nick)
        self.write_config()
        return ret_val

    def build_default_map(self):
        """Maps the set options to the default values used by click."""
        default_map = {
            "following": {
                "check": self.check_following,
                "timeout": self.timeout,
                "porcelain": self.porcelain,
            },
            "tweet": {
                "twtfile": self.twtfile,
            },
            "timeline": {
                "pager": self.use_pager,
                "cache": self.use_cache,
                "limit": self.limit_timeline,
                "timeout": self.timeout,
                "sorting": self.sorting,
                "porcelain": self.porcelain,
                "twtfile": self.twtfile,
            },
            "view": {
                "pager": self.use_pager,
                "cache": self.use_cache,
                "limit": self.limit_timeline,
                "timeout": self.timeout,
                "sorting": self.sorting,
                "porcelain": self.porcelain,
            }
        }
        return default_map
self.conversation_state = conversation_state if resp.dialog_state_out.supplemental_display_text: text_response = resp.dialog_state_out.supplemental_display_text return text_response, html_response @click.command() @click.option('--api-endpoint', default=ASSISTANT_API_ENDPOINT, metavar='<api endpoint>', show_default=True, help='Address of Google Assistant API service.') @click.option('--credentials', metavar='<credentials>', show_default=True, default=os.path.join(click.get_app_dir('google-oauthlib-tool'), 'credentials.json'), help='Path to read OAuth2 credentials.') @click.option('--device-model-id', metavar='<device model id>', required=True, help=(('Unique device model identifier, ' 'if not specifed, it is read from --device-config'))) @click.option('--device-id', metavar='<device id>', required=False, help=(('Unique registered device instance identifier, ' 'if not specified, it is read from --device-config, ' 'if no device_config found: a new device is registered ' 'using a unique id and a new device config is saved'))) @click.option('--lang',
# WARNING: do not import unnecessary things here to keep cli startup time under # control import click from click.core import Context from swh.auth.cli import auth as auth_cli from swh.auth.cli import generate_token as auth_generate_token from swh.auth.cli import revoke_token as auth_revoke_token from swh.core.cli import swh as swh_cli_group CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) # TODO (T1410): All generic config code should reside in swh.core.config DEFAULT_CONFIG_PATH = os.environ.get( "SWH_CONFIG_FILE", os.path.join(click.get_app_dir("swh"), "global.yml")) DEFAULT_CONFIG: Dict[str, Any] = { "api_url": "https://archive.softwareheritage.org/api/1", "bearer_token": None, } @swh_cli_group.group(name="web", context_settings=CONTEXT_SETTINGS) @click.option( "-C", "--config-file", default=None, type=click.Path(exists=True, dir_okay=False, path_type=str), help=f"Configuration file (default: {DEFAULT_CONFIG_PATH})", )
import click import pluggy import muacrypt from .cmdline_utils import ( get_account, get_account_manager, MyGroup, MyCommandUnknownOptions, out_red, log_info, mycommand, ) from .account import AccountManager, AccountNotFound, effective_date, parse_date_to_float from .bingpg import find_executable from . import mime, hookspec from .bot import bot_reply @click.command(cls=MyGroup, context_settings=dict(help_option_names=["-h", "--help"])) @click.option("--basedir", type=click.Path(), default=click.get_app_dir("muacrypt"), envvar="MUACRYPT_BASEDIR", help="directory where muacrypt state is stored") @click.version_option() @click.pass_context def muacrypt_main(context, basedir): """access and manage Autocrypt keys, options, headers.""" basedir = os.path.abspath(os.path.expanduser(basedir)) context.account_manager = AccountManager(basedir, _pluginmanager) context.plugin_manager = _pluginmanager @mycommand("destroy-all") @click.option("--yes", default=False, is_flag=True, help="needs to be specified to actually destroy") @click.pass_context
import click import os import errno import json from anime_downloader import util APP_NAME = 'anime downloader' APP_DIR = click.get_app_dir(APP_NAME) DEFAULT_CONFIG = { 'dl': { 'url': False, 'player': None, 'skip_download': False, 'download_dir': '.', 'quality': '1080p', 'chunk_size': '10', 'fallback_qualities': ['720p', '480p', '360p'], 'force_download': False, 'file_format': '{anime_title}/{anime_title}_{ep_no}', 'provider': 'twist.moe', 'external_downloader': '', 'aria2c_for_torrents': False, 'selescrape_browser': None, 'selescrape_browser_executable_path': None, 'selescrape_driver_binary_path': None, 'speed_limit': 0, }, 'ezdl': { 'file_format': '{animeinfo_anime_title}/{animeinfo_anime_title}_{provider}_{ep_no}', 'provider': 'twist.moe',
def _get_global_config_dir():
    """Return the per-user configuration directory for this application.

    Delegates to click's platform-aware resolution; ``force_posix=True``
    requests a ``~/.<app-name>``-style dotted directory even on platforms
    with other conventions.
    """
    config_dir = click.get_app_dir(APP_NAME, force_posix=True)
    return config_dir
) @click.option( '--matrix-username', default=None, required=True, help='Matrix username', ) @click.option( '--matrix-password', default=None, required=True, help='Matrix password', ) @click.option( '--state-db', default=os.path.join(click.get_app_dir('raiden-monitoring-service'), 'state.db'), type=str, help='state DB to save received balance proofs to', ) @click.option( '--log-level', default='INFO', type=click.Choice(['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']), help='Print log messages of this level and more important ones', ) @click.option( '--log-config', type=click.File('r'), help='Use the given JSON file for logging configuration', ) def main(
def main(api_endpoint=ASSISTANT_API_ENDPOINT,
         credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'),
                                  'credentials.json'),
         project_id=None,
         device_model_id=None,
         device_id=None,
         device_config=os.path.join(
             click.get_app_dir('googlesamples_assistant'),
             'device_config.json'),
         lang="en_GB",
         display=True,
         verbose=False,
         input_audio_file=None,
         output_audio_file=None,
         audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
         audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
         audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
         audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
         audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
         grpc_deadline=DEFAULT_GRPC_DEADLINE,
         once=False,
         *args, **kwargs):
    """Entry point for the Google Assistant voice client.

    Loads OAuth2 credentials, opens an authorized gRPC channel, wires up
    audio input/output (files or sound device), registers the device with
    the Assistant API if no device config exists, then runs one or more
    conversation turns via SampleAssistant.

    Exits the process with -1 on credential or device-registration failure.
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    # Note: the `credentials` name is rebound from a file path to a
    # Credentials object here.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # When no file arguments are given, a single SoundDeviceStream is shared
    # between source and sink (the `audio_device or ...` idiom creates it
    # only once).
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: prefer explicit arguments, then the saved
    # device config file; as a last resort register a brand-new device
    # instance with the Assistant API and persist its config.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id,
                             device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # NOTE(review): mkdir without parents=True — assumes the app
            # directory's parent already exists; confirm on fresh installs.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    # |=============================================|
    # |                                             |
    # | Handle commands for Google Assistant Stuff  |
    # |                                             |
    # |=============================================|
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.SetVolume')
    def changeVolume(volumeLevel, isPercentage):
        # Delegates volume changes to PulseAudio for a specific USB sink.
        # NOTE(review): sink name is hard-coded to one audio device.
        if (isPercentage):
            os.system(
                'pactl set-sink-volume "alsa_output.usb-Generic_USB2.0_Device_20130100ph0-00.analog-stereo" '
                + str(volumeLevel) + '%')

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                input("PRESS ENTER TO SPEAK")
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def assist(self):
    """Send a voice request to the Assistant and playback the response.

    Builds fresh 16 kHz / 16-bit sound-device streams for this turn, loads
    the registered device identity from the saved device config, streams
    microphone audio to the Assist API, plays back the spoken reply (unless
    suppressed by the local `lab` handler), and dispatches device actions.

    Returns:
        bool: True if the Assistant expects a follow-on query and the
        conversation should continue.
    """
    continue_conversation = False
    device_actions_futures = []
    # Whether the Assistant's spoken reply should be played back.
    # Fix: the original only assigned this inside the stable-transcript
    # branch, so the playback condition below could raise NameError when
    # audio data arrived before a stable speech result.
    playstandard = False

    # Configure audio source and sink. A single SoundDeviceStream is shared
    # between source and sink via the `audio_device or ...` idiom.
    audio_device = None
    audio_source = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=16000,
            sample_width=2,
            block_size=6400,
            flush_size=25600
        )
    )
    audio_sink = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=16000,
            sample_width=2,
            block_size=6400,
            flush_size=25600
        )
    )
    # Create conversation stream with the given audio source and sink.
    self.conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=3200,
        sample_width=2,
    )

    # Get the device id and device model id from the registered config.
    device_config = os.path.join(
        click.get_app_dir('googlesamples-assistant'), 'device_config.json')
    try:
        with open(device_config) as f:
            self.device = json.load(f)
            self.device_id = self.device['id']
            self.device_model_id = self.device['model_id']
            logging.info("Using device model %s and device id %s",
                         self.device_model_id, self.device_id)
    except Exception as e:
        # Lazy %-args instead of eager string interpolation.
        logging.warning('Device config not found: %s', e)
        logging.warning('Please re run the google assistant configuration and register the device using the samples provided')
        # NOTE(review): if the config is missing, self.device_id may be
        # unset and the handler construction below will fail — confirm a
        # registered device config always exists before calling assist().

    self.device_handler = device_helpers.DeviceRequestHandler(self.device_id)

    self.conversation_stream.start_recording()
    self.logger.info('Recording audio request.')

    def iter_log_assist_requests():
        # Log each outgoing request (without audio payload) as it is
        # yielded to the gRPC stream.
        for c in self.gen_assist_requests():
            assistant_helpers.log_assist_request_without_audio(c)
            yield c
        logging.debug('Reached end of AssistRequest iteration.')

    # This generator yields AssistResponse proto messages
    # received from the gRPC Google Assistant API.
    for resp in self.assistant.Assist(iter_log_assist_requests(),
                                      self.deadline):
        assistant_helpers.log_assist_response_without_audio(resp)
        if resp.event_type == END_OF_UTTERANCE:
            self.logger.info('End of audio request detected')
            logging.info('Stopping recording.')
            self.conversation_stream.stop_recording()
        if resp.speech_results:
            self.logger.info('Transcript of user request: "%s".',
                             ' '.join(r.transcript
                                      for r in resp.speech_results))
            # Only act on fully-stable transcripts; `lab.work` decides
            # whether to play the standard response and/or mute output.
            if resp.speech_results[0].stability == 1.0:
                spoken_text = resp.speech_results[0].transcript
                playstandard, self.quiet = self.lab.work(
                    spoken_text, self.quiet, self.conversation_stream)
                if playstandard:
                    self.logger.info('Playing assistant response.')
        if len(resp.audio_out.audio_data) > 0 and playstandard and not self.quiet:
            if not self.conversation_stream.playing:
                self.conversation_stream.stop_recording()
                self.conversation_stream.start_playback()
            self.conversation_stream.write(resp.audio_out.audio_data)
        if resp.dialog_state_out.conversation_state:
            conversation_state = resp.dialog_state_out.conversation_state
            logging.debug('Updating conversation state.')
            self.conversation_state = conversation_state
        if resp.dialog_state_out.volume_percentage != 0:
            volume_percentage = resp.dialog_state_out.volume_percentage
            logging.info('Setting volume to %s%%', volume_percentage)
            self.conversation_stream.volume_percentage = volume_percentage
        if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
            continue_conversation = True
            logging.info('Expecting follow-on query from user.')
        elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
            continue_conversation = False
        if resp.device_action.device_request_json:
            device_request = json.loads(
                resp.device_action.device_request_json
            )
            fs = self.device_handler(device_request)
            if fs:
                device_actions_futures.extend(fs)

    self.logger.info('Finished response.')
    self.conversation_stream.stop_playback()
    try:
        self.conversation_stream.close()
    # Fix: was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        self.logger.error("Cant close conversation stream")
    return continue_conversation
yield embedded_assistant_pb2.AssistRequest(config=config) for data in self.conversation_stream: # Subsequent requests need audio data, but not config. yield embedded_assistant_pb2.AssistRequest(audio_in=data) @click.command() @click.option('--api-endpoint', default=ASSISTANT_API_ENDPOINT, metavar='<api endpoint>', show_default=True, help='Address of Google Assistant API service.') @click.option('--credentials', metavar='<credentials>', show_default=True, default=os.path.join(click.get_app_dir('google-oauthlib-tool'), 'credentials.json'), help='Path to read OAuth2 credentials.') @click.option('--project-id', metavar='<project id>', help=('Google Developer Project ID used for registration ' 'if --device-id is not specified')) @click.option('--device-model-id', metavar='<device model id>', help=(('Unique device model identifier, ' 'if not specifed, it is read from --device-config'))) @click.option('--device-id', metavar='<device id>', help=(('Unique registered device instance identifier, ' 'if not specified, it is read from --device-config, ' 'if no device_config found: a new device is registered '
for data in self.conversation_stream: # Subsequent requests need audio data, but not config. yield embedded_assistant_pb2.ConverseRequest(audio_in=data) @click.command() @click.option('--api-endpoint', default=ASSISTANT_API_ENDPOINT, metavar='<api endpoint>', show_default=True, help='Address of Google Assistant API service.') @click.option('--credentials', metavar='<credentials>', show_default=True, default=os.path.join( click.get_app_dir(common_settings.ASSISTANT_APP_NAME), common_settings.ASSISTANT_CREDENTIALS_FILENAME), help='Path to read OAuth2 credentials.') @click.option('--verbose', '-v', is_flag=True, default=False, help='Verbose logging.') @click.option('--input-audio-file', '-i', metavar='<input file>', help='Path to input audio file. ' 'If missing, uses audio capture') @click.option('--output-audio-file', '-o', metavar='<output file>',
global continue_conversation continue_conversation = assistant.assist() print('done================') return 0 signal.signal(signal.SIGINT, signal_handler) detector = snowboydecoder.HotwordDetector('saaedy.pmdl', sensitivity=0.6) detector.start(detected_callback=det_call, interrupt_check=interrupt_callback, sleep_time=0.1) api_endpoint = 'embeddedassistant.googleapis.com' credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'), 'credentials.json') project_id = 'handassist-581d5' device_model_id = 'handassist-581d5-handassist-jkbpdx' device_config = os.path.join(click.get_app_dir('googlesamples-assistant'), 'device_config.json') lang = 'en-US' display = False verbose = False audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE grpc_deadline = 60 * 3 + 5
def _get_config_path():
    """Build the absolute path of this application's ``config.ini`` file."""
    app_dir = click.get_app_dir(APP_NAME)
    return os.path.join(app_dir, 'config.ini')
import webbrowser from contextlib import suppress from pathlib import Path from typing import Optional import click import dacite import yaml from furl import furl from datapane import _IN_PYTEST, log from .utils import InvalidTokenError APP_NAME = "datapane" APP_DIR = Path(click.get_app_dir(APP_NAME)) APP_DIR.mkdir(parents=True, exist_ok=True) DEFAULT_ENV = "default" DEFAULT_SERVER = "https://datapane.com" DEFAULT_TOKEN = "TOKEN_HERE" # TODO - wrap into a singleton object that includes callable? @dc.dataclass class Config: """Global config read from config file""" server: str = DEFAULT_SERVER token: str = DEFAULT_TOKEN username: str = "" session_id: str = dc.field(default_factory=lambda: uuid.uuid4().hex)
import platform
import multiprocessing

# Platform-specific environment tweaks performed at import time.
# NOTE(review): "NOWAL" is presumably read elsewhere in the project to
# disable write-ahead logging — confirm against its consumer.
if platform.system().lower() != 'windows':
    os.environ["NOWAL"] = "1"
else:
    # while click.echo works when run under normal conditions,
    # it has started failing when packaged with PyInstaller. The
    # implementation of click's _winterm module seems to replicate
    # a lot of logic found in win_unicode_console.streams, but using
    # win_unicode_console seems to fix the problem, (found after tracing
    # why importing ipdb which imported IPython which called this fixed
    # the problem)
    import win_unicode_console
    # win_unicode_console.enable()

# Point matplotlib's config/cache at a per-user app directory so it never
# writes into a shared or read-only HOME; create the cache dir if missing.
app_dir = click.get_app_dir("glycresoft")
_mpl_cache_dir = os.path.join(app_dir, 'mpl')
if not os.path.exists(_mpl_cache_dir):
    os.makedirs(_mpl_cache_dir)
os.environ["MPLCONFIGDIR"] = _mpl_cache_dir
try:
    # Force the non-interactive Agg backend (headless-safe); best-effort,
    # since the backend may already be fixed by a prior import.
    matplotlib.use("agg")
except Exception:
    pass

from rdflib.plugins import stores, memory
from rdflib.plugins.stores import sparqlstore
from kubernaut.clustergroups.cmd import clustergroups
from kubernaut.config.model import Config
from pathlib import Path
from typing import Optional


@click.group()
@click.option("--kubernaut-backend",
              help="Set an alternate API backend",
              default=None,
              envvar="KUBERNAUT_BACKEND",
              type=str)
@click.option("--kubernaut-config",
              help="Set an alternate config file",
              default=os.path.join(
                  click.get_app_dir("kubernaut", roaming=True), "config"),
              envvar="KUBERNAUT_CONFIG",
              type=click.Path())
@click.pass_context
@click.version_option(version=__version__, prog_name="kubernaut")
def cli(ctx: Context, kubernaut_backend: Optional[str], kubernaut_config: str):
    """Root command group: load the config file, apply any backend
    override, and stash the application context on the click context
    for subcommands to use."""
    config = Config.load(Path(kubernaut_config))
    app_ctx = KubernautContext(config)
    # An explicit --kubernaut-backend (or env var) overrides the backend
    # stored in the config file for this invocation.
    if kubernaut_backend:
        config.current_backend = kubernaut_backend
    ctx.obj = app_ctx


cli.add_command(config_cmd)
# Default window geometry, used when no cached window position exists.
DEFAULT_WINDOW_WIDTH = 1400
DEFAULT_WINDOW_HEIGHT = 800
DEFAULT_WINDOW_LEFT = 0
DEFAULT_WINDOW_TOP = 0

# Random token generated once per process start.
SESSION_TOKEN = str(uuid4())

# Canonical special-folder names recognized by the app.
ALIAS_FOLDER_NAMES = ['inbox', 'sent', 'archive', 'drafts', 'trash', 'spam']

# App directory/filenames
#
# "App" directory for this user - settings/logs/cache go here
# (KANMAIL_APP_DIR env var overrides the platform default).
APP_DIR = environ.get('KANMAIL_APP_DIR', get_app_dir(APP_NAME))

# Cache directory
CACHE_DIR = path.join(APP_DIR, 'cache')
ICON_CACHE_DIR = path.join(CACHE_DIR, 'icons')
CONTACTS_CACHE_DB_FILE = path.join(CACHE_DIR, 'contacts.db')
FOLDER_CACHE_DB_FILE = path.join(CACHE_DIR, 'folders.db')

# Window settings/position cache filename
WINDOW_CACHE_FILE = path.join(CACHE_DIR, 'window_position.json')

# Settings JSON filename
SETTINGS_FILE = path.join(APP_DIR, 'settings.json')

# License JSON filename
"contexts": pyborg.settings.num_contexts, "lines": len(pyborg.lines) } @bottle.route("/commands.json") def commands_json(pyborg): return pyborg.commanddict @bottle.route("/logging-level", method="POST") def set_log_level(): # when we drop 2 support this can use strings instead of the enums levels = { "DEBUG": logging.DEBUG, "INFO": logging.INFO, "WARNING": logging.WARNING, "ERROR": logging.ERROR, "CRITICAL": logging.CRITICAL } target = levels[request.POST.get("level").upper()] logger.setLevel(target) if __name__ == '__main__': logging.basicConfig(level=logging.INFO) folder = click.get_app_dir("Pyborg") brain_path = os.path.join(folder, "brains", "current.pyborg.json") bottle.install(BottledPyborg(brain_path=brain_path)) bottle.run(host="localhost", port=2001, reloader=True)
"""Minimal todo list manager for procrastinators""" import click # for the command line interfacing import arrow # for date handling (due/deadline) import json # for parsing tasks.json file; contains all the tasks. import os # for handling file manipulations. (saving and deleting tasks) import shutil # for copying the tasks.json file into appconfig directory. import math # for ceiling user task lengths from collections import deque # for manipulations on tasks.json file_dir = click.get_app_dir('proactive') file_path = os.path.join(file_dir, 'tasks.json') first_time = True tuts = None class TaskSet: """The list of tasks at hand""" # TODO: --hard-reset option to generate a new tasks.json def __init__(self): self.file = None # tasks.json file opened for parsing and dumping self.tasks = [] # Sorted list of tasks self.id_list = [] # list of available task ids to act on self.current_task = None # current selection of task marked for acting upon self.change_settings = False # Flag to hint settings change self.tutorial = None # A quickstart parsed from tasks.json self.settings = None # Settings for the app such as defaults parsed from tasks.json if not os.path.exists(file_path): click.secho("tasks.json file doesn't exist at %s, Creating..." % file_path, dim=True)