def read_configfile():
    """Load CONFIGFILE into the module-level ``cfg`` nested dict and update
    the GPGMAILENCRYPT / DOVECOT / MAILDIRLOCK globals from its values.

    On a parse failure the error is logged and the function returns with
    ``cfg`` left as an empty dict.
    """
    global cfg, DEBUG, DOVECOT, GPGMAILENCRYPT, MAILDIRLOCK
    cfg = dict()
    _cfg = RawConfigParser()
    try:
        _cfg.read(CONFIGFILE)
    except Exception:
        # Was a bare ``except:`` — narrowed so SystemExit/KeyboardInterrupt
        # still propagate; any parser error is logged and we bail out.
        log("Could not read config file '%s'." % CONFIGFILE, "e", ln=lineno())
        return
    # Copy every section/option into a plain nested dict for easy lookup.
    for sect in _cfg.sections():
        cfg[sect] = dict()
        for (name, value) in _cfg.items(sect):
            cfg[sect][name] = value
    if 'default' in cfg:
        if 'gpgmailencrypt' in cfg['default']:
            GPGMAILENCRYPT = cfg['default']['gpgmailencrypt']
    if 'mail' in cfg:
        # DOVECOT is only switched on by an explicit "yes".
        if 'dovecot' in cfg['mail'] and cfg['mail']['dovecot'] == "yes":
            DOVECOT = True
        if 'maildirlock' in cfg['mail']:
            MAILDIRLOCK = cfg['mail']['maildirlock']
def read_merged(self, filenames, encoding=None):
    """Read several config files and merge them into this parser.

    Options whose value contains ';' are treated as ;-separated lists and
    merged (deduplicated, order preserved) with the value already stored;
    all other options are simply overwritten by later files.
    ``encoding`` is accepted for API compatibility but unused here.
    """
    cfg = []
    # Parse every file independently before merging.
    for filename in filenames:
        _cfg = RawConfigParser()
        _cfg.read(filename)
        cfg.append(_cfg)
    for _cfg in cfg:
        for section in _cfg.sections():
            if not self.has_section(section):
                self.add_section(section)
            for option in _cfg.options(section):
                value = _cfg.get(section, option)
                if ";" in value:
                    # getdefault/getlist are presumably helpers defined on
                    # this class — TODO confirm against the full file.
                    current = self.getdefault(section, option, "")
                    if ";" in current:
                        # Merge both ;-lists, keeping first occurrence of
                        # each non-empty element.
                        val = []
                        for v in value.split(";"):
                            if v and v not in val:
                                val.append(v)
                        for v in self.getlist(section, option):
                            if v and v not in val:
                                val.append(v)
                        # Trailing ';' matches the stored list format.
                        self.set(section, option, ";".join(val) + ";")
                        continue
                self.set(section, option, value)
def init(app, config_file):
    """Load BibTeX metadata into the module-level ``metadata`` dict,
    keyed by the (normalised) basename of each entry's 'file' field."""
    global metadata
    config = ConfigParser()
    config.read(config_file)
    try:
        filename = config.get('bibtex', 'path')
    except ConfigParserError:
        # Fall back to <main.path>/library.bib when [bibtex] path is absent.
        model_path = config.get('main', 'path')
        filename = os.path.join(model_path, 'library.bib')
    print("Loading Bibtex metadata from", filename)
    bib = parse_file(filename)
    metadata = dict()
    for entry in bib.entries:
        # JabRef-style file field, e.g. ":path/to/doc.pdf:pdf" — strip the
        # ':pdf' suffix and the leading separator; presumably JabRef format,
        # TODO confirm with sample data.
        key = '/' + bib.entries[entry].fields.get('file', '').replace(':pdf', '')[1:]
        if 'C$\\backslash$:' in key:
            # Windows paths exported by BibTeX escape 'C:\' this way.
            key = key.replace('C$\\backslash$:', '')
            key = key[1:]
        key = os.path.normpath(key)
        key = os.path.basename(key)
        try:
            # '[3:]' drops the "[1]" citation marker prefix.
            citation = pybtex.format_from_file(
                filename, style='plain', output_backend='text',
                citations=[entry])[3:]
            metadata[key] = citation
        except PybtexError:
            # Fall back to the library path when formatting fails.
            metadata[key] = filename
def __init__(self, config_path=None):
    # NOTE: this block is Python 2 code (print statements, `except E, e`).
    # Default config location when none is supplied.
    if config_path is None:
        config_path = 'regexbot.ini'
    config = RawConfigParser()
    config.read_dict(DEFAULT_CONFIG)  # seed defaults, then overlay the file
    config.read(config_path)
    self.rtm_token = config.get('regexbot', 'rtm_token')
    # Flood-control windows as timedeltas.
    self.channel_flood_cooldown = timedelta(seconds=config.getint('regexbot', 'channel_flood_cooldown'))
    self.global_flood_cooldown = timedelta(seconds=config.getint('regexbot', 'global_flood_cooldown'))
    self.max_messages = config.getint('regexbot', 'max_messages')
    self.max_message_size = config.getint('regexbot', 'max_message_size')
    # Version string gets the current git branch appended when available.
    self.version = str(config.get('regexbot', 'version')) + '; %s'
    try:
        self.version = self.version % Popen(["git","branch","-v","--contains"], stdout=PIPE).communicate()[0].strip()
    except:
        # NOTE(review): bare except — any failure (no git, no repo) falls
        # back to 'unknown'.
        self.version = self.version % 'unknown'
    self._last_message_times = {}
    self._last_message = datetime.utcnow()
    self._message_buffer = {}
    # Compiled regexes of patterns to ignore, from the [ignore] section.
    self.ignore_list = []
    if config.has_section('ignore'):
        for k,v in config.items('ignore'):
            try:
                self.ignore_list.append(regex.compile(str(v), regex.I))
            except Exception, ex:
                # An invalid ignore pattern is fatal: report and exit.
                print "Error compiling regular expression in ignore list (%s):" % k
                print " %s" % v
                print ex
                exit(1)
def _set_repository_id(self, path, id):
    """Overwrite the 'id' option in the repository's config file with the
    hex encoding of *id*, then return the id the re-opened Repository
    actually reports."""
    cfg_path = os.path.join(path, 'config')
    parser = RawConfigParser()
    parser.read(cfg_path)
    parser.set('repository', 'id', hexlify(id).decode('ascii'))
    with open(cfg_path, 'w') as handle:
        parser.write(handle)
    return Repository(self.repository_path).id
def read_config(self, config):
    """Read *config* (a path or a file-like object) plus any configs it
    'extends', returning option tuples in reverse precedence order.

    Each item is (src, path, section, key, value); a (src, path, section,
    None, None) marker closes each section.  NOTE: Python 2 code
    (``unicode``, ``readfp``).
    """
    result = []
    stack = [config]
    while 1:
        config = stack.pop()
        src = None
        if isinstance(config, (str, unicode)):
            src = os.path.relpath(config)
        _config = RawConfigParser()
        _config.optionxform = lambda s: s  # keep option names case-sensitive
        if getattr(config, 'read', None) is not None:
            # File-like object: parse directly, use our own base path.
            _config.readfp(config)
            path = self.path
        else:
            if not os.path.exists(config):
                log.error("Config file '%s' doesn't exist.", config)
                sys.exit(1)
            _config.read(config)
            path = os.path.dirname(config)
        # Reversed iteration so later files end up with higher precedence
        # once the caller reverses the final result.
        for section in reversed(_config.sections()):
            for key, value in reversed(_config.items(section)):
                result.append((src, path, section, key, value))
            result.append((src, path, section, None, None))
        # Follow 'extends' chains declared in [global] or [global:global].
        if _config.has_option('global', 'extends'):
            extends = _config.get('global', 'extends').split()
        elif _config.has_option('global:global', 'extends'):
            extends = _config.get('global:global', 'extends').split()
        else:
            break
        # Prepend extended configs so they are processed before the rest.
        stack[0:0] = [
            os.path.abspath(os.path.join(path, x))
            for x in reversed(extends)]
    return reversed(result)
def test():
    """Smoke test: fork a child that exercises the stats-daemon client
    against a throwaway unix socket and dump file."""
    config = CParser()
    config.read('./etc/observant/observant.cfg')
    pid = os.fork()
    if pid != 0:
        # Parent: just wait for the child to finish.
        os.waitpid(pid, 0)
    else:
        dump = './.test.out'
        sockPath = './.test.sock'
        server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        server.setblocking(0)
        with open(dump, 'w') as fd:
            fd.write("test")
        server.bind(sockPath)
        server.listen(1)
        transport = daemon.transportFromConfig(config)
        lock = str(config.get('daemon', 'lock'))
        sock = str(config.get('daemon', 'sock'))
        key = str(config.get('daemon', 'key'))
        client = StatsCore.attachOrCreateStatsDaemon(key, transport, pid=lock, sock=sock)
        client.postWatchPid('test', os.getpid())
        # Sleeps give the daemon time to process each request.
        time.sleep(4)
        client.postLogMessageForKey('test', 'some random logmessage')
        time.sleep(4)
        client.postLogMessageForKey('test', 'some random logmessage')
        client.close()
        time.sleep(4)
        # Clean up the scratch file and socket.
        os.remove(dump)
        server.close()
        os.unlink(sockPath)
def add_handler(filename, event, handler):
    """Register *handler* for *event* in the INI file *filename*.

    Creates the [Handlers] section when the file does not yet contain one
    (the original raised KeyError in that case), then rewrites the file.
    """
    config = RawConfigParser()
    config.read(filename)
    # Robustness fix: a fresh/empty config file has no [Handlers] section.
    if not config.has_section('Handlers'):
        config.add_section('Handlers')
    config['Handlers'][event] = handler
    with open(filename, 'w') as f:
        config.write(f)
def populate_config():
    """Merge the user's ~/.pseudomyth config into the CONFIG dict;
    silently does nothing when the file is absent."""
    config_path = os.path.expanduser('~/.pseudomyth')
    if os.path.exists(config_path):
        parser = RawConfigParser(dict_type=dict)
        parser.read(config_path)
        # Only values from the [DEFAULT] section are merged; options in
        # named sections are ignored by parser.defaults().
        CONFIG.update(parser.defaults())
def _iter_config_parsers(self, pattern):
    """Yield (path, RawConfigParser) for every readable file matching
    *pattern* in each configured directory, in sorted filename order.

    Unreadable files (EACCES) are reported and skipped; invalid .ini
    files raise ValueError; other IOErrors propagate.
    """
    for path in self.config_directories:
        count = 0
        file_list = glob.glob(os.path.join(path, pattern))
        file_list.sort()
        for config_file in file_list:
            count += 1
            parser = RawConfigParser()
            try:
                # noinspection PyTypeChecker
                # Probe readability first: parser.read() silently ignores
                # unreadable files, so open() is used to surface EACCES.
                open(config_file, "rb").read(1)
                parser.read([config_file])
            except IOError as e:
                if e.errno == errno.EACCES:
                    username = pwd.getpwuid(os.getuid())[0]
                    self.print_info(
                        "%s is ignored because user %s cannot read it" % (config_file, username)
                    )
                    continue
                raise
            except ConfigError:
                raise ValueError(
                    "File '%s' is not a valid '.ini' file" % config_file
                )
            self.print_info("File %s added to the configuration" % config_file)
            yield config_file, parser
        if count == 0:
            self.print_info("No %s file found in %s" % (pattern, path))
def get_sections(self):
    """Return the list of section names found in ``self.file_name``."""
    parser = RawConfigParser()
    parser.read(self.file_name)
    return parser.sections()
def readcfg(filename):
    """
    Read .cfg file

    :param str filename: input filename
    :raise FileSystemError
    :return: configparser.RawConfigParser
    """
    # Locate the 'opendoor' project root inside __file__'s path and chdir
    # there so the cfg filename resolves relative to the project root.
    # NOTE(review): not a raw string — '\/' is a redundant escape.
    expression = '^([\/a-z].*?opendoor.*?)\/'
    find_dir = re.search(expression, __file__, re.IGNORECASE)
    if None is not find_dir:
        os.chdir(find_dir.group())
    filepath = os.path.join(os.path.sep, os.getcwd(), filename)
    if not os.path.isfile(filepath):
        raise FileSystemError("{0} is not a file ".format(filepath))
    if not os.access(filepath, os.R_OK):
        raise FileSystemError("Configuration file {0} can not be read. Setup chmod 0644".format(filepath))
    try:
        config = RawConfigParser()
        config.read(filepath)
        return config
    except (ParsingError, NoOptionError) as error:
        # Wrap parser errors in the project's filesystem error type.
        raise FileSystemError(error)
def _parse_legacy_config_file(self):
    """
    Parse a legacy configuration file.

    Reads template overrides, the logging level, custom key bindings and
    custom palette colors, applying each only when present in the file.
    """
    conf = RawConfigParser()
    conf.read(LEGACY_CONFIG_FILE)
    # [params]: template and logging overrides.
    styles = self.styles.copy()
    if conf.has_option('params', 'dm_template'):
        styles['dm_template'] = conf.get('params', 'dm_template')
    if conf.has_option('params', 'header_template'):
        styles['header_template'] = conf.get('params', 'header_template')
    self.styles.update(styles)
    if conf.has_option('params', 'logging_level'):
        self.logging_level = conf.getint('params', 'logging_level')
    # [keys]: one option per known binding name.
    for binding in self.key_bindings:
        if conf.has_option('keys', binding):
            custom_key = conf.get('keys', binding)
            self._set_key_binding(binding, custom_key)
    # [colors]: one option per palette label (first element of each entry).
    palette_labels = [color[0] for color in PALETTE]
    for label in palette_labels:
        if conf.has_option('colors', label):
            custom_fg = conf.get('colors', label)
            self._set_color(label, custom_fg)
def run():
    """Refresh get-iplayer and process every feed config file found under
    the feed config directory."""
    # print a warning about copyright
    print("WARNING: Do not use the script to produce public podcasts, it is for personal use only.")
    print("If you publically serve programmes you may be in violation of the BBC's copyright.")
    # load the config file
    global master_config
    master_config = RawConfigParser()
    master_config.read(config_directory + MASTER_CONFIG_FILENAME)
    # set the get-iplayer path (optional override from [General])
    global get_iplayer_path
    if master_config.has_option("General", "get-iplayer_path"):
        get_iplayer_path = master_config.get("General", "get-iplayer_path")
    # refresh the get-iplayer cache
    print("Refreshing get-iplayer... (this may take some time)")
    subprocess.check_output([get_iplayer_path, "--type=all", "--quiet"])
    # scan for feed config files and process each
    for root, directories, files in os.walk(config_directory + FEED_CONFIG_DIRECTORY):
        for filename in files:
            load_feed(filename)
        print("Finished.")
        return  # stop here, we have processed the feeds
    # if we have not returned at this point, then no config directory was
    # found, this is a problem
    print("No config directory found")
def _set_repository_id(self, path, id):
    """Write *id* (hex-encoded) into the repository config file, then
    re-open the repository and return the id it reports."""
    config_file = os.path.join(path, "config")
    parser = RawConfigParser()
    parser.read(config_file)
    encoded = hexlify(id).decode("ascii")
    parser.set("repository", "id", encoded)
    with open(config_file, "w") as stream:
        parser.write(stream)
    return Repository(self.repository_path).id
def run(self):
    """Migrate the config file from the legacy [paths] layout to the new
    [security]/[database] sections; returns a status string (or, on
    failure, the exception object itself)."""
    try:
        config_file = DEFAULT_CONF_FILE
        if not os.path.isfile(config_file):
            raise ValueError('Configuration file not found: {0}'.format(config_file))
        config = RawConfigParser()
        config.read(config_file)
        # Presence of [security] marks an already-migrated file.
        if 'security' in config.sections():
            return 'The configuration file is already migrated to the new version'
        config.add_section('security')
        config.add_section('database')
        # Everything from [paths] moves: database_file -> [database],
        # all other options -> [security].
        for item in config.items('paths'):
            if item[0] == 'database_file':
                config.set('database', item[0], item[1])
            else:
                config.set('security', item[0], item[1])
        config.remove_section('paths')
        config.set('security', 'crl_file_url', 'None')
        config.set('logging', 'log_level', 'INFO')
        with open(config_file, 'w') as file:
            config.write(file)
    except Exception as exc:
        # NOTE(review): returns the exception object, not a message string;
        # callers presumably stringify it — confirm.
        return exc
    return 'Configuration file migrated'
def _start_observing(self):
    """Start the observant/vigilant stats daemon and register this process
    with it."""
    # ``StatsCore`` is the original observant daemon. It seems to have been
    # replaced more recently by ``vigilant.daemon.Daemon``
    # TODO: update for vigilant Daemon
    # Get vigilant to read its own config
    config = CParser()
    config.read(os.path.join(etc_location, 'vigilant.cfg'))
    # Extract the information we need from the config object
    lock = str(config.get('daemon', 'lock'))
    sock = str(config.get('daemon', 'sock'))
    transport_type = str(config.get('transport', 'type'))
    host = str(config.get('transport', 'host'))
    port = int(config.get('transport', 'port'))
    # Choose UDP or TCP transport; anything other than 'udp' means TCP.
    transport_means = UDPStatsTransport if transport_type == 'udp' else TCPStatsTransport
    transport = transport_means(host=host, port=port)
    # Start the daemon
    self.client = StatsCore.attachOrCreateStatsDaemon(transport, pid=lock, sock=sock)
    # Tell the daemon who we are
    self.client.postWatchPid('go-smart-launcher', os.getpid())
    # Give it a second to avoid confusion by posting before registered
    # TODO: tidy this up!
    time.sleep(1)
def load_rcfile(self):
    """Apply settings from the rc file: [global] first, then the section
    named after self.NAME, converting each value per CONFIG_TYPES.

    An option is only applied when the attribute still holds its default
    value, so explicit (e.g. command-line) overrides win.
    """
    if os.path.exists(self.RCFILE):
        config = RawConfigParser()
        # Option names are upper-cased to match the attribute names.
        config.optionxform = lambda x: x.upper()
        config.read(self.RCFILE)
        items = []
        if config.has_section('global'):
            items.extend(config.items('global'))
        if self.NAME is not None and config.has_section(self.NAME):
            items.extend(config.items(self.NAME))
        for k,v in items:
            # Only known options, and only when still at the default.
            if k in CONFIG_TYPES and getattr(self,k) == DEFAULT_SETTINGS[k]:
                if CONFIG_TYPES[k] == 'str':
                    setattr(self, k, v)
                elif CONFIG_TYPES[k] == 'int':
                    setattr(self, k, int(v))
                elif CONFIG_TYPES[k] == 'float':
                    setattr(self, k, float(v))
                elif CONFIG_TYPES[k] == 'list':
                    # Comma-separated list, whitespace-stripped.
                    setattr(self, k, [i.strip() for i in v.split(",")])
                elif CONFIG_TYPES[k] == 'bool':
                    if v.lower() in ('1', 'yes', 'true', 'on'):
                        setattr(self, k, True)
                    elif v.lower() in ('0', 'no', 'false', 'off'):
                        setattr(self, k, False)
                    else:
                        raise ValueError("Not a boolean: %s" % v)
def read_config_file(filename):
    """
    Reads a configuration file to modify the global settings.

    :param filename: cfg file pathname, read through os.path.normpath
    """
    global LOG_FORMAT, LOG_FILENAME, STATE_REGEX, ZIP_REGEX
    parser = RawConfigParser()
    parser.read(path.normpath(filename))
    # Walk every (section, option) pair and update the matching global.
    # Sections should be "log" and "validators".
    for sect in parser.sections():
        for opt in parser.options(sect):
            if sect == "log":
                # Options for log: format, output
                if opt == "format":
                    LOG_FORMAT = parser.get(sect, opt)
                elif opt == "output":
                    LOG_FILENAME = parser.get(sect, opt)
            elif sect == "validators":
                # Options for validators: state, zip_code (compiled regexes)
                if opt == "state":
                    STATE_REGEX = compile(parser.get(sect, opt))
                elif opt == "zip_code":
                    ZIP_REGEX = compile(parser.get(sect, opt))
def populate_config_dict(config_path):
    """Load the configuration file into the config_file dictionary

    A ConfigParser-style configuration file can have multiple sections,
    but we ignore the section distinction and load the key/value pairs
    from all sections into a single key/value list.
    """
    try:
        parser = RawConfigParser()
        parser.optionxform = lambda name: name  # keep option case as-is
        parser.read(config_path)
        config_dict = {
            option: str(parser.get(section, option))
            for section in parser.sections()
            for option in parser.options(section)
        }
    except Exception:
        logger.warning("Could not load configuration file due to exception. "
                       "Only environment variable equivalents will be used.")
        return None
    # Normalise values: empty strings become None, '~' paths are expanded.
    for key, value in config_dict.items():
        if value == '':
            config_dict[key] = None
        elif isinstance(value, str):
            config_dict[key] = os.path.expanduser(value)
    return config_dict
def parse_and_append(self, filename):
    """Parse a .desktop-style file and append it as a UserApplication when
    it is a player app handling one of our mimetypes; silent on any error."""
    try:
        parser = RawConfigParser()
        parser.read([filename])
        # NOTE(review): 'sect' is not defined in this function — presumably
        # a module-level constant such as 'Desktop Entry'; confirm.
        if not parser.has_section(sect):
            return
        app_categories = parser.get(sect, 'Categories')
        if not app_categories:
            return
        # Only keep applications in one of the player categories.
        if not any(category in self.PLAYER_CATEGORIES for category in app_categories.split(';')):
            return
        # Find out if we need it by comparing mime types
        app_mime = parser.get(sect, 'MimeType')
        for needed_type in self.mimetypes:
            if app_mime.find(needed_type + '/') != -1:
                app_name = parser.get(sect, 'Name')
                app_cmd = parser.get(sect, 'Exec')
                app_icon = parser.get(sect, 'Icon')
                if not self.__has_sep:
                    self.add_separator()
                self.apps.append(UserApplication(app_name, app_cmd, app_mime, app_icon))
                # First matching mimetype is enough.
                return
    except:
        # NOTE(review): bare except deliberately swallows all parse errors
        # so one malformed file cannot break the scan.
        return
def carregar_propriedades(file_path):
    """Load invoker and e-mail settings from *file_path* into module
    globals ('carregar_propriedades' = 'load properties')."""
    from os import path
    global url_base, prefix_file_name, diretorio_saida, url_email_smtp_server, port_email, login_email,\
        senha_email, destinatarios_email, titulo_email, prefixo_email, prefixo_sucesso_email, sufixo_email, assinatura_email
    config = RawConfigParser()
    config.read(file_path)
    # general settings ([invocador] = invoker)
    url_base = config.get("invocador", "url_base")
    prefix_file_name = config.get("invocador", "prefix_file_name")
    diretorio_saida = config.get("invocador", "diretorio_saida")
    # Output directory must exist and be a directory.
    if not (path.exists(diretorio_saida) and path.isdir(diretorio_saida)):
        raise ValueError(u"O caminho %s é inesistente, sem permissão ou não é um diretório" % diretorio_saida)
    # e-mail settings
    url_email_smtp_server = str(config.get("email", "url_smtp_server"))
    port_email = int(config.get("email", "port"))
    login_email = config.get("email", "login")
    senha_email = config.get("email", "senha")
    # SECURITY NOTE(review): eval() on a config value executes arbitrary
    # code — the recipients list should be parsed (e.g. split/json) instead.
    destinatarios_email = eval(config.get("email", "destinatarios"))
    titulo_email = config.get("email", "titulo")
    prefixo_email = config.get("email", "prefixo")
    prefixo_sucesso_email = config.get("email", "prefixo_sucesso")
    sufixo_email = config.get("email", "sufixo")
    assinatura_email = config.get("email", "assinatura")
class BCMConfig():
    """Configuration loaded from bcm.ini.

    Attributes:
        username (str): bricklink username
        password (str): bricklink password
        wantedfilename (str): path of the wanted list
        pricefilename (str): path of the previously scrubbed price list
        reloadpricesfromweb (bool): when true, download and re-parse all
            price data and save it to pricefilename.
        _parser (RawConfigParser): parser that reads the config file
        _configfile (str): relative path of the config file
    """

    def __init__(self):
        self._configfile = '../bcm.ini'
        ini = RawConfigParser()
        ini.read(self._configfile)
        self._parser = ini
        # Credentials for bricklink.
        self.username = ini.get('bricklink', 'username')
        self.password = ini.get('bricklink', 'password')
        # File locations.
        self.wantedfilename = ini.get('filenames', 'wanted')
        self.pricefilename = ini.get('filenames', 'prices')
        # Behaviour switches.
        self.reloadpricesfromweb = ini.getboolean('options', 'reloadpricesfromweb')
def load(self, statedir: str = None) -> None:
    """Populate ``self.projects`` from the state directory: the JSON
    state file when present, otherwise the legacy .ini format."""
    if statedir is None:
        statedir = self.get_state_dir()

    json_path = os.path.join(statedir, "state.json")
    if os.path.exists(json_path):
        # Preferred format: a single JSON document.
        with open(json_path, "rt") as fd:
            self.projects = json.load(fd)["projects"]
        return

    # TODO: remove support for legacy format
    legacy_path = os.path.join(statedir, "state")
    if not os.path.exists(legacy_path):
        return
    from configparser import RawConfigParser
    cp = RawConfigParser()
    cp.read([legacy_path])
    for secname in cp.sections():
        # Sections look like "proj <name>".
        if secname.startswith("proj "):
            name = secname.split(None, 1)[1]
            self.projects[name] = {"fname": cp.get(secname, "fname")}
def run():
    """Process every feed config file found under the feed config
    directory (get-iplayer cache refresh now disabled)."""
    # load the config file
    global master_config
    master_config = RawConfigParser()
    master_config.read(config_directory + MASTER_CONFIG_FILENAME)
    # set the get-iplayer path (optional override from [General])
    global get_iplayer_path
    if master_config.has_option("General", "get-iplayer_path"):
        get_iplayer_path = master_config.get("General", "get-iplayer_path")
    # refresh the get-iplayer cache
    # print("Refreshing get-iplayer... (this may take some time)")
    # subprocess.check_output([get_iplayer_path, "--type=all", "--quiet"])
    # new BBC rules :-( now we get the programme info externally using a
    # different script and read that script's output directly into a hash
    # global bbc_programmes
    # bbc_programmes = load_bbc_programmes()
    # scan for feed config files and process each
    for root, directories, files in os.walk(config_directory + FEED_CONFIG_DIRECTORY):
        for filename in files:
            # skip macOS folder metadata
            if filename == ".DS_Store":
                continue
            print("about to read config " + filename )
            load_feed(filename)
        print("Finished.")
        return  # stop here, we have processed the feeds
    # if we have not returned at this point, then no config directory was
    # found, this is a problem
    print("No config directory found")
def load_rcfile(self):
    """Parse the rc file (falling back to the legacy location) and return
    the parsed option dict; {} when no rc file exists."""
    self.process_args()
    # Fall back to the old rc-file path only when the user kept the
    # default path and only the old file exists.
    if self.RCFILE == parser.get_default('RCFILE') and \
            not os.path.exists(self.RCFILE) and os.path.exists(OLD_RCFILE):
        logger.warning("Using old rcfile found at %s, "
                       "please rename to %s.", OLD_RCFILE, self.RCFILE)
        self.RCFILE = OLD_RCFILE
    if os.path.exists(self.RCFILE):
        config = RawConfigParser()
        # Option names are upper-cased to match setting names.
        config.optionxform = lambda x: x.upper()
        config.read(self.RCFILE)
        items = []
        if config.has_section('global'):
            items.extend(config.items('global'))
        if self.NAME is not None and config.has_section(self.NAME):
            items.extend(config.items(self.NAME))
        try:
            return self.parse_rcvalues(items)
        except (ValueError, argparse.ArgumentTypeError) as e:
            raise RuntimeError("Unable to parse RC values: %s" % e)
    return {}
def main():
    """Email chatbot loop: poll IMAP for new mail, feed each body to the
    configured bot, and reply over SMTP until interrupted."""
    logging_config = dict(level=INFO,
                          format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    if PY2:
        # Python 2's basicConfig accepts this extra key.
        logging_config['disable_existing_loggers'] = True
    basicConfig(**logging_config)
    args = get_args()
    if args.verbose:
        getLogger('').setLevel(DEBUG)
    config = RawConfigParser()
    if args.generate:
        # Write a sample configuration file and exit.
        with open(args.configuration, "w") as f:
            _generate_sample_configuration().write(f)
        return
    config.read(args.configuration)
    # The bot class is named in the config and imported dynamically.
    chatbot = import_string(config.get("BOT", "BOT"))
    sleep_timeout = config.getint("BOT", "SLEEP")
    receiver = IMAPReceiver(config.get("EMAIL", "USERNAME"),
                            config.get("EMAIL", "PASSWORD"),
                            config.get("EMAIL", "IMAP_SERVER"))
    sender = SMTPSender(config.get("EMAIL", "USERNAME"),
                        config.get("EMAIL", "PASSWORD"),
                        config.get("EMAIL", "SMTP_SERVER"),
                        config.get("EMAIL", "SMTP_PORT"))
    try:
        while True:
            logger.info("Retrieving new messages")
            email_messages = receiver.get_new_emails()
            logger.info("Retrieved %d new messages", len(email_messages))
            for email_message in email_messages:
                email_from, email_subject, email_body = get_email_content(email_message)
                chatbot_response = chatbot.respond(email_body)
                logger.info("Chatting with %s: in: %s out: %s",
                            email_from, email_body, chatbot_response)
                message = make_simple_text_message(
                    from_address=config.get("EMAIL", "USERNAME"),
                    to_address=email_from,
                    subject=email_subject,
                    text=chatbot_response)
                sender.send_email(message)
                logger.info("Response complete")
            # Wait before polling again.
            sleep(sleep_timeout)
    except KeyboardInterrupt:
        exit()
def setUp(self):
    """Build a MainController from the dist config with bounces disabled;
    tempfiles collects paths for tearDown cleanup."""
    config = RawConfigParser()
    config.read([CONFDIR + '/fuglu.conf.dist'])
    # Never send real bounce mail from tests.
    config.set('main', 'disablebounces', '1')
    guess_clamav_socket(config)
    self.mc = MainController(config)
    self.tempfiles = []
def read_file(self): RawConfigParser.read(self, str(self.file_name), encoding='utf-8') # Check config integrity and fix it if it’s wrong # only when the object is the main config if self.__class__ is Config: for section in ('bindings', 'var'): if not self.has_section(section): self.add_section(section)
def __init__(self, file_name):
    """Load *file_name* into this parser (UTF-8) and guarantee that the
    mandatory 'bindings' and 'var' sections exist."""
    self.file_name = file_name
    RawConfigParser.__init__(self, None)
    RawConfigParser.read(self, file_name, encoding='utf-8')
    # Repair a config file missing its required sections.
    for required in ('bindings', 'var'):
        if self.has_section(required):
            continue
        self.add_section(required)
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from configparser import RawConfigParser
import django.conf.global_settings as DEFAULT_SETTINGS

# Secrets are kept out of source control in settings.ini beside this module.
config = RawConfigParser()
current_dir = os.path.dirname(__file__)
config.read('%s/settings.ini' % current_dir)

BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config.get('secrets', 'SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['blog.juanwolf.fr', 'localhost', '127.0.0.1', 'blog.zell']
INTERNAL_IPS = ['127.0.0.1', 'blog.zell']
# Defaults for settings later overridden from config.ini.
aio: Union[bool, str] = ""
backup: Union[bool, str] = ""
date_reset: str = ""
per_page: int = 0
project_link: str = ""
project_name: str = ""
query: str = ""
zh_cn: Union[bool, str] = ""

# [encrypt]
key: Union[bytes, str] = ""
password: str = ""

# NOTE: this try-block continues beyond the visible chunk.
try:
    config = RawConfigParser()
    config.read("config.ini")
    # [basic]
    bot_token = config["basic"].get("bot_token", bot_token)
    # NOTE(review): list() of a string yields its characters — presumably
    # the prefix is a set of one-character command prefixes; confirm.
    prefix = list(config["basic"].get("prefix", prefix_str))
    # [bots]
    ticket_id = int(config["bots"].get("ticket_id", str(ticket_id)))
    # [channels]
    critical_channel_id = int(config["channels"].get("critical_channel_id", str(critical_channel_id)))
    debug_channel_id = int(config["channels"].get("debug_channel_id", str(debug_channel_id)))
    error_channel_id = int(config["channels"].get("error_channel_id", str(error_channel_id)))
    OkLogger,
    PsqlAbstract,
    PsqlQuery,
    QBag,
    Query,
    to_halfwidth,
    to_lower,
)
from core.chat import RetrievalEvaluate
from core.pipelines import OkPipeline
from core.tokenizer import (SplitTokenizer, JiebaPosWeight)
from configparser import RawConfigParser
from core.metrics import MetricApiWrapper

# Database credentials come from ../config.ini, [global] section.
config_parser = RawConfigParser()
config_parser.read('../config.ini')
PsqlAbstract.set_database_info(config_parser.get('global', 'dbuser'),
                               config_parser.get('global', 'dbname'),
                               config_parser.get('global', 'dbpassword'))

# Two loggers: one per stage of the evaluation pipeline.
oklogger = OkLogger('evaluate', level=logging.INFO)
oklogger2 = OkLogger('retrieve', level=logging.WARNING)

# Raw SQL used below; %(pid)s / %(tok)s are driver-style named parameters.
query_post_sql = '''
SELECT * FROM pttcorpus_post;
'''
query_title_sql = '''
SELECT * FROM pttcorpus_title
WHERE post_id=%(pid)s AND tokenizer=%(tok)s;
'''
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time, winsound
from configparser import RawConfigParser
from colorama import Fore, init, deinit

# Spoof a mobile user agent so the site serves the mobile flow.
opts = Options()
opts.add_argument("user-agent=Mozilla/5.0 (Linux; Android 7.0; SM-G930VC Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/58.0.3029.83 Mobile Safari/537.36")
init()

# All credentials and order parameters come from config.ini.
CONFIG = RawConfigParser()
CONFIG.read('config.ini')
driver_path = CONFIG.get('MAIN', 'DRIVER_LOCATION')
email_inp = CONFIG.get('CREDENTIALS', 'USERNAME')
pass_inp = CONFIG.get('CREDENTIALS', 'PASSWORD')
order_link = CONFIG.get('ORDER', 'LINK')
cvv_inp = CONFIG.get('ORDER', 'CVV')
addr_input = CONFIG.get('ORDER', 'ADDRESS')
pay_opt_input = CONFIG.get('ORDER', 'PAYMENT')
bankname_input = CONFIG.get('EMIOPTIONS', 'BANK')
tenure_input = CONFIG.get('EMIOPTIONS', 'TENURE')

# Alert-beep parameters (winsound.Beep: Hz, ms).
frequency = 2500
duration = 2000


def prCyan(skk):
    # Print *skk* in cyan.
    print(Fore.CYAN + skk)
def load_only_config_dict(
    config_file,
    gcloud_config_file="~/.config/gcloud/configurations/config_default",
    verbose=False,
):
    """Build the merged sparkles config dict: gcloud defaults overlaid by
    the [config] section of *config_file*; validates required keys and
    exits the process when any are missing."""
    # first load defaults from gcloud config
    gcloud_config_file = os.path.expanduser(gcloud_config_file)
    defaults = {}
    if os.path.exists(gcloud_config_file):
        gcloud_config = RawConfigParser()
        gcloud_config.read(gcloud_config_file)
        zone = _safe_get(gcloud_config, "compute", "zone")
        zones = []
        if zone:
            zones.append(zone)
        # NOTE(review): 'zones' below is assigned the single zone string,
        # not the 'zones' list built above — looks suspicious; confirm.
        defaults = dict(
            account=_safe_get(gcloud_config, "core", "account"),
            project=_safe_get(gcloud_config, "core", "project"),
            zones=zone,
            region=_safe_get(gcloud_config, "compute", "region"),
        )
        if verbose:
            print("Using defaults from {}: {}".format(gcloud_config_file, defaults))
    config_file = get_config_path(config_file)
    config_file = os.path.expanduser(config_file)
    log.info("Using config: %s", config_file)
    if verbose:
        print("Using config: {}".format(config_file))
    config = RawConfigParser()
    config.read(config_file)
    config_from_file = dict(config.items("config"))
    # 'zones' in the file is a comma-separated list.
    if "zones" in config_from_file:
        config_from_file["zones"] = [
            x.strip() for x in config_from_file["zones"].split(",")
        ]
    # File values win over gcloud defaults.
    merged_config = dict(defaults)
    merged_config.update(config_from_file)
    merged_config["sparkles_config_path"] = config_file
    for unused_property in ["default_resource_cpu", "default_resource_memory"]:
        if unused_property in merged_config:
            # NOTE(review): this message string was wrapped across lines in
            # the original source; reconstructed with an embedded newline.
            log.warning(
                "'%s' in config file but no longer used. \nUse 'machine_type' instead",
                unused_property,
            )
    # Validate that every required property is present and non-empty.
    missing_values = []
    required_properties = [
        "default_url_prefix",
        "project",
        "default_image",
        "machine_type",
        "zones",
        "region",
        "account",
    ]
    for property in required_properties:
        if (property not in merged_config
                or merged_config[property] == ""
                or merged_config[property] is None):
            missing_values.append(property)
    if len(missing_values) > 0:
        print("Missing the following parameters in {}: {}".format(
            config_file, ", ".join(missing_values)))
        sys.exit(1)
    # Derived defaults for optional settings.
    if "kubequeconsume_exe_path" not in merged_config:
        merged_config["kubequeconsume_exe_path"] = os.path.join(
            os.path.dirname(__file__), "bin/kubequeconsume")
        assert os.path.exists(merged_config["kubequeconsume_exe_path"])
    if "cas_url_prefix" not in merged_config:
        merged_config[
            "cas_url_prefix"] = merged_config["default_url_prefix"] + "/CAS/"
    assert isinstance(merged_config["zones"], list)
    # Per-project service-account key location.
    project_id = merged_config["project"]
    service_account_key = os.path.expanduser(
        merged_config.get("service_account_key",
                          f"~/.sparkles-cache/service-keys/{project_id}.json"))
    merged_config["service_account_key"] = service_account_key
    return merged_config
#!/usr/bin/env python # Licensed under a 3-clause BSD style license - see LICENSE.rst import sys # import ah_bootstrap from setuptools import setup, find_packages, Extension # Get some values from the setup.cfg from configparser import RawConfigParser conf = RawConfigParser() conf.read(['setup.cfg']) metadata = dict(conf.items('metadata')) PACKAGENAME = metadata['package_name'] DESCRIPTION = metadata['description'] AUTHOR = metadata['author'] AUTHOR_EMAIL = metadata['author_email'] LICENSE = metadata['license'] URL = metadata['url'] # Get the long description from the package's docstring __import__(PACKAGENAME) package = sys.modules[PACKAGENAME] LONG_DESCRIPTION = package.__doc__ # Define entry points for command-line scripts # TODO: this shuold be automated (e.g. look for main functions and # rename _ to -, and prepend 'ctapipe' entry_points = {} entry_points['console_scripts'] = [ 'ctapipe-info = ctapipe.tools.info:main',
class Manager(metaclass=ManagerMetaClass):
    """Driver-manager helper: reads ini config, detects installed browser
    versions via shell commands, downloads and unpacks driver archives."""

    def __init__(self):
        pass

    def get_ini(self, file):
        # Parse *file* as a UTF-8 ini and cache the parser on the instance.
        self.__ini__ = RawConfigParser()
        self.__ini__.read(file, encoding='utf-8')
        return self.__ini__

    def callback_func(self, blocknum, blocksize, totalsize):
        # urlretrieve progress hook: renders "percent====>MB/MB" in place.
        percent = 100.0 * blocknum * blocksize / totalsize
        if percent > 100:
            percent = 100
        downsize = blocknum * blocksize
        if downsize >= totalsize:
            downsize = totalsize
        s = "%.2f%%" % (percent) + "====>" + "%.2f" % (downsize / 1024 / 1024) + "M/" + "%.2f" % (
            totalsize / 1024 / 1024) + "M \r"
        sys.stdout.write(s)
        sys.stdout.flush()
        if percent == 100:
            print('')

    def shell(self, cmd):
        # Run *cmd* through the shell; prefer stdout, fall back to stderr.
        output, errors = subprocess.Popen(
            cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        self.o = output if output else errors
        return self.o.decode('utf-8')

    def browser_version(self, browser_type):
        # Per-browser, per-OS shell commands that print the version string.
        cmd_mapping = {
            Browser_type.GOOGLE: {
                OS_type.LINUX: 'google-chrome --version || google-chrome-stable --version',
                OS_type.MAC: r'/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --version',
                OS_type.WIN: r'reg query "HKEY_CURRENT_USER\Software\Google\Chrome\BLBeacon" /v version'
            },
            Browser_type.CHROMIUM: {
                OS_type.LINUX: 'chromium --version || chromium-browser --version',
                OS_type.MAC: r'/Applications/Chromium.app/Contents/MacOS/Chromium --version',
                OS_type.WIN: r'reg query "HKLM\SOFTWARE\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall\Google Chrome" /v version'
            },
            Browser_type.MSEDGE: {
                OS_type.LINUX: 'microsoft-edge --version',
                OS_type.MAC: r'/Applications/Microsoft\ Edge.app/Contents/MacOS/Microsoft\ Edge --version',
                OS_type.WIN: r'reg query "HKEY_CURRENT_USER\SOFTWARE\Microsoft\Edge\BLBeacon" /v version',
            }
        }
        cmd = cmd_mapping[browser_type][self.os_name]
        info = self.shell(cmd)
        try:
            return VERSION_RE.findall(info)[0]
        except IndexError:
            # No version found in the command output: report and abort.
            print(
                f"Couldn't get version for \"{browser_type.name}\" cause \033[0;31;40m{info}\033[0m")
            sys.exit(-1)

    def download_file(self, url, save_path):
        # NOTE(review): this f-string was wrapped across lines in the
        # original source; reconstructed with an embedded newline.
        print(f'will download from \n\033[0;36;40m{url}\033[0m')
        file = os.path.join(save_path, os.path.basename(url))
        print(f'will saved in \033[0;36;40m{file}\033[0m')
        try:
            urllib.request.urlretrieve(url, file, self.callback_func)
        except ContentTooShortError:
            # Incomplete download: remove the partial file and abort.
            print(
                f'\033[0;31;40mtimeout!!\033[0m please try again or visit {url}')
            os.remove(file)
            sys.exit(-1)
        print(
            f'Complete!!\r\n\tunpack file list: \033[0;32;40m{self.unpack(file)}\033[0m')

    def unpack(self, file):
        # Extract a .zip or .tar.gz/.tgz next to the archive; return the
        # member names.
        print(f'unpacking {file}')
        if file.endswith('.zip'):
            with zipfile.ZipFile(file, 'r') as zFile:
                try:
                    zFile.extractall(os.path.dirname(file))
                    namelist = zFile.namelist()
                except Exception as e:
                    # Busy/permission errors while overwriting a running
                    # driver binary are tolerated (errno 26/13).
                    if e.args[0] not in [26, 13] and e.args[1] not in ['Text file busy', 'Permission denied']:
                        raise e
                # NOTE(review): 'namelist' is unbound when the tolerated
                # exception fired before assignment — confirm intent.
                return namelist
        elif file.endswith('.tar.gz') or file.endswith('.tgz'):
            try:
                tar = tarfile.open(file, mode='r:gz')
            except tarfile.ReadError:
                # Some archives are actually bzip2 despite the extension.
                tar = tarfile.open(file, mode='r:bz2')
            members = tar.getmembers()
            tar.extractall(os.path.dirname(file))
            tar.close()
            return [x.name for x in members]
import os
from configparser import RawConfigParser

# env.ini sits next to this module and holds all runtime settings.
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
env = RawConfigParser()
env.read(BASE_DIR + '/env.ini')

# A missing file or section raises KeyError at import time, which makes
# configuration errors fail fast.
TELEGRAM_TOKEN = env['telegram']['token']
API_HOST = env['api']['host']
API_TOKEN = env['api']['token']
REGISTRATION_CODE = env['telegram']['code']
LANG = env['settings']['language']
LOGGING_LEVEL = env['settings']['logging_level']
SITE = env['settings']['site']
import json import MySQLdb as my from collections import OrderedDict from scipy.integrate import simps from astropy.table import Table import pdb import re __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) configFile = os.path.join(__location__, 'Settings.ini') db_config = RawConfigParser() db_config.read(configFile) db_name = db_config.get('database', 'DATABASE_NAME') db_user = db_config.get('database', 'DATABASE_USER') db_pwd = db_config.get('database', 'DATABASE_PASSWORD') db_host = db_config.get('database', 'DATABASE_HOST') db_port = int(db_config.get('database', 'DATABASE_PORT')) isDEBUG = False # Database SELECT # For every sub-query, the iterable result is appended to a master list of results def bulk_upload(query): success = False try:
def read_key_from_config():
    """Return the 'whoxy' API key from domLink.cfg next to the running script."""
    cfg_path = os.path.join(os.path.dirname(sys.argv[0]), 'domLink.cfg')
    parser = RawConfigParser()
    parser.read(cfg_path)
    return parser.get('API_KEYS', 'whoxy')
For more information on this file, see https://docs.djangoproject.com/en/2.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.0/ref/settings/ """ import os import ast from configparser import RawConfigParser # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) config = RawConfigParser() config.read(os.path.join(BASE_DIR, 'config.ini')) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = config.get('secrets', 'SECRET_KEY') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = config.getboolean("global", "DEBUG") ALLOWED_HOSTS = ast.literal_eval(config.get('hosts', 'allowed')) # Application definition INSTALLED_APPS = [
}, "storage": { "type": "filesystem", "custom_handler": "", "filesystem_folder": os.path.expanduser("~/.config/radicale/collections"), "database_url": "" }, "logging": { "config": "/etc/radicale/logging", "debug": "False", "full_environment": "False" } } # Create a ConfigParser and configure it _CONFIG_PARSER = ConfigParser() for section, values in INITIAL_CONFIG.items(): _CONFIG_PARSER.add_section(section) for key, value in values.items(): _CONFIG_PARSER.set(section, key, value) _CONFIG_PARSER.read("/etc/radicale/config") _CONFIG_PARSER.read(os.path.expanduser("~/.config/radicale/config")) if "RADICALE_CONFIG" in os.environ: _CONFIG_PARSER.read(os.environ["RADICALE_CONFIG"]) # Wrap config module into ConfigParser instance sys.modules[__name__] = _CONFIG_PARSER
class Utilize(object):
    """ Utilize class for common resources """

    # Fixed attribute set: one config parser plus lazily-built singletons.
    __slots__ = (
        '_config', '_basecon', '_seco', '_kvs',
        '_decibel', '_redis', '_memcached', '_message_broker'
    )

    def __init__(self, config = './config.ini'):
        """ Resource class constructor
        :param config: mixed, the config resource
        """
        # import ini file config parser
        from configparser import RawConfigParser
        # read config from config resource
        self._config = RawConfigParser()
        self.update(config)
        # initialize attributes (each built lazily by its property)
        self._basecon = None
        self._seco = None
        self._kvs = None
        self._decibel = None
        self._redis = None
        self._memcached = None
        self._message_broker = None

    def update(self, config, string = False):
        """ Update config with file, string or dict
        :param config: mixed, config resource
        :param string: bool, whether read string
        :return: None
        """
        # type check and decide how to update
        if isinstance(config, (str, bytes, bytearray)):
            # bytes-like input is decoded to str before parsing
            config = config if isinstance(config, str) \
                else config.decode(encoding = 'UTF8')
            # ``string=True`` treats the value as INI text, not a path
            self._config.read(config) if not string \
                else self._config.read_string(config)
        # update content from a dict
        elif isinstance(config, dict):
            self._config.read_dict(config)

    @staticmethod
    def _boolean(keyword):
        """ Boolean-ize a string
        :param keyword: str|bytes|bytearray, keyword
        :return: bool
        """
        # convert keyword into string
        keyword = keyword.decode(encoding = 'UTF8') \
            if isinstance(keyword, (bytes, bytearray)) \
            else keyword
        # truthy spellings accepted in config values
        return keyword.lower() in (
            '1', 't', 'y', 'true', 'yes', 'on', 'ok', 'okay', 'confirm'
        )

    @property
    def config(self):
        """ Acquire the ConfigParse instance
        :return: configparse, the ConfigParse instance
        """
        return self._config

    @property
    def basecon(self):
        """ Acquire a singleton base_convert instance
        :return: base_convert, a base_convert instance
        """
        # return if exists
        if self._basecon is not None:
            return self._basecon
        # else instantiate and return
        from basecon import BaseCon
        # default to base-62 when no [basecon] section is configured
        base = int(self._config['basecon']['base']) \
            if 'basecon' in self._config else 62
        self._basecon = BaseCon(base = base)
        return self._basecon

    @property
    def seco(self):
        """ Acquire a singleton SeCo instance
        :return: seco, a SeCo instance
        """
        # return if exists
        if self._seco is not None:
            return self._seco
        # else instantiate and return
        from seco import SeCo
        # pass the [seco] section as keyword args when present
        if 'seco' in self._config:
            self._seco = SeCo(**self._config['seco'])
        else:
            self._seco = SeCo()
        return self._seco

    @property
    def kvs(self):
        """ Acquire a singleton k-v store instance
        DO NOT INVOKE BEFORE MULTI-PROC FORKING
        :return: kvs, a KVS instance
        """
        # return only one instance of database instance
        if self._kvs is not None:
            return self._kvs
        # else instantiate and return
        from kvs import KVS
        # acquire config and serialize instance
        config = self._config
        seco = self.seco
        # attempts to get the kvs configs
        kvs_config = config['kvs'] \
            if 'kvs' in config else {}
        kvs_init = 'initialize' in kvs_config \
            and self._boolean(kvs_config['initialize'])
        kvs_engine = kvs_config['engine'].lower() \
            if 'engine' in kvs_config else ':memory:'
        kvs_path = kvs_config['path'] \
            if 'path' in kvs_config else './database.kvs'
        # instantiate kvs according to configs
        if kvs_engine == ':memory:':
            engine = KVS(serialize = seco)
        elif kvs_engine in ('dbm', 'gdbm', 'ndbm'):
            engine = KVS(kvs_path, seco)
        elif kvs_engine == 'redis':
            engine = KVS(self.redis, seco)
        elif kvs_engine == 'memcached':
            engine = KVS(self.memcached, seco)
        else:
            raise NotImplementedError(
                'Other databases not supported yet.'
            )
        # initialize kv-store
        if kvs_init and 'kvs:init' in config:
            # import json for decoding
            import json
            for key, value in config['kvs:init'].items():
                # try to decode value as json
                try:
                    engine.set(key, json.loads(value))
                except (json.JSONDecodeError, ValueError):
                    # not JSON — store the raw string
                    engine.set(key, value)
        # preserve and return kv-store instance
        self._kvs = engine
        return self._kvs

    @property
    def decibel(self):
        """ Acquire a singleton decibel instance
        DO NOT INVOKE BEFORE MULTI-PROC FORKING
        :return: decibel, a Decibel instance
        """
        # return only one instance of database instance
        if self._decibel is not None:
            return self._decibel
        from decibel import Decibel
        # set config shorthand
        config = self._config
        # attempts to acquire decibel configs
        db_config = config['decibel'] \
            if 'decibel' in config else {}
        db_init = 'initialize' in db_config \
            and self._boolean(db_config['initialize'])
        db_engine = db_config['engine'].lower() \
            if 'engine' in db_config else 'sqlite'
        db_path = db_config['path'] \
            if 'path' in db_config else './database.sqlite'
        # initialize database instance, acquire statements
        if db_engine in ('sqlite', 'sqlite3'):
            import sqlite3
            engine = sqlite3.connect(db_path)
            init = tuple(config['sqlite:init'].values()) \
                if db_init and 'sqlite:init' in config else ()
            stmt = dict(config['sqlite:stmt']) \
                if 'sqlite:stmt' in config else {}
        elif db_engine == 'mysql':
            from mysql.connector import connect
            engine = connect(**(
                config['mysql'] if 'mysql' in config else {}
            ))
            init = tuple(config['mysql:init'].values()) \
                if db_init and 'mysql:init' in config else ()
            stmt = dict(config['mysql:stmt']) \
                if 'mysql:stmt' in config else {}
        else:
            raise NotImplementedError(
                'Other databases not supported yet.'
            )
        # initialize sql database
        # NOTE(review): the for-else below closes the cursor after the loop
        # finishes normally — indentation reconstructed from a collapsed
        # source; verify against VCS.
        if db_init and init:
            cursor = engine.cursor()
            for init_stmt in init:
                cursor.execute(init_stmt)
            else:
                cursor.close()
            engine.commit()
        # preserve and return decibel instance
        self._decibel = Decibel(engine, stmt)
        return self._decibel

    @property
    def redis(self):
        """ Acquire a singleton redis instance
        DO NOT INVOKE BEFORE MULTI-PROC FORKING
        :return: redis, a Redis instance
        """
        # return if exists
        if self._redis is not None:
            return self._redis
        # else instantiate and return
        from redis import Redis
        # acquire config
        config = self._config['redis'] \
            if 'redis' in self._config else {}
        # preserve and return
        self._redis = Redis(**config)
        return self._redis

    @property
    def memcached(self):
        """ Acquire a singleton memcached instance
        DO NOT INVOKE BEFORE MULTI-PROC FORKING
        :return: memcached, the instance
        """
        # return if exists
        if self._memcached is not None:
            return self._memcached
        # else instantiate and return
        from pymemcache.client.base \
            import Client as Memcached
        # acquire config (defaults: localhost:11211)
        host, port = 'localhost', 11211
        if 'memcached' in self._config:
            config = self._config['memcached']
            # NOTE(review): the host is read from the 'path' key — confirm
            # this matches the config file's convention.
            host = config['path'] \
                if 'path' in config else 'localhost'
            port = int(config['port']) \
                if 'port' in config else 11211
        # preserve and return
        self._memcached = Memcached((host, port))
        return self._memcached

    @property
    def message_broker(self):
        """ Acquire a singleton message broker instance
        :return: message_broker, a message broker
        """
        # return only one instance of broker
        if self._message_broker is not None:
            return self._message_broker
        # else instantiate and return
        from msgr import MessageQueue, MessageBroker
        # initialize and return message broker instance
        self._message_broker = \
            MessageBroker(
                job = MessageQueue(),
                res = MessageQueue(),
                rej = MessageQueue(),
                ser = MessageQueue()
            )
        return self._message_broker
import os from configparser import RawConfigParser BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), '..')) TEMPLATES[0]['DIRS'] = [os.path.join(BASE_DIR, 'waldur_core', 'templates')] LOCALE_PATHS = ( os.path.join(BASE_DIR, 'waldur_core', 'locale'), ) conf_dir = os.environ.get('WALDUR_BASE_CONFIG_DIR', '/etc/waldur') data_dir = '/usr/share/waldur' work_dir = '/var/lib/waldur' templates_dir = os.path.join(conf_dir, 'templates') config = RawConfigParser() config.read(os.path.join(conf_dir, 'core.ini')) # If these sections and/or options are not set, these values are used as defaults config_defaults = { 'global': { 'debug': 'false', 'default_from_email': '', 'media_root': os.path.join(work_dir, 'media'), 'owner_can_manage_customer': 'false', 'secret_key': '', 'show_all_users': 'false', 'static_root': os.path.join(data_dir, 'static'), 'template_debug': 'false', }, 'auth': { 'token_lifetime': 3600,
from scipy.stats import shapiro, normaltest, f_oneway, ttest_ind, kruskal, mannwhitneyu from matplotlib import pyplot as plt from configparser import RawConfigParser parser = argparse.ArgumentParser(description='Statistical analyis') parser.add_argument('input_file_name', help = 'the input file containing all individual runs') parser.add_argument('config_file_name', help = 'the config file specifying the hyperparameters and metrics') parser.add_argument('data_set', help = 'the data set to analyze') parser.add_argument('-t', '--threshold', type = int, help = 'significance threshold', default = 0.01) parser.add_argument('-o', '--output_folder', help = 'folder for storing the output images', default = '.') parser.add_argument('-p', '--parameters', help = 'the configuration of hyperparameters to investigate', default = 'all_hyperparams') parser.add_argument('-m', '--metrics', help = 'the configuration of metrics to investigate', default = 'all_metrics') args = parser.parse_args() config = RawConfigParser() config.read(args.config_file_name) hyperparams_mapping = config[args.parameters] metric_mapping = config[args.metrics] hyperparams = sorted(list(hyperparams_mapping.keys())) bins = {} for metric in metric_mapping.keys(): bins[metric] = {} for hyperparam in hyperparams: bins[metric][hyperparam] = {} with open(args.input_file_name, 'r') as in_file: reader = csv.DictReader(in_file, delimiter=',')
class OktaAuthConfig():
    """ Config helper class

    Reads and writes per-profile Okta settings from ``~/.okta-aws``
    (INI format, one section per Okta profile).

    NOTE(review): several spans below contain ``'******'`` — an artifact of
    a secret-scrubbing tool that replaced original code. Those spans are
    reproduced verbatim (they are NOT valid Python) and flagged inline;
    restore the originals from version control.
    """

    def __init__(self, logger):
        # logger: caller-supplied logging object used for info/debug/error
        self.logger = logger
        self.config_path = os.path.expanduser('~') + '/.okta-aws'
        self._value = RawConfigParser()
        self._value.read(self.config_path)

    @staticmethod
    def configure(logger):
        """Interactively create or extend the ~/.okta-aws config file.

        Prompts the user for profile details, writes them to the config
        file (append when the file already exists) and exits the process.
        """
        value = RawConfigParser()
        config_path = os.path.expanduser('~') + '/.okta-aws'
        append = False
        if os.path.exists(config_path):
            value.read(config_path)
            print(f"You have preconfigured Okta profiles: {value.sections()}")
            print(f"This command will append new profile to the existing {config_path} config file")
            append = True
        else:
            print(f"This command will create a new {config_path} config file")
        confirm = input('Would you like to proceed? [y/n]: ')
        if confirm == 'y':
            logger.info(f"Creating new {config_path} file")
            okta_profile = input('Enter Okta profile name: ')
            if not okta_profile:
                okta_profile = 'default'
            profile = input('Enter AWS profile name: ')
            base_url = input('Enter Okta base url [your main organisation Okta url]: ')
            # NOTE(review): scrubber artifact — originally two prompts,
            # presumably ``username = input(...)`` and ``app_link = input(...)``
            # (``app_link`` is referenced below). Restore from VCS.
            username = input('Enter Okta username: '******'Enter AWS app-link [optional]: ')
            duration = input('Duration in seconds to request a session token for [Default=3600]: ')
            if not duration:
                duration = 3600
            value.add_section(okta_profile)
            value.set(okta_profile, 'base-url', base_url)
            value.set(okta_profile, 'profile', profile)
            value.set(okta_profile, 'username', username)
            if app_link:
                value.set(okta_profile, 'app-link', app_link)
            value.set(okta_profile, 'duration', duration)
            if append:
                with open(config_path, 'a') as configfile:
                    value.write(configfile)
            else:
                with open(config_path, 'w+') as configfile:
                    value.write(configfile)
            print(f"File {config_path} successfully created. Now you can authenticate to Okta")
            print(f"Execute 'okta-awscli -o {okta_profile} -p {profile} sts get-caller-identity' to authenticate and retrieve credentials")
            sys.exit(0)
        else:
            sys.exit(0)

    def base_url_for(self, okta_profile):
        """ Gets base URL from config

        Falls back to the [default] section; exits the process when no
        base-url can be found anywhere.
        """
        if self._value.has_option(okta_profile, 'base-url'):
            base_url = self._value.get(okta_profile, 'base-url')
            self.logger.info("Authenticating to: %s" % base_url)
        elif self._value.has_option('default', 'base-url'):
            base_url = self._value.get('default', 'base-url')
            self.logger.info(
                "Using base-url from default profile %s" % base_url
            )
        else:
            self.logger.error(
                "No profile found. Please define a default profile, or specify a named profile using `--okta-profile`"
            )
            sys.exit(1)
        return base_url

    def app_link_for(self, okta_profile):
        """ Gets app_link from config

        Returns the validated app-link URL, or None when missing (the
        caller is expected to retrieve it from Okta instead).
        """
        app_link = None
        if self._value.has_option(okta_profile, 'app-link'):
            app_link = self._value.get(okta_profile, 'app-link')
        elif self._value.has_option('default', 'app-link'):
            app_link = self._value.get('default', 'app-link')
        if app_link:
            try:
                if not validators.url(app_link):
                    self.logger.error("The app-link provided: %s is an invalid url" % app_link)
                    sys.exit(-1)
            except TypeError as ex:
                self.logger.error("Malformed string in app link URL. Ensure there are no invalid characters.")
            self.logger.info("App Link set as: %s" % app_link)
            return app_link
        else:
            self.logger.error("The app-link is missing. Will try to retrieve it from Okta")
            return None

    def username_for(self, okta_profile):
        """ Gets username from config """
        if self._value.has_option(okta_profile, 'username'):
            username = self._value.get(okta_profile, 'username')
            self.logger.info("Authenticating as: %s" % username)
        else:
            # NOTE(review): scrubber artifact — the '******' spans below
            # fused what were almost certainly three separate methods:
            # username_for (``input('Enter username: ')``), password_for
            # (config lookup or ``getpass``), and factor_for. The token
            # stream is preserved verbatim; restore structure from VCS.
            username = input('Enter username: '******'password'):
            password = self._value.get(okta_profile, 'password')
        else:
            password = getpass('Enter password: '******'factor'):
            factor = self._value.get(okta_profile, 'factor')
            self.logger.debug("Setting MFA factor to %s" % factor)
            return factor
        return None

    def duration_for(self, okta_profile):
        """ Gets requested duration from config, ignore it on failure """
        if self._value.has_option(okta_profile, 'duration'):
            duration = self._value.get(okta_profile, 'duration')
            self.logger.debug(
                "Requesting a duration of %s seconds" % duration
            )
            try:
                return int(duration)
            except ValueError:
                # non-numeric value in config — warn and fall through
                self.logger.warn(
                    "Duration could not be converted to a number,"
                    " ignoring."
                )
        return None

    def write_role_to_profile(self, okta_profile, role_arn):
        """ Saves role to profile in config """
        if not self._value.has_section(okta_profile):
            self._value.add_section(okta_profile)
        base_url = self.base_url_for(okta_profile)
        self._value.set(okta_profile, 'base-url', base_url)
        self._value.set(okta_profile, 'role', role_arn)
        with open(self.config_path, 'w+') as configfile:
            self._value.write(configfile)

    def write_applink_to_profile(self, okta_profile, app_link):
        """ Saves app link to profile in config """
        if not self._value.has_section(okta_profile):
            self._value.add_section(okta_profile)
        base_url = self.base_url_for(okta_profile)
        self._value.set(okta_profile, 'base-url', base_url)
        self._value.set(okta_profile, 'app-link', app_link)
        with open(self.config_path, 'w+') as configfile:
            self._value.write(configfile)

    @staticmethod
    def get_okta_profiles():
        # Returns the section (profile) names found in ~/.okta-aws;
        # empty list when the file does not exist.
        value = RawConfigParser()
        config_path = os.path.expanduser('~') + '/.okta-aws'
        value.read(config_path)
        return value.sections()
def _read_pypirc(self):
    """Reads the .pypirc file.

    Returns a dict describing the server section that matches
    ``self.repository`` (keys: server, username, password, repository,
    realm), or an empty dict when the file is missing, malformed, or has
    no matching server. Supports both the new [distutils]/index-servers
    layout and the legacy [server-login] layout.
    """
    rc = self._get_rc_file()
    if os.path.exists(rc):
        self.announce('Using PyPI login from %s' % rc)
        repository = self.repository or self.DEFAULT_REPOSITORY
        config = RawConfigParser()
        config.read(rc)
        sections = config.sections()
        if 'distutils' in sections:
            # let's get the list of servers
            index_servers = config.get('distutils', 'index-servers')
            _servers = [
                server.strip() for server in index_servers.split('\n')
                if server.strip() != ''
            ]
            if _servers == []:
                # nothing set, let's try to get the default pypi
                if 'pypi' in sections:
                    _servers = ['pypi']
                else:
                    # the file is not properly defined, returning
                    # an empty dict
                    return {}
            for server in _servers:
                current = {'server': server}
                current['username'] = config.get(server, 'username')
                # optional params
                for key, default in (('repository', self.DEFAULT_REPOSITORY),
                                     ('realm', self.DEFAULT_REALM),
                                     ('password', None)):
                    if config.has_option(server, key):
                        current[key] = config.get(server, key)
                    else:
                        current[key] = default
                # work around people having "repository" for the "pypi"
                # section of their config set to the HTTP (rather than
                # HTTPS) URL
                if (server == 'pypi' and repository
                        in (self.DEFAULT_REPOSITORY, 'pypi')):
                    current['repository'] = self.DEFAULT_REPOSITORY
                    return current
                if (current['server'] == repository
                        or current['repository'] == repository):
                    return current
        elif 'server-login' in sections:
            # old format
            server = 'server-login'
            if config.has_option(server, 'repository'):
                repository = config.get(server, 'repository')
            else:
                repository = self.DEFAULT_REPOSITORY
            return {
                'username': config.get(server, 'username'),
                'password': config.get(server, 'password'),
                'repository': repository,
                'server': server,
                'realm': self.DEFAULT_REALM
            }
    # missing file or no matching server
    return {}
def get_okta_profiles():
    """Return the profile (section) names found in the user's ~/.okta-aws file.

    Returns an empty list when the file does not exist — RawConfigParser
    silently ignores unreadable paths.
    """
    parser = RawConfigParser()
    parser.read(os.path.expanduser('~') + '/.okta-aws')
    return parser.sections()
def get_version():
    """Return the package version declared in ../setup.cfg ([metadata] version)."""
    cfg_path = os.path.join('..', 'setup.cfg')
    parser = RawConfigParser()
    parser.read(cfg_path)
    return parser.get('metadata', 'version')
# [channels] test_group_id: int = 0 # [custom] manual_link: str = "https://manuals.scp-079.org/bots/id/" # [language] lang: str = "cmn-Hans" # [mode] aio: Union[bool, str] = "False" try: not exists(CONFIG_PATH) and raise_error(f"{CONFIG_PATH} does not exists") config = RawConfigParser() config.read(CONFIG_PATH) # [basic] bot_token = config.get("basic", "bot_token", fallback=bot_token) ipv6 = config.get("basic", "ipv6", fallback=ipv6) ipv6 = eval(ipv6) prefix = [ p for p in list(config.get("basic", "prefix", fallback=prefix_str)) if p ] # [channels] test_group_id = int( config.get("channels", "test_group_id", fallback=test_group_id)) # [custom]
try: from configparser import RawConfigParser except ImportError: from ConfigParser import RawConfigParser import logging import os import re import socket import threading from stomp.backward import * log = logging.getLogger('testutils.py') config = RawConfigParser() config.read(os.path.join(os.path.dirname(__file__), 'setup.ini')) header_re = re.compile(r'[^:]+:.*') def get_environ(name): try: return os.environ[name] except: return None def get_default_host(): host = config.get('default', 'host') port = config.get('default', 'port') return [(get_environ('STD_HOST')
def _read_pypirc(self):
    """Read and parse the user's .pypirc file.

    Returns a dict describing the server section matching
    ``self.repository`` (keys: server, username, password, repository,
    realm). Returns an empty dict when the file is missing, improperly
    defined, or no server matches. Handles both the [distutils]
    index-servers layout and the legacy [server-login] layout.
    """
    rc = self._get_rc_file()
    if os.path.exists(rc):
        self.announce("Using PyPI login from %s" % rc)
        repository = self.repository or self.DEFAULT_REPOSITORY
        config = RawConfigParser()
        config.read(rc)
        sections = config.sections()
        if "distutils" in sections:
            # New-style: [distutils] lists the server sections to consult.
            raw_servers = config.get("distutils", "index-servers")
            servers = [name.strip()
                       for name in raw_servers.split("\n")
                       if name.strip() != ""]
            if not servers:
                # Nothing listed: fall back to a bare [pypi] section,
                # otherwise the file is improperly defined.
                if "pypi" not in sections:
                    return {}
                servers = ["pypi"]
            optional = (
                ("repository", self.DEFAULT_REPOSITORY),
                ("realm", self.DEFAULT_REALM),
                ("password", None),
            )
            for name in servers:
                entry = {"server": name,
                         "username": config.get(name, "username")}
                for key, fallback in optional:
                    if config.has_option(name, key):
                        entry[key] = config.get(name, key)
                    else:
                        entry[key] = fallback
                # Normalize a "pypi" section whose repository points at the
                # HTTP (rather than HTTPS) URL.
                if name == "pypi" and repository in (
                    self.DEFAULT_REPOSITORY,
                    "pypi",
                ):
                    entry["repository"] = self.DEFAULT_REPOSITORY
                    return entry
                if repository in (entry["server"], entry["repository"]):
                    return entry
        elif "server-login" in sections:
            # Legacy single-section format.
            section = "server-login"
            if config.has_option(section, "repository"):
                repository = config.get(section, "repository")
            else:
                repository = self.DEFAULT_REPOSITORY
            return {
                "username": config.get(section, "username"),
                "password": config.get(section, "password"),
                "repository": repository,
                "server": section,
                "realm": self.DEFAULT_REALM,
            }
    return {}
class Cache(object):
    """Client Side cache

    Mirrors the repository's chunk index and a per-file cache on local
    disk, with a copy-based transaction scheme (txn.active / txn.tmp)
    for crash safety.
    """

    class RepositoryReplay(Error):
        """Cache is newer than repository, refusing to continue"""

    class CacheInitAbortedError(Error):
        """Cache initialization aborted"""

    class EncryptionMethodMismatch(Error):
        """Repository encryption method changed since last acccess, refusing to continue
        """

    def __init__(self, repository, key, manifest, path=None, sync=True,
                 warn_if_unencrypted=True):
        self.lock = None
        self.timestamp = None
        self.txn_active = False
        self.repository = repository
        self.key = key
        self.manifest = manifest
        # Default cache location: <cache_dir>/<hex repository id>
        self.path = path or os.path.join(
            get_cache_dir(), hexlify(repository.id).decode('ascii'))
        if not os.path.exists(self.path):
            # First contact with an unencrypted repo: ask before caching,
            # unless explicitly overridden via environment variable.
            if warn_if_unencrypted and isinstance(key, PlaintextKey):
                if 'ATTIC_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK' not in os.environ:
                    print(
                        """Warning: Attempting to access a previously unknown unencrypted repository\n""",
                        file=sys.stderr)
                    answer = input('Do you want to continue? [yN] ')
                    if not (answer and answer in 'Yy'):
                        raise self.CacheInitAbortedError()
            self.create()
        self.open()
        if sync and self.manifest.id != self.manifest_id:
            # If repository is older than the cache something fishy is going on
            if self.timestamp and self.timestamp > manifest.timestamp:
                raise self.RepositoryReplay()
            # Make sure an encrypted repository has not been swapped for an unencrypted repository
            if self.key_type is not None and self.key_type != str(key.TYPE):
                raise self.EncryptionMethodMismatch()
            self.sync()
            self.commit()

    def __del__(self):
        self.close()

    def create(self):
        """Create a new empty cache at `path`
        """
        os.makedirs(self.path)
        with open(os.path.join(self.path, 'README'), 'w') as fd:
            fd.write('This is an Attic cache')
        config = RawConfigParser()
        config.add_section('cache')
        config.set('cache', 'version', '1')
        config.set('cache', 'repository',
                   hexlify(self.repository.id).decode('ascii'))
        config.set('cache', 'manifest', '')
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            config.write(fd)
        # Empty chunk index and empty per-file cache.
        ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8'))
        with open(os.path.join(self.path, 'files'), 'w') as fd:
            pass  # empty file

    def open(self):
        # Acquire the lock, roll back any aborted transaction, then load
        # config, chunk index and metadata into memory.
        if not os.path.isdir(self.path):
            raise Exception('%s Does not look like an Attic cache' % self.path)
        self.lock = UpgradableLock(os.path.join(self.path, 'config'),
                                   exclusive=True)
        self.rollback()
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if self.config.getint('cache', 'version') != 1:
            # NOTE(review): the '%s' placeholder here is never filled in —
            # pre-existing flaw, message prints literally.
            raise Exception('%s Does not look like an Attic cache')
        self.id = self.config.get('cache', 'repository')
        self.manifest_id = unhexlify(self.config.get('cache', 'manifest'))
        self.timestamp = self.config.get('cache', 'timestamp', fallback=None)
        self.key_type = self.config.get('cache', 'key_type', fallback=None)
        self.chunks = ChunkIndex.read(
            os.path.join(self.path, 'chunks').encode('utf-8'))
        # Per-file cache is loaded lazily by _read_files().
        self.files = None

    def close(self):
        if self.lock:
            self.lock.release()

    def _read_files(self):
        # Load the msgpack-streamed per-file cache into self.files,
        # incrementing each entry's age counter (item[0]) as it loads.
        self.files = {}
        self._newest_mtime = 0
        with open(os.path.join(self.path, 'files'), 'rb') as fd:
            u = msgpack.Unpacker(use_list=True)
            while True:
                data = fd.read(64 * 1024)
                if not data:
                    break
                u.feed(data)
                for path_hash, item in u:
                    item[0] += 1
                    self.files[path_hash] = msgpack.packb(item)

    def begin_txn(self):
        # Initialize transaction snapshot
        txn_dir = os.path.join(self.path, 'txn.tmp')
        os.mkdir(txn_dir)
        shutil.copy(os.path.join(self.path, 'config'), txn_dir)
        shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
        shutil.copy(os.path.join(self.path, 'files'), txn_dir)
        # Rename is atomic: only a fully-copied snapshot becomes active.
        os.rename(os.path.join(self.path, 'txn.tmp'),
                  os.path.join(self.path, 'txn.active'))
        self.txn_active = True

    def commit(self):
        """Commit transaction
        """
        if not self.txn_active:
            return
        if self.files is not None:
            with open(os.path.join(self.path, 'files'), 'wb') as fd:
                for path_hash, item in self.files.items():
                    # Discard cached files with the newest mtime to avoid
                    # issues with filesystem snapshots and mtime precision
                    item = msgpack.unpackb(item)
                    if item[0] < 10 and bigint_to_int(
                            item[3]) < self._newest_mtime:
                        msgpack.pack((path_hash, item), fd)
        self.config.set('cache', 'manifest',
                        hexlify(self.manifest.id).decode('ascii'))
        self.config.set('cache', 'timestamp', self.manifest.timestamp)
        self.config.set('cache', 'key_type', str(self.key.TYPE))
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            self.config.write(fd)
        self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8'))
        # Retire the transaction snapshot.
        os.rename(os.path.join(self.path, 'txn.active'),
                  os.path.join(self.path, 'txn.tmp'))
        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def rollback(self):
        """Roll back partial and aborted transactions
        """
        # Remove partial transaction
        if os.path.exists(os.path.join(self.path, 'txn.tmp')):
            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        # Roll back active transaction
        txn_dir = os.path.join(self.path, 'txn.active')
        if os.path.exists(txn_dir):
            shutil.copy(os.path.join(txn_dir, 'config'), self.path)
            shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
            shutil.copy(os.path.join(txn_dir, 'files'), self.path)
            os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
            if os.path.exists(os.path.join(self.path, 'txn.tmp')):
                shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def sync(self):
        """Initializes cache by fetching and reading all archive indicies
        """
        def add(id, size, csize):
            # Increment refcount, or insert a fresh (1, size, csize) entry.
            try:
                count, size, csize = self.chunks[id]
                self.chunks[id] = count + 1, size, csize
            except KeyError:
                self.chunks[id] = 1, size, csize
        self.begin_txn()
        print('Initializing cache...')
        self.chunks.clear()
        unpacker = msgpack.Unpacker()
        repository = cache_if_remote(self.repository)
        for name, info in self.manifest.archives.items():
            archive_id = info[b'id']
            cdata = repository.get(archive_id)
            data = self.key.decrypt(archive_id, cdata)
            add(archive_id, len(data), len(cdata))
            archive = msgpack.unpackb(data)
            if archive[b'version'] != 1:
                raise Exception('Unknown archive metadata version')
            decode_dict(archive, (b'name', ))
            print('Analyzing archive:', archive[b'name'])
            # Walk every item metadata chunk and count the chunks each
            # item references.
            for key, chunk in zip(archive[b'items'],
                                  repository.get_many(archive[b'items'])):
                data = self.key.decrypt(key, chunk)
                add(key, len(data), len(chunk))
                unpacker.feed(data)
                for item in unpacker:
                    if b'chunks' in item:
                        for chunk_id, size, csize in item[b'chunks']:
                            add(chunk_id, size, csize)

    def add_chunk(self, id, data, stats):
        # Store a chunk in the repository unless already present;
        # returns the (id, size, csize) triple for the item's chunk list.
        if not self.txn_active:
            self.begin_txn()
        if self.seen_chunk(id):
            return self.chunk_incref(id, stats)
        size = len(data)
        data = self.key.encrypt(data)
        csize = len(data)
        self.repository.put(id, data, wait=False)
        self.chunks[id] = (1, size, csize)
        stats.update(size, csize, True)
        return id, size, csize

    def seen_chunk(self, id):
        # Refcount (0 when unknown).
        return self.chunks.get(id, (0, 0, 0))[0]

    def chunk_incref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        self.chunks[id] = (count + 1, size, csize)
        stats.update(size, csize, False)
        return id, size, csize

    def chunk_decref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        if count == 1:
            # Last reference: remove from index and delete from repository.
            del self.chunks[id]
            self.repository.delete(id, wait=False)
            stats.update(-size, -csize, True)
        else:
            self.chunks[id] = (count - 1, size, csize)
            stats.update(-size, -csize, False)

    def file_known_and_unchanged(self, path_hash, st):
        # Return the cached chunk ids for a file when inode, size and
        # mtime all still match; otherwise None.
        if self.files is None:
            self._read_files()
        entry = self.files.get(path_hash)
        if not entry:
            return None
        entry = msgpack.unpackb(entry)
        if entry[2] == st.st_size and bigint_to_int(
                entry[3]) == st_mtime_ns(st) and entry[1] == st.st_ino:
            # reset entry age
            entry[0] = 0
            self.files[path_hash] = msgpack.packb(entry)
            return entry[4]
        else:
            return None

    def memorize_file(self, path_hash, st, ids):
        # Entry: Age, inode, size, mtime, chunk ids
        mtime_ns = st_mtime_ns(st)
        self.files[path_hash] = msgpack.packb(
            (0, st.st_ino, st.st_size, int_to_bigint(mtime_ns), ids))
        self._newest_mtime = max(self._newest_mtime, mtime_ns)
url = "http://xlr:5516/api/v1/config/byTypeAndTitle?configurationType=%s&title=%s" % (server_type, url_encoded_title) request = urllib.request.Request(url) request.add_header("Authorization", "Basic %s" % base64string) result = urllib.request.urlopen(request) return json.loads(result.read())[0] def save_configuration_object(config_object): headers = {'Content-Type': 'application/json'} request = urllib.request.Request("http://xlr:5516/api/v1/config/%s" % (config_object["id"]), json.dumps(config_object).encode("utf-8"), headers) request.add_header("Authorization", "Basic %s" % base64string) request.get_method = lambda: 'PUT' result2 = urllib.request.urlopen(request) def update_ci(server_title, server_type, username, properties): print("Processing credential [%s] for server type [%s] with title [%s]" % (username, server_type, server_title)) config_object = get_configuration_object(section, server_type) for item in properties: config_object[item[0]] = item[1] save_configuration_object(config_object) cp = RawConfigParser() #To avoid parser to convert all keys to lowercase by default cp.optionxform = str cp.read(sys.argv[1]) for section in cp.sections(): update_ci(section, cp.get(section, "type"), cp.get(section, "username"), cp.items(section)) print("Updated credentials")
Django settings for web project. Generated by 'django-admin startproject' using Django 1.10.5. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os from configparser import RawConfigParser config_parser = RawConfigParser() configfile = config_parser.read('sparktw.config.ini') if bool(configfile): secret_key = config_parser.get('global', 'secret_key') allow_hosts = config_parser.get('global', 'allow_hosts').split(' ') debug_mode = not config_parser.get('global', 'scenario').startswith('deploy') redis_hosts = config_parser.get('global', 'redis_hosts').split() else: secret_key = 'my-secert-key' allow_hosts = ['localhost', '127.0.0.1'] redis_hosts = ['redis://localhost:6379'] debug_mode = True # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class MainView(QMainWindow, SubjectInfo, TaskManager, SequenceManager, ExpProtocol, EventNumber, FilePathManager,\
               ChannelScaleManager, ChannelSelector, ChannelFilter, BadEpochMonitor, MRCPExtractor, MainSwitch,\
               ScopeSwitch, RecordSwitch, TaskSwitch, EventPlot, SSVEPExpProtocol, EyeTracker, RunTimer, GUITimer):
    """
    MainView class controls the GUI frontend interaction
    """

    # NOTE(review): the default state=mp.Value('i', 1) is evaluated once at
    # class-definition time, so all instances created without an explicit
    # `state` share the same multiprocessing value — confirm this is intended.
    def __init__(self, amp_name, amp_serial, state=mp.Value('i', 1), queue=None):
        """
        Initialize experimenter window GUI and subject view window GUI

        :amp_name: amplifier name passed from LSL
        :amp_serial: amplifier serial passed from LSL
        """
        super(MainView, self).__init__()
        self.router = Router()
        # Experimenter (main) window
        self.ui = main_layout.Ui_MainWindow()
        self.ui.setupUi(self)
        # Subject view window (separate top-level window)
        self.window = QMainWindow()
        self.SV_window = subject_layout.Ui_SV()
        self.SV_window.setupUi(self.window)
        # Eye-tracker calibration dialog
        self.eye_tracker_dialog = QDialog()
        self.eye_tracker_window = eye_tracker_layout.Ui_Dialog()
        self.eye_tracker_window.setupUi(self.eye_tracker_dialog)
        # redirect_stdout_to_queue(logger, queue, 'INFO')
        logger.info('Viewer launched')
        self.amp_name = amp_name
        self.amp_serial = amp_serial
        self.state = state
        self.init_all()

    def init_all(self):
        """
        Initialize specialized functions inside GUI
        """
        self.init_config_file()
        self.init_loop()
        self.init_panel_GUI()
        self.init_event_functions()
        self.init_SV_GUI()
        self.init_scope_GUI()
        self.init_timer()  # timer for scope refreshing
        self.init_Runtimer()  # timer for record, train and test
        self.init_eye_tracker()

    def init_config_file(self):
        """
        Initialize config file
        """
        self.scope_settings = RawConfigParser(allow_no_value=True,
                                              inline_comment_prefixes=('#', ';'))
        # Device name is inferred from the first command-line argument, if any;
        # only known amplifier types get channel names shown.
        if (len(sys.argv) == 1):
            self.show_channel_names = 0
            self.device_name = ""
        else:
            if (sys.argv[1].find("gtec") > -1):
                self.device_name = "gtec"
                self.show_channel_names = 1
            elif (sys.argv[1].find("biosemi") > -1):
                self.device_name = "biosemi"
                self.show_channel_names = 1
            elif (sys.argv[1].find("hiamp") > -1):
                self.device_name = "hiamp"
                self.show_channel_names = 1
            else:
                self.device_name = ""
                self.show_channel_names = 0
        # self.scope_settings.read(os.getenv("HOME") + "/.scope_settings.ini")
        # Settings are read from the current working directory.
        self.scope_settings.read('.scope_settings.ini')

    def init_loop(self):
        """
        Initialize loop related variables like StreamReceiver and self.eeg
        """
        self.updating = False
        logger.info("init_loop runs")
        self.sr = StreamReceiver(window_size=1, buffer_size=10,
                                 amp_serial=Variables.get_amp_serial(),
                                 amp_name=Variables.get_amp_name())
        srate = int(self.sr.sample_rate)
        # n_channels= self.sr.channels
        # 12 unsigned ints (4 bytes)
        ########## TODO: assuming 32 samples chunk => make it read from LSL header
        data = [
            'EEG', srate, ['L', 'R'], 32, len(self.sr.get_eeg_channels()), 0,
            self.sr.get_trigger_channel(), None, None, None, None, None
        ]
        logger.info('Trigger channel is %d' % self.sr.get_trigger_channel())
        # NOTE: data[7] is intentionally skipped below.
        self.config = {
            'id': data[0],
            'sf': data[1],
            'labels': data[2],
            'samples': data[3],
            'eeg_channels': data[4],
            'exg_channels': data[5],
            'tri_channels': data[6],
            'eeg_type': data[8],
            'exg_type': data[9],
            'tri_type': data[10],
            'lbl_type': data[11],
            'tim_size': 1,
            'idx_size': 1
        }
        self.tri = np.zeros(self.config['samples'])
        self.last_tri = 0
        # NOTE(review): np.float is a deprecated alias removed in NumPy 1.20 —
        # this breaks under modern NumPy; plain `float` is the equivalent.
        self.eeg = np.zeros(
            (self.config['samples'], self.config['eeg_channels']),
            dtype=np.float)
        self.exg = np.zeros(
            (self.config['samples'], self.config['exg_channels']),
            dtype=np.float)
        self.ts_list = []
        self.ts_list_tri = []

    def init_event_functions(self):
        """
        Initialize event listeners for widgets in GUI
        """
        # Control buttons
        self.ui.pushButton_Main_switch.clicked.connect(
            self.onClicked_button_Main_switch)
        self.ui.pushButton_start_SV.clicked.connect(
            self.onClicked_button_start_SV)
        self.ui.pushButton_scope_switch.clicked.connect(
            self.onClicked_button_scope_switch)
        self.ui.pushButton_rec.clicked.connect(self.onClicked_button_rec)
        # self.ui.pushButton_start_train.clicked.connect(self.onClicked_button_train)
        # self.ui.pushButton_start_test.clicked.connect(self.onClicked_button_test)
        # Subject information
        self.ui.pushButton_save.clicked.connect(
            self.onClicked_button_save_subject_information)
        # Experimental protocol
        self.ui.pushButton_define_task_done.clicked.connect(
            self.onClicked_button_define_task_done)
        self.ui.pushButton_define_task_add.clicked.connect(
            self.onClicked_button_define_task_add)
        self.ui.pushButton_create_sequence.clicked.connect(
            self.onClicked_button_create_sequence)
        self.ui.pushButton_randomize.clicked.connect(
            self.onClicked_button_randomize)
        self.ui.toolButton_choose_image_task.clicked.connect(
            self.onClicked_toolButton_choose_image_task)
        self.ui.toolButton_choose_sound_task.clicked.connect(
            self.onClicked_toolButton_choose_sound_task)
        self.ui.pushButton_experimental_protocol_finish.clicked.connect(
            self.onClicked_experimental_protocol_finish)
        self.ui.pushButton_save_protocol.clicked.connect(
            self.onClicked_button_save_protocol)
        self.ui.toolButton_load_protocol.clicked.connect(
            self.onClicked_toolButton_load_protocol)
        # Event management tab
        self.ui.pushButton_save_event_number.clicked.connect(
            self.onClicked_button_save_event_number)
        # Oscilloscope
        self.ui.comboBox_scale.activated.connect(
            self.onActivated_combobox_scale)
        self.ui.spinBox_time.valueChanged.connect(
            self.onValueChanged_spinbox_time)
        self.ui.checkBox_car.stateChanged.connect(
            self.onActivated_checkbox_car)
        self.ui.checkBox_bandpass.stateChanged.connect(
            self.onActivated_checkbox_bandpass)
        self.ui.checkBox_notch.stateChanged.connect(
            self.onActivated_checkbox_notch)
        self.ui.pushButton_bp.clicked.connect(self.onClicked_button_bp)
        self.ui.pushButton_apply_notch.clicked.connect(
            self.onClicked_button_notch)
        self.ui.table_channels.itemSelectionChanged.connect(
            self.onSelectionChanged_table)
        self.ui.table_channels.doubleClicked.connect(
            self.onDoubleClicked_channel_table)
        self.ui.pushButton_update_channel_name.clicked.connect(
            self.onClicked_button_update_channel_name)
        # Event filter on the viewport enables right-click channel scaling
        # (see eventFilter below).
        self.ui.table_channels.viewport().installEventFilter(self)
        # SSVEP
        self.ui.pushButton_ssvep_task.clicked.connect(
            self.onClicked_pushButton_ssvep_task)
        # eye tracker
        self.ui.pushButton_open_eye_tracker_ui.clicked.connect(
            self.onClicked_pushButton_open_eye_tracker_ui)
        # MRCP tab
        self.ui.pushButton_temp_clear.clicked.connect(
            self.onClicked_button_temp_clear)
        self.ui.pushButton_temp_mean.clicked.connect(
            self.onClicked_button_temp_mean)
        self.ui.pushButton_temp_view.clicked.connect(
            self.onClicked_button_temp_view)
        self.ui.pushButton_temp_remove.clicked.connect(
            self.onClicked_button_temp_remove)

    def init_panel_GUI(self):
        """
        Initialize experimenter GUI
        """
        # Tabs
        self.ui.tab_experimental_protocol.setEnabled(False)
        self.ui.tab_subjec_information.setEnabled(False)
        self.ui.tab_event_and_file_management.setEnabled(False)
        # self.ui.tab_Oscilloscope.setEnabled(False)
        self.ui.tab_experiment_type.setEnabled(False)
        # Experimental protocol
        self.task_list = []
        self.new_task_list = []
        self.task_descriptor_list = []
        self.task_image_path = ""
        self.task_image_path_list = []
        self.task_sound_path = ""
        self.task_sound_path_list = []
        self.task_table = np.ndarray([])
        self.new_task_table = np.ndarray([])
        self.task_counter = 0
        self.protocol_path = ""
        # Button
        self.init_task_name_table()
        self.ui.groupBox_sequence_manager.setEnabled(False)
        # Event management tab
        self.event_timestamp_list = []
        self.init_task_event_number_table()
        self.event_list = []
        # Button
        # NOTE(review): this connect duplicates the one made in
        # init_event_functions — the slot will fire twice per click. Confirm.
        self.ui.pushButton_save_event_number.clicked.connect(
            self.onClicked_button_save_event_number)
        self.event_file_path = ""
        self.mrcp_template_file_path = ""
        self.raw_eeg_file_path = ""
        self.raw_mrcp_file_path = ""
        self.subject_file_path = ""
        # Oscilloscope
        self.ui.comboBox_scale.setCurrentIndex(4)
        self.ui.checkBox_notch.setChecked(True)
        # self.ui.checkBox_car.setChecked(
        #     int(self.scope_settings.get("filtering", "apply_car_filter")))
        # self.ui.checkBox_bandpass.setChecked(
        #     int(self.scope_settings.get("filtering", "apply_bandpass_filter")))
        self.ui.pushButton_apply_notch.setEnabled(True)
        self.ui.doubleSpinBox_lc_notch.setEnabled(True)
        self.ui.doubleSpinBox_hc_notch.setEnabled(True)
        # initialize channel selection panel in main view GUI
        # Channels are laid out column-major in a 4-column table.
        self.channels_to_show_idx = []
        idx = 0
        for y in range(0, 4):
            for x in range(0, NUM_X_CHANNELS):
                if idx < self.config['eeg_channels']:
                    # self.table_channels.item(x,y).setTextAlignment(QtCore.Qt.AlignCenter)
                    self.ui.table_channels.item(x, y).setSelected(True)  # Qt5
                    # self.table_channels.setItemSelected(self.table_channels.item(x, y), True) # Qt4 only
                    self.channels_to_show_idx.append(idx)
                else:
                    # Cells beyond the available channels are disabled.
                    self.ui.table_channels.setItem(x, y,
                                                   QTableWidgetItem("N/A"))
                    self.ui.table_channels.item(x, y).setFlags(
                        QtCore.Qt.NoItemFlags)
                    self.ui.table_channels.item(x, y).setTextAlignment(
                        QtCore.Qt.AlignCenter)
                idx += 1
        self.ui.table_channels.verticalHeader().setStretchLastSection(True)
        self.ui.table_channels.horizontalHeader().setStretchLastSection(True)
        # -1 means "no single channel selected for scaling".
        self.channel_to_scale_row_index = -1
        self.channel_to_scale_column_index = -1
        self.selected_channel_row_index = 0
        self.selected_channel_column_index = 0
        self.single_channel_scale = 1
        # MRCP tab
        self.init_class_epoch_counter_table()
        self.init_class_bad_epoch_table()
        self.show_TID_events = False
        self.show_LPT_events = False
        self.show_Key_events = False
        self.raw_trial_MRCP = np.ndarray([])
        self.processed_trial_MRCP = np.ndarray([])
        self.total_trials_MRCP = []
        self.total_trials_raw_MRCP = []
        self.total_MRCP_inds = []
        self.temp_counter = 0
        self.temp_counter_list = []
        self.input_temp_list = []
        self.display_temp_list = []
        self.selected_temp = ""
        self.list_selected_temp = []
        # 6-second rolling buffer of raw samples for template extraction.
        self.template_buffer = np.zeros(
            (6 * int(self.sr.sample_rate), self.config['eeg_channels']),
            dtype=float)
        self.b_lp, self.a_lp = Utils.butter_lowpass(3,
                                                    int(self.sr.sample_rate),
                                                    2)
        self.b_hp, self.a_hp = Utils.butter_highpass(0.05,
                                                     int(self.sr.sample_rate),
                                                     2)
        self.initial_condition_list_lp = Utils.construct_initial_condition_list(
            self.b_lp, self.a_lp, self.config['eeg_channels'])
        self.initial_condition_list_hp = Utils.construct_initial_condition_list(
            self.b_hp, self.a_hp, self.config['eeg_channels'])
        self.ui.pushButton_bad_epoch.clicked.connect(
            self.onClicked_button_bad_epoch)
        self.screen_width = 522
        self.screen_height = 160
        # self.setGeometry(100,100, self.screen_width, self.screen_height)
        # self.setFixedSize(self.screen_width, self.screen_height)
        self.setWindowTitle('EEG Scope Panel')
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.setFocus()
        logger.info('GUI show')
        self.show()

    def init_panel_GUI_stop_recording(self):
        """
        Initialize experimenter GUI when stop recording button pressed. This is
        used to prepare for next run.
        """
        # NOTE(review): this is largely a copy of init_panel_GUI with the
        # channel-selection loop and notch widgets disabled — candidate for
        # refactoring into a shared helper.
        # Tabs
        self.ui.tab_experimental_protocol.setEnabled(False)
        self.ui.tab_subjec_information.setEnabled(False)
        self.ui.tab_event_and_file_management.setEnabled(False)
        # self.ui.tab_Oscilloscope.setEnabled(False)
        self.ui.tab_experiment_type.setEnabled(False)
        # Experimental protocol
        self.task_list = []
        self.new_task_list = []
        self.task_descriptor_list = []
        self.task_image_path = ""
        self.task_image_path_list = []
        self.task_sound_path = ""
        self.task_sound_path_list = []
        self.task_table = np.ndarray([])
        self.new_task_table = np.ndarray([])
        self.task_counter = 0
        self.protocol_path = ""
        # Button
        self.init_task_name_table()
        self.ui.groupBox_sequence_manager.setEnabled(False)
        # Event management tab
        self.event_timestamp_list = []
        self.init_task_event_number_table()
        self.event_list = []
        # Button
        self.ui.pushButton_save_event_number.clicked.connect(
            self.onClicked_button_save_event_number)
        self.event_file_path = ""
        self.mrcp_template_file_path = ""
        self.raw_eeg_file_path = ""
        self.raw_mrcp_file_path = ""
        self.subject_file_path = ""
        # Oscilloscope
        self.ui.comboBox_scale.setCurrentIndex(4)
        self.ui.checkBox_notch.setChecked(True)
        # self.ui.checkBox_car.setChecked(
        #     int(self.scope_settings.get("filtering", "apply_car_filter")))
        # self.ui.checkBox_bandpass.setChecked(
        #     int(self.scope_settings.get("filtering", "apply_bandpass_filter")))
        # self.ui.pushButton_apply_notch.setEnabled(False)
        self.ui.doubleSpinBox_lc_notch.setEnabled(False)
        self.ui.doubleSpinBox_hc_notch.setEnabled(False)
        # # initialize channel selection panel in main view GUI
        # self.channels_to_show_idx = []
        # idx = 0
        # for y in range(0, 4):
        #     for x in range(0, NUM_X_CHANNELS):
        #         if idx < self.config['eeg_channels']:
        #             # self.table_channels.item(x,y).setTextAlignment(QtCore.Qt.AlignCenter)
        #             self.ui.table_channels.item(x, y).setSelected(True)  # Qt5
        #             # self.table_channels.setItemSelected(self.table_channels.item(x, y), True) # Qt4 only
        #             self.channels_to_show_idx.append(idx)
        #         else:
        #             self.ui.table_channels.setItem(x, y,
        #                                            QTableWidgetItem("N/A"))
        #             self.ui.table_channels.item(x, y).setFlags(
        #                 QtCore.Qt.NoItemFlags)
        #             self.ui.table_channels.item(x, y).setTextAlignment(
        #                 QtCore.Qt.AlignCenter)
        #         idx += 1
        self.ui.table_channels.verticalHeader().setStretchLastSection(True)
        self.ui.table_channels.horizontalHeader().setStretchLastSection(True)
        self.channel_to_scale_row_index = -1
        self.channel_to_scale_column_index = -1
        self.selected_channel_row_index = 0
        self.selected_channel_column_index = 0
        self.single_channel_scale = 1
        # MRCP tab
        self.init_class_epoch_counter_table()
        self.init_class_bad_epoch_table()
        self.show_TID_events = False
        self.show_LPT_events = False
        self.show_Key_events = False
        self.raw_trial_MRCP = np.ndarray([])
        self.processed_trial_MRCP = np.ndarray([])
        self.total_trials_MRCP = []
        self.total_trials_raw_MRCP = []
        self.total_MRCP_inds = []
        self.temp_counter = 0
        self.temp_counter_list = []
        self.input_temp_list = []
        self.display_temp_list = []
        self.selected_temp = ""
        self.list_selected_temp = []
        self.template_buffer = np.zeros(
            (6 * int(self.sr.sample_rate), self.config['eeg_channels']),
            dtype=float)
        self.b_lp, self.a_lp = Utils.butter_lowpass(3,
                                                    int(self.sr.sample_rate),
                                                    2)
        self.b_hp, self.a_hp = Utils.butter_highpass(0.05,
                                                     int(self.sr.sample_rate),
                                                     2)
        self.initial_condition_list_lp = Utils.construct_initial_condition_list(
            self.b_lp, self.a_lp, self.config['eeg_channels'])
        self.initial_condition_list_hp = Utils.construct_initial_condition_list(
            self.b_hp, self.a_hp, self.config['eeg_channels'])
        self.ui.pushButton_bad_epoch.clicked.connect(
            self.onClicked_button_bad_epoch)
        self.screen_width = 522
        self.screen_height = 160
        # self.setGeometry(100,100, self.screen_width, self.screen_height)
        # self.setFixedSize(self.screen_width, self.screen_height)
        self.setWindowTitle('EEG Scope Panel')
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.setFocus()
        self.show()

    def init_SV_GUI(self):
        """
        Initialize subject view GUI
        """
        self.SVStatus = 0
        self.starttime = 0
        self.SV_time = 0
        # Cue phase boundaries are cumulative offsets (seconds) read from the
        # line edits in the experimenter GUI.
        self.idle_time = int(self.ui.idleTimeLineEdit.text())
        self.focus_time = self.idle_time + int(
            self.ui.focusTimeLineEdit.text())
        self.prepare_time = self.focus_time + int(
            self.ui.prepareTimeLineEdit.text())
        self.two_time = self.prepare_time + int(self.ui.twoTimeLineEdit.text())
        self.one_time = self.two_time + int(self.ui.oneTimeLineEdit.text())
        self.task_time = self.one_time + int(self.ui.taskTimeLineEdit.text())
        self.relax_time = self.task_time + 2  # fixed 2 s relax phase
        self.cycle_time = self.relax_time
        self.is_experiment_on = False

    def init_scope_GUI(self):
        """
        Initialize oscilloscope GUI
        """
        self.bool_parser = {True: '1', False: '0'}
        # PyQTGraph plot initialization
        self.win = pg.GraphicsWindow()
        self.win.setWindowTitle('EEG Scope')
        self.win.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
        self.win.keyPressEvent = self.keyPressEvent
        # self.win.show()
        self.main_plot_handler = self.win.addPlot()
        self.win.resize(1280, 800)
        # Scales available in the GUI. If you change the options in the GUI
        # you should change them here as well
        self.scales_range = [1, 10, 25, 50, 100, 250, 500, 1000, 2500, 100000]
        self.single_scales_range = [
            0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.5, 1.7, 1.8, 2
        ]
        # Scale in uV
        self.scale = 100
        # Time window to show in seconds
        self.seconds_to_show = 10
        # Y Tick labels. Use values from the config file.
        self.channel_labels = []
        values = []
        '''
        For non-LSL systems having no channel names
        for x in range(0, self.config['eeg_channels']):
            if (self.show_channel_names):
                self.channel_labels.append("(" + str(x + 1) + ") " +
                                           self.scope_settings.get("internal",
                                                                   "channel_names_" + self.device_name + str(
                                                                       self.config['eeg_channels'])).split(', ')[x])
            else:
                self.channel_labels.append('CH ' + str(x + 1))
        '''
        ch_names = np.array(self.sr.get_channel_names())
        self.channel_labels = ch_names[self.sr.get_eeg_channels()]
        # One y-axis tick per displayed channel, stacked self.scale apart.
        for x in range(0, len(self.channels_to_show_idx)):
            values.append((-x * self.scale,
                           self.channel_labels[self.channels_to_show_idx[x]]))
        values_axis = []
        values_axis.append(values)
        values_axis.append([])
        # Update table labels with current names
        idx = 0
        for y in range(0, 4):
            for x in range(0, NUM_X_CHANNELS):
                if (idx < self.config['eeg_channels']):
                    self.ui.table_channels.item(x, y).setText(
                        self.channel_labels[idx])
                idx += 1
        # Plot initialization
        # Plotting colors. If channels > 16, colors will roll back to the beginning
        self.colors = np.array(
            [[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0],
             [0, 255, 255], [255, 0, 255], [128, 100, 100], [0, 128, 0],
             [0, 128, 128], [128, 128, 0], [255, 128, 128], [128, 0, 128],
             [128, 255, 0], [255, 128, 0], [0, 255, 128], [128, 0, 255]])
        # pen = pg.mkColor(self.colors)
        # self.main_plot_handler.getAxis('left').setTextPen('b')
        self.main_plot_handler.getAxis('left').setTicks(values_axis)
        self.main_plot_handler.setRange(
            xRange=[0, self.seconds_to_show],
            yRange=[
                +1.5 * self.scale,
                -0.5 * self.scale - self.scale * self.config['eeg_channels']
            ])
        self.main_plot_handler.disableAutoRange()
        self.main_plot_handler.showGrid(y=True)
        self.main_plot_handler.setLabel(axis='left',
                                        text='Scale (uV): ' + str(self.scale))
        self.main_plot_handler.setLabel(axis='bottom', text='Time (s)')
        # X axis
        self.x_ticks = np.zeros(self.config['sf'] * self.seconds_to_show)
        for x in range(0, self.config['sf'] * self.seconds_to_show):
            self.x_ticks[x] = (x * 1) / float(self.config['sf'])
        # We want a lightweight scope, so we downsample the plotting to 64 Hz
        # NOTE(review): true division — this is a float under Python 3; the
        # setDownsampling call that would consume it is commented out below.
        self.subsampling_value = self.config['sf'] / 64
        # EEG data for plotting
        self.data_plot = np.zeros((self.config['sf'] * self.seconds_to_show,
                                   self.config['eeg_channels']))
        print('self.data plot shape: ', self.data_plot.shape)
        self.curve_eeg = []
        for x in range(0, len(self.channels_to_show_idx)):
            self.curve_eeg.append(
                self.main_plot_handler.plot(
                    x=self.x_ticks,
                    y=self.data_plot[:, self.channels_to_show_idx[x]],
                    pen=pg.mkColor(
                        self.colors[self.channels_to_show_idx[x] % 16, :])))
            # self.curve_eeg[-1].setDownsampling(ds=self.subsampling_value, auto=False, method="mean")
        # Events data
        self.events_detected = []
        self.events_curves = []
        self.events_text = []
        # CAR initialization
        self.apply_car = False
        self.matrix_car = np.zeros(
            (self.config['eeg_channels'], self.config['eeg_channels']),
            dtype=float)
        self.matrix_car[:, :] = -1 / float(self.config['eeg_channels'])
        np.fill_diagonal(self.matrix_car,
                         1 - (1 / float(self.config['eeg_channels'])))
        # Laplacian initialization. TO BE DONE
        self.matrix_lap = np.zeros(
            (self.config['eeg_channels'], self.config['eeg_channels']),
            dtype=float)
        np.fill_diagonal(self.matrix_lap, 1)
        self.matrix_lap[2, 0] = -1
        # NOTE(review): [0, 2] is assigned twice with the same value — one of
        # these was probably meant to target a different element. Confirm.
        self.matrix_lap[0, 2] = -0.25
        self.matrix_lap[0, 2] = -0.25
        # BP initialization
        self.apply_bandpass = 1
        if (self.apply_bandpass):
            self.ui.doubleSpinBox_lp.setValue(40.0)
            self.ui.doubleSpinBox_hp.setValue(1.0)
            self.ui.doubleSpinBox_lp.setMinimum(0)
            # Upper limit just below Nyquist.
            self.ui.doubleSpinBox_lp.setMaximum(self.sr.sample_rate / 2 - 0.1)
            self.ui.doubleSpinBox_lp.setSingleStep(1)
            self.ui.doubleSpinBox_hp.setMinimum(0)
            self.ui.doubleSpinBox_hp.setMaximum(self.sr.sample_rate / 2 - 0.1)
            self.ui.doubleSpinBox_hp.setSingleStep(1)
            self.ui.pushButton_bp.click()
        # notch initialization
        self.apply_notch = 1
        if (self.apply_notch):
            self.ui.doubleSpinBox_lc_notch.setValue(58.0)
            self.ui.doubleSpinBox_hc_notch.setValue(62.0)
            self.ui.doubleSpinBox_lc_notch.setMinimum(0.1)
            self.ui.doubleSpinBox_lc_notch.setMaximum(
                self.sr.sample_rate / 2 - 0.1)
            self.ui.doubleSpinBox_lc_notch.setSingleStep(1)
            self.ui.doubleSpinBox_hc_notch.setMinimum(0.1)
            self.ui.doubleSpinBox_hc_notch.setMaximum(
                self.sr.sample_rate / 2 - 0.1)
            self.ui.doubleSpinBox_hc_notch.setSingleStep(1)
            self.ui.pushButton_apply_notch.click()
        self.ui.checkBox_bandpass.setChecked(self.apply_bandpass)
        # Refilter coefficients start as copies of the scope filter state;
        # these attributes are presumably set by the filter button slots —
        # TODO confirm against ChannelFilter.
        self.b_bandpass_scope_refilter = self.b_bandpass_scope
        self.a_bandpass_scope_refilter = self.a_bandpass_scope
        self.zi_bandpass_scope_refilter = self.zi_bandpass_scope
        self.b_notch_scope_refilter = self.b_notch_scope
        self.a_notch_scope_refilter = self.a_notch_scope
        self.zi_notch_scope_refilter = self.zi_notch_scope
        self.update_title_scope()
        # Help variables
        self.show_help = 0
        self.help = pg.TextItem(
            "CNBI EEG Scope v0.3 \n" +
            "----------------------------------------------------------------------------------\n" +
            "C: De/activate CAR Filter\n" +
            "B: De/activate Bandpass Filter (with current settings)\n" +
            "T: Show/hide TiD events\n" +
            "L: Show/hide LPT events\n" +
            "K: Show/hide Key events. If not shown, they are NOT recorded!\n" +
            "0-9: Add a user-specific Key event. Do not forget to write down why you marked it.\n" +
            "Up, down arrow keys: Increase/decrease the scale, steps of 10 uV\n" +
            "Left, right arrow keys: Increase/decrease the time to show, steps of 1 s\n" +
            "Spacebar: Stop the scope plotting, whereas data acquisition keeps running (EXPERIMENTAL)\n" +
            "Esc: Exits the scope",
            anchor=(0, 0),
            border=(70, 70, 70),
            fill=pg.mkColor(20, 20, 20, 200),
            color=(255, 255, 255))
        # Stop plot functionality
        self.stop_plot = 0
        # Force repaint even when we shouldn't repaint.
        self.force_repaint = 1

    def init_timer(self):
        """
        Initialize main timer used for refreshing oscilloscope window. This
        refreshes every 20ms.
        """
        self.os_time_list1 = []
        QtCore.QCoreApplication.processEvents()
        QtCore.QCoreApplication.flush()
        self.timer = QtCore.QTimer(self)
        self.timer.setTimerType(QtCore.Qt.PreciseTimer)
        self.timer.timeout.connect(self.update_loop)
        self.timer.start(20)

    def init_Runtimer(self):
        """
        Initialize task related timer which controls the timing for visual cues
        """
        self.time_show = 0
        self.os_time_list = []
        self.Runtimer = task.LoopingCall(self.Time)

    def init_eye_tracker(self):
        # Wire the 9 calibration-point buttons plus gaze/record controls of
        # the eye-tracker dialog.
        self.eye_tracker_window.tableWidget.setRowCount(9)
        self.eye_tracker_window.pushButton_1.clicked.connect(self.update_cal1)
        self.eye_tracker_window.pushButton_2.clicked.connect(self.update_cal2)
        self.eye_tracker_window.pushButton_3.clicked.connect(self.update_cal3)
        self.eye_tracker_window.pushButton_4.clicked.connect(self.update_cal4)
        self.eye_tracker_window.pushButton_5.clicked.connect(self.update_cal5)
        self.eye_tracker_window.pushButton_6.clicked.connect(self.update_cal6)
        self.eye_tracker_window.pushButton_7.clicked.connect(self.update_cal7)
        self.eye_tracker_window.pushButton_8.clicked.connect(self.update_cal8)
        self.eye_tracker_window.pushButton_9.clicked.connect(self.update_cal9)
        self.eye_tracker_window.pushButton_12.clicked.connect(
            self.update_current_gaze_loc)
        self.eye_tracker_window.pushButton_13.clicked.connect(
            self.recording_data)
        self.eye_tracker_window.pushButton_14.clicked.connect(
            self.recording_stop)
        self.rec_time = int(self.eye_tracker_window.LineEdit_rec.text())
        # self.LineEdit_rec.clicked.conntect(self.update_rec_time(int(self.LineEdit_rec.text())))
        self.gaze_x = 0
        self.gaze_y = 0
        self.table_row = 0
        self.table_col = 0
        # print(self.gaze_x, self.gaze_y)
        self.UTC_time = 0
        # List of values in 9 points
        self.points = np.zeros((9, 2))
        self.gaze_loc = 0

    def trigger_help(self):
        """Shows / hide help in the scope window"""
        if self.show_help:
            self.help.setPos(0, self.scale)
            self.main_plot_handler.addItem(self.help)
            self.help.setZValue(1)
        else:
            self.main_plot_handler.removeItem(self.help)

    def eventFilter(self, source, event):
        """
        Select single channel to scale by right clicking

        :param source: channel table content
        :param event: right mouse button press
        :return: ID of the selected channel
        """
        if (event.type() == QtCore.QEvent.MouseButtonPress and
                event.buttons() == QtCore.Qt.RightButton and
                source is self.ui.table_channels.viewport()):
            item = self.ui.table_channels.itemAt(event.pos())
            # print('Global Pos:', event.globalPos())
            if item is not None:
                self.channel_to_scale_row_index = item.row()
                self.channel_to_scale_column_index = item.column()
                print("RRRRRRRRR", self.channel_to_scale_row_index,
                      self.channel_to_scale_column_index)
                # print('Table Item:', item.row(), item.column())
                # self.menu = QMenu(self)
                # self.menu.addAction(item.text())  # (QAction('test'))
                # menu.exec_(event.globalPos())
        return super(MainView, self).eventFilter(source, event)
║       JD入会领豆 - 轻松日撸千豆      ║
║                                      ║
════════════════════════════════════════
                            @Version: {}""".format(version)
remarks = 'Ps:您可以到以下途径获取最新的shopid.txt,定期更新:\n\n\tGitHub:https://github.com/curtinlv/JD-Script\n\n\tTG频道:https://t.me/TopStyle2021\n\n\t关注公众号【TopStyle】回复:shopid\n\n\n\t\t\t--By Curtin\n'
timestamp = int(round(time.time() * 1000))  # current time in milliseconds
today = datetime.datetime.now().strftime('%Y-%m-%d')
# Current working directory as a plain string: repr() then strip the quotes.
pwd = repr(os.getcwd())
pwd = pwd.replace('\'', '')
# Read user parameters from OpenCardConfig.ini. If the first UTF-8 read
# fails (typically a UTF-8 BOM), rewrite the file without the BOM and retry
# as UTF-8, finally falling back to GBK.
try:
    configinfo = RawConfigParser()
    try:
        configinfo.read(pwd + "/OpenCardConfig.ini", encoding="UTF-8")
    except Exception as e:
        with open(pwd + "/OpenCardConfig.ini", "r", encoding="UTF-8") as config:
            getConfig = config.read().encode('utf-8').decode('utf-8-sig')
        with open(pwd + "/OpenCardConfig.ini", "w", encoding="UTF-8") as config:
            config.write(getConfig)
        try:
            configinfo.read(pwd + "/OpenCardConfig.ini", encoding="UTF-8")
        except:
            configinfo.read(pwd + "/OpenCardConfig.ini", encoding="gbk")
    cookies = configinfo.get('main', 'JD_COOKIE')
    openCardBean = configinfo.getint('main', 'openCardBean')
    sleepNum = configinfo.getfloat('main', 'sleepNum')
    record = configinfo.getboolean('main', 'record')
import psycopg2
from configparser import RawConfigParser
from discord.ext.commands import NotOwner

# Database credentials are read once, at import time, from the [DATABASE]
# section of secret.ini.
config = RawConfigParser()
config.read("secret.ini")

USER = config.get("DATABASE", "USER")
PASSWORD = config.get("DATABASE", "PASSWORD")
HOST = config.get("DATABASE", "HOST")
PORT = config.get("DATABASE", "PORT")
DATABASE = config.get("DATABASE", "DATABASE")


class ServerConfig:
    """Holds a PostgreSQL connection for per-server configuration lookups."""

    def __init__(self):
        # Stays None if connect() fails, so callers can detect the failure.
        self.connection = None
        self.connect()

    def __del__(self):
        # Closes the connection
        if self.connection:
            self.connection.close()

    def connect(self):
        """Open a connection to the configured database instance."""
        try:
            # Connect to the database instance
            self.connection = psycopg2.connect(user=USER,
                                               password=PASSWORD,
                                               host=HOST,
                                               port=PORT,
                                               database=DATABASE)
        except (Exception, psycopg2.Error) as error:
            print("Error while connecting to PostgreSQL", error)

    # Get the server configuration for a particular server
import os
from configparser import RawConfigParser

# Project root sits two directory levels above this file.
home = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))

# Database credentials come from the [django] section of config.ini at the
# project root.
config_parser = RawConfigParser()
config_parser.read(os.path.join(home, 'config.ini'))

dbuser = config_parser.get('django', 'dbuser')
dbname = config_parser.get('django', 'dbname')
dbpassword = config_parser.get('django', 'dbpassword')
def get_version(setupcfg_path):
    """Read and return the package version declared in a setup.cfg file.

    :param setupcfg_path: path to the setup.cfg to parse
    :return: the value of the [metadata] section's ``version`` option
    """
    parser = RawConfigParser()
    parser.read(setupcfg_path)
    return parser.get('metadata', 'version')