Code example #1
File: simulation.py  Project: quasipedia/lifts
 def _load_sim_file(self, sim_file):
     '''Load, parse and expand the simulation file.'''
     sim_fname = os.path.realpath(sim_file)
     # Load the master file
     path, _ = os.path.split(sim_fname)
     with open(sim_fname) as file_:
         sim = toml.load(file_)
     # Expand the building
     building_fname = '{}.toml'.format(sim['building']['model'])
     with open(os.path.join(path, 'buildings', building_fname)) as file_:
         sim['building'] = toml.load(file_)['floor']
     # Expand the lifts
     processed_lifts = []
     for lift in sim['lifts']:
         fname = '{}.toml'.format(lift['model'])
         with open(os.path.join(path, 'lifts', fname)) as file_:
             exp = toml.load(file_)
         exp['bottom_floor_number'], exp['top_floor_number'] = lift['range']
         exp['location'] = lift['location']
         exp['open_doors'] = lift['open_doors']
         processed_lifts.append(exp)
     sim['lifts'] = processed_lifts
     # If provided, initialise the random seed
     try:
         seed(sim['people']['seed'])
     except KeyError:
         print('no seed')
     self.description = sim
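For orientation, here is a minimal sketch of the master file this loader expands, with every model name invented for illustration; the building entry and each lift entry are replaced by the contents of the referenced buildings/<model>.toml and lifts/<model>.toml files.

import toml

# Hypothetical master-file content for _load_sim_file (names are made up).
master = toml.loads('''
[building]
model = "tower"

[[lifts]]
model = "standard"
range = [0, 10]
location = 0
open_doors = true
''')
print(master['lifts'][0]['model'])  # -> standard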
Code example #2
File: util.py  Project: Xion/gisht
def read_cargo_toml(key, manifest=None):
    """Read a value from Cargo.toml manifest.

    :param key: Key to read from [package], or entire path to a key.
                It may contain dots.
    :param manifest: Optional path to the manifest,
                     or a file-like object with it opened

    :return: The value
    """
    if not isinstance(key, (list, tuple)):
        key = (key,) if key.startswith('package.') else ('package', key)
    key = list(chain.from_iterable(k.split('.') for k in key))
    if not key:
        raise ValueError("key must not be empty")

    # Read the content of Cargo.toml.
    manifest = manifest or Path.cwd() / 'Cargo.toml'
    if hasattr(manifest, 'read'):
        content = toml.load(manifest)
    else:
        manifest = Path(manifest)
        with manifest.open() as f:
            content = toml.load(f)

    # Get the value.
    value = content
    for k in key:
        value = value[k]
    return value
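A hedged usage sketch, assuming a Cargo.toml in the working directory; the keys shown are standard Cargo manifest fields:

version = read_cargo_toml('version')           # looked up under [package]
authors = read_cargo_toml('package.authors')   # dotted path, same result
deps = read_cargo_toml(('dependencies',))      # a tuple/list bypasses the package prefix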
Code example #3
File: sensed_tests.py  Project: sli/sensed
def test_test_mode() -> None:
    '''
    Tests server and client by starting the server in a thread, then
    requesting meta and sensor data. Asserts that this data is in the
    correct format and returns None on success.
    '''
    print('Finding default configs...')
    if os.path.isdir('docs'):
        server_config_file = 'docs/sensed-config.sample.toml'
        client_config_file = 'docs/senselog-config.sample.toml'
    elif os.path.isdir('../docs'):
        server_config_file = '../docs/sensed-config.sample.toml'
        client_config_file = '../docs/senselog-config.sample.toml'
    else:
        raise FileNotFoundError('could not locate the sample configs in docs/ or ../docs/')

    print('Running sensed test mode test...')

    with open(server_config_file) as f:
        server_config = DotMap(toml.load(f))
    dt_sensor = datetime.Sensor(server_config)
    ni_sensor = netifaces.Sensor(server_config)
    cam_sensor = camera.Sensor(server_config)
    hat_sensor = hat.Sensor(server_config)
    server_config.sensors = DotMap({'datetime': dt_sensor,
                                    'netifaces': ni_sensor,
                                    'camera': cam_sensor,
                                    'hat': hat_sensor})
    server = socketserver.UDPServer((server_config.sensed.host,
                                    server_config.sensed.port),
                                    SensedServer)
    server.sensors = server_config.sensors
    server.config = server_config

    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()

    with open(client_config_file) as f:
        client_config = DotMap(toml.load(f))
    client = SensedClient(client_config)

    meta = client.get_all_meta()
    sensors = client.get_all_sensors()

    assert isinstance(meta, list)
    assert isinstance(meta[0], dict)
    assert isinstance(sensors, list)
    assert isinstance(sensors[0], dict)

    print('Packet test passed. Shutting down...')

    server.shutdown()

    print('Test complete.')
Code example #4
async def read_user_state(path):
    global prev
    global manual
    book_files = utility.find_files(path, ('brf', 'pef'))
    main_toml = os.path.join(path, 'sd-card', USER_STATE_FILE)
    current_book = manual_filename
    if os.path.exists(main_toml):
        main_state = toml.load(main_toml)
        if 'current_book' in main_state:
            current_book = main_state['current_book']
            if current_book != manual_filename:
                current_book = os.path.join(path, current_book)
        if 'current_language' in main_state:
            current_language = main_state['current_language']
        else:
            current_language = 'en_GB:en'
    else:
        current_language = 'en_GB:en'

    manual_toml = os.path.join(path, to_state_file(manual_filename))
    if os.path.exists(manual_toml):
        t = toml.load(manual_toml)
        if 'current_page' in t:
            manual = manual._replace(page_number=t['current_page'] - 1)
        if 'bookmarks' in t:
            manual = manual._replace(bookmarks=tuple(sorted(manual.bookmarks + tuple(
                bm - 1 for bm in t['bookmarks']))))
    else:
        manual = Manual.create(current_language)

    books = OrderedDict({manual_filename: manual})
    for book_file in book_files:
        toml_file = to_state_file(book_file)
        book = BookFile(filename=book_file, width=40, height=9)
        if os.path.exists(toml_file):
            t = toml.load(toml_file)
            if 'current_page' in t:
                book = book._replace(page_number=t['current_page'] - 1)
            if 'bookmarks' in t:
                book = book._replace(bookmarks=tuple(sorted(book.bookmarks + tuple(
                    bm - 1 for bm in t['bookmarks']))))
        books[book_file] = book
    books[cleaning_filename] = CleaningAndTesting.create()

    if current_book not in books:
        current_book = manual_filename

    user_state = frozendict(books=FrozenOrderedDict(
        books), current_book=current_book, current_language=current_language)
    prev = user_state
    return user_state.copy(books=user_state['books'])
Code example #5
def main() -> None:
    data = toml.load(sys.stdin)

    assert list(data.keys()) == ["source"]

    # this value is non deterministic
    data["source"]["vendored-sources"]["directory"] = "@vendor@"

    lines = []
    inner = data["source"]
    for source, attrs in sorted(inner.items()):
        lines.append("[source.{}]".format(quote(source)))
        if source == "vendored-sources":
            lines.append('"directory" = "@vendor@"\n')
        else:
            for key, value in sorted(attrs.items()):
                attr = "{} = {}".format(quote(key), quote(value))
                lines.append(attr)
        lines.append("")

    result = "\n".join(lines)
    real = toml.loads(result)
    assert real == data, "output = {} while input = {}".format(real, data)

    print(result)
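A sketch of what this normalizer consumes, assuming the full module (including its quote() helper) is available; the directory value is invented:

import io
import sys
import toml

# Hypothetical stdin: a .cargo/config [source] table as emitted by `cargo vendor`.
sample = '''
[source.crates-io]
replace-with = "vendored-sources"

[source.vendored-sources]
directory = "/tmp/build/vendor"
'''
sys.stdin = io.StringIO(sample)  # stand-in for a real pipe
main()  # prints the same tables, sorted, with directory = "@vendor@"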
Code example #6
    def load_hosts_from_hosts_toml(self, options=None, hosts=None, hosts_file=None):
        if hosts_file is None and options is not None:
            hosts_file = options.hosts_file
        if hosts is None and options is not None:
            hosts = options.hosts
        if not os.path.isfile(hosts_file):
            org_hosts_file = hosts_file
            hosts_file = "conf/" + hosts_file
        if not os.path.isfile(hosts_file):
            raise Exception("You must have a hosts.toml in order to deploy. File '%s' does not exist." % org_hosts_file)
        with open(hosts_file) as f:
            all_hosts_conf = toml.load(f)

        if not all_hosts_conf.get('hosts'):
            raise Exception("No hosts defined in hosts.toml")
        if not hosts:
            if len(all_hosts_conf.get('hosts')):
                hosts = ['ALL']
        if not hosts:
            raise Exception("You must pass in a comma separated list of hosts as the first argument. Use ALL to setup all hosts")
        confs = []
        for conf in all_hosts_conf.get('hosts'):
            if not conf.get('host'):
                raise Exception("No 'host' attribute defined for a host in your hosts.toml")
            if hosts == ['ALL'] or conf.get('host') in hosts:
                confs.append(conf)
        if not confs:
            raise Exception('No host confs found matching host list: %s' % hosts)
        for conf in confs:
            conf['os'] = conf.get('os', 'ubuntu')
            if conf['os'] not in SUPPORTED_OSES:
                raise Exception('Sorry, %s is not a supported operating system.' % conf['os'])
        return confs, hosts_file
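A minimal hosts.toml this loader would accept might look as follows (hostnames invented); every [[hosts]] entry needs a 'host' key, and 'os' falls back to 'ubuntu':

import toml

sample = toml.loads('''
[[hosts]]
host = "web1.example.com"
os = "ubuntu"

[[hosts]]
host = "web2.example.com"
''')
print([h['host'] for h in sample['hosts']])  # -> ['web1.example.com', 'web2.example.com']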
Code example #7
    def __init__(self, parent=None):
        """Initiate the abstract widget that is displayed in the preferences dialog."""
        super(Playback, self).__init__(parent)
        self.user_config_file = os.path.join(AppDirs('mosaic', 'Mandeep').user_config_dir,
                                             'settings.toml')

        with open(self.user_config_file) as conffile:
            config = toml.load(conffile)

        playback_config = QGroupBox('Playback Configuration')
        playback_config_layout = QVBoxLayout()
        playback_config_layout.setAlignment(Qt.AlignTop)

        self.cover_art_playback = QCheckBox('Cover Art Playback')
        self.playlist_save_checkbox = QCheckBox('Save Playlist on Close')

        playback_config_layout.addWidget(self.cover_art_playback)
        playback_config_layout.addWidget(self.playlist_save_checkbox)

        playback_config.setLayout(playback_config_layout)

        main_layout = QVBoxLayout()
        main_layout.addWidget(playback_config)

        self.setLayout(main_layout)

        self.check_playback_setting(config)
        self.check_playlist_save(config)
        self.cover_art_playback.clicked.connect(lambda: self.cover_art_playback_setting(config))
        self.playlist_save_checkbox.clicked.connect(lambda: self.playlist_save_setting(config))
Code example #8
File: valid_builders.py  Project: UK992/saltfs
def run():
    homu_cfg = toml.load('/home/servo/homu/cfg.toml')
    homu_builders = homu_cfg['repo']['servo']['buildbot']

    # We need to invoke a new process to read the Buildbot master config
    # because Buildbot is written in python2.
    scriptpath = os.path.join(os.path.dirname(__file__), 'get_buildbot_cfg.py')
    ret = subprocess.run(
        ['/usr/bin/python2', scriptpath],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    if ret.returncode != 0:
        return Failure(
            'Unable to retrieve buildbot builders:', ret.stderr
        )

    buildbot_builders = json.loads(ret.stdout.decode('utf-8'))['builders']

    failure_msg = ''
    for builder_set in ['builders', 'try_builders']:
        diff = set(homu_builders[builder_set]) - set(buildbot_builders)
        if diff:
            failure_msg += 'Invalid builders for "{}": {}\n' \
                .format(builder_set, diff)

    if failure_msg:
        return Failure(
            "Homu config isn't synced with Buildbot config:",
            failure_msg
        )

    return Success('Buildbot and homu configs are synced')
Code example #9
File: deploy.py  Project: scrapinghub/shub
def _get_poetry_requirements():
    try:
        data = toml.load('poetry.lock')
    except IOError:
        raise ShubException('Please make sure the poetry lock file is present')
    # Adapted from poetry 1.0.0a2 poetry/utils/exporter.py
    lines = []
    for package in data['package']:
        source = package.get('source') or {}
        source_type = source.get('type')
        if source_type == 'git':
            line = 'git+{}@{}#egg={}'.format(
                source['url'], source['reference'], package['name']
            )
        elif source_type in ['directory', 'file']:
            line = ''
            line += source['url']
        else:
            line = '{}=={}'.format(package['name'], package['version'])

            if source_type == 'legacy' and source['url']:
                line += ' \\\n'
                line += '    --index-url {}'.format(source['url'])

        line += '\n'
        lines.append(line)
    return ''.join(lines)
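Hypothetical output sketch: for a lockfile holding one PyPI package and one git dependency, the helper yields requirements.txt-style lines (names and refs invented):

requirements = _get_poetry_requirements()
# e.g.:
#   requests==2.22.0
#   git+https://github.com/org/repo@abc123#egg=mypkg
print(requirements, end='')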
Code example #10
 def setUp(self):
     logging.disable(logging.CRITICAL)
     self.db_file, self.db_filename = tempfile.mkstemp()
     with open("fixtures/test_sac_config.toml") as f:
         config = toml.load(f)
         config['db']['connection_string'] = "sqlite:///{}".format(self.db_filename)
         self.config = config
Code example #11
File: configloader.py  Project: KSFT/KCraft
def loadconfig(filename):
    try:
        config = toml.load(filename)
    except IOError:
        # create an empty config file for next time
        with open(filename, "w"):
            pass
        config = {}
    return config
Code example #12
  def layout(self, client, userdata):
    fixtures = os.path.join(ROOT, 'test/fixtures')
    profile = webdriver.FirefoxProfile(os.path.join(fixtures, 'profile', client))

    for xpi in glob.glob(os.path.join(ROOT, 'xpi/*.xpi')):
      profile.add_extension(xpi)

    profile.set_preference('extensions.zotero.translators.better-bibtex.testing', True)

    with open(os.path.join(os.path.dirname(__file__), 'preferences.toml')) as f:
      preferences = toml.load(f)
      for p, v in nested_dict_iter(preferences['general']):
        profile.set_preference(p, v)

      if userdata.get('locale', '') == 'fr':
        for p, v in nested_dict_iter(preferences['fr']):
          profile.set_preference(p, v)

    if userdata.get('first-run', 'false') == 'false':
      profile.set_preference('extensions.zotero.translators.better-bibtex.citekeyFormat', '[auth][shorttitle][year]')

    if client == 'jurism':
      print('\n\n** WORKAROUNDS FOR JURIS-M IN PLACE -- SEE https://github.com/Juris-M/zotero/issues/34 **\n\n')
      #profile.set_preference('extensions.zotero.dataDir', os.path.join(self.path, 'jurism'))
      #profile.set_preference('extensions.zotero.useDataDir', True)
      #profile.set_preference('extensions.zotero.translators.better-bibtex.removeStock', False)

    profile.update_preferences()

    shutil.rmtree(self.path, ignore_errors=True)
    shutil.move(profile.path, self.path)
Code example #13
File: server.py  Project: liweiwei05/shuke
    def __init__(self, overrides=None, valgrind=False):
        self.pid = None
        with open(os.path.join(constants.ASSETS_DIR, "test.toml")) as fp:
            self.cf = toml.load(fp)
        if overrides:
            update_nested_dict(self.cf, overrides)
        self.cf["core"].pop("admin_host", None)
        # extract some field from cf
        self.data_store = self.cf["zone_source"]["type"].lower()
        self.pidfile = self.cf["core"]["pidfile"]
        self.admin_host = self.cf["core"].get("admin_host", None)
        self.admin_port = self.cf["core"].get("admin_port", None)
        self.dns_port = self.cf["core"]["port"]
        self.dns_host = self.cf["core"]["bind"]

        # override mongo host and mongo port
        mongo_conf = self.cf["zone_source"]["mongo"]
        if self.data_store == "mongo":
            self.zm = ZoneMongo(constants.MONGO_HOST,
                                constants.MONGO_PORT,
                                mongo_conf["dbname"])
        self.valgrind = valgrind

        self.cf_str = toml.dumps(self.cf)
        self.fp = tempfile.NamedTemporaryFile()
        self.fp.write(self.cf_str.encode("utf8"))
        self.fp.flush()
        fname = self.fp.name

        if self.valgrind:
            # TODO find the bug of possible lost and still reachable memory
            self.cmd = "valgrind --leak-check=full --show-reachable=no --show-possibly-lost=no %s -c %s" % (DNS_BIN, fname)
        else:
            self.cmd = "%s -c %s" % (DNS_BIN, fname)
        self.vagrant = vagrant.Vagrant(root=os.path.join(constants.REPO_ROOT, "vagrant"))
Code example #14
    def load(cls, fp: TextIO, args: Optional[Namespace] = None):
        """
        :param fp: .toml file's file pointer
        :param args: command line arguments
        :return: Config instance
        """
        config_dic = toml.load(fp)

        code_style_config_dic = config_dic.get('codestyle', {})
        postprocess_config_dic = config_dic.get('postprocess', {})
        etc_config_dic = config_dic.get('etc', {})

        if args:
            code_style_config_dic = _update_config_dict(code_style_config_dic,
                                                        dict(
                                                            template_file=args.template,
                                                            workspace_dir=args.workspace,
                                                            lang=args.lang))
            etc_config_dic = _update_config_dict(etc_config_dic,
                                                 dict(
                                                     download_without_login=args.without_login,
                                                     parallel_download=args.parallel,
                                                     save_no_session_cache=args.save_no_session_cache))

        return Config(
            code_style_config=CodeStyleConfig(**code_style_config_dic),
            postprocess_config=PostprocessConfig(**postprocess_config_dic),
            etc_config=EtcConfig(**etc_config_dic)
        )
Code example #15
File: io.py  Project: michaelaye/planetpy
def set_database_path(dbfolder):
    """Use to write the database path into the config.

    Parameters
    ----------
    dbfolder : str or pathlib.Path
        Path to where planetpy will store data it downloads.
    """
    # First check if there's a config file, so that we don't overwrite
    # anything:
    try:
        config = toml.load(str(configpath))
    except IOError:  # config file doesn't exist
        config = {}  # create new config dictionary

    # check if there's an `data_archive` sub-dic
    try:
        archive_config = config["data_archive"]
    except KeyError:
        config["data_archive"] = {"path": dbfolder}
    else:
        archive_config["path"] = dbfolder

    with open(configpath, "w") as f:
        ret = toml.dump(config, f)
    print(f"Saved database path {ret} into {configpath}.")
Code example #16
File: __init__.py  Project: takluyver/reversion
def read_config():
    with open('reversion.toml') as f:
        conf = toml.load(f)

    if 'currentversion' not in conf:
        raise ConfigError("No field named currentversion")

    if not isinstance(conf['currentversion'], str):
        raise ConfigError("currentversion should be a string, not {}".format(
                                            type(conf['currentversion'])))

    places = conf.get('place', [])
    if not places:
        raise ConfigError("Need at least one replacement site ([[place]] section)")

    if not isinstance(places, list):
        raise ConfigError("place must be an array")

    if not isinstance(places[0], dict):
        raise ConfigError("place must be an array of tables")

    for place in places:
        if 'file' not in place:
            raise ConfigError("Missing file= field for place")

        if not isinstance(place['file'], str):
            raise ConfigError("file must be string")

        if ('line-regex' in place) and not isinstance(place['line-regex'], str):
            raise ConfigError("linematch must be string")

    return conf
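A minimal reversion.toml that passes every check above (file path and regex invented):

import toml

conf = toml.loads('''
currentversion = "1.2.3"

[[place]]
file = "mypkg/__init__.py"
line-regex = "__version__"
''')
assert isinstance(conf['currentversion'], str)
assert isinstance(conf['place'], list) and 'file' in conf['place'][0]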
Code example #17
File: mod_twitter.py  Project: jrabbit/pyborg-1up
 def __init__(self, conf_file):
     self.toml_file = conf_file
     self.settings: Dict = toml.load(conf_file)
     self.last_look = arrow.get(self.settings['twitter']['last_look'])
     self.multiplexing = True
     self.multi_server = self.settings['pyborg']['multi_server']
     self.pyborg = None
Code example #18
File: config.py  Project: HubbeKing/Hubbot_Twisted
 def read_config(self):
     try:
         config_data = toml.load(self.config_file)
     except Exception as e:
         raise ConfigError(self.config_file, e)
     self._validate_config(config_data)
     self._config_data = config_data
Code example #19
File: certbot-run.py  Project: StallionCMS/stablehand
def main():
    host = sys.argv[1]
    print('Running certbot')
    with open('hosts.toml') as f:
        host_conf = toml.load(f)
    # get certbot domains
    print('Run certbot for %s' % host_conf)
    certbot_conf = host_conf.get('certbot', {})
    if not certbot_conf:
        raise ValueError('[certbot] configuration section of the hosts.toml file is empty!')
    webroot_path = certbot_conf.get("webroot_path", "/var/www/html/")
    domains = certbot_conf.get('domains', [])
    email = certbot_conf.get('email')
    if not domains or not isinstance(domains, list):
        raise ValueError('certbot conf has no list of domains entry!')

    if not os.path.isfile('/usr/bin/certbot'):
        install('software-properties-common')
        local['add-apt-repository']['-y', 'ppa:certbot/certbot'] & FG
        apt_get['update'] & FG
        install('python-certbot-nginx')
        
    cron_path = '/etc/cron.d/certbot-global-auto-renew'
    if not os.path.isfile(cron_path):
        with open(cron_path, 'w') as f:
            f.write(cron_template)
        os.chmod(cron_path, 0o600)
            

    domains = sorted(domains, key=len)
    name = domains[0]
    
    conf_path = '/etc/letsencrypt/renewal/' + name + '.conf'
    matches = True
    if not os.path.isfile(conf_path):
        matches = False
    else:
        with open(conf_path, 'r') as f:
            text = f.read()
        for d in domains:
            if d + ' = ' + webroot_path.rstrip('/') not in text:
                matches = False

    if matches:
        print('letsencrypt renewal conf exists for domains %s' % domains)
        return

    
    yn = input("Is DNS for the SSL domains pointing to this server? If no, you'll have to confirm domain ownership via editing DNS entries. (y/n) ")
    if yn.lower() == 'y':
        args = ['certonly', '--webroot', '-w', webroot_path, '--cert-name', name]
        for domain in domains:
            args.extend(['-d', domain])
        local['certbot'][args] & FG
    else:
        args = ['certonly', '--manual', '--preferred-challenges=dns', '--cert-name', name]
        for domain in domains:
            args.extend(['-d', domain])
        local['certbot'][args] & FG
        print("Once DNS is pointing to this server, run certbot again to configure automatic renewal.")
Code example #20
File: fileutils.py  Project: OmerShapira/Tacklebox
 def load(self):
     """Cache the file onto self.__db, and mark the time it was loaded"""
     if not os.path.exists(self.path):
         #TODO (OS): Figure out what to do here. 
         return
     with open(self.path, 'r') as f:
         self.__db = toml.load(f)
         self.__last_load = time()
Code example #21
File: homebrew.py  Project: Xion/gisht
def read_package_info(cargo_toml=None):
    """Read the content of [package] section from Cargo.toml.
    :return: [package] section as dictionary
    """
    cargo_toml = Path(cargo_toml or Path.cwd() / 'Cargo.toml')
    with cargo_toml.open() as f:
        manifest = toml.load(f)
    return manifest['package']
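Hedged usage, assuming the script runs from a crate root containing a Cargo.toml:

package = read_package_info()
print(package['name'], package['version'])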
Code example #22
File: config.py  Project: cometsong/rcfiles
 def read_config_file(toml_file):
     """read config settings from TOML file"""
     try:
         from toml import load
     except ImportError:
         return {"config_file_error":
                 "'toml' module must be installed to use config file"}
     else:
         return load(toml_file)
Code example #23
File: configure.py  Project: LarryKX/Mcx
 def __init__(self):
     for item in CONF_LOCATIONS:
         conf_path = os.path.expanduser(os.path.join(item, CONF_FILE))
         if os.path.exists(conf_path):
             with open(conf_path) as f:
                 for k, v in toml.load(f).items():
                     self.__setattr__(k, v)
     self.hosts = {}
     self.get_hosts()
Code example #24
File: schema_import.py  Project: japsu/wurst
    def import_from_toml(self, fp):
        """
        Import from a file-like object where TOML markup can be read from.

        :param fp: A filelike object.
        :return: Naught.
        """
        data = toml.load(fp)
        self.import_from_data(data)
Code example #25
File: core.py  Project: HarryZhu/elk
    def _loadConfig(self,cfgFile):
        if not os.path.exists(cfgFile):
            return None

        self.conf = toml.load(cfgFile)
        if self.conf is None:
            logging.error('cannot load the config file: ' + str(cfgFile))
        else:
            logging.info('loaded the config file: ' + str(cfgFile))
Code example #26
File: submit.py  Project: valenting/presto
def parse_config():
    with open('config.toml', 'rb') as fin:
        obj = pytoml.load(fin)
    if not obj['key']:
        raise Exception('Key must be defined in config file')
    if 'start' not in obj:
        obj['start'] = 0
    if 'end' not in obj:
        obj['end'] = 10000
    return obj
Code example #27
 def _load_user_config(self):
     config_file_path = os.sep.join([
         self.metadata.distribution_root,
         'pyproject.toml',
     ])
     try:
         config = toml.load(config_file_path)
         return config[u'tool'][u'dependencychecker']
     except (IOError, KeyError, ):
         return {}
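The pyproject.toml section this helper extracts, sketched with an invented option name:

import toml

conf = toml.loads('''
[tool.dependencychecker]
ignore-packages = ["setuptools"]
''')
print(conf['tool']['dependencychecker'])  # -> {'ignore-packages': ['setuptools']}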
Code example #28
File: rust.py  Project: pre-commit/pre-commit
def _add_dependencies(cargo_toml_path, additional_dependencies):
    with open(cargo_toml_path, 'r+') as f:
        cargo_toml = toml.load(f)
        cargo_toml.setdefault('dependencies', {})
        for dep in additional_dependencies:
            name, _, spec = dep.partition(':')
            cargo_toml['dependencies'][name] = spec or '*'
        f.seek(0)
        toml.dump(cargo_toml, f)
        f.truncate()
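A round-trip sketch of the helper against a scratch manifest (crate names invented); note that a dependency without an explicit spec is pinned to '*':

import os
import tempfile
import toml

with tempfile.NamedTemporaryFile('w', suffix='.toml', delete=False) as f:
    f.write('[package]\nname = "demo"\nversion = "0.1.0"\n')
    path = f.name
_add_dependencies(path, ['serde:1.0', 'rand'])
print(toml.load(path)['dependencies'])  # -> {'serde': '1.0', 'rand': '*'}
os.unlink(path)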
Code example #29
File: mod_subtitle.py  Project: jrabbit/pyborg-1up
    def start(self) -> None:
        self.settings = toml.load(self.conf_file)
        self.multiplexing = True
        self.multi_server = self.settings['pyborg']['multi_server']

        self.project = aeidon.Project()
        logger.debug("About to load subs.")
        self.project.open_main(self.subs_file)
        logger.debug("Loaded subs!")
        self.subtitles = self.project.subtitles
        self.run()
Code example #30
File: config.py  Project: michaeljoseph/changes
def project_config():
    """Deprecated"""
    project_name = curdir

    config_path = Path(join(project_name, PROJECT_CONFIG_FILE))

    if not exists(config_path):
        store_settings(DEFAULTS.copy())
        return DEFAULTS

    with io.open(config_path) as f:
        return toml.load(f) or {}
Code example #31
import toml

var_path = "config.toml"
try:
    config_vars = toml.load(var_path)
except FileNotFoundError:
    print("Le fichier {0} est manquant, veuillez le creer".format(var_path))
    raise


def password():
    """
    Retourne le mot de passe du fichier de configuration
    voir le fichier indiqué par `var_path` pour le modifier
    """
    return config_vars['login_infos']['password']


def screen_infos(dimension):
    """
    Retourne une information concernant l'écran du fichier de configuration
    voir le fichier indiqué par `var_path` pour le modifier
    """
    return config_vars['screen_infos'][dimension]


def tempo_infos(dimension):
    """
    Retourne une information concernant les tempos du fichier de configuration
    voir le fichier indiqué par `var_path` pour le modifier
    """
Code example #32
def intent_callback_fuel(hermes, intent_message):
    for (slot_value, slot) in intent_message.slots.items():
        if slot[0].slot_value.value.value == "Diesel":
            hermes.publish_end_session(
                intent_message.session_id,
                tankerkoenig.diesel_price(intent_message))
        elif slot[0].slot_value.value.value == "Benzin":
            hermes.publish_end_session(
                intent_message.session_id,
                tankerkoenig.benzin_price(intent_message))


if __name__ == "__main__":
    config = read_configuration_file()
    tankerkoenig = Tankerkoenig(config)

    snips_config = toml.load('/etc/snips.toml')
    if 'mqtt' in snips_config['snips-common'].keys():
        MQTT_BROKER_ADDRESS = snips_config['snips-common']['mqtt']
    if 'mqtt_username' in snips_config['snips-common'].keys():
        MQTT_USERNAME = snips_config['snips-common']['mqtt_username']
    if 'mqtt_password' in snips_config['snips-common'].keys():
        MQTT_PASSWORD = snips_config['snips-common']['mqtt_password']
    mqtt_opts = MqttOptions(username=MQTT_USERNAME,
                            password=MQTT_PASSWORD,
                            broker_address=MQTT_BROKER_ADDRESS)

    with Hermes(mqtt_options=mqtt_opts) as h:
        h.subscribe_intent(add_postfix("fuelInfo"), intent_callback_fuel)
        h.start()
Code example #33
File: conf.py  Project: yukun89/million
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import toml

g_conf = toml.load('conf/trade.conf')
Code example #34
File: test_api.py  Project: thombashi/toml
def test_warnings():
    # Expect 1 warning for the non-existent toml file
    with pytest.warns(UserWarning):
        toml.load(["test.toml", "nonexist.toml"])
Code example #35
    def load(self):
        # Download package
        try:
            uri, groupuri = self.repository._decompose_uri()
            g = gitlab.Gitlab(uri, private_token=self.repository._token)

            g.auth()
        except Exception as e:
            print_stack()
            log.debug(f"{type(e)} {e}")
            raise GitlabRepositoryException(f"{e}")
        
        try:
            group = g.groups.get(groupuri)
            project = None
            for p in group.projects.list(all=True):
                if p.name==self.name:
                    project = g.projects.get(p.id)
                    break
            if project is None:
                raise GitlabRepositoryException(f"{self.name}: project not found inside repository {self.repository.name}")
        except gitlab.exceptions.GitlabGetError as e:
            print_stack()
            log.debug(f"{type(e)} {e}")
            raise GitlabRepositoryException(f"{self.name}: project not found inside repository {self.repository.name}")
        except Exception as e:
            print_stack()
            log.debug(f"{type(e)} {e}")
            raise GitlabRepositoryException(f"{e}")

        try:
            # locate release
            release = None
            for r in project.releases.list():
                if r.tag_name==f"v{self.version}":
                    release = r
                    break 
        except Exception as e:
            print_stack()
            log.debug(f"{type(e)} {e}")
            raise GitlabRepositoryException(f"{e}")
        
        if release is None:
            raise GitlabRepositoryException(f"{self.name}-{self.version}: no release found from github repository")
     
        try:
            # search zip file
            asset = None
            for link in release.assets['links']:
                if link['name']==f"{self.name}-{self.version}.zip":
                    asset = link
        except Exception as e:
            print_stack()
            log.debug(f"{type(e)} {e}")
            raise GitlabRepositoryException(f"{e}")

        if asset is None:
            raise GitlabRepositoryException(f"{self.name}-{self.version}.zip: not found from github repository")

        try:
            # download unzip package
            tmpd = tempfile.mkdtemp()
            tmpf = tempfile.mktemp()

            # TODO: need this bug to be corrected to make this working
            # https://gitlab.com/gitlab-org/gitlab/-/issues/28978
            headers = {'Private-Token': self.repository._token}
            r = requests.get(asset['url'], headers=headers, allow_redirects=True)
            with open(tmpf, mode="wb") as f:
                f.write(r.content)
            
            file = zipfile.ZipFile(tmpf)
            file.extractall(path=tmpd, members=file.namelist())
            
            self._source_path = tmpd
        except Exception as e:
            os.remove(tmpf)
            print_stack()
            log.debug(f"{type(e)} {e}")
            raise GitlabRepositoryException(f"{e}")

        # load army.toml
        try:
            file = os.path.join(tmpd, "army.toml")
            log.info(f"Load package '{file}'")
            self._data = toml.load(file)
            log.debug(f"content: {self._data}")
        except toml.decoder.TomlDecodeError as e:
            os.remove(tmpf)
            print_stack()
            log.debug(f"{type(e)} {e}")
            raise GitlabRepositoryException(f"{e}")
        
        os.remove(tmpf)
Code example #36
File: gen_Gtau_peaks.py  Project: SpM-lab/SpM
    parser.add_argument(
        "-n",
        dest="nsamples",
        default=1,
        type=int,
        help="the number of samples to be generated (default=%(default)s)",
    )

    parser.add_argument("-q", action="store_true", help="do quietly")

    args = parser.parse_args()

    VERBOSE = not args.q

    param = toml.load(args.input)

    if VERBOSE:
        print("parameter file is successfully loaded")

    outdir = param["parameter"].get("outputdir", "work")
    if not os.path.exists(outdir):
        os.makedirs(outdir)
        if VERBOSE:
            print(f"make output directory ({outdir})")

    if not os.path.isdir(outdir):
        raise RuntimeError(f"{outdir} is not a directory.")

    nsamples = args.nsamples
Code example #37
import sys

sys.dont_write_bytecode = True  # ??? - Blaise 2020-08-13

import glob
import time
import logging
from argparse import ArgumentParser

from slackclient import SlackClient

import appdirs
import toml
import pathlib

config = toml.load(
    pathlib.Path(appdirs.user_config_dir("yaqc-cmds", "yaqc-cmds")) /
    "config.toml")
default_channel = config["slack"]["channel"]
witch_token = config["slack"]["token"]


class RtmBot(object):
    def __init__(self, token):
        self.last_ping = 0
        self.token = token
        self.bot_plugins = []
        self.slack_client = None

    def connect(self):
        """Convenience method that creates Server instance"""
        self.slack_client = SlackClient(self.token)
Code example #38
File: app_api.py  Project: rip1ey/kubos
 def __init__(self, service_config_filepath=SERVICE_CONFIG_PATH):
     self.config = toml.load(service_config_filepath)
Code example #39
def main():
    """ main function """

    global START_TIME, MINER_URL, MINER_TOKEN, DAEMON_URL, DAEMON_TOKEN

    # Start execution time measurement
    START_TIME = time.time()

    # SET API IP PORT AND AUTH
    if MINER_URL == '':
        miner_config = toml.load(str(Path.home()) + "/.lotusminer/config.toml")
        miner_api_ip = "127.0.0.1"
        miner_api_port = "2345"
        # try to read configuration file to identify miner url
        if "API" in miner_config.keys():
            if "ListenAddress" in miner_config["API"].keys():
                miner_api = miner_config["API"]["ListenAddress"].split("/")
                miner_api_ip = miner_api[2].replace("0.0.0.0", "127.0.0.1")
                miner_api_port = miner_api[4]
        MINER_URL = "http://" + miner_api_ip + ":" + miner_api_port + "/rpc/v0"
    if DAEMON_URL == '':
        daemon_config = toml.load(str(Path.home()) + "/.lotus/config.toml")
        daemon_api_ip = "127.0.0.1"
        daemon_api_port = "1234"
        # try to read configuration file to identify daemon url
        if "API" in daemon_config.keys():
            if "ListenAddress" in daemon_config["API"].keys():
                daemon_api = daemon_config["API"]["ListenAddress"].split("/")
                daemon_api_ip = daemon_api[2].replace("0.0.0.0", "127.0.0.1")
                daemon_api_port = daemon_api[4]
        DAEMON_URL = "http://" + daemon_api_ip + ":" + daemon_api_port + "/rpc/v0"

    if MINER_TOKEN == '':
        with open(str(Path.home()) + "/.lotusminer/token", "r") as text_file:
            MINER_TOKEN = text_file.read()
    if DAEMON_TOKEN == '':
        with open(str(Path.home()) + "/.lotus/token", "r") as text_file:
            DAEMON_TOKEN = text_file.read()
    #################################################################################
    # MAIN
    #################################################################################

    # SCRAPE METRIC DEFINITION
    print(
        "# HELP lotus_scrape_execution_succeed return 1 if lotus-farcaster execution was successfully"
    )
    print("# TYPE lotus_scrape_execution_succeed gauge")

    # LOCAL TIME METRIC
    print(
        "# HELP lotus_local_time time on the node machine when last execution start in epoch"
    )
    print("# TYPE lotus_local_time gauge")
    print(f'lotus_local_time {{ }} { int(time.time()) }')

    # RETRIEVE MINER ID
    actoraddress = miner_get_json("ActorAddress", [])
    miner_id = actoraddress['result']

    # RETRIEVE TIPSET + CHAINHEAD
    chainhead = daemon_get_json("ChainHead", [])
    tipsetkey = chainhead["result"]["Cids"]
    # XXX small hack trying to speed up the script
    empty_tipsetkey = []
    print("# HELP lotus_chain_height return current height")
    print("# TYPE lotus_chain_height counter")
    print(
        f'lotus_chain_height {{ miner_id="{miner_id}" }} {chainhead["result"]["Height"]}'
    )
    checkpoint("ChainHead")

    # GENERATE CHAIN SYNC STATUS
    print(
        "# HELP lotus_chain_sync_diff return daemon sync height diff with chainhead for each daemon worker"
    )
    print("# TYPE lotus_chain_sync_diff  gauge")
    print(
        "# HELP lotus_chain_sync_status return daemon sync status with chainhead for each daemon worker"
    )
    print("# TYPE lotus_chain_sync_status  gauge")
    sync_status = daemon_get_json("SyncState", [])
    for worker in sync_status["result"]["ActiveSyncs"]:
        try:
            diff_height = worker["Target"]["Height"] - worker["Base"]["Height"]
        except Exception:
            diff_height = -1
        print(
            f'lotus_chain_sync_diff {{ miner_id="{ miner_id }", worker_id="{ sync_status["result"]["ActiveSyncs"].index(worker) }" }} { diff_height }'
        )
        print(
            f'lotus_chain_sync_status {{ miner_id="{ miner_id }", worker_id="{ sync_status["result"]["ActiveSyncs"].index(worker) }" }} { worker["Stage"]  }'
        )
    checkpoint("ChainSync")

    # GENERATE MINER INFO
    miner_version = miner_get_json("Version", [])
    checkpoint("Miner")

    # RETRIEVE MAIN ADDRESSES
    daemon_stats = daemon_get_json("StateMinerInfo",
                                   [miner_id, empty_tipsetkey])
    miner_owner = daemon_stats["result"]["Owner"]
    miner_owner_addr = daemon_get_json(
        "StateAccountKey", [miner_owner, empty_tipsetkey])["result"]
    miner_worker = daemon_stats["result"]["Worker"]
    miner_worker_addr = daemon_get_json(
        "StateAccountKey", [miner_worker, empty_tipsetkey])["result"]
    try:
        miner_control0 = daemon_stats["result"]["ControlAddresses"][0]
    except:
        miner_control0 = miner_worker
    miner_control0_addr = daemon_get_json(
        "StateAccountKey", [miner_control0, empty_tipsetkey])["result"]

    print(
        "# HELP lotus_miner_info lotus miner information like adress version etc"
    )
    print("# TYPE lotus_miner_info gauge")
    print("# HELP lotus_miner_info_sector_size lotus miner sector size")
    print("# TYPE lotus_miner_info_sector_size gauge")
    print(
        f'lotus_miner_info {{ miner_id = "{miner_id}", version="{ miner_version["result"]["Version"] }", owner="{ miner_owner }", owner_addr="{ miner_owner_addr }", worker="{ miner_worker }", worker_addr="{ miner_worker_addr }", control0="{ miner_control0 }", control0_addr="{ miner_control0_addr }" }} 1'
    )
    print(
        f'lotus_miner_info_sector_size {{ miner_id = "{miner_id}" }} { daemon_stats["result"]["SectorSize"] }'
    )
    checkpoint("StateMinerInfo")

    # GENERATE DAEMON INFO
    daemon_network = daemon_get_json("StateNetworkName", [])
    daemon_network_version = daemon_get_json("StateNetworkVersion",
                                             [empty_tipsetkey])
    daemon_version = daemon_get_json("Version", [])
    print(
        "# HELP lotus_info lotus daemon information like adress version, value is set to network version number"
    )
    print("# TYPE lotus_info gauge")
    print(
        f'lotus_info {{ miner_id="{miner_id}", version="{ daemon_version["result"]["Version"] }", network="{ daemon_network["result"] }"}} { daemon_network_version["result"]}'
    )
    checkpoint("Daemon")

    # GENERATE WALLET + LOCKED FUNDS BALANCES
    walletlist = daemon_get_json("WalletList", [])
    print("# HELP lotus_wallet_balance return wallet balance")
    print("# TYPE lotus_wallet_balance gauge")
    for addr in walletlist["result"]:
        balance = daemon_get_json("WalletBalance", [addr])
        short = addr[0:5] + "..." + addr[-5:]
        print(
            f'lotus_wallet_balance {{ miner_id="{miner_id}", address="{ addr }", short="{ short }" }} { int(balance["result"])/1000000000000000000 }'
        )

    # Add miner balance :
    miner_balance_available = daemon_get_json("StateMinerAvailableBalance",
                                              [miner_id, empty_tipsetkey])
    print(
        f'lotus_wallet_balance {{ miner_id="{miner_id}", address="{ miner_id }", short="{ miner_id }" }} { int(miner_balance_available["result"])/1000000000000000000 }'
    )

    # Retrieve locked funds balance
    locked_funds = daemon_get_json("StateReadState",
                                   [miner_id, empty_tipsetkey])
    print(
        "# HELP lotus_wallet_locked_balance return miner wallet locked funds")
    print("# TYPE lotus_wallet_locked_balance gauge")
    for i in ["PreCommitDeposits", "LockedFunds", "FeeDebt", "InitialPledge"]:
        print(
            f'lotus_wallet_locked_balance {{ miner_id="{miner_id}", address="{ miner_id }", locked_type ="{ i }" }} { int(locked_funds["result"]["State"][i])/1000000000000000000 }'
        )
    checkpoint("Balances")

    # GENERATE POWER
    powerlist = daemon_get_json("StateMinerPower", [miner_id, empty_tipsetkey])
    print("# HELP lotus_power return miner power")
    print("# TYPE lotus_power gauge")
    for minerpower in powerlist["result"]["MinerPower"]:
        print(
            f'lotus_power {{ miner_id="{miner_id}", scope="miner", power_type="{ minerpower }" }} { powerlist["result"]["MinerPower"][minerpower] }'
        )
    for totalpower in powerlist["result"]["TotalPower"]:
        print(
            f'lotus_power {{ miner_id="{miner_id}", scope="network", power_type="{ totalpower }" }} { powerlist["result"]["TotalPower"][totalpower] }'
        )

    # Mining eligibility
    print(
        "# HELP lotus_power_mining_eligibility return miner mining eligibility"
    )
    print("# TYPE lotus_power_mining_eligibility gauge")
    base_info = daemon_get_json(
        "MinerGetBaseInfo",
        [miner_id, chainhead["result"]["Height"], tipsetkey])

    if base_info["result"] is None:
        print('ERROR MinerGetBaseInfo returned no result', file=sys.stderr)
        print(
            'KNOWN_REASON your miner reports wrong info to the chain, and that is pretty bad (not just for the dashboard)',
            file=sys.stderr)
        print('SOLUTION restart your miner and node', file=sys.stderr)
        print('lotus_scrape_execution_succeed { } 0')
        sys.exit(0)

    if base_info["result"]["EligibleForMining"]:
        eligibility = 1
    else:
        eligibility = 0
    print(
        f'lotus_power_mining_eligibility {{ miner_id="{miner_id}" }} { eligibility }'
    )
    checkpoint("Power")

    # GENERATE MPOOL
    mpoolpending = daemon_get_json("MpoolPending", [empty_tipsetkey])
    print("# HELP lotus_mpool_total return number of message pending in mpool")
    print("# TYPE lotus_mpool_total gauge")
    print(
        "# HELP lotus_mpool_local_total return total number in mpool comming from local adresses"
    )
    print("# TYPE lotus_power_local_total gauge")
    print("# HELP lotus_mpool_local_message local message details")
    print("# TYPE lotus_mpool_local_message gauge")
    mpool_total = 0
    mpool_local_total = 0
    for message in mpoolpending["result"]:
        mpool_total += 1
        frm = message["Message"]["From"]
        if frm in walletlist["result"]:
            mpool_local_total += 1
            if frm == miner_owner_addr:
                display_addr = "owner"
            elif frm == miner_worker_addr:
                display_addr = "worker"
            elif frm == miner_control0_addr:
                display_addr = "control0"
            elif frm != miner_id:
                display_addr = frm[0:5] + "..." + frm[-5:]
            else:
                display_addr = miner_id
            print(
                f'lotus_mpool_local_message {{ miner_id="{miner_id}", from="{ display_addr }", to="{ message["Message"]["To"] }", nonce="{ message["Message"]["Nonce"] }", value="{ message["Message"]["Value"] }", gaslimit="{ message["Message"]["GasLimit"] }", gasfeecap="{ message["Message"]["GasFeeCap"] }", gaspremium="{ message["Message"]["GasPremium"] }", method="{ message["Message"]["Method"] }" }} 1'
            )

    print(f'lotus_mpool_total {{ miner_id="{miner_id}" }} { mpool_total }')
    print(
        f'lotus_mpool_local_total {{ miner_id="{miner_id}" }} { mpool_local_total }'
    )
    checkpoint("MPool")

    # GENERATE NET_PEERS
    daemon_netpeers = daemon_get_json("NetPeers", [])
    print("# HELP lotus_netpeers_total return number netpeers")
    print("# TYPE lotus_netpeers_total gauge")
    print(
        f'lotus_netpeers_total {{ miner_id="{miner_id}" }} { len(daemon_netpeers["result"]) }'
    )

    miner_netpeers = miner_get_json("NetPeers", [])
    print("# HELP lotus_miner_netpeers_total return number netpeers")
    print("# TYPE lotus_miner_netpeers_total gauge")
    print(
        f'lotus_miner_netpeers_total {{ miner_id="{miner_id}" }} { len(miner_netpeers["result"]) }'
    )
    checkpoint("NetPeers")

    # GENERATE NETSTATS XXX Check the quality of these stats ... lotus net, the API and Grafana all report different values
    print("# HELP lotus_net_protocol_in return input net per protocol")
    print("# TYPE lotus_net_protocol_in counter")
    print("# HELP lotus_net_protocol_out return output per protocol net")
    print("# TYPE lotus_net_protocol_out counter")
    protocols_list = daemon_get_json("NetBandwidthStatsByProtocol", [])
    for protocol in protocols_list["result"]:
        print(
            f'lotus_net_protocol_in {{ miner_id="{miner_id}", protocol="{ protocol }" }} { protocols_list["result"][protocol]["TotalIn"] }'
        )
        print(
            f'lotus_net_protocol_out {{ miner_id="{miner_id}", protocol="{ protocol }" }} { protocols_list["result"][protocol]["TotalOut"] }'
        )

    print("# HELP lotus_miner_net_protocol_in return input net per protocol")
    print("# TYPE lotus_miner_net_protocol_in counter")
    print("# HELP lotus_miner_net_protocol_out return output per protocol net")
    print("# TYPE lotus_miner_net_protocol_out counter")
    protocols_list = miner_get_json("NetBandwidthStatsByProtocol", [])
    for protocol in protocols_list["result"]:
        print(
            f'lotus_miner_net_protocol_in {{ miner_id="{miner_id}", protocol="{ protocol }" }} { protocols_list["result"][protocol]["TotalIn"] }'
        )
        print(
            f'lotus_miner_net_protocol_out {{ miner_id="{miner_id}", protocol="{ protocol }" }} { protocols_list["result"][protocol]["TotalOut"] }'
        )

    print("# HELP lotus_net_total_in return input net")
    print("# TYPE lotus_net_total_in counter")
    print("# HELP lotus_net_total_out return output net")
    print("# TYPE lotus_net_total_out counter")
    net_list = daemon_get_json("NetBandwidthStats", [])
    print(
        f'lotus_net_total_in {{ miner_id="{miner_id}" }} { net_list["result"]["TotalIn"] }'
    )
    print(
        f'lotus_net_total_out {{ miner_id="{miner_id}" }} { net_list["result"]["TotalOut"] }'
    )

    print("# HELP lotus_miner_net_total_in return input net")
    print("# TYPE lotus_miner_net_total_in counter")
    print("# HELP lotus_miner_net_total_out return output net")
    print("# TYPE lotus_miner_net_total_out counter")
    net_list = miner_get_json("NetBandwidthStats", [])
    print(
        f'lotus_miner_net_total_in {{ miner_id="{miner_id}" }} { net_list["result"]["TotalIn"] }'
    )
    print(
        f'lotus_miner_net_total_out {{ miner_id="{miner_id}" }} { net_list["result"]["TotalOut"] }'
    )
    checkpoint("NetBandwidth")

    # GENERATE WORKER INFOS
    workerstats = miner_get_json("WorkerStats", [])
    # XXX 1.2.1 introduces a new worker_id format. Later we should delete it, it's useless info.
    #print("# HELP lotus_miner_worker_id All lotus worker information; prefer workername to workerid, which changes at each restart")
    #print("# TYPE lotus_miner_worker_id gauge")
    print(
        "# HELP lotus_miner_worker_mem_physical_used worker minimal memory used"
    )
    print("# TYPE lotus_miner_worker_mem_physical_used gauge")
    print("# HELP lotus_miner_worker_mem_vmem_used worker maximum memory used")
    print("# TYPE lotus_miner_worker_mem_vmem_used gauge")
    print(
        "# HELP lotus_miner_worker_mem_reserved worker memory reserved by lotus"
    )
    print("# TYPE lotus_miner_worker_mem_reserved gauge")
    print("# HELP lotus_miner_worker_gpu_used is the GPU used by lotus")
    print("# TYPE lotus_miner_worker_gpu_used gauge")
    print("# HELP lotus_miner_worker_cpu_used number of CPU used by lotus")
    print("# TYPE lotus_miner_worker_cpu_used gauge")
    print("# HELP lotus_miner_worker_cpu number of CPU")
    print("# TYPE lotus_miner_worker_cpu gauge")
    print("# HELP lotus_miner_worker_gpu number of GPU")
    print("# TYPE lotus_miner_worker_gpu gauge")
    print("# HELP lotus_miner_worker_mem_physical server RAM")
    print("# TYPE lotus_miner_worker_mem_physical gauge")
    print("# HELP lotus_miner_worker_mem_swap server SWAP")
    print("# TYPE lotus_miner_worker_mem_swap gauge")
    for val in workerstats["result"].items():
        val = val[1]
        info = val["Info"]
        worker_host = info["Hostname"]
        mem_physical = info["Resources"]["MemPhysical"]
        mem_swap = info["Resources"]["MemSwap"]
        mem_reserved = info["Resources"]["MemReserved"]
        cpus = info["Resources"]["CPUs"]
        gpus = len(info["Resources"]["GPUs"])
        mem_used_min = val["MemUsedMin"]
        mem_used_max = val["MemUsedMax"]
        if val["GpuUsed"]:
            gpu_used = 1
        else:
            gpu_used = 0
        cpu_used = val["CpuUse"]

        print(
            f'lotus_miner_worker_cpu {{ miner_id="{miner_id}", worker_host="{worker_host}" }} { cpus }'
        )
        print(
            f'lotus_miner_worker_gpu {{ miner_id="{miner_id}", worker_host="{worker_host}" }} { gpus }'
        )
        print(
            f'lotus_miner_worker_mem_physical {{ miner_id="{miner_id}", worker_host="{worker_host}" }} { mem_physical }'
        )
        print(
            f'lotus_miner_worker_mem_swap {{ miner_id="{miner_id}", worker_host="{worker_host}" }} { mem_swap }'
        )
        print(
            f'lotus_miner_worker_mem_physical_used {{ miner_id="{miner_id}", worker_host="{worker_host}" }} {mem_used_min}'
        )
        print(
            f'lotus_miner_worker_mem_vmem_used {{ miner_id="{miner_id}", worker_host="{worker_host}" }} {mem_used_max}'
        )
        print(
            f'lotus_miner_worker_mem_reserved {{ miner_id="{miner_id}", worker_host="{worker_host}" }} {mem_reserved}'
        )
        print(
            f'lotus_miner_worker_gpu_used {{ miner_id="{miner_id}", worker_host="{worker_host}" }} {gpu_used}'
        )
        print(
            f'lotus_miner_worker_cpu_used {{ miner_id="{miner_id}", worker_host="{worker_host}" }} {cpu_used}'
        )
    checkpoint("Workers")

    # GENERATE JOB INFOS
    workerjobs = miner_get_json("WorkerJobs", [])
    print(
        "# HELP lotus_miner_worker_job status of each individual job running on the workers. Value is the duration"
    )
    print("# TYPE lotus_miner_worker_job gauge")
    for (wrk, job_list) in workerjobs["result"].items():
        for job in job_list:
            job_id = job['ID']['ID']
            sector = str(job['Sector']['Number'])

            try:
                worker_host = workerstats["result"][wrk]["Info"]["Hostname"]
            except:
                # sometimes WorkerJobs returns an invalid worker_id like 0000-000000-0000... in that case report unknown
                worker_host = "unknown"
            task = str(job['Task'])
            job_start_time = str(job['Start'])
            job_start_time = job_start_time[:-1]
            run_wait = str(job['RunWait'])
            job_start_epoch = time.mktime(
                time.strptime(job_start_time[:19], '%Y-%m-%dT%H:%M:%S'))
            print(
                f'lotus_miner_worker_job {{ miner_id="{miner_id}", job_id="{job_id}", worker_host="{ worker_host }", task="{task}", sector_id="{sector}", job_start_time="{job_start_time}", run_wait="{run_wait}" }} { START_TIME - job_start_epoch }'
            )
    checkpoint("Jobs")

    # GENERATE JOB SCHEDDIAG
    scheddiag = miner_get_json("SealingSchedDiag", [True])

    if scheddiag["result"]["SchedInfo"]["Requests"]:
        for req in scheddiag["result"]["SchedInfo"]["Requests"]:
            sector = req["Sector"]["Number"]
            task = req["TaskType"]
            print(
                f'lotus_miner_worker_job {{ miner_id="{miner_id}", job_id="", worker="", task="{task}", sector_id="{sector}", start="", run_wait="99" }} 0'
            )
    checkpoint("SchedDiag")

    # GENERATE SECTORS
    print("# HELP lotus_miner_sector_state sector state")
    print("# TYPE lotus_miner_sector_state gauge")
    print(
        "# HELP lotus_miner_sector_event contains important event of the sector life"
    )
    print("# TYPE lotus_miner_sector_event gauge")
    print(
        "# HELP lotus_miner_sector_sealing_deals_info contains information related to deals that are not in Proving and Removed state."
    )
    print("# TYPE lotus_miner_sector_sealing_deals_info gauge")

    sector_list = miner_get_json("SectorsList", [])
    #sectors_counters = {}
    # remove duplicate (bug)
    unique_sector_list = set(sector_list["result"])
    for sector in unique_sector_list:
        detail = miner_get_json("SectorsStatus", [sector, False])
        deals = len(
            detail["result"]["Deals"]) - detail["result"]["Deals"].count(0)
        creation_date = detail["result"]["Log"][0]["Timestamp"]
        packed_date = ""
        finalized_date = ""
        verified_weight = detail["result"]["VerifiedDealWeight"]
        for log in range(len(detail["result"]["Log"])):
            if detail["result"]["Log"][log][
                    "Kind"] == "event;sealing.SectorPacked":
                packed_date = detail["result"]["Log"][log]["Timestamp"]
            if detail["result"]["Log"][log][
                    "Kind"] == "event;sealing.SectorFinalized":
                finalized_date = detail["result"]["Log"][log]["Timestamp"]
        if detail["result"]["Log"][0]["Kind"] == "event;sealing.SectorStartCC":
            pledged = 1
        else:
            pledged = 0
        print(
            f'lotus_miner_sector_state {{ miner_id="{miner_id}", sector_id="{ sector }", state="{ detail["result"]["State"] }", pledged="{ pledged }", deals="{ deals }", verified_weight="{ verified_weight }" }} 1'
        )

        if packed_date != "":
            print(
                f'lotus_miner_sector_event {{ miner_id="{miner_id}", sector_id="{ sector }", event_type="packed" }} { packed_date }'
            )
        if creation_date != "":
            print(
                f'lotus_miner_sector_event {{ miner_id="{miner_id}", sector_id="{ sector }", event_type="creation" }} { creation_date }'
            )
        if finalized_date != "":
            print(
                f'lotus_miner_sector_event {{ miner_id="{miner_id}", sector_id="{ sector }", event_type="finalized" }} { finalized_date }'
            )

        if detail["result"]["State"] not in ["Proving", "Removed"]:
            for deal in detail["result"]["Deals"]:
                if deal != 0:
                    try:
                        deal_info = daemon_get_json("StateMarketStorageDeal",
                                                    [deal, empty_tipsetkey])
                    except:
                        deal_is_verified = "unknown"
                        deal_size = "unknown"
                        deal_slash_epoch = "unknown"
                        deal_price_per_epoch = "unknown"
                        deal_provider_collateral = "unknown"
                        deal_client_collateral = "unknown"
                        deal_start_epoch = "unknown"
                        deal_end_epoch = "unknown"
                    else:
                        deal_is_verified = deal_info["result"]["Proposal"][
                            "VerifiedDeal"]
                        deal_size = deal_info["result"]["Proposal"][
                            "PieceSize"]
                        deal_slash_epoch = deal_info["result"]["State"][
                            "SlashEpoch"]
                        deal_price_per_epoch = deal_info["result"]["Proposal"][
                            "StoragePricePerEpoch"]
                        deal_provider_collateral = deal_info["result"][
                            "Proposal"]["ProviderCollateral"]
                        deal_client_collateral = deal_info["result"][
                            "Proposal"]["ClientCollateral"]
                        deal_start_epoch = deal_info["result"]["Proposal"][
                            "StartEpoch"]
                        deal_end_epoch = deal_info["result"]["Proposal"][
                            "EndEpoch"]
                    print(
                        f'lotus_miner_sector_sealing_deals_info {{ miner_id="{miner_id}", sector_id="{ sector }", deal_id="{ deal }", deal_is_verified="{ deal_is_verified }", deal_slash_epoch="{ deal_slash_epoch }", deal_price_per_epoch="{ deal_price_per_epoch }", deal_provider_collateral="{ deal_provider_collateral }", deal_client_collateral="{ deal_client_collateral }", deal_size="{ deal_size }", deal_start_epoch="{ deal_start_epoch }", deal_end_epoch="{ deal_end_epoch }" }} 1'
                    )

    # GENERATE DEADLINES
    proven_partitions = daemon_get_json("StateMinerDeadlines",
                                        [miner_id, empty_tipsetkey])
    deadlines = daemon_get_json("StateMinerProvingDeadline",
                                [miner_id, empty_tipsetkey])
    dl_epoch = deadlines["result"]["CurrentEpoch"]
    dl_index = deadlines["result"]["Index"]
    dl_open = deadlines["result"]["Open"]
    dl_numbers = deadlines["result"]["WPoStPeriodDeadlines"]
    dl_window = deadlines["result"]["WPoStChallengeWindow"]
    print("# HELP lotus_miner_deadline_info deadlines and WPoSt informations")
    print("# TYPE lotus_miner_deadline_info gauge")
    print(
        f'lotus_miner_deadline_info {{ miner_id="{miner_id}", current_idx="{ dl_index }", current_epoch="{ dl_epoch }",current_open_epoch="{ dl_open }", wpost_period_deadlines="{ dl_numbers }", wpost_challenge_window="{ dl_window }" }} 1'
    )
    print(
        "# HELP lotus_miner_deadline_active_start remaining time before deadline start"
    )
    print("# TYPE lotus_miner_deadline_active_start gauge")
    print(
        "# HELP lotus_miner_deadline_active_sectors_all number of sectors in the deadline"
    )
    print("# TYPE lotus_miner_deadline_active_sectors_all gauge")
    print(
        "# HELP lotus_miner_deadline_active_sectors_recovering number of sectors in recovering state"
    )
    print("# TYPE lotus_miner_deadline_active_sectors_recovering gauge")
    print(
        "# HELP lotus_miner_deadline_active_sectors_faulty number of faulty sectors"
    )
    print("# TYPE lotus_miner_deadline_active_sectors_faulty gauge")
    print(
        "# HELP lotus_miner_deadline_active_sectors_live number of live sectors"
    )
    print("# TYPE lotus_miner_deadline_active_sectors_live gauge")
    print(
        "# HELP lotus_miner_deadline_active_sectors_active number of active sectors"
    )
    print("# TYPE lotus_miner_deadline_active_sectors_active gauge")
    print(
        "# HELP lotus_miner_deadline_active_partitions number of partitions in the deadline"
    )
    print("# TYPE lotus_miner_deadline_active_partitions gauge")
    print(
        "# HELP lotus_miner_deadline_active_partitions_proven number of partitions already proven for the deadline"
    )
    print("# TYPE lotus_miner_deadline_active_partitions_proven gauge")
    for c_dl in range(dl_numbers):
        idx = (dl_index + c_dl) % dl_numbers
        opened = dl_open + dl_window * c_dl
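        # Epochs are 30 seconds apart on Filecoin mainnet, hence the "* 30" conversion to seconds below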
        partitions = daemon_get_json("StateMinerPartitions",
                                     [miner_id, idx, empty_tipsetkey])
        if partitions["result"]:
            faulty = 0
            recovering = 0
            alls = 0
            active = 0
            live = 0
            count = len(partitions["result"])
            proven = bitfield_count(
                proven_partitions["result"][idx]["PostSubmissions"])
            for partition in partitions["result"]:
                faulty += bitfield_count(partition["FaultySectors"])
                recovering += bitfield_count(partition["RecoveringSectors"])
                active += bitfield_count(partition["ActiveSectors"])
                live += bitfield_count(partition["LiveSectors"])
                alls += bitfield_count(partition["AllSectors"])
            print(
                f'lotus_miner_deadline_active_start {{ miner_id="{miner_id}", index="{ idx }" }} { (opened - dl_epoch) * 30  }'
            )
            print(
                f'lotus_miner_deadline_active_partitions_proven {{ miner_id="{miner_id}", index="{ idx }" }} { proven }'
            )
            print(
                f'lotus_miner_deadline_active_partitions {{ miner_id="{miner_id}", index="{ idx }" }} { count }'
            )
            print(
                f'lotus_miner_deadline_active_sectors_all {{ miner_id="{miner_id}", index="{ idx }" }} { alls  }'
            )
            print(
                f'lotus_miner_deadline_active_sectors_recovering {{ miner_id="{miner_id}", index="{ idx }" }} { recovering }'
            )
            print(
                f'lotus_miner_deadline_active_sectors_faulty {{ miner_id="{miner_id}", index="{ idx }" }} { faulty }'
            )
            print(
                f'lotus_miner_deadline_active_sectors_active {{ miner_id="{miner_id}", index="{ idx }" }} { active }'
            )
            print(
                f'lotus_miner_deadline_active_sectors_live {{ miner_id="{miner_id}", index="{ idx }" }} { live }'
            )
    checkpoint("Deadlines")

    # GENERATE STORAGE INFO
    print("# HELP lotus_miner_storage_info get storage info state")
    print("# TYPE lotus_miner_storage_info gauge")
    print("# HELP lotus_miner_storage_capacity get storage total capacity")
    print("# TYPE lotus_miner_storage_capacity gauge")
    print(
        "# HELP lotus_miner_storage_available get storage available capacity")
    print("# TYPE lotus_miner_storage_available gauge")
    print("# HELP lotus_miner_storage_reserved get storage reserved capacity")
    print("# TYPE lotus_miner_storage_reserved  gauge")
    storage_list = miner_get_json("StorageList", [])

    storage_local_list = miner_get_json("StorageLocal", [])
    for storage in storage_list["result"].keys():
        storage_info = miner_get_json("StorageInfo", [storage])
        if storage in storage_local_list["result"].keys():
            storage_path = storage_local_list["result"][storage]
        else:
            storage_path = ''
        storage_id = storage_info["result"]["ID"]
        storage_url = urlparse(storage_info["result"]["URLs"][0])
        storage_host_ip = storage_url.hostname
        try:
            storage_host_name = socket.gethostbyaddr(storage_host_ip)[0]
        except Exception:
            storage_host_name = storage_host_ip

        storage_host_port = storage_url.port
        storage_weight = storage_info["result"]["Weight"]
        storage_can_seal = storage_info["result"]["CanSeal"]
        storage_can_store = storage_info["result"]["CanStore"]
        try:
            storage_stat = miner_get_json("StorageStat", [storage])
        except Exception:
            storage_capacity = 0
            storage_available = 0
            storage_reserved = 0
        else:
            storage_capacity = storage_stat["result"]["Capacity"]
            storage_available = storage_stat["result"]["Available"]
            storage_reserved = storage_stat["result"]["Reserved"]
        print(
            f'lotus_miner_storage_info {{ miner_id="{miner_id}", storage_id="{ storage_id }", storage_url="{ storage_info["result"]["URLs"][0] }", storage_host_name="{ storage_host_name }", storage_host_ip="{ storage_host_ip }", storage_host_port="{ storage_host_port }", weight="{ storage_weight }", can_seal="{ storage_can_seal }", can_store="{ storage_can_store }", path="{ storage_path }" }} 1'
        )
        print(
            f'lotus_miner_storage_capacity {{ miner_id="{miner_id}", storage_id="{ storage_id }" }} { storage_capacity }'
        )
        print(
            f'lotus_miner_storage_available {{ miner_id="{miner_id}", storage_id="{ storage_id }" }} { storage_available }'
        )
        print(
            f'lotus_miner_storage_reserved {{ miner_id="{miner_id}", storage_id="{ storage_id }" }} { storage_reserved }'
        )
    checkpoint("Storage")

    # GENERATE SCRAPE TIME
    print(
        f'lotus_scrape_duration_seconds {{ collector="All" }} {time.time() - START_TIME}'
    )
    print('lotus_scrape_execution_succeed { } 1')
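
For reference, the exporter writes plain Prometheus exposition text to stdout, so it can feed a node_exporter textfile collector or any scrape pipeline. A representative tail of one scrape (the miner ID and values below are hypothetical) would look like:

lotus_miner_storage_reserved { miner_id="f01234", storage_id="c3d1-example" } 0
lotus_scrape_duration_seconds { collector="All" } 12.7
lotus_scrape_execution_succeed { } 1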
Code example #40
import toml
import numpy
from utils import retry, accounts, firstProducer, numProducers, intToCurrency
config = toml.load('./config.toml')

def allocateFunds(b, e):
    dist = numpy.random.pareto(1.161, e - b).tolist() # 1.161 = 80/20 rule
    dist.sort()
    dist.reverse()
    factor = 2_000_000 / sum(dist)
    total = 0
    for i in range(b, e):
        funds = round(factor * dist[i - b] * 10000)
        if i >= firstProducer and i < firstProducer + numProducers:
            funds = max(funds, round(config['funds']['min_producer_funds'] * 10000))
        total += funds
        accounts[i]['funds'] = funds
    return total

def createStakedAccounts(b, e):
    ramFunds = round(config['funds']['ram_funds'] * 10000)
    configuredMinStake = round(config['funds']['min_stake'] * 10000)
    maxUnstaked = round(config['funds']['max_unstaked'] * 10000)
    for i in range(b, e):
        a = accounts[i]
        funds = a['funds']
        print('#' * 80)
        print('# %d/%d %s %s' % (i, e, a['name'], intToCurrency(funds)))
        print('#' * 80)
        if funds < ramFunds:
            print('skipping %s: not enough funds to cover ram' % a['name'])
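
The snippet reads its thresholds from config.toml via toml.load. A minimal sketch of the [funds] table it expects (the key names come from the code above; the values are purely illustrative) might be:

[funds]
ram_funds = 0.25
min_producer_funds = 100.0
min_stake = 3.0
max_unstaked = 10.0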
Code example #41
File: ConverterTY.py Project: iceandrise/ISP-lab
import simplejson as json
import yaml
import toml
try:
    filename = "convert"
    f = open(filename + '.toml', 'r')
    tomlData = toml.load(f)
    f.close()
except FileNotFoundError:
    print('File not found')

ff = open("convert2" + '.yaml', 'w+')
keys = ['func', 'arr', 'null', 'bool1', 'bool2', 'string', 'fred',
        'emptyArray', 'emptyObject', 'emptyString', 'code', 'base64']
yamlData = {key: tomlData[key] for key in keys}
yaml.dump(yamlData, ff)
ff.close()
Code example #42
File: setup.py Project: garvankeeley/glean
import os  # noqa
from pathlib import Path  # noqa
import toml  # noqa

ROOT = Path(__file__).parent.absolute()

os.chdir(str(ROOT))

with (ROOT.parent.parent / "README.md").open() as readme_file:
    readme = readme_file.read()

with (ROOT.parent.parent / "CHANGELOG.md").open() as history_file:
    history = history_file.read()

with (ROOT.parent / "Cargo.toml").open() as cargo:
    parsed_toml = toml.load(cargo)
    version = parsed_toml["package"]["version"]

requirements = [
    "cffi>=1",
    "glean_parser==1.28.3",
    "iso8601>=0.1.10; python_version<='3.6'",
]

setup_requirements = ["cffi>=1.0.0"]

# The environment variable `GLEAN_BUILD_VARIANT` can be set to `debug` or `release`
buildvariant = os.environ.get("GLEAN_BUILD_VARIANT", "debug")

if mingw_arch == "i686":
    shared_object_build_dir = "../../target/i686-pc-windows-gnu"
Code example #43
File: rngfit.py Project: Sandalmoth/setrack
import datetime
import math

import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import toml
from matplotlib.dates import MO


def plottime(control, future):
    db = toml.load(control.dbfile)

    n_exercises = len(db['exercises'])
    grid_size = math.ceil(n_exercises**0.5)

    fig, axs = plt.subplots(nrows=grid_size, ncols=grid_size)

    years = mdates.YearLocator()
    years_fmt = mdates.DateFormatter('%Y')
    months = mdates.MonthLocator()
    months_fmt = mdates.DateFormatter('%Y-%m')
    days = mdates.WeekdayLocator(byweekday=MO)

    for i, exercise in enumerate(db['exercises']):
        this_axs = axs[int(i / grid_size)][i % grid_size]
        amraps = parse_amraps(db[exercise]['amraps'])
        if future:
            x_axis = [
                amraps['date'][0] + datetime.timedelta(days=x)
                for x in range((amraps['date'][-1] - amraps['date'][0]).days)
            ]
        else:
            x_axis = amraps['date']
        rm_axis = []
        rm_axis_lower = []
        rm_axis_upper = []
        rms = [1, 5, 10]
        linestyles = ['-', '--', 'dotted']
        for j, date in enumerate(x_axis):
            if future:
                orm, slope, sigma_orm, sigma_slope = fit_rmcurve(amraps, date)
                rm_axis.append([
                    round(forward_general_epley(orm, x, slope), 1) for x in rms
                ])
                rm_axis_upper.append([
                    round(
                        forward_general_epley(orm, x, slope + sigma_slope) +
                        sigma_orm, 1) for x in rms
                ])
                rm_axis_lower.append([
                    round(
                        forward_general_epley(orm, x, slope - sigma_slope) -
                        sigma_orm, 1) for x in rms
                ])
            else:
                if j == 0:
                    continue
                old_amraps = {k: v[:j + 1] for k, v in amraps.items()}
                orm, slope, sigma_orm, sigma_slope = fit_rmcurve(old_amraps)
                rm_axis.append([
                    round(forward_general_epley(orm, x, slope), 1) for x in rms
                ])
                rm_axis_upper.append([
                    round(
                        forward_general_epley(orm, x, slope + sigma_slope) +
                        sigma_orm, 1) for x in rms
                ])
                rm_axis_lower.append([
                    round(
                        forward_general_epley(orm, x, slope - sigma_slope) -
                        sigma_orm, 1) for x in rms
                ])
        if not future:
            x_axis = x_axis[1:]
        for j, rm in enumerate(rms):
            this_axs.fill_between(x_axis, [x[j] for x in rm_axis_lower],
                                  [x[j] for x in rm_axis_upper],
                                  color='lightgrey',
                                  alpha=0.5)
            this_axs.plot(x_axis, [x[j] for x in rm_axis],
                          color='k',
                          linestyle=linestyles[j])
        this_axs.xaxis.set_major_locator(years)
        this_axs.xaxis.set_major_formatter(years_fmt)
        this_axs.xaxis.set_minor_locator(months)
        this_axs.grid(which='minor')
        this_axs.set_title(exercise)
        this_axs.format_xdata = mdates.DateFormatter('%Y-%m-%d')

    min_date = min([[x.get_xlim()[0] for x in y] for y in axs])[0]
    max_date = max([[x.get_xlim()[1] for x in y] for y in axs])[0]
    for y in axs:
        for x in y:
            x.set_xlim(min_date, max_date)

    plt.tight_layout()
    plt.show()
Code example #44
from typing import Any

import toml


def get_configuration_from_toml(filename: str) -> Any:
    """Return the tool.skjold section from the given pyproject.toml location."""
    document = toml.load(filename)
    section = document.get("tool", {}).get("skjold", {})
    return section
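
A quick usage sketch, assuming a pyproject.toml whose [tool.skjold] table holds the tool's settings (the report_only key is illustrative, not necessarily a documented skjold option):

# Given a pyproject.toml containing:
#   [tool.skjold]
#   report_only = true
section = get_configuration_from_toml("pyproject.toml")
print(section.get("report_only", False))  # -> True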
Code example #45
import toml
import os
import importlib
from setuptools import find_packages, setup

pytom = toml.load("pyproject.toml")
package_name = pytom["project"]["name"]
author_name = " - ".join(pytom["project"]["authors"])
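# NOTE: assumes [project].authors is a plain list of strings, not PEP 621 {name, email} tables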

mymodule = importlib.import_module(package_name)

data_subdirs = ["templates", "static"]

data_files = []

for subdir in data_subdirs:
    for dir_path, _, file_names in os.walk(os.path.join(package_name, subdir)):
        data_files += [os.path.join(dir_path, f) for f in file_names]

with open("README.md") as fp:
    long_description = fp.read()

with open("requirements.txt") as fp:
    requirements = [p for p in fp.read().strip().split() if ("git+" not in p)]

if __name__ == "__main__":
    setup(
        name=package_name,
        version=mymodule.__version__,
        description=pytom["project"]["description"],
        long_description=long_description,
Code example #46
File: config.py Project: JTpku/relay
from typing import MutableMapping

import toml


def load_config(path: str) -> MutableMapping:
    raw_data = toml.load(path)
    raw_data = convert_legacy_format(raw_data)
    return ConfigSchema().load(raw_data)
Code example #47
File: main.py Project: zakharykaplan/dotfiles
import argparse
import re
from datetime import date
from pathlib import Path

import toml

# box() and commentstrings() are helpers defined elsewhere in this file.


def main():
    # Define all valid attributes
    attrs = {
        "author": None,
        "comment": None,
        "description": None,
        "email": None,
        "homepage": None,
        "license": "NONE",
        "version": None,
    }

    # Initialize parser
    parser = argparse.ArgumentParser()
    # fmt: off
    # Positional arguments
    parser.add_argument("file", type=str, help="target file to create")
    # Optional arguments
    parser.add_argument("-b",
                        "--box",
                        action="store_true",
                        help="apply a box decoration to the header")
    parser.add_argument("--box-style",
                        type=str,
                        default="round",
                        help="box decoration style")
    parser.add_argument("-f",
                        "--force",
                        action="store_true",
                        help="force creation of the target file")
    parser.add_argument("-t",
                        "--template",
                        type=str,
                        default="basic.txt",
                        help="source code header template")
    parser.add_argument(
        "--confdir",
        type=str,
        default="~/.config/mksrc",
        help="mksrc config home directory (default: ~/.config/mksrc)")
    for attr in attrs:  # add overrides for all attributes
        parser.add_argument(f"--{attr}",
                            type=str,
                            help=f"{attr} string attribute")
    # fmt: on
    # Parse args
    args = parser.parse_args()

    # Resolve paths
    args.confdir = Path(args.confdir).expanduser()
    args.config = args.confdir.joinpath("config.toml")
    args.opath = Path(args.file)
    args.templates = args.confdir.joinpath("templates")
    args.template = Path(args.template)
    if not args.template.is_file():
        args.template = args.templates.joinpath(args.template)

    # Override args
    args.file = args.opath.name

    # Read the config file
    config = toml.load(args.config)

    # Ensure the output file does not exist
    if args.opath.exists() and not args.force:
        raise FileExistsError

    # Update commentstrings from defaults
    cms = commentstrings()
    cms.update(config.get("commentstrings") or {})
    config["commentstrings"] = cms

    # Update attributes from config
    attrs.update(config["attributes"])
    # Autofill any missing attributes
    for attr in attrs:
        if args.__dict__.get(attr):
            attrs[attr] = args.__dict__[attr]
    # Set any missing mandatory attributes
    if not attrs.get("comment"):
        attrs["comment"] = (cms[args.opath.suffix] if cms.get(
            args.opath.suffix) else (input("Comment: ") or "#"))
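    # Guarantee the comment string carries a "%s" placeholder for the header line text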
    attrs["comment"] += " %s" if "%s" not in attrs["comment"] else ""
    if not attrs.get("file"):
        attrs["file"] = args.opath.name

    # Create a header from the template
    with open(args.template, "r") as f:
        # Read the template file
        header = f.read()
        # Fill in attribute tags
        matches = re.finditer("__([A-Z]+)__", header)
        # Iterate all matches
        for match in matches:
            # Extract the attribute name, value
            attr = match.group(1)
            value = attrs.get(attr.lower())
            # Prompt the user for missing values
            if not value:
                value = input(f"{attr.title()}: ")
            # Continue if the value is still missing
            if not value:
                continue
            # Replace attribute tags with values
            header = header.replace(match.group(0), value)
        # Fill in any dates
        header = date.today().strftime(header)

    # Perform any decorations
    border = {
        "ascii": {
            "dl": "+",
            "dr": "+",
            "hh": "-",
            "ul": "+",
            "ur": "+",
            "vv": "|",
        },
        "round": {
            "dl": "╮",
            "dr": "╭",
            "ul": "╯",
            "ur": "╰",
        },
        "sharp": {
            "dl": "┐",
            "dr": "┌",
            "ul": "┘",
            "ur": "└",
        },
        "star": {
            "dl": "*",
            "dr": "*",
            "hh": "*",
            "ul": "*",
            "ur": "*",
            "vv": "*",
        },
    }[args.box_style]
    if args.box:
        header = box(attrs, header, border)

    # Split the header into lines
    header = header.splitlines()
    # Strip trailing whitespace
    header = map(str.strip, header)
    # Filter out missing attributes
    header = filter(lambda line: not re.search("__([A-Z]+)__", line), header)
    # Insert commentstrings
    header = map(lambda line: attrs["comment"] % line, header)
    # Add back newlines
    header = map(lambda line: "%s\n" % line, header)
    # Rejoin the header
    header = "".join(header)

    # Print the header
    print(header, end="")

    # Make intermediate output directories
    args.opath.parent.mkdir(parents=True, exist_ok=True)

    # Write the header to the output file
    with open(args.opath, "w") as f:
        f.write(header)
Code example #48
File: config.py Project: qlchan24/briefcase
def parse_config(config_file, platform, output_format):
    """
    Parse the briefcase section of the pyproject.toml configuration file.

    This method only does basic structural parsing of the TOML, looking for,
    at a minimum, a ``[tool.briefcase.app.<appname>]`` section declaring the
    existence of a single app. It will also search for:

      * ``[tool.briefcase]`` - global briefcase settings
      * ``[tool.briefcase.app.<appname>]`` - settings specific to the app
      * ``[tool.briefcase.app.<appname>.<platform>]`` - settings specific to
        the platform
      * ``[tool.briefcase.app.<appname>.<platform>.<format>]`` - settings
        specific to the output format

    A configuration can define multiple apps; the final output is the merged
    content of the global, app, platform and output format settings
    for each app, with output format definitions taking precedence over
    platform, over app-level, over global. The final result is a single
    (mostly) flat dictionary for each app.

    :param config_file: A file-like object containing TOML to be parsed.
    :param platform: The platform being targeted
    :param output_format: The output format
    :returns: A dictionary of configuration data. The top level dictionary is
        keyed by the names of the apps that are declared; each value is
        itself the configuration data merged from global, app, platform and
        format definitions.
    """
    try:
        pyproject = toml.load(config_file)

        global_config = pyproject['tool']['briefcase']
    except toml.TomlDecodeError as e:
        raise BriefcaseConfigError('Invalid pyproject.toml: {e}'.format(e=e))
    except KeyError:
        raise BriefcaseConfigError('No tool.briefcase section in pyproject.toml')

    # For consistent results, sort the platforms and formats
    all_platforms = sorted(get_platforms().keys())
    all_formats = sorted(get_output_formats(platform).keys())

    try:
        all_apps = global_config.pop('app')
    except KeyError:
        raise BriefcaseConfigError('No Briefcase apps defined in pyproject.toml')

    # Build the flat configuration for each app,
    # based on the requested platform and output format
    app_configs = {}
    for app_name, app_data in all_apps.items():
        # At this point, the base configuration will contain a section
        # for each configured platform. Iterate over all the known platforms,
        # and remove these platform configurations. Keep a copy of the platform
        # configuration if it matches the requested platform, and merge it
        # into the app's configuration
        platform_data = None
        for p in all_platforms:
            try:
                platform_block = app_data.pop(p)

                if p == platform:
                    # If this platform matches the requested platform,
                    # preserve its configuration for later use.
                    platform_data = platform_block
                    merge_config(platform_data, platform_data)

                    # The platform configuration will contain a section
                    # for each configured output format. Iterate over all
                    # the output format of the platform, and remove these
                    # output format configurations. Keep a copy of the output
                    # format configuration if it matches the requested output
                    # format, and merge it into the platform's configuration.
                    format_data = None
                    for f in all_formats:
                        try:
                            format_block = platform_data.pop(f)

                            if f == output_format:
                                # If the output format matches the requested
                                # one, preserve it.
                                format_data = format_block

                        except KeyError:
                            pass

                    # If we found a specific configuration for the requested
                    # output format, add it to the platform configuration,
                    # overwriting any platform-level settings with format-level
                    # values.
                    if format_data:
                        merge_config(platform_data, format_data)

            except KeyError:
                pass

        # Now construct the final configuration.
        # First, convert the requirement definition at the global level
        merge_config(global_config, global_config)

        # The app's config starts as a copy of the base briefcase configuration.
        config = copy.deepcopy(global_config)

        # The app name is both the key, and a property of the configuration
        config['app_name'] = app_name

        # Merge the app-specific requirements
        merge_config(config, app_data)

        # If there is platform-specific configuration, merge the requirements,
        # then overwrite the platform-specific values.
        # This will already include any format-specific configuration.
        if platform_data:
            merge_config(config, platform_data)

        # Construct a configuration object, and add it to the list
        # of configurations that are being handled.
        app_configs[app_name] = config

    return global_config, app_configs
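
To make the layering concrete, here is a minimal pyproject.toml sketch this parser would accept (the app name, platform and keys are illustrative; precedence is format over platform over app over global, as the docstring describes):

[tool.briefcase]
version = "0.1.0"

[tool.briefcase.app.helloworld]
description = "A tiny example app"

[tool.briefcase.app.helloworld.macOS]
icon = "icons/mac"

[tool.briefcase.app.helloworld.macOS.app]
# format-level keys override platform-, app- and global-level ones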
Code example #49
import logging
import sys

import numpy as np
import toml
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.tensorboard import SummaryWriter

# EnvModel, full_dataloader, preprocess, postprocess, plot_rec, plot_gen,
# log_train, log_test, set_exp_name and calc_metrixs come from the project's
# own modules; the imports above cover only the external dependencies.
LOGGER = logging.getLogger(__name__)


def main():
    # configs
    args = toml.load(open('config.toml'))['model']
    
    seed = args['seed']
    batch_size = args['batch_size']
    seq_size = args['seq_size']
    init_size = args['init_size']
    state_size = args['state_size']
    belief_size = args['belief_size']
    num_layers = args['num_layers']
    obs_std = args['obs_std']
    obs_bit = args['obs_bit']
    learn_rate = args['learn_rate']
    grad_clip = args['grad_clip']
    max_iters = args['max_iters']
    seg_num = args['seg_num']
    seg_len = args['seg_len']
    max_beta = args['max_beta']
    min_beta = args['min_beta']
    beta_anneal = args['beta_anneal']
    log_dir = args['log_dir']
    test_times = args['test_times']
    gpu_ids = args['gpu_ids']
    data_path = args['data_path']
    check_path = args['check_path']
    
    # fix seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True

    # set logger
    log_format = '[%(asctime)s] %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_format, stream=sys.stderr)


    # set writer
    exp_name = set_exp_name(args)
    writer = SummaryWriter(log_dir + exp_name)
    LOGGER.info('EXP NAME: ' + exp_name)

    # load dataset
    train_loader, test_loader, check_loader = full_dataloader(seq_size, init_size, batch_size, data_path, check_path)
    LOGGER.info('Dataset loaded')

    # init models
    model = EnvModel(belief_size=belief_size,
                     state_size=state_size,
                     num_layers=num_layers,
                     max_seg_len=seg_len,
                     max_seg_num=seg_num)

    if torch.cuda.is_available():
        device = torch.device(f'cuda:{gpu_ids[0]}')
        model.to(device)
        model = nn.DataParallel(model, device_ids=gpu_ids)
        model = model.module
    else:
        device = torch.device('cpu')
        model.to(device)
        
    LOGGER.info('Model initialized')

    # init optimizer
    optimizer = Adam(params=model.parameters(),
                     lr=learn_rate, amsgrad=True)

    # test data
    pre_test_full_list = next(iter(test_loader))
    pre_test_full_data_list = pre_test_full_list['img']
    pre_test_full_point_list = pre_test_full_list['point']
    pre_test_full_data_list = preprocess(pre_test_full_data_list.to(device), obs_bit)
    
    # for each iter
    b_idx = 0
    while b_idx <= max_iters:
        # for each batch
        for train_list in train_loader:
            b_idx += 1
            # mask temp annealing
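            # Exponential schedule: mask_beta decays from max_beta toward min_beta by a factor of 0.999 every beta_anneal iterations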
            if beta_anneal:
                model.state_model.mask_beta = (max_beta - min_beta) * 0.999 ** (b_idx / beta_anneal) + min_beta
            else:
                model.state_model.mask_beta = max_beta

            ##############
            # train time #
            ##############
            # get input data
            train_obs_list = train_list['img']
            train_points_list = train_list['point']
            train_obs_list = preprocess(train_obs_list.to(device), obs_bit)

            # run model with train mode
            model.train()
            optimizer.zero_grad()
            results = model(train_obs_list, train_points_list, seq_size, init_size, obs_std)

            # get train loss and backward update
            train_total_loss = results['train_loss']
            train_total_loss.backward()
            if grad_clip > 0.0:
                nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
            optimizer.step()

            # log
            if b_idx % 10 == 0:
                log_str, log_data = log_train(results, writer, b_idx)
                LOGGER.info(log_str, *log_data)

            #############
            # test time #
            #############
            if b_idx % test_times == 0:
                # set data
                pre_test_init_data_list = pre_test_full_data_list[:, :init_size]
                post_test_init_data_list = postprocess(pre_test_init_data_list, obs_bit)
                pre_test_input_data_list = pre_test_full_data_list[:, init_size:(init_size + seq_size)]
                post_test_input_data_list = postprocess(pre_test_input_data_list, obs_bit)

                with torch.no_grad():
                    ##################
                    # test data elbo #
                    ##################
                    model.eval()
                    results = model(pre_test_full_data_list, pre_test_full_point_list, seq_size, init_size, obs_std)
                    post_test_rec_data_list = postprocess(results['rec_data'], obs_bit)
                    output_img, output_mask = plot_rec(post_test_init_data_list,
                                                       post_test_input_data_list,
                                                       post_test_rec_data_list,
                                                       results['mask_data'],
                                                       results['p_mask'],
                                                       results['q_mask'])

                    # log
                    log_str, log_data = log_test(results, writer, b_idx)
                    LOGGER.info(log_str, *log_data)
                    writer.add_image('valid/rec_image', output_img.transpose([2, 0, 1]), global_step=b_idx)
                    writer.add_image('valid/mask_image', output_mask.transpose([2, 0, 1]), global_step=b_idx)

                    ###################
                    # full generation #
                    ###################
                    pre_test_gen_data_list, test_mask_data_list = model.full_generation(pre_test_init_data_list, seq_size)
                    post_test_gen_data_list = postprocess(pre_test_gen_data_list, obs_bit)

                    # log
                    output_img = plot_gen(post_test_init_data_list, post_test_gen_data_list, test_mask_data_list)
                    writer.add_image('valid/full_gen_image', output_img.transpose([2, 0, 1]), b_idx)
              
    
    with torch.no_grad():
        model.eval()
        acc = []
        precision = []
        recall = []
        f_value = []
        for check in check_loader:
            check_obs = check['img']
            check_point = check['point']
            check_obs = preprocess(check_obs.to(device), obs_bit)
            results = model(check_obs, check_point, seq_size, init_size, obs_std)
            metrixs = calc_metrixs(results['mask_data_true'], results['mask_data'])
            acc.append(metrixs['accuracy'])
            precision.append(metrixs['precision'])
            recall.append(metrixs['recall'])
            f_value.append(metrixs['f_value'])
            
        acc = np.concatenate(acc)
        precision = np.concatenate(precision)
        recall = np.concatenate(recall)
        f_value = np.concatenate(f_value)
        
        print('shape: ', acc.shape)
        print('accuracy: ', acc.mean())
        print('precision: ', precision.mean())
        print('recall: ', recall.mean())
        print('f_value: ', f_value.mean())
Code example #50
    def clean_cargo_cache(self, force=False, show_size=False, keep=None):
        def get_size(path):
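            # Return the size of a file or a whole directory tree, in MiB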
            if os.path.isfile(path):
                return os.path.getsize(path) / (1024 * 1024.0)
            total_size = 0
            for dirpath, dirnames, filenames in os.walk(path):
                for f in filenames:
                    fp = os.path.join(dirpath, f)
                    total_size += os.path.getsize(fp)
            return total_size / (1024 * 1024.0)

        removing_anything = False
        packages = {
            'crates': {},
            'git': {},
        }
        import toml
        if os.environ.get("CARGO_HOME", ""):
            cargo_dir = os.environ.get("CARGO_HOME")
        else:
            home_dir = os.path.expanduser("~")
            cargo_dir = path.join(home_dir, ".cargo")
        if not os.path.isdir(cargo_dir):
            return
        cargo_file = open(path.join(self.context.topdir, "Cargo.lock"))
        content = toml.load(cargo_file)

        for package in content.get("package", []):
            source = package.get("source", "")
            version = package["version"]
            if source == u"registry+https://github.com/rust-lang/crates.io-index":
                crate_name = "{}-{}".format(package["name"], version)
                if not packages["crates"].get(crate_name, False):
                    packages["crates"][package["name"]] = {
                        "current": [],
                        "exist": [],
                    }
                packages["crates"][package["name"]]["current"].append(
                    crate_name)
            elif source.startswith("git+"):
                name = source.split("#")[0].split("/")[-1].replace(".git", "")
                branch = ""
                crate_name = "{}-{}".format(package["name"],
                                            source.split("#")[1])
                crate_branch = name.split("?")
                if len(crate_branch) > 1:
                    branch = crate_branch[1].replace("branch=", "")
                    name = crate_branch[0]

                if not packages["git"].get(name, False):
                    packages["git"][name] = {
                        "current": [],
                        "exist": [],
                    }
                packages["git"][name]["current"].append(
                    source.split("#")[1][:7])
                if branch:
                    packages["git"][name]["current"].append(branch)

        crates_dir = path.join(cargo_dir, "registry")
        crates_cache_dir = ""
        crates_src_dir = ""
        if os.path.isdir(path.join(crates_dir, "cache")):
            for p in os.listdir(path.join(crates_dir, "cache")):
                crates_cache_dir = path.join(crates_dir, "cache", p)
                crates_src_dir = path.join(crates_dir, "src", p)

        git_dir = path.join(cargo_dir, "git")
        git_db_dir = path.join(git_dir, "db")
        git_checkout_dir = path.join(git_dir, "checkouts")
        if os.path.isdir(git_db_dir):
            git_db_list = list(filter(lambda f: not f.startswith('.'),
                                      os.listdir(git_db_dir)))
        else:
            git_db_list = []
        if os.path.isdir(git_checkout_dir):
            git_checkout_list = os.listdir(git_checkout_dir)
        else:
            git_checkout_list = []

        for d in list(set(git_db_list + git_checkout_list)):
            crate_name = d.replace("-{}".format(d.split("-")[-1]), "")
            if not packages["git"].get(crate_name, False):
                packages["git"][crate_name] = {
                    "current": [],
                    "exist": [],
                }
            if os.path.isdir(path.join(git_checkout_dir, d)):
                with cd(path.join(git_checkout_dir, d)):
                    git_crate_hash = glob.glob('*')
                if not git_crate_hash or not os.path.isdir(
                        path.join(git_db_dir, d)):
                    packages["git"][crate_name]["exist"].append(("del", d, ""))
                    continue
                for d2 in git_crate_hash:
                    dep_path = path.join(git_checkout_dir, d, d2)
                    if os.path.isdir(dep_path):
                        packages["git"][crate_name]["exist"].append(
                            (path.getmtime(dep_path), d, d2))
            elif os.path.isdir(path.join(git_db_dir, d)):
                packages["git"][crate_name]["exist"].append(("del", d, ""))

        for d in os.listdir(crates_src_dir):
            crate_name = re.sub(r"\-\d+(\.\d+){1,3}.+", "", d)
            if not packages["crates"].get(crate_name, False):
                packages["crates"][crate_name] = {
                    "current": [],
                    "exist": [],
                }
            packages["crates"][crate_name]["exist"].append(d)

        total_size = 0
        for packages_type in ["git", "crates"]:
            sorted_packages = sorted(packages[packages_type])
            for crate_name in sorted_packages:
                crate_count = 0
                existed_crates = packages[packages_type][crate_name]["exist"]
                for exist in sorted(existed_crates, reverse=True):
                    current_crate = packages[packages_type][crate_name][
                        "current"]
                    size = 0
                    exist_name = path.join(
                        exist[1],
                        exist[2]) if packages_type == "git" else exist
                    exist_item = exist[2] if packages_type == "git" else exist
                    if exist_item not in current_crate:
                        crate_count += 1
                        if int(crate_count) >= int(keep) or not current_crate or \
                           exist[0] == "del" or exist[2] == "master":
                            removing_anything = True
                            crate_paths = []
                            if packages_type == "git":
                                exist_checkout_path = path.join(
                                    git_checkout_dir, exist[1])
                                exist_db_path = path.join(git_db_dir, exist[1])
                                exist_path = path.join(git_checkout_dir,
                                                       exist_name)

                                if exist[0] == "del":
                                    if os.path.isdir(exist_checkout_path):
                                        crate_paths.append(exist_checkout_path)
                                    if os.path.isdir(exist_db_path):
                                        crate_paths.append(exist_db_path)
                                    crate_count += -1
                                else:
                                    crate_paths.append(exist_path)

                                    exist_checkout_list = glob.glob(
                                        path.join(exist_checkout_path, '*'))
                                    if len(exist_checkout_list) <= 1:
                                        crate_paths.append(exist_checkout_path)
                                        if os.path.isdir(exist_db_path):
                                            crate_paths.append(exist_db_path)
                            else:
                                crate_paths.append(
                                    path.join(crates_cache_dir,
                                              "{}.crate".format(exist)))
                                crate_paths.append(
                                    path.join(crates_src_dir, exist))

                            size = sum(
                                get_size(p)
                                for p in crate_paths) if show_size else 0
                            total_size += size
                            print_msg = (exist_name, " ({}MB)".format(
                                round(size, 2)) if show_size else "",
                                         cargo_dir)
                            if force:
                                print("Removing `{}`{} package from {}".format(
                                    *print_msg))
                                for crate_path in crate_paths:
                                    if os.path.exists(crate_path):
                                        try:
                                            delete(crate_path)
                                        except Exception:
                                            print(traceback.format_exc())
                                            print("Delete %s failed!" %
                                                  crate_path)
                            else:
                                print("Would remove `{}`{} package from {}".
                                      format(*print_msg))

        if removing_anything and show_size:
            print("\nTotal size of {} MB".format(round(total_size, 2)))

        if not removing_anything:
            print("Nothing to remove.")
        elif not force:
            print("\nNothing done. "
                  "Run `./mach clean-cargo-cache -f` to actually remove.")
Code example #51
import json
import sys

import toml
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger

from experiments.circular_padding.resnet_circular_padding import *
from models.custom_resnet import *
from src import *
from src.data.data_module import MusicDataModule

config = toml.load('carnatic.toml')
fcd = FullChromaDataset(json_path=config['data']['metadata'],
                        data_folder=config['data']['chroma_folder'],
                        include_mbids=json.load(
                            open(config['data']['limit_songs'])),
                        carnatic=True,
                        compression=40)

train, fcd_not_train = fcd.greedy_split(train_size=0.70)
val, test = fcd_not_train.greedy_split(test_size=0.5)

train = ChromaChunkDataset(train,
                           chunk_size=100,
                           augmentation=transpose_chromagram,
                           stride=10)
data = MusicDataModule(train, val, batch_size=32)

if sys.argv[1] == '34':
    model = ResNet34Circular(num_classes=max(fcd.y) + 1)
Code example #52
    def update_cargo(self, params=None, package=None, all_packages=None, dry_run=None):
        if not params:
            params = []

        if dry_run:
            import json
            import toml
            import http.client as httplib  # httplib was renamed in Python 3
            import colorama

            cargo_file = open(path.join(self.context.topdir, "Cargo.lock"))
            content = toml.load(cargo_file)

            packages = {}
            outdated_packages = 0
            conn = httplib.HTTPSConnection("crates.io")
            for package in content.get("package", []):
                if "replace" in package:
                    continue
                source = package.get("source", "")
                if source == r"registry+https://github.com/rust-lang/crates.io-index":
                    version = package["version"]
                    name = package["name"]
                    if not packages.get(name, "") or packages[name] > version:
                        packages[name] = package["version"]
                        conn.request('GET', '/api/v1/crates/{}/versions'.format(package["name"]))
                        r = conn.getresponse()
                        json_content = json.load(r)
                        for v in json_content.get("versions"):
                            if not v.get("yanked"):
                                max_version = v.get("num")
                                break

                        if version != max_version:
                            outdated_packages += 1
                            version_major, version_minor = (version.split("."))[:2]
                            max_major, max_minor = (max_version.split("."))[:2]

                            if version_major == max_major and version_minor == max_minor and "alpha" not in version:
                                msg = "minor update"
                                msg_color = "\033[93m"
                            else:
                                msg = "update, which may contain breaking changes"
                                msg_color = "\033[91m"

                            colorama.init()
                            print("{}Outdated package `{}`, available {}\033[0m".format(msg_color, name, msg),
                                  "\n\tCurrent version: {}".format(version),
                                  "\n\t Latest version: {}".format(max_version))
            conn.close()

            print("\nFound {} outdated packages from crates.io".format(outdated_packages))
        elif package:
            params += ["-p", package]
        elif all_packages:
            params = []
        else:
            print("Please choose package to update with the --package (-p) ")
            print("flag or update all packages with --all-packages (-a) flag")
            sys.exit(1)

        if params or all_packages:
            self.ensure_bootstrapped()

            with cd(self.context.topdir):
                call(["cargo", "update"] + params,
                     env=self.build_env())
Code example #53
                            "%s_lograt_replo.gr" % outprefix)
    ipod_utils.write_grfile(G_LOCS, max_logmat,
                            "%s_lograt_rephi.gr" % outprefix)

    numpy.save("%s_lograt_bs.npy" % outprefix, bs_lograt)
    numpy.save("%s_lograt.npy" % outprefix, orig_lograt)
    numpy.save("%s_lograt_replo.npy" % outprefix, min_logmat)
    numpy.save("%s_lograt_rephi.npy" % outprefix, max_logmat)


# now actually implement the full processing of the bootstrap replicates

if __name__ == "__main__":

    conf_file = sys.argv[1]
    conf_dict = toml.load(conf_file)

    # figure out some global parameters
    bs_suffix = conf_dict['general']['bs_suffix']
    bs_dir = conf_dict['general']['bs_dirname']
    orig_suffix = conf_dict['general']['orig_suffix']
    out_prefix = os.path.join(conf_dict['general']['output_path'],
                              conf_dict['general']['out_prefix'])

    # make missing path if needed
    if not (os.path.isdir(conf_dict['general']['output_path'])):
        os.mkdir(conf_dict['general']['output_path'])

    # get the spline normalization from the input data only
    #   we will use this repeatedly in the analysis below
Code example #54
File: webserver.py Project: aaaMeganaaa/Cookie
# Imports reconstructed from usage below; the session/jinja aliases are assumptions.
import argparse
import asyncio
import logging
import os
import warnings

import toml
from aiohttp.web import Application
from aiohttp_jinja2 import setup as jinja_setup
from aiohttp_session import setup as session_setup
from aiohttp_session.cookie_storage import EncryptedCookieStorage as ECS
from jinja2 import FileSystemLoader

import utils
import website

logger = logging.getLogger('website')
logger.setLevel(logging.INFO)

# Filter warnings
warnings.filterwarnings('ignore', category=RuntimeWarning)

# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("config_file", help="The configuration for the webserver.")
parser.add_argument("--host", type=str, default='0.0.0.0', help="The host IP to run the webserver on.")
parser.add_argument("--port", type=int, default=8080, help="The port to run the webserver on.")
args = parser.parse_args()

# Read config
with open(args.config_file) as a:
    config = toml.load(a)

# Create website object - don't start based on argv
app = Application(loop=asyncio.get_event_loop())
app['static_root_url'] = '/static'
session_setup(app, ECS(os.urandom(32), max_age=1000000))  # Encrypted cookies
# session_setup(app, SimpleCookieStorage(max_age=1000000))  # Simple cookies DEBUG ONLY
jinja_setup(app, loader=FileSystemLoader(os.getcwd() + '/website/templates'))
app.router.add_routes(website.frontend_routes)
app.router.add_routes(website.backend_routes)
app.router.add_static('/static', os.getcwd() + '/website/static', append_version=True)

# Add our connections
app['database'] = utils.DatabaseConnection
utils.DatabaseConnection.logger = logger.getChild("db")
Code example #55
import argparse
import os
import pathlib
import sys
import time
import typing

import psycopg2
import toml

# SourceInfo, view_names, source_at_offset, view_contents and ViewNotReady
# are defined elsewhere in this script.


def wait_for_materialize_views(args: argparse.Namespace) -> None:
    """Record the current table status of all views installed in Materialize."""

    start_time = time.monotonic()

    # Create a dictionary mapping view names (as calculated from the filename) to expected contents
    view_snapshots: typing.Dict[str, str] = {
        p.stem: p.read_text().strip()
        for p in pathlib.Path(args.snapshot_dir).glob("*.sql")
    }

    source_offsets: typing.Dict[str, int] = {
        p.stem: int(p.read_text().strip())
        for p in pathlib.Path(args.snapshot_dir).glob("*.offset")
    }

    # Create a dictionary mapping view names to source name and offset
    view_sources: typing.Dict[str, SourceInfo] = {}
    with open(os.path.join(args.snapshot_dir, "config.toml")) as fd:
        conf = toml.load(fd)

        if len(conf["sources"]) != 1:
            print(f"ERROR: Expected just one source block: {conf['sources']}")
            sys.exit(1)

        source_info = conf["sources"][0]
        topic_prefix: str = source_info["topic_namespace"]
        source_names: typing.List[str] = source_info["names"]

        for query_info in conf["queries"]:

            # Ignore views not in this snapshot (they likely have multiple sources...)
            view: str = query_info["name"]
            if view not in view_snapshots:
                continue

            sources: typing.List[str] = query_info["sources"]
            if len(query_info["sources"]) != 1:
                print(
                    f"ERROR: Expected just one source for view {view}: {query_info['sources']}"
                )
                sys.exit(1)

            source_name: str = query_info["sources"][0]
            if source_name not in source_names:
                print(
                    f"ERROR: No matching source {source_name} for view {view}: {source_names}"
                )
                sys.exit(1)

            topic_name = f"{topic_prefix}{source_name}"
            if topic_name not in source_offsets:
                print(
                    f"ERROR: Missing offset information for source {topic_name}: {source_offsets}"
                )
                sys.exit(1)

            view_sources[view] = SourceInfo(topic_name,
                                            source_offsets[topic_name])

    with psycopg2.connect(
            f"postgresql://materialize@{args.host}:{args.port}/materialize"
    ) as conn:
        conn.autocommit = True
        installed_views = set(view_names(conn))

    # Verify that we have snapshots for all views installed
    captured_views = set(view_snapshots.keys())
    if not captured_views.issuperset(installed_views):
        missing_views = installed_views.difference(captured_views)
        print(f"ERROR: Missing final state for views: {missing_views}")
        print(f"       Have: {captured_views}")
        sys.exit(1)

    print("Recording time required until each view matches its snapshot")

    pending_views = installed_views
    with psycopg2.connect(
            f"postgresql://materialize@{args.host}:{args.port}/materialize"
    ) as conn:
        conn.autocommit = True
        while pending_views:
            views_to_remove = []
            time_taken = time.monotonic() - start_time
            for view in pending_views:
                with conn.cursor() as cursor:

                    # Determine if the source is at the desired offset and identify the
                    # mz_logical_timestamp associated with the offset
                    timestamp = source_at_offset(cursor, view_sources[view])
                    if not timestamp:
                        continue

                    # Get the contents of the view at the desired timestamp, where an empty result
                    # implies that the desired timestamp is not yet incorporated into the view
                    try:
                        contents = view_contents(cursor, view, timestamp)
                        if not contents:
                            continue
                    except ViewNotReady:
                        continue

                    views_to_remove.append(view)
                    expected = view_snapshots[view]
                    if contents == expected:
                        print(
                            f"PASSED: {time_taken:>6.1f}s: {view} (result={contents})"
                        )
                    else:
                        print(
                            f"FAILED: {time_taken:>6.1f}s: {view} ({contents} != {expected})"
                        )

            for view in views_to_remove:
                pending_views.remove(view)

            if pending_views:
                time.sleep(0.1)
Code example #56
        "sql_uri": 'postgres://localhost/pollbot',
        "connection_count": 20,
        "overflow_count": 10,
    },
    'logging': {
        "sentry_enabled": False,
        "sentry_token": "",
        "log_level": logging.INFO,
        "debug": False,
    },
    'webhook': {
        "enabled": False,
        "domain": "https://localhost",
        "token": "pollbot",
        "cert_path": '/path/to/cert.pem',
        "port": 7000,
    }
}

config_path = os.path.expanduser('~/.config/ultimate_pollbot.toml')

if not os.path.exists(config_path):
    with open(config_path, "w") as file_descriptor:
        toml.dump(default_config, file_descriptor)
    print(
        "Please adjust the configuration file at '~/.config/ultimate_pollbot.toml'"
    )
    sys.exit(1)
else:
    config = toml.load(config_path)
Code example #57
File: Internal_to_MQTT.py Project: SteveCossy/IOT
# Variables for this script
INTERVAL = 10  # Seconds between sending readings

ConfPathFile = os.path.join(HOME_DIR, AUTH_FILE)

LogPathFile = os.path.join(CSVPath, LOG_FILE)
logging.basicConfig(filename=LogPathFile, level=logging.DEBUG)
CurrentTime = datetime.datetime.now().isoformat()
logging.debug(CrLf + '***** Starting at: {a}'.format(a=CurrentTime) + ' *****')

# Cayenne authentication info. This should be obtained from the Cayenne Dashboard,
#  and the details should be put into the file listed above.

# Read the Cayenne configuration stuff into a dictionary
ConfigDict = toml.load(ConfPathFile)
CayenneParam = ConfigDict.get('cayenne')

# print (CayenneParam)


# The callback for when a message is received from Cayenne.
def on_message(client, userData, message):
    # based on https://developers.mydevices.com/cayenne/docs/cayenne-mqtt-api/#cayenne-mqtt-api-mqtt-messaging-topics-send-actuator-updated-value
    #    global COUNTER
    print("message received: ", str(message.payload.decode("utf-8")))


def on_connect(client, userData, flags, rc):
    print("Connected with result code " + str(rc))
Code example #58
File: autorecon.py Project: secfb/HackingScripts
    elif m > 1:
        elapsed_time.append(str(m) + ' minutes')

    if s == 1:
        elapsed_time.append(str(s) + ' second')
    elif s > 1:
        elapsed_time.append(str(s) + ' seconds')
    else:
        elapsed_time.append('less than a second')

    return ', '.join(elapsed_time)

port_scan_profiles_config_file = 'port-scan-profiles.toml'
with open(os.path.join(rootdir, 'autorecon_config', port_scan_profiles_config_file), 'r') as p:
    try:
        port_scan_profiles_config = toml.load(p)

        if len(port_scan_profiles_config) == 0:
            fail('There do not appear to be any port scan profiles configured in the {port_scan_profiles_config_file} config file.')

    except toml.decoder.TomlDecodeError as e:
        fail('Error: Couldn\'t parse {port_scan_profiles_config_file} config file. Check syntax and duplicate tags.')

with open(os.path.join(rootdir, 'autorecon_config', 'service-scans.toml'), 'r') as c:
    try:
        service_scans_config = toml.load(c)
    except toml.decoder.TomlDecodeError as e:
        fail('Error: Couldn\'t parse service-scans.toml config file. Check syntax and duplicate tags.')

with open(os.path.join(rootdir, 'autorecon_config', 'global-patterns.toml'), 'r') as p:
    try:
Code example #59
import os
from pprint import PrettyPrinter
from subprocess import PIPE, Popen

import toml

pprint = PrettyPrinter(1).pprint

EMSCRIPTEN_REPO = "https://github.com/emscripten-core/emscripten/"
DOCKER_REGISTRY = "registry.hub.docker.com"

# TODO: remove
DOCKER_REPO = "trzeci/emscripten"

QUEUE_FILE = "queue.txt"
LOG_COMPILATION = "build.log"
ROOT = os.path.realpath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))

config = toml.load(open("config.toml", "r"))


def get_username():
    try:
        import pwd
        return pwd.getpwuid(os.getuid())[0]
    except Exception:
        return os.getlogin()


def format_values(sth, mapping):
    def handle_object(obj):
        for k, v in obj.items():
            obj[k] = format_values(v, mapping)
Code example #60
import toml


def parse_toml(toml_file):
    data = toml.load(toml_file)
    return data
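
Usage is a one-liner; toml.load accepts either a filename or an open file object (the filename below is illustrative):

data = parse_toml("pyproject.toml")
print(sorted(data.keys()))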