Code example #1
File: regexbot.py Project: cequencer/slackbots
	def __init__(self, config_path=None):
		if config_path is None:
			config_path = 'regexbot.ini'
		config = RawConfigParser()
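		# Built-in defaults load first; values read from the ini file then override them.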
		config.read_dict(DEFAULT_CONFIG)
		config.read(config_path)

		self.rtm_token = config.get('regexbot', 'rtm_token')

		self.channel_flood_cooldown = timedelta(seconds=config.getint('regexbot', 'channel_flood_cooldown'))
		self.global_flood_cooldown = timedelta(seconds=config.getint('regexbot', 'global_flood_cooldown'))
		self.max_messages = config.getint('regexbot', 'max_messages')
		self.max_message_size = config.getint('regexbot', 'max_message_size')

		self.version = str(config.get('regexbot', 'version')) + '; %s'
		try: self.version = self.version % Popen(["git","branch","-v","--contains"], stdout=PIPE).communicate()[0].strip()
		except Exception: self.version = self.version % 'unknown'

		self._last_message_times = {}
		self._last_message = datetime.utcnow()
		self._message_buffer = {}

		self.ignore_list = []
		if config.has_section('ignore'):
			for k,v in config.items('ignore'):
				try:
					self.ignore_list.append(regex.compile(str(v), regex.I))
				except Exception as ex:
					print("Error compiling regular expression in ignore list (%s):" % k)
					print("  %s" % v)
					print(ex)
					exit(1)
Code example #2
File: config.py Project: ryanprior/turses
    def _parse_legacy_config_file(self):
        """
        Parse a legacy configuration file.
        """
        conf = RawConfigParser()
        conf.read(LEGACY_CONFIG_FILE)

        styles = self.styles.copy()
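        # Merge legacy template overrides into a copy, then write the result back to self.styles below.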

        if conf.has_option('params', 'dm_template'):
            styles['dm_template'] = conf.get('params', 'dm_template')

        if conf.has_option('params', 'header_template'):
            styles['header_template'] = conf.get('params', 'header_template')

        self.styles.update(styles)

        if conf.has_option('params', 'logging_level'):
            self.logging_level = conf.getint('params', 'logging_level')

        for binding in self.key_bindings:
            if conf.has_option('keys', binding):
                custom_key = conf.get('keys', binding)
                self._set_key_binding(binding, custom_key)

        palette_labels = [color[0] for color in PALETTE]
        for label in palette_labels:
            if conf.has_option('colors', label):
                custom_fg = conf.get('colors', label)
                self._set_color(label, custom_fg)
Code example #3
def main():
    logging_config = dict(level=INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    if PY2:
        logging_config['disable_existing_loggers'] = True

    basicConfig(**logging_config)

    args = get_args()

    if args.verbose:
        getLogger('').setLevel(DEBUG)

    config = RawConfigParser()

    if args.generate:
        with open(args.configuration, "w") as f:
            _generate_sample_configuration().write(f)
        return

    config.read(args.configuration)

    chatbot = import_string(config.get("BOT", "BOT"))

    sleep_timeout = config.getint("BOT", "SLEEP")

    receiver = IMAPReceiver(config.get("EMAIL", "USERNAME"),
                            config.get("EMAIL", "PASSWORD"),
                            config.get("EMAIL", "IMAP_SERVER"))

    sender = SMTPSender(config.get("EMAIL", "USERNAME"),
                        config.get("EMAIL", "PASSWORD"),
                        config.get("EMAIL", "SMTP_SERVER"),
                        config.get("EMAIL", "SMTP_PORT"))

    try:
        while True:

            logger.info("Retrieving new messages")
            email_messages = receiver.get_new_emails()
            logger.info("Retrieved %d new messages", len(email_messages))

            for email_message in email_messages:
                email_from, email_subject, email_body = get_email_content(email_message)

                chatbot_response = chatbot.respond(email_body)
                logger.info("Chatting with %s: in: %s out: %s", email_from, email_body, chatbot_response)

                message = make_simple_text_message(from_address=config.get("EMAIL", "USERNAME"),
                                                   to_address=email_from,
                                                   subject=email_subject,
                                                   text=chatbot_response)

                sender.send_email(message)
                logger.info("Response complete")

            sleep(sleep_timeout)
    except KeyboardInterrupt:
        exit()
Code example #4
File: GPIO.py Project: bobvann/GPIOSim
def input(pin):
    """Read the specified pin and return HIGH/true if the pin is pulled high,
    or LOW/false if pulled low."""
    check()

    pin = GPIO_NAMES.index("GPIO"+str(pin))
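    # WORK_FILE stores one "pinN" section per pin, keyed by the pin's position in GPIO_NAMES.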
    
    c = RawConfigParser()
    c.read(WORK_FILE)

    return c.getint("pin"+str(pin),"value")
Code example #5
File: config.py Project: eriksf/dotfiles
    def _read_configuration_file(self, path):
        """Try to read and parse `path` as a configuration file.

        If the configuration is illegal (as checked by
        `self._validate_options`), raises `IllegalConfiguration`.

        Returns (options, should_inherit).

        """
        parser = RawConfigParser(inline_comment_prefixes=('#', ';'))
        options = None
        should_inherit = True

        if parser.read(path) and self._get_section_name(parser):
            all_options = self._parser.option_list[:]
            for group in self._parser.option_groups:
                all_options.extend(group.option_list)

            option_list = dict([(o.dest, o.type or o.action)
                                for o in all_options])
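            # Maps each option's dest name to its type ('int', 'string') or, for flags, its action.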

            # First, read the default values
            new_options, _ = self._parse_args([])

            # Second, parse the configuration
            section_name = self._get_section_name(parser)
            for opt in parser.options(section_name):
                if opt == 'inherit':
                    should_inherit = parser.getboolean(section_name, opt)
                    continue

                if opt.replace('_', '-') not in self.CONFIG_FILE_OPTIONS:
                    log.warning("Unknown option '{}' ignored".format(opt))
                    continue

                normalized_opt = opt.replace('-', '_')
                opt_type = option_list[normalized_opt]
                if opt_type in ('int', 'count'):
                    value = parser.getint(section_name, opt)
                elif opt_type == 'string':
                    value = parser.get(section_name, opt)
                else:
                    assert opt_type in ('store_true', 'store_false')
                    value = parser.getboolean(section_name, opt)
                setattr(new_options, normalized_opt, value)

            # Third, fix the set-options
            options = self._fix_set_options(new_options)

        if options is not None:
            if not self._validate_options(options):
                raise IllegalConfiguration('in file: {}'.format(path))

        return options, should_inherit
Code example #6
File: manager.py Project: jnphilipp/Feedindicator
    def load(self):
        """Load configurations from file."""
        parser = RawConfigParser()
        parser.optionxform = str
        parser.read(os.path.join(app_config_dir, 'config'))
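        # All options are optional; only attributes present in the file are overridden.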
        if parser.has_option('Options', 'autostart'):
            self.autostart = parser.getboolean('Options', 'autostart')
        if parser.has_option('Options', 'refreshtime'):
            self.refreshtime = parser.getint('Options', 'refreshtime')
        if parser.has_option('Options', 'stoptimer'):
            self.stoptimer = parser.getboolean('Options', 'stoptimer')
        if parser.has_option('Options', 'items_per_feed'):
            self.items_per_feed = parser.getint('Options', 'items_per_feed')
        if parser.has_option('Options', 'show_notifications'):
            self.show_notifications = parser.getboolean('Options',
                                                        'show_notifications')
        if parser.has_option('Options', 'show_update_notifications'):
            self.show_update_notifications = parser. \
                getboolean('Options', 'show_update_notifications')
        if parser.has_option('Options', 'feeds_at_top'):
            self.feeds_at_top = parser.getboolean('Options', 'feeds_at_top')
        if parser.has_option('Options', 'show_unread_feeds'):
            self.show_unread_feeds = parser.getboolean('Options',
                                                       'show_unread_feeds')
Code example #7
File: tigreServer.py Project: PROBIC/tigreBrowser
def read_config_file(config_file):
    """Reads the config file.
    Returns port setting from the config file.
    """
    config = RawConfigParser()
    if not config.read(config_file):
        print("Could not find config file '%s'" % config_file)
        return 9999

    try:
        port = config.getint('server', 'port')
    except Exception as e:
        print("Error in config file: %s" % e)
        sys.exit(1)
    return port
Code example #8
File: GPIO.py Project: bobvann/GPIOSim
def output(pin, value):
    """Set the specified pin the provided high/low value.  Value should be
    either HIGH/LOW or a boolean (true = high)."""
    check()
    
    c = RawConfigParser()
    c.read(WORK_FILE)

    pin = GPIO_NAMES.index("GPIO"+str(pin))

    if c.getint("pin"+str(pin),"state") != OUT:
        raise Exception

    c.set("pin"+str(pin),"value",str(value))

    with open(WORK_FILE, 'w') as configfile:
        c.write(configfile)

    pid = os.popen("ps ax | grep GPIOSim | head -1 | awk '{print $1}'").read()
    os.kill(int(pid), signal.SIGUSR1)
Code example #9
File: lint.py Project: adams-sarah/Flake8Lint
def load_flake8_config(filename, global_config=False, project_config=False):
    """
    Returns flake8 settings from config file.

    More info: http://flake8.readthedocs.org/en/latest/config.html
    """
    parser = RawConfigParser()

    # check global config
    if global_config and os.path.isfile(DEFAULT_CONFIG_FILE):
        parser.read(DEFAULT_CONFIG_FILE)

    # search config in filename dir and all parent dirs
    if project_config:
        parent = tail = os.path.abspath(filename)
        while tail:
            if parser.read([os.path.join(parent, fn) for fn in CONFIG_FILES]):
                break
            parent, tail = os.path.split(parent)

    result = {}
    if parser.has_section('flake8'):
        options = (
            ('ignore', 'ignore', 'list'),
            ('select', 'select', 'list'),
            ('exclude', 'ignore_files', 'list'),
            ('max_line_length', 'pep8_max_line_length', 'int')
        )
        for config, plugin, option_type in options:
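            # flake8 accepts both underscore and dash spellings of an option; fall back to the dash form.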
            if not parser.has_option('flake8', config):
                config = config.replace('_', '-')
            if parser.has_option('flake8', config):
                if option_type == 'list':
                    option_value = parser.get('flake8', config).strip()
                    if option_value:
                        result[plugin] = option_value.split(',')
                elif option_type == 'int':
                    option_value = parser.get('flake8', config).strip()
                    if option_value:
                        result[plugin] = parser.getint('flake8', config)
    return result
Code example #10
File: GPIO.py Project: bobvann/GPIOSim
def setup(pin, mode, pull_up_down=PUD_OFF):
    """Set the input or output mode for a specified pin.  Mode should be
    either OUT or IN."""
    check()
        
    c = RawConfigParser()
    c.read(WORK_FILE)

    pin = GPIO_NAMES.index("GPIO"+str(pin))

    if c.getint("pin"+str(pin),"state") == 0:
        raise Exception

    c.set("pin"+str(pin),"state",str(mode))
    if mode==OUT:
        c.set("pin"+str(mode),"value","0")
    with open(WORK_FILE, 'w') as configfile:
        c.write(configfile)

    pid = os.popen("ps ax | grep GPIOSim | head -1 | awk '{print $1}'").read()
    os.kill(int(pid), signal.SIGUSR1)
Code example #11
File: cache.py Project: pieViks/borg
class Cache:
    """Client Side cache
    """
    class RepositoryReplay(Error):
        """Cache is newer than repository, refusing to continue"""

    class CacheInitAbortedError(Error):
        """Cache initialization aborted"""

    class RepositoryAccessAborted(Error):
        """Repository access aborted"""

    class EncryptionMethodMismatch(Error):
        """Repository encryption method changed since last acccess, refusing to continue
        """

    def __init__(self, repository, key, manifest, path=None, sync=True, do_files=False, warn_if_unencrypted=True):
        self.lock = None
        self.timestamp = None
        self.txn_active = False
        self.repository = repository
        self.key = key
        self.manifest = manifest
        self.path = path or os.path.join(get_cache_dir(), hexlify(repository.id).decode('ascii'))
        self.do_files = do_files
        # Warn user before sending data to a never seen before unencrypted repository
        if not os.path.exists(self.path):
            if warn_if_unencrypted and isinstance(key, PlaintextKey):
                if not self._confirm('Warning: Attempting to access a previously unknown unencrypted repository',
                                     'BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK'):
                    raise self.CacheInitAbortedError()
            self.create()
        self.open()
        # Warn user before sending data to a relocated repository
        if self.previous_location and self.previous_location != repository._location.canonical_path():
            msg = 'Warning: The repository at location {} was previously located at {}'.format(repository._location.canonical_path(), self.previous_location)
            if not self._confirm(msg, 'BORG_RELOCATED_REPO_ACCESS_IS_OK'):
                raise self.RepositoryAccessAborted()

        if sync and self.manifest.id != self.manifest_id:
            # If repository is older than the cache something fishy is going on
            if self.timestamp and self.timestamp > manifest.timestamp:
                raise self.RepositoryReplay()
            # Make sure an encrypted repository has not been swapped for an unencrypted repository
            if self.key_type is not None and self.key_type != str(key.TYPE):
                raise self.EncryptionMethodMismatch()
            self.sync()
            self.commit()

    def __del__(self):
        self.close()

    def _confirm(self, message, env_var_override=None):
        print(message, file=sys.stderr)
        if env_var_override and os.environ.get(env_var_override):
            print("Yes (From {})".format(env_var_override))
            return True
        if not sys.stdin.isatty():
            return False
        try:
            answer = input('Do you want to continue? [yN] ')
        except EOFError:
            return False
        return answer and answer in 'Yy'

    def create(self):
        """Create a new empty cache at `self.path`
        """
        os.makedirs(self.path)
        with open(os.path.join(self.path, 'README'), 'w') as fd:
            fd.write('This is a Borg cache')
        config = RawConfigParser()
        config.add_section('cache')
        config.set('cache', 'version', '1')
        config.set('cache', 'repository', hexlify(self.repository.id).decode('ascii'))
        config.set('cache', 'manifest', '')
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            config.write(fd)
        ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8'))
        with open(os.path.join(self.path, 'chunks.archive'), 'wb') as fd:
            pass  # empty file
        with open(os.path.join(self.path, 'files'), 'wb') as fd:
            pass  # empty file

    def destroy(self):
        """destroy the cache at `self.path`
        """
        self.close()
        os.remove(os.path.join(self.path, 'config'))  # kill config first
        shutil.rmtree(self.path)

    def _do_open(self):
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if self.config.getint('cache', 'version') != 1:
            raise Exception('%s Does not look like a Borg cache' % self.path)
        self.id = self.config.get('cache', 'repository')
        self.manifest_id = unhexlify(self.config.get('cache', 'manifest'))
        self.timestamp = self.config.get('cache', 'timestamp', fallback=None)
        self.key_type = self.config.get('cache', 'key_type', fallback=None)
        self.previous_location = self.config.get('cache', 'previous_location', fallback=None)
        self.chunks = ChunkIndex.read(os.path.join(self.path, 'chunks').encode('utf-8'))
        self.files = None

    def open(self):
        if not os.path.isdir(self.path):
            raise Exception('%s Does not look like a Borg cache' % self.path)
        self.lock = UpgradableLock(os.path.join(self.path, 'lock'), exclusive=True).acquire()
        self.rollback()

    def close(self):
        if self.lock:
            self.lock.release()
            self.lock = None

    def _read_files(self):
        self.files = {}
        self._newest_mtime = 0
        with open(os.path.join(self.path, 'files'), 'rb') as fd:
            u = msgpack.Unpacker(use_list=True)
            while True:
                data = fd.read(64 * 1024)
                if not data:
                    break
                u.feed(data)
                for path_hash, item in u:
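                    # entry layout: [age, inode, size, mtime, chunk ids]; age is bumped each time the cache is read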
                    item[0] += 1
                    # in the end, this takes about 240 Bytes per file
                    self.files[path_hash] = msgpack.packb(item)

    def begin_txn(self):
        # Initialize transaction snapshot
        txn_dir = os.path.join(self.path, 'txn.tmp')
        os.mkdir(txn_dir)
        shutil.copy(os.path.join(self.path, 'config'), txn_dir)
        shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
        shutil.copy(os.path.join(self.path, 'chunks.archive'), txn_dir)
        shutil.copy(os.path.join(self.path, 'files'), txn_dir)
        os.rename(os.path.join(self.path, 'txn.tmp'),
                  os.path.join(self.path, 'txn.active'))
        self.txn_active = True

    def commit(self):
        """Commit transaction
        """
        if not self.txn_active:
            return
        if self.files is not None:
            with open(os.path.join(self.path, 'files'), 'wb') as fd:
                for path_hash, item in self.files.items():
                    # Discard cached files with the newest mtime to avoid
                    # issues with filesystem snapshots and mtime precision
                    item = msgpack.unpackb(item)
                    if item[0] < 10 and bigint_to_int(item[3]) < self._newest_mtime:
                        msgpack.pack((path_hash, item), fd)
        self.config.set('cache', 'manifest', hexlify(self.manifest.id).decode('ascii'))
        self.config.set('cache', 'timestamp', self.manifest.timestamp)
        self.config.set('cache', 'key_type', str(self.key.TYPE))
        self.config.set('cache', 'previous_location', self.repository._location.canonical_path())
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            self.config.write(fd)
        self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8'))
        os.rename(os.path.join(self.path, 'txn.active'),
                  os.path.join(self.path, 'txn.tmp'))
        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def rollback(self):
        """Roll back partial and aborted transactions
        """
        # Remove partial transaction
        if os.path.exists(os.path.join(self.path, 'txn.tmp')):
            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        # Roll back active transaction
        txn_dir = os.path.join(self.path, 'txn.active')
        if os.path.exists(txn_dir):
            shutil.copy(os.path.join(txn_dir, 'config'), self.path)
            shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
            shutil.copy(os.path.join(txn_dir, 'chunks.archive'), self.path)
            shutil.copy(os.path.join(txn_dir, 'files'), self.path)
            os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
            if os.path.exists(os.path.join(self.path, 'txn.tmp')):
                shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False
        self._do_open()

    def sync(self):
        """Re-synchronize chunks cache with repository.

        If present, uses a compressed tar archive of known backup archive
        indices, so it only needs to fetch infos from repo and build a chunk
        index once per backup archive.
        If out of sync, the tar gets rebuilt from known + fetched chunk infos,
        so it has complete and current information about all backup archives.
        Finally, it builds the master chunks index by merging all indices from
        the tar.

        Note: compression (esp. xz) is very effective in keeping the tar
              relatively small compared to the files it contains.
        """
        in_archive_path = os.path.join(self.path, 'chunks.archive')
        out_archive_path = os.path.join(self.path, 'chunks.archive.tmp')

        def open_in_archive():
            try:
                tf = tarfile.open(in_archive_path, 'r')
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                # file not found
                tf = None
            except tarfile.ReadError:
                # empty file?
                tf = None
            return tf

        def open_out_archive():
            for compression in ('xz', 'bz2', 'gz'):
                # xz needs py 3.3, bz2 and gz also work on 3.2
                try:
                    tf = tarfile.open(out_archive_path, 'w:'+compression, format=tarfile.PAX_FORMAT)
                    break
                except tarfile.CompressionError:
                    continue
            else:  # shouldn't happen
                tf = None
            return tf

        def close_archive(tf):
            if tf:
                tf.close()

        def delete_in_archive():
            os.unlink(in_archive_path)

        def rename_out_archive():
            os.rename(out_archive_path, in_archive_path)

        def add(chunk_idx, id, size, csize, incr=1):
            try:
                count, size, csize = chunk_idx[id]
                chunk_idx[id] = count + incr, size, csize
            except KeyError:
                chunk_idx[id] = incr, size, csize

        def transfer_known_idx(archive_id, tf_in, tf_out):
            archive_id_hex = hexlify(archive_id).decode('ascii')
            tarinfo = tf_in.getmember(archive_id_hex)
            archive_name = tarinfo.pax_headers['archive_name']
            print('Already known archive:', archive_name)
            f_in = tf_in.extractfile(archive_id_hex)
            tf_out.addfile(tarinfo, f_in)
            return archive_name

        def fetch_and_build_idx(archive_id, repository, key, tmp_dir, tf_out):
            chunk_idx = ChunkIndex()
            cdata = repository.get(archive_id)
            data = key.decrypt(archive_id, cdata)
            add(chunk_idx, archive_id, len(data), len(cdata))
            archive = msgpack.unpackb(data)
            if archive[b'version'] != 1:
                raise Exception('Unknown archive metadata version')
            decode_dict(archive, (b'name',))
            print('Analyzing new archive:', archive[b'name'])
            unpacker = msgpack.Unpacker()
            for item_id, chunk in zip(archive[b'items'], repository.get_many(archive[b'items'])):
                data = key.decrypt(item_id, chunk)
                add(chunk_idx, item_id, len(data), len(chunk))
                unpacker.feed(data)
                for item in unpacker:
                    if not isinstance(item, dict):
                        print('Error: Did not get expected metadata dict - archive corrupted!')
                        continue
                    if b'chunks' in item:
                        for chunk_id, size, csize in item[b'chunks']:
                            add(chunk_idx, chunk_id, size, csize)
            archive_id_hex = hexlify(archive_id).decode('ascii')
            file_tmp = os.path.join(tmp_dir, archive_id_hex).encode('utf-8')
            chunk_idx.write(file_tmp)
            tarinfo = tf_out.gettarinfo(file_tmp, archive_id_hex)
            tarinfo.pax_headers['archive_name'] = archive[b'name']
            with open(file_tmp, 'rb') as f:
                tf_out.addfile(tarinfo, f)
            os.unlink(file_tmp)

        def create_master_idx(chunk_idx, tf_in, tmp_dir):
            chunk_idx.clear()
            for tarinfo in tf_in:
                archive_id_hex = tarinfo.name
                archive_name = tarinfo.pax_headers['archive_name']
                print("- extracting archive %s ..." % archive_name)
                tf_in.extract(archive_id_hex, tmp_dir)
                chunk_idx_path = os.path.join(tmp_dir, archive_id_hex).encode('utf-8')
                print("- reading archive ...")
                archive_chunk_idx = ChunkIndex.read(chunk_idx_path)
                print("- merging archive ...")
                chunk_idx.merge(archive_chunk_idx)
                os.unlink(chunk_idx_path)

        self.begin_txn()
        print('Synchronizing chunks cache...')
        # XXX we have to do stuff on disk due to lacking ChunkIndex api
        with tempfile.TemporaryDirectory(prefix='borg-tmp') as tmp_dir:
            repository = cache_if_remote(self.repository)
            out_archive = open_out_archive()
            in_archive = open_in_archive()
            if in_archive:
                known_ids = set(unhexlify(hexid) for hexid in in_archive.getnames())
            else:
                known_ids = set()
            archive_ids = set(info[b'id'] for info in self.manifest.archives.values())
            print('Rebuilding archive collection. Known: %d Repo: %d Unknown: %d' % (
                len(known_ids), len(archive_ids), len(archive_ids - known_ids), ))
            for archive_id in archive_ids & known_ids:
                transfer_known_idx(archive_id, in_archive, out_archive)
            close_archive(in_archive)
            delete_in_archive()  # free disk space
            for archive_id in archive_ids - known_ids:
                fetch_and_build_idx(archive_id, repository, self.key, tmp_dir, out_archive)
            close_archive(out_archive)
            rename_out_archive()
            print('Merging collection into master chunks cache...')
            in_archive = open_in_archive()
            create_master_idx(self.chunks, in_archive, tmp_dir)
            close_archive(in_archive)
            print('Done.')

    def add_chunk(self, id, data, stats):
        if not self.txn_active:
            self.begin_txn()
        if self.seen_chunk(id):
            return self.chunk_incref(id, stats)
        size = len(data)
        data = self.key.encrypt(data)
        csize = len(data)
        self.repository.put(id, data, wait=False)
        self.chunks[id] = (1, size, csize)
        stats.update(size, csize, True)
        return id, size, csize

    def seen_chunk(self, id):
        return self.chunks.get(id, (0, 0, 0))[0]

    def chunk_incref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        self.chunks[id] = (count + 1, size, csize)
        stats.update(size, csize, False)
        return id, size, csize

    def chunk_decref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        if count == 1:
            del self.chunks[id]
            self.repository.delete(id, wait=False)
            stats.update(-size, -csize, True)
        else:
            self.chunks[id] = (count - 1, size, csize)
            stats.update(-size, -csize, False)

    def file_known_and_unchanged(self, path_hash, st):
        if not self.do_files:
            return None
        if self.files is None:
            self._read_files()
        entry = self.files.get(path_hash)
        if not entry:
            return None
        entry = msgpack.unpackb(entry)
        if entry[2] == st.st_size and bigint_to_int(entry[3]) == st_mtime_ns(st) and entry[1] == st.st_ino:
            # reset entry age
            entry[0] = 0
            self.files[path_hash] = msgpack.packb(entry)
            return entry[4]
        else:
            return None

    def memorize_file(self, path_hash, st, ids):
        if not self.do_files:
            return
        # Entry: Age, inode, size, mtime, chunk ids
        mtime_ns = st_mtime_ns(st)
        self.files[path_hash] = msgpack.packb((0, st.st_ino, st.st_size, int_to_bigint(mtime_ns), ids))
        self._newest_mtime = max(self._newest_mtime, mtime_ns)
Code example #12
File: regexbot.py Project: cequencer/ircbots
try:
    config.readfp(open(argv[1]))
except Exception:
    try:
        config.readfp(open('regexbot.ini'))
    except Exception:
        print "Syntax:"
        print "  %s [config]" % argv[0]
        print ""
        print "If no configuration file is specified or there was an error, it will default to `regexbot.ini'."
        print "If there was a failure reading the configuration, it will display this message."
        exit(1)

# read config
SERVER = config.get('regexbot', 'server')
PORT = config.getint('regexbot', 'port')
IPV6 = config.getboolean('regexbot', 'ipv6')
NICK = str(config.get('regexbot', 'nick'))
CHANNELS = str(config.get('regexbot', 'channels')).split()
VERSION = str(config.get('regexbot', 'version')) + '; %s'
try:
    VERSION = VERSION % Popen(["git", "branch", "-v", "--contains"],
                              stdout=PIPE).communicate()[0].strip()
except Exception:
    VERSION = VERSION % 'unknown'
del Popen, PIPE
TRANSLATE_ENABLED = config.getboolean('regexbot', 'translate_enabled')
RECONNECT_TO_SERVER = config.getboolean('regexbot', 'reconnect_to_server')
FORCE_ENDING_SLASH = config.getboolean('regexbot', 'force_ending_slash')

CHANNEL_FLOOD_COOLDOWN = timedelta(
Code example #13
File: portal.py Project: stemid/captiveportal
# Setup logging
logFormatter = Formatter(config.get('logging', 'log_format'))
l = getLogger('captiveportal')
if config.get('logging', 'log_handler') == 'syslog':
    syslog_address = config.get('logging', 'syslog_address')

    if syslog_address.startswith('/'):
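        # A path-style address (leading '/') is a local Unix socket such as /dev/log.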
        logHandler = SysLogHandler(
            address=syslog_address,
            facility=SysLogHandler.LOG_LOCAL0
        )
    else:
        logHandler = SysLogHandler(
            address=(
                config.get('logging', 'syslog_address'),
                config.getint('logging', 'syslog_port')
            ),
            facility=SysLogHandler.LOG_LOCAL0
        )
else:
    logHandler = RotatingFileHandler(
        config.get('logging', 'log_file'),
        maxBytes=config.getint('logging', 'log_max_bytes'),
        backupCount=config.getint('logging', 'log_max_copies')
    )
logHandler.setFormatter(logFormatter)
l.addHandler(logHandler)

if config.getboolean('logging', 'log_debug'):
    l.setLevel(DEBUG)
else:
Code example #14
File: eds.py Project: triveria/canopen
def import_eds(source, node_id):
    eds = RawConfigParser()
    if hasattr(source, "read"):
        fp = source
    else:
        fp = open(source)
    try:
        # Python 3
        eds.read_file(fp)
    except AttributeError:
        # Python 2
        eds.readfp(fp)
    fp.close()
    od = objectdictionary.ObjectDictionary()
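    # "DeviceComissioning" (sic) is the section name as spelled in the EDS specification.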
    if eds.has_section("DeviceComissioning"):
        od.bitrate = int(eds.get("DeviceComissioning", "Baudrate")) * 1000
        od.node_id = int(eds.get("DeviceComissioning", "NodeID"))

    for section in eds.sections():
        # Match dummy definitions
        match = re.match(r"^[Dd]ummy[Uu]sage$", section)
        if match is not None:
            for i in range(1, 8):
                key = "Dummy%04d" % i
                if eds.getint(section, key) == 1:
                    var = objectdictionary.Variable(key, i, 0)
                    var.data_type = i
                    var.access_type = "const"
                    od.add_object(var)

        # Match indexes
        match = re.match(r"^[0-9A-Fa-f]{4}$", section)
        if match is not None:
            index = int(section, 16)
            name = eds.get(section, "ParameterName")
            try:
                object_type = int(eds.get(section, "ObjectType"), 0)
            except NoOptionError:
                # DS306 4.6.3.2 object description
                # If the keyword ObjectType is missing, this is regarded as
                # "ObjectType=0x7" (=VAR).
                object_type = VAR
            try:
                storage_location = eds.get(section, "StorageLocation")
            except NoOptionError:
                storage_location = None

            if object_type in (VAR, DOMAIN):
                var = build_variable(eds, section, node_id, index)
                od.add_object(var)
            elif object_type == ARR and eds.has_option(section, "CompactSubObj"):
                arr = objectdictionary.Array(name, index)
                last_subindex = objectdictionary.Variable(
                    "Number of entries", index, 0)
                last_subindex.data_type = objectdictionary.UNSIGNED8
                arr.add_member(last_subindex)
                arr.add_member(build_variable(eds, section, node_id, index, 1))
                arr.storage_location = storage_location
                od.add_object(arr)
            elif object_type == ARR:
                arr = objectdictionary.Array(name, index)
                arr.storage_location = storage_location
                od.add_object(arr)
            elif object_type == RECORD:
                record = objectdictionary.Record(name, index)
                record.storage_location = storage_location
                od.add_object(record)

            continue

        # Match subindexes
        match = re.match(r"^([0-9A-Fa-f]{4})[S|s]ub([0-9A-Fa-f]+)$", section)
        if match is not None:
            index = int(match.group(1), 16)
            subindex = int(match.group(2), 16)
            entry = od[index]
            if isinstance(entry, (objectdictionary.Record,
                                  objectdictionary.Array)):
                var = build_variable(eds, section, node_id, index, subindex)
                entry.add_member(var)

        # Match [index]Name
        match = re.match(r"^([0-9A-Fa-f]{4})Name", section)
        if match is not None:
            index = int(match.group(1), 16)
            num_of_entries = int(eds.get(section, "NrOfEntries"))
            entry = od[index]
            # For CompactSubObj index 1 is where we find the variable
            src_var = od[index][1]
            for subindex in range(1, num_of_entries + 1):
                var = copy_variable(eds, section, subindex, src_var)
                if var is not None:
                    entry.add_member(var)

    return od
Code example #15
class Cache(object):
    """Client Side cache
    """
    class RepositoryReplay(Error):
        """Cache is newer than repository, refusing to continue"""

    def __init__(self, repository, key, manifest, path=None, sync=True):
        self.timestamp = None
        self.txn_active = False
        self.repository = repository
        self.key = key
        self.manifest = manifest
        self.path = path or os.path.join(
            get_cache_dir(),
            hexlify(repository.id).decode('ascii'))
        if not os.path.exists(self.path):
            self.create()
        self.open()
        if sync and self.manifest.id != self.manifest_id:
            # If repository is older than the cache something fishy is going on
            if self.timestamp and self.timestamp > manifest.timestamp:
                raise self.RepositoryReplay()
            self.sync()
            self.commit()

    def __del__(self):
        self.close()

    def create(self):
        """Create a new empty cache at `path`
        """
        os.makedirs(self.path)
        with open(os.path.join(self.path, 'README'), 'w') as fd:
            fd.write('This is an Attic cache')
        config = RawConfigParser()
        config.add_section('cache')
        config.set('cache', 'version', '1')
        config.set('cache', 'repository',
                   hexlify(self.repository.id).decode('ascii'))
        config.set('cache', 'manifest', '')
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            config.write(fd)
        ChunkIndex.create(os.path.join(self.path, 'chunks').encode('utf-8'))
        with open(os.path.join(self.path, 'files'), 'w') as fd:
            pass  # empty file

    def open(self):
        if not os.path.isdir(self.path):
            raise Exception('%s Does not look like an Attic cache' % self.path)
        self.lock = UpgradableLock(os.path.join(self.path, 'config'),
                                   exclusive=True)
        self.rollback()
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if self.config.getint('cache', 'version') != 1:
            raise Exception('%s Does not look like an Attic cache' % self.path)
        self.id = self.config.get('cache', 'repository')
        self.manifest_id = unhexlify(self.config.get('cache', 'manifest'))
        self.timestamp = self.config.get('cache', 'timestamp', fallback=None)
        self.chunks = ChunkIndex(
            os.path.join(self.path, 'chunks').encode('utf-8'))
        self.files = None

    def close(self):
        self.lock.release()

    def _read_files(self):
        self.files = {}
        self._newest_mtime = 0
        with open(os.path.join(self.path, 'files'), 'rb') as fd:
            u = msgpack.Unpacker(use_list=True)
            while True:
                data = fd.read(64 * 1024)
                if not data:
                    break
                u.feed(data)
                for hash, item in u:
                    item[0] += 1
                    self.files[hash] = item

    def begin_txn(self):
        # Initialize transaction snapshot
        txn_dir = os.path.join(self.path, 'txn.tmp')
        os.mkdir(txn_dir)
        shutil.copy(os.path.join(self.path, 'config'), txn_dir)
        shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
        shutil.copy(os.path.join(self.path, 'files'), txn_dir)
        os.rename(os.path.join(self.path, 'txn.tmp'),
                  os.path.join(self.path, 'txn.active'))
        self.txn_active = True

    def commit(self):
        """Commit transaction
        """
        if not self.txn_active:
            return
        if self.files is not None:
            with open(os.path.join(self.path, 'files'), 'wb') as fd:
                for item in self.files.items():
                    # Discard cached files with the newest mtime to avoid
                    # issues with filesystem snapshots and mtime precision
                    if item[1][0] < 10 and item[1][3] < self._newest_mtime:
                        msgpack.pack(item, fd)
        self.config.set('cache', 'manifest',
                        hexlify(self.manifest.id).decode('ascii'))
        self.config.set('cache', 'timestamp', self.manifest.timestamp)
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            self.config.write(fd)
        self.chunks.flush()
        os.rename(os.path.join(self.path, 'txn.active'),
                  os.path.join(self.path, 'txn.tmp'))
        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def rollback(self):
        """Roll back partial and aborted transactions
        """
        # Remove partial transaction
        if os.path.exists(os.path.join(self.path, 'txn.tmp')):
            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        # Roll back active transaction
        txn_dir = os.path.join(self.path, 'txn.active')
        if os.path.exists(txn_dir):
            shutil.copy(os.path.join(txn_dir, 'config'), self.path)
            shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
            shutil.copy(os.path.join(txn_dir, 'files'), self.path)
            os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
            if os.path.exists(os.path.join(self.path, 'txn.tmp')):
                shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def sync(self):
        """Initializes cache by fetching and reading all archive indicies
        """
        def add(id, size, csize):
            try:
                count, size, csize = self.chunks[id]
                self.chunks[id] = count + 1, size, csize
            except KeyError:
                self.chunks[id] = 1, size, csize

        self.begin_txn()
        print('Initializing cache...')
        self.chunks.clear()
        unpacker = msgpack.Unpacker()
        repository = cache_if_remote(self.repository)
        for name, info in self.manifest.archives.items():
            archive_id = info[b'id']
            cdata = repository.get(archive_id)
            data = self.key.decrypt(archive_id, cdata)
            add(archive_id, len(data), len(cdata))
            archive = msgpack.unpackb(data)
            if archive[b'version'] != 1:
                raise Exception('Unknown archive metadata version')
            decode_dict(archive, (b'name', ))
            print('Analyzing archive:', archive[b'name'])
            for key, chunk in zip(archive[b'items'],
                                  repository.get_many(archive[b'items'])):
                data = self.key.decrypt(key, chunk)
                add(key, len(data), len(chunk))
                unpacker.feed(data)
                for item in unpacker:
                    if b'chunks' in item:
                        for chunk_id, size, csize in item[b'chunks']:
                            add(chunk_id, size, csize)

    def add_chunk(self, id, data, stats):
        if not self.txn_active:
            self.begin_txn()
        if self.seen_chunk(id):
            return self.chunk_incref(id, stats)
        size = len(data)
        data = self.key.encrypt(data)
        csize = len(data)
        self.repository.put(id, data, wait=False)
        self.chunks[id] = (1, size, csize)
        stats.update(size, csize, True)
        return id, size, csize

    def seen_chunk(self, id):
        return self.chunks.get(id, (0, 0, 0))[0]

    def chunk_incref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        self.chunks[id] = (count + 1, size, csize)
        stats.update(size, csize, False)
        return id, size, csize

    def chunk_decref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        if count == 1:
            del self.chunks[id]
            self.repository.delete(id, wait=False)
            stats.update(-size, -csize, True)
        else:
            self.chunks[id] = (count - 1, size, csize)
            stats.update(-size, -csize, False)

    def file_known_and_unchanged(self, path_hash, st):
        if self.files is None:
            self._read_files()
        entry = self.files.get(path_hash)
        if (entry and entry[3] == st_mtime_ns(st) and entry[2] == st.st_size
                and entry[1] == st.st_ino):
            # reset entry age
            if entry[0] != 0:
                self.files[path_hash][0] = 0
            return entry[4]
        else:
            return None

    def memorize_file(self, path_hash, st, ids):
        # Entry: Age, inode, size, mtime, chunk ids
        mtime_ns = st_mtime_ns(st)
        self.files[path_hash] = 0, st.st_ino, st.st_size, mtime_ns, ids
        self._newest_mtime = max(self._newest_mtime, mtime_ns)
Code example #16
File: repository.py Project: Ernest0x/attic
class Repository(object):
    """Filesystem based transactional key value store

    On disk layout:
    dir/README
    dir/config
    dir/data/<X / SEGMENTS_PER_DIR>/<X>
    dir/index.X
    dir/hints.X
    """
    DEFAULT_MAX_SEGMENT_SIZE = 5 * 1024 * 1024
    DEFAULT_SEGMENTS_PER_DIR = 10000

    class DoesNotExist(Error):
        """Repository {} does not exist"""

    class AlreadyExists(Error):
        """Repository {} already exists"""

    class InvalidRepository(Error):
        """{} is not a valid repository"""


    def __init__(self, path, create=False):
        self.path = path
        self.io = None
        self.lock = None
        if create:
            self.create(path)
        self.open(path)

    def __del__(self):
        self.close()

    def create(self, path):
        """Create a new empty repository at `path`
        """
        if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
            raise self.AlreadyExists(path)
        if not os.path.exists(path):
            os.mkdir(path)
        with open(os.path.join(path, 'README'), 'w') as fd:
            fd.write('This is an Attic repository\n')
        os.mkdir(os.path.join(path, 'data'))
        config = RawConfigParser()
        config.add_section('repository')
        config.set('repository', 'version', '1')
        config.set('repository', 'segments_per_dir', self.DEFAULT_SEGMENTS_PER_DIR)
        config.set('repository', 'max_segment_size', self.DEFAULT_MAX_SEGMENT_SIZE)
        config.set('repository', 'id', hexlify(os.urandom(32)).decode('ascii'))
        with open(os.path.join(path, 'config'), 'w') as fd:
            config.write(fd)

    def open(self, path):
        self.head = None
        self.path = path
        if not os.path.isdir(path):
            raise self.DoesNotExist(path)
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
            raise self.InvalidRepository(path)
        self.lock = UpgradableLock(os.path.join(path, 'config'))
        self.max_segment_size = self.config.getint('repository', 'max_segment_size')
        self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
        self.id = unhexlify(self.config.get('repository', 'id').strip())
        self.rollback()

    def close(self):
        if self.lock:
            self.rollback()
            self.lock.release()
            self.lock = None

    def commit(self, rollback=True):
        """Commit transaction
        """
        self.io.write_commit()
        self.compact_segments()
        self.write_index()
        self.rollback()

    def _available_indices(self, reverse=False):
        names = [int(name[6:]) for name in os.listdir(self.path) if re.match(r'index\.\d+', name)]
        names.sort(reverse=reverse)
        return names

    def open_index(self, head, read_only=False):
        if head is None:
            self.index = NSIndex.create(os.path.join(self.path, 'index.tmp').encode('utf-8'))
            self.segments = {}
            self.compact = set()
        else:
            if read_only:
                self.index = NSIndex((os.path.join(self.path, 'index.%d') % head).encode('utf-8'), readonly=True)
            else:
                shutil.copy(os.path.join(self.path, 'index.%d' % head),
                            os.path.join(self.path, 'index.tmp'))
                self.index = NSIndex(os.path.join(self.path, 'index.tmp').encode('utf-8'))
            hints = read_msgpack(os.path.join(self.path, 'hints.%d' % head))
            if hints[b'version'] != 1:
                raise ValueError('Unknown hints file version: %d' % hints[b'version'])
            self.segments = hints[b'segments']
            self.compact = set(hints[b'compact'])

    def write_index(self):
        hints = {b'version': 1,
                 b'segments': self.segments,
                 b'compact': list(self.compact)}
        write_msgpack(os.path.join(self.path, 'hints.%d' % self.io.head), hints)
        self.index.flush()
        os.rename(os.path.join(self.path, 'index.tmp'),
                  os.path.join(self.path, 'index.%d' % self.io.head))
        # Remove old indices
        current = '.%d' % self.io.head
        for name in os.listdir(self.path):
            if not name.startswith('index.') and not name.startswith('hints.'):
                continue
            if name.endswith(current):
                continue
            os.unlink(os.path.join(self.path, name))

    def compact_segments(self):
        """Compact sparse segments by copying data into new segments
        """
        if not self.compact:
            return

        def lookup(tag, key):
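            # An entry is live only if it is a PUT whose key still maps to this segment in the index.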
            return tag == TAG_PUT and self.index.get(key, (-1, -1))[0] == segment
        segments = self.segments
        for segment in sorted(self.compact):
            if segments[segment] > 0:
                for tag, key, data in self.io.iter_objects(segment, lookup, include_data=True):
                    new_segment, offset = self.io.write_put(key, data)
                    self.index[key] = new_segment, offset
                    segments.setdefault(new_segment, 0)
                    segments[new_segment] += 1
                    segments[segment] -= 1
                assert segments[segment] == 0
        self.io.write_commit()
        for segment in self.compact:
            assert self.segments.pop(segment) == 0
            self.io.delete_segment(segment)
        self.compact = set()

    def recover(self, path):
        """Recover missing index by replaying logs"""
        start = None
        available = self._available_indices()
        if available:
            start = available[-1]
        self.open_index(start)
        for segment, filename in self.io._segment_names():
            if start is not None and segment <= start:
                continue
            self.segments[segment] = 0
            for tag, key, offset in self.io.iter_objects(segment):
                if tag == TAG_PUT:
                    try:
                        s, _ = self.index[key]
                        self.compact.add(s)
                        self.segments[s] -= 1
                    except KeyError:
                        pass
                    self.index[key] = segment, offset
                    self.segments[segment] += 1
                elif tag == TAG_DELETE:
                    try:
                        s, _ = self.index.pop(key)
                        self.segments[s] -= 1
                        self.compact.add(s)
                        self.compact.add(segment)
                    except KeyError:
                        pass
            if self.segments[segment] == 0:
                self.compact.add(segment)
        if self.io.head is not None:
            self.write_index()

    def rollback(self):
        """Roll back to the last committed state
        """
        self._active_txn = False
        if self.io:
            self.io.close()
        self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)
        if self.io.head is not None and not os.path.exists(os.path.join(self.path, 'index.%d' % self.io.head)):
            self.lock.upgrade()
            self.recover(self.path)
        self.open_index(self.io.head, read_only=True)

    def _len(self):
        return len(self.index)

    def get(self, id):
        try:
            segment, offset = self.index[id]
            return self.io.read(segment, offset, id)
        except KeyError:
            raise self.DoesNotExist(self.path)

    def get_many(self, ids, peek=None):
        for id in ids:
            yield self.get(id)

    def put(self, id, data, wait=True):
        if not self._active_txn:
            self._active_txn = True
            self.lock.upgrade()
            self.open_index(self.io.head)
        try:
            segment, _ = self.index[id]
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.segments.setdefault(segment, 0)
            self.compact.add(segment)
        except KeyError:
            pass
        segment, offset = self.io.write_put(id, data)
        self.segments.setdefault(segment, 0)
        self.segments[segment] += 1
        self.index[id] = segment, offset

    def delete(self, id, wait=True):
        if not self._active_txn:
            self._active_txn = True
            self.lock.upgrade()
            self.open_index(self.io.head)
        try:
            segment, offset = self.index.pop(id)
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.compact.add(segment)
            self.segments.setdefault(segment, 0)
        except KeyError:
            raise self.DoesNotExist(self.path)

    def add_callback(self, cb, data):
        cb(None, None, data)
Code example #17
class Config(object):
    """Hold configuration state and utility functions related to config state.

    This is kind of a catch all for functionality related to the current
    configuration.
    """
    def __init__(self, filename=None):
        self.c = RawConfigParser()

        if filename:
            if not os.path.exists(filename):
                raise ValueError('config file does not exist: %s' % filename)

            self.c.read(filename)

        if self.c.has_section('path_rewrites'):
            self._path_rewrites = self.c.items('path_rewrites')
        else:
            self._path_rewrites = []

        if self.c.has_section('pull_url_rewrites'):
            self._pull_url_rewrites = self.c.items('pull_url_rewrites')
        else:
            self._pull_url_rewrites = []

        if self.c.has_section('public_url_rewrites'):
            self._public_url_rewrites = self.c.items('public_url_rewrites')
        else:
            self._public_url_rewrites = []

        if self.c.has_section('replicationpathrewrites'):
            self._replication_path_rewrites = self.c.items('replicationpathrewrites')
        else:
            self._replication_path_rewrites = []

        if self.c.has_section('replicationrules'):
            re_includes, re_excludes = [], []
            self.path_includes, self.path_excludes = {}, {}
            for key, value in self.c.items('replicationrules'):
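                # Keys look like "include.<name>" / "exclude.<name>"; values like "re:<pattern>" or "path:<path>".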
                (behaviour, name), (ruletype, rule) = key.split('.'), value.split(':')

                if ruletype == 're':
                    # Decide which list is correct and append to it
                    restore = re_includes if behaviour == 'include' else re_excludes
                    restore.append((name, rule))

                elif ruletype == 'path':
                    exstore = self.path_includes if behaviour == 'include' else self.path_excludes
                    exstore[rule] = name
                else:
                    raise Exception('bad ruletype %s' % ruletype)

            # Create the in/out rules as an `or` of all the rules
            includes_string = '|'.join(
                create_namedgroup(name, rule)
                for name, rule in re_includes
            )
            excludes_string = '|'.join(
                create_namedgroup(name, rule)
                for name, rule in re_excludes
            )

            self.include_regex = re.compile(includes_string) if includes_string else None
            self.exclude_regex = re.compile(excludes_string) if excludes_string else None

            self.has_filters = bool(self.path_includes or self.path_excludes or self.include_regex or self.exclude_regex)
        else:
            self.has_filters = False

    def get(self, section, option):
        return pycompat.sysstr(self.c.get(section, option))

    @property
    def hg_path(self):
        """Path to a hg executable."""
        if self.c.has_section('programs') and self.c.has_option('programs', 'hg'):
            return self.get('programs', 'hg')

        return 'hg'

    def parse_wire_repo_path(self, path):
        """Parse a normalized repository path into a local path."""
        for source, dest in self._path_rewrites:
            if path.startswith(source):
                return path.replace(source, dest)

        return path

    def get_replication_path_rewrite(self, path):
        """Parse a local path into a wire path"""
        for source, dest in self._replication_path_rewrites:
            if path.startswith(source):
                return dest + path[len(source):]

        return None

    def get_pull_url_from_repo_path(self, path):
        """Obtain a URL to be used for pulling from a local repo path."""
        for source, dest in self._pull_url_rewrites:
            if path.startswith(source):
                return dest + path[len(source):]

        return None

    def get_public_url_from_wire_path(self, path):
        """Obtain a URL to be used for public advertisement from a wire protocol path."""
        for source, dest in self._public_url_rewrites:
            if path.startswith(source):
                return dest + path[len(source):]

        return None

    def filter(self, repo):
        """Returns a RepoFilterResult indicating if the repo should be filtered out
        of the set and which rule performed the include/exclude.

        If the repo was not touched by any rule, we default to disallowing the repo
        to be replicated. This rule is called "noinclude". If there were no
        filters defined at all, we pass the filter. This rule is called "nofilter".
        """
        if not self.has_filters:
            return RepoFilterResult(True, 'nofilter')

        if repo in self.path_includes:
            return RepoFilterResult(True, self.path_includes[repo])

        if repo in self.path_excludes:
            return RepoFilterResult(False, self.path_excludes[repo])

        includematch = self.include_regex.match(repo) if self.include_regex else None
        excludematch = self.exclude_regex.match(repo) if self.exclude_regex else None

        # Repo passes through filter if matching an include rule
        # and not matching an exclude rule
        if includematch and not excludematch:
            matchkeys = iter(includematch.groupdict().keys())
            return RepoFilterResult(True, next(matchkeys))

        # Return specific exclude rule if there was a match
        if excludematch:
            matchkeys = iter(excludematch.groupdict().keys())
            return RepoFilterResult(False, next(matchkeys))

        # Use "noinclude" if we didn't get a match for an include rule
        return RepoFilterResult(False, 'noinclude')

    def get_client_from_section(self, section, timeout=-1):
        """Obtain a KafkaClient from a config section.

        The config section must have a ``hosts`` and ``client_id`` option.
        An optional ``connect_timeout`` defines the connection timeout.

        ``timeout`` specifies how many seconds to retry attempting to connect
        to Kafka in case the initial connection failed. -1 indicates to not
        retry. This is useful when attempting to connect to a cluster that may
        still be coming online, for example.
        """
        hosts = self.get(section, 'hosts')
        client_id = self.get(section, 'client_id')
        connect_timeout = 60
        if self.c.has_option(section, 'connect_timeout'):
            connect_timeout = self.c.getint(section, 'connect_timeout')

        start = time.time()
        while True:
            try:
                return SimpleClient(hosts, client_id=client_id,
                                   timeout=connect_timeout)
            except KafkaUnavailableError:
                if timeout == -1:
                    raise

            if time.time() - start > timeout:
                raise Exception('timeout reached trying to connect to Kafka')

            time.sleep(0.1)
Code example #18
File: dispatch_ms.py Project: stemid/ecsapi
        h = SysLogHandler(
            address=syslog_address,
            facility=SysLogHandler.LOG_LOCAL0
        )
    else:
        h = SysLogHandler(
            address=(
                config.get('logging', 'syslog_address'),
                config.getint('logging', 'syslog_port')  # the address tuple needs an int port
            ),
            facility=SysLogHandler.LOG_LOCAL0
        )
else:
    h = RotatingFileHandler(
        config.get('logging', 'log_file'),
        maxBytes=config.getint('logging', 'log_max_bytes'),
        backupCount=config.getint('logging', 'log_max_copies')
    )
h.setFormatter(formatter)
l.addHandler(h)

if config.getboolean('logging', 'log_debug'):  # get() would return a truthy string even for "false"
    l.setLevel(DEBUG)
else:
    l.setLevel(WARN)


# Timeout callback helper function
def timeout_callback(self, p):
    if p.poll() is None:
        try:
Code example #19
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

try:
    from configparser import NoSectionError, RawConfigParser
except ImportError:
    # Python 2
    from ConfigParser import NoSectionError, RawConfigParser
from oneconf.paths import ONECONF_OVERRIDE_FILE

config = RawConfigParser()
try:
    config.read(ONECONF_OVERRIDE_FILE)
    MIN_TIME_WITHOUT_ACTIVITY = config.getint('TestSuite',
                                              'MIN_TIME_WITHOUT_ACTIVITY')
except NoSectionError:
    MIN_TIME_WITHOUT_ACTIVITY = 60 * 5

ONECONF_SERVICE_NAME = "com.ubuntu.OneConf"
Code example #20
def serve(configfile):
    """Serve forever.
    """

    try:
        cfg = RawConfigParser()
        cfg.read(configfile)

        host = cfg.get("local_reception", "localhost")

        # for messages
        station = cfg.get("local_reception", "station")
        set_subject(station)

        # for elevation
        global coords
        coords = cfg.get("local_reception", "coordinates")
        coords = [float(coord) for coord in coords.split()]

        global tle_files

        try:
            tle_files = cfg.get("local_reception", "tle_files")
        except NoOptionError:
            tle_files = None

        # publisher
        pubport = cfg.getint(host, "pubport")
        pub = Publisher(pubport)

        # schedule reader
        try:
            sched = ScheduleReader(
                cfg.get("local_reception", "schedule_file"),
                cfg.get("local_reception", "schedule_format"))
            sched.get_next_pass()
        except NoOptionError:
            logger.warning("No schedule file given")
            sched = ScheduleReader(None, None)

        # heart
        hostname = cfg.get(host, "hostname")
        pubaddress = hostname + ":" + str(pubport)
        heart = Heart(pub, pubaddress, 30, sched)
        heart.start()

        # holder
        holder = Holder(pub, pubaddress)

        # cleaner

        cleaner = Cleaner(holder, 1)
        cleaner.start()

        # watcher
        #watcher = DummyWatcher(holder, 2)
        path = cfg.get("local_reception", "data_dir")
        watcher = None

        if not os.path.exists(path):
            logger.warning(path +
                           " doesn't exist, not getting data from files")
        else:
            pattern = cfg.get("local_reception", "file_pattern", raw=True)
            watcher = FileWatcher(holder, os.path.join(path, pattern), sched)
            watcher.start()

        mirror_watcher = None
        try:
            mirror = cfg.get("local_reception", "mirror")
        except NoOptionError:
            pass
        else:
            pubport_m = cfg.getint(mirror, "pubport")
            reqport_m = cfg.getint(mirror, "reqport")
            host_m = cfg.get(mirror, "hostname")
            mirror_watcher = MirrorWatcher(holder, host_m, pubport_m,
                                           reqport_m, sched)
            mirror_watcher.start()

        # request manager
        reqport = cfg.getint(host, "reqport")
        reqman = RequestManager(holder, reqport, station)
        reqman.start()

        while True:
            time.sleep(10000)

    except KeyboardInterrupt:
        pass
    except:
        logger.exception("There was an error!")
        raise
    finally:
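        # Any of these components may never have been created if startup
        # failed early, hence the UnboundLocalError guards below.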
        try:
            reqman.stop()
        except UnboundLocalError:
            pass

        try:
            if mirror_watcher is not None:
                mirror_watcher.stop()
        except UnboundLocalError:
            pass

        try:
            if watcher is not None:
                watcher.stop()
        except UnboundLocalError:
            pass
        try:
            cleaner.stop()
        except UnboundLocalError:
            pass
        try:
            heart.stop()
        except UnboundLocalError:
            pass
        try:
            pub.stop()
        except UnboundLocalError:
            pass
        try:
            get_context().term()
        except ZMQError:
            pass
Code example #21
File: config.py Project: ptrifonov/easywall
class Config(object):
    """
    This class is a generic configuration class.
    It wraps the default configparser and provides basic functionality.

    [Methods]
    get_value: retrieve a value from a config file
    set_value: set a value in the configuration and write the config file to disk
    get_sections: get a list of all possible config sections

    [Raises]
    FileNotFoundError: when the configuration file is not found, an exception is thrown.
    Exception: when the configparser fails to read the config file, an exception is thrown.
    """
    def __init__(self, config_file_path: str) -> None:
        self.config_file_path = config_file_path
        self.configlib = RawConfigParser()
        self.read_config_file()

    def read_config_file(self) -> None:
        """
        TODO: Doku
        """
        if not file_exists(self.config_file_path):
            raise FileNotFoundError("config file '{}' not found".format(
                self.config_file_path))
        try:
            self.configlib.read(self.config_file_path)
        except ParsingError as exc:
            raise ParsingError(
                "{} is not readable by RawConfigParser. \n inner exception: {}"
                .format(self.config_file_path, format_exception(exc)))

    def get_value(self, section: str,
                  key: str) -> Union[bool, int, float, str]:
        """
        Returns a value from a given section of the configuration.

        [Data Types] String, Float, Integer, Boolean
        """
        self.read_config_file()
        value = ""
        try:
            value = self.configlib[section][key]
        except KeyError:
            error("Could not find key {} in section {}".format(key, section))
            info("Valid sections are: ")
            info("{}".format(self.get_sections()))
        if value in ["yes", "no", "true", "false", "on", "off"]:
            return self.configlib.getboolean(section, key)
        if is_int(value):
            return self.configlib.getint(section, key)
        if is_float(value):
            return self.configlib.getfloat(section, key)
        return value

    def set_value(self, section: str, key: str, value: str) -> bool:
        """
        Writes a key, value pair into memory configuration and writes it to config file

        [Data Types] bool
        """
        result = True
        try:
            self.configlib[section][key] = value
        except KeyError as exc:
            message = "Failed to write data to configuration: \n " + \
                "section: '{}' \n key: '{}' \n value: '{}' \n " + \
                "valid sections are: \n {} \n inner error: \n {}"
            error(
                message.format(section, key, value, self.get_sections(),
                               format_exception(exc)))
            result = False

        if result:
            with open(self.config_file_path, 'w') as configfile:
                self.configlib.write(configfile)

        return result

    def get_sections(self) -> list:
        """
        Return a list of the configuration section names/keys
        [WARNING] The name [DEFAULT] is excluded here if used!

        [Data Types] list
        """
        return self.configlib.sections()

    def get_keys(self, section: str) -> AbstractSet[str]:
        """
        TODO: Docu
        """
        return self.configlib[section].keys()
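
# Usage sketch for the Config class above (hypothetical file and option names):
#
#   cfg = Config("/etc/easywall/easywall.conf")
#   enabled = cfg.get_value("IPTABLES", "log_blocked")   # parsed as a bool
#   cfg.set_value("IPTABLES", "log_blocked", "no")       # persisted to disk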
Code example #22
    def updateUI(self):
        self.canvas.delete("all")

        c = RawConfigParser()
        c.read(self.WORK_FILE)

        x = self.START_X

        y = self.START_Y

        for i in range(0, 40):
            state = c.getint("pin" + str(i), "state")
            value = c.getint("pin" + str(i), "value")
            ident = 2 * state + value

            self.currState[i] = state
            self.currValue[i] = value

            e_x = x + self.PIN_SIZE
            e_y = y + self.PIN_SIZE

            self.canvas.create_oval(x,
                                    y,
                                    e_x,
                                    e_y,
                                    outline="black",
                                    fill=self.PIN_COLORS[ident],
                                    width=2,
                                    tags='pin' + str(i))

            self.canvas.tag_bind('pin' + str(i), '<Button>', self.click_cb(i))

            if i % 2 == 0:  #LEFT COLUMN GPIOS
                self.canvas.create_window(x - 70,
                                          y + 10,
                                          window=Label(self.canvas,
                                                       text=self.GPIO_NAMES[i],
                                                       fg=self.TEXT_COLOR,
                                                       bg=self.BG_COLOR))

                if ident == 2:  #IN_LOW
                    self.canvas.create_window(x - 20,
                                              y + 8,
                                              window=Label(self.canvas,
                                                           image=self.phInLeft,
                                                           bd=0))
                    # arrow and clickable(?)
                elif ident == 3:  #IN_HIGH
                    self.canvas.create_window(x - 20,
                                              y + 8,
                                              window=Label(self.canvas,
                                                           image=self.phInLeft,
                                                           bd=0))
                    # arrow and clickable(?)
                elif state == self.STATE_GPIO_OUT:  #OUT
                    self.canvas.create_window(x - 20,
                                              y + 8,
                                              window=Label(
                                                  self.canvas,
                                                  image=self.phOutLeft,
                                                  bd=0))

                x = e_x + self.PIN_DISTANCE
            else:  #RIGHT COLUMN GPIOS
                self.canvas.create_window(e_x + 70,
                                          y + 10,
                                          window=Label(self.canvas,
                                                       text=self.GPIO_NAMES[i],
                                                       fg=self.TEXT_COLOR,
                                                       bg=self.BG_COLOR))

                if ident == 2:  #IN_LOW
                    self.canvas.create_window(e_x + 22,
                                              y + 8,
                                              window=Label(
                                                  self.canvas,
                                                  image=self.phInRight,
                                                  bd=0))
                    # arrow and clickable(?)
                elif ident == 3:  #IN_HIGH
                    self.canvas.create_window(e_x + 22,
                                              y + 8,
                                              window=Label(
                                                  self.canvas,
                                                  image=self.phInRight,
                                                  bd=0))
                    # arrow and clickable(?)
                elif state == self.STATE_GPIO_OUT:  #OUT
                    self.canvas.create_window(e_x + 22,
                                              y + 8,
                                              window=Label(
                                                  self.canvas,
                                                  image=self.phOutRight,
                                                  bd=0))

                y = e_y + self.PIN_DISTANCE
                x = self.START_X

        self.canvas.pack(fill=BOTH, expand=1)
Code example #23
File: config.py Project: samueltt/poezio
    def getint(self, option, section=DEFSECTION):
        """
        Get a value and return it as an int.
        """
        return RawConfigParser.getint(self, section, option)
Code example #24
File: cache.py Project: joolswills/attic
class Cache(object):
    """Client Side cache
    """
    # Do not cache file metadata for files smaller than this
    FILE_MIN_SIZE = 4096

    class RepositoryReplay(Error):
        """Cache is newer than repository, refusing to continue"""

    def __init__(self, repository, key, manifest, path=None, sync=True):
        self.timestamp = None
        self.txn_active = False
        self.repository = repository
        self.key = key
        self.manifest = manifest
        self.path = path or os.path.join(get_cache_dir(), hexlify(repository.id).decode('ascii'))
        if not os.path.exists(self.path):
            self.create()
        self.open()
        if sync and self.manifest.id != self.manifest_id:
            # If repository is older than the cache something fishy is going on
            if self.timestamp and self.timestamp > manifest.timestamp:
                raise self.RepositoryReplay()
            self.sync()
            self.commit()

    def __del__(self):
        self.close()

    def create(self):
        """Create a new empty cache at `path`
        """
        os.makedirs(self.path)
        with open(os.path.join(self.path, 'README'), 'w') as fd:
            fd.write('This is an Attic cache')
        config = RawConfigParser()
        config.add_section('cache')
        config.set('cache', 'version', '1')
        config.set('cache', 'repository', hexlify(self.repository.id).decode('ascii'))
        config.set('cache', 'manifest', '')
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            config.write(fd)
        ChunkIndex.create(os.path.join(self.path, 'chunks').encode('utf-8'))
        with open(os.path.join(self.path, 'files'), 'w') as fd:
            pass  # empty file

    def open(self):
        if not os.path.isdir(self.path):
            raise Exception('%s Does not look like an Attic cache' % self.path)
        self.lock = UpgradableLock(os.path.join(self.path, 'config'), exclusive=True)
        self.rollback()
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if self.config.getint('cache', 'version') != 1:
            raise Exception('%s does not look like an Attic cache' % self.path)
        self.id = self.config.get('cache', 'repository')
        self.manifest_id = unhexlify(self.config.get('cache', 'manifest'))
        self.timestamp = self.config.get('cache', 'timestamp', fallback=None)
        self.chunks = ChunkIndex(os.path.join(self.path, 'chunks').encode('utf-8'))
        self.files = None

    def close(self):
        self.lock.release()

    def _read_files(self):
        self.files = {}
        self._newest_mtime = 0
        with open(os.path.join(self.path, 'files'), 'rb') as fd:
            u = msgpack.Unpacker(use_list=True)
            while True:
                data = fd.read(64 * 1024)
                if not data:
                    break
                u.feed(data)
                for path_hash, item in u:
                    if item[2] > self.FILE_MIN_SIZE:
                        item[0] += 1
                        self.files[path_hash] = item

    def begin_txn(self):
        # Initialize transaction snapshot
        txn_dir = os.path.join(self.path, 'txn.tmp')
        os.mkdir(txn_dir)
        shutil.copy(os.path.join(self.path, 'config'), txn_dir)
        shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
        shutil.copy(os.path.join(self.path, 'files'), txn_dir)
        os.rename(os.path.join(self.path, 'txn.tmp'),
                  os.path.join(self.path, 'txn.active'))
        self.txn_active = True

    def commit(self):
        """Commit transaction
        """
        if not self.txn_active:
            return
        if self.files is not None:
            with open(os.path.join(self.path, 'files'), 'wb') as fd:
                for item in self.files.items():
                    # Discard cached files with the newest mtime to avoid
                    # issues with filesystem snapshots and mtime precision
                    if item[1][0] < 10 and item[1][3] < self._newest_mtime:
                        msgpack.pack(item, fd)
        self.config.set('cache', 'manifest', hexlify(self.manifest.id).decode('ascii'))
        self.config.set('cache', 'timestamp', self.manifest.timestamp)
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            self.config.write(fd)
        self.chunks.flush()
        os.rename(os.path.join(self.path, 'txn.active'),
                  os.path.join(self.path, 'txn.tmp'))
        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def rollback(self):
        """Roll back partial and aborted transactions
        """
        # Remove partial transaction
        if os.path.exists(os.path.join(self.path, 'txn.tmp')):
            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        # Roll back active transaction
        txn_dir = os.path.join(self.path, 'txn.active')
        if os.path.exists(txn_dir):
            shutil.copy(os.path.join(txn_dir, 'config'), self.path)
            shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
            shutil.copy(os.path.join(txn_dir, 'files'), self.path)
            os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
            if os.path.exists(os.path.join(self.path, 'txn.tmp')):
                shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def sync(self):
        """Initializes cache by fetching and reading all archive indicies
        """
        def add(id, size, csize):
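            # Bump the refcount of a known chunk, or register a new one with refcount 1.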
            try:
                count, size, csize = self.chunks[id]
                self.chunks[id] = count + 1, size, csize
            except KeyError:
                self.chunks[id] = 1, size, csize
        self.begin_txn()
        print('Initializing cache...')
        self.chunks.clear()
        unpacker = msgpack.Unpacker()
        repository = cache_if_remote(self.repository)
        for name, info in self.manifest.archives.items():
            archive_id = info[b'id']
            cdata = repository.get(archive_id)
            data = self.key.decrypt(archive_id, cdata)
            add(archive_id, len(data), len(cdata))
            archive = msgpack.unpackb(data)
            if archive[b'version'] != 1:
                raise Exception('Unknown archive metadata version')
            decode_dict(archive, (b'name',))
            print('Analyzing archive:', archive[b'name'])
            for key, chunk in zip(archive[b'items'], repository.get_many(archive[b'items'])):
                data = self.key.decrypt(key, chunk)
                add(key, len(data), len(chunk))
                unpacker.feed(data)
                for item in unpacker:
                    if b'chunks' in item:
                        for chunk_id, size, csize in item[b'chunks']:
                            add(chunk_id, size, csize)

    def add_chunk(self, id, data, stats):
        if not self.txn_active:
            self.begin_txn()
        if self.seen_chunk(id):
            return self.chunk_incref(id, stats)
        size = len(data)
        data = self.key.encrypt(data)
        csize = len(data)
        self.repository.put(id, data, wait=False)
        self.chunks[id] = (1, size, csize)
        stats.update(size, csize, True)
        return id, size, csize

    def seen_chunk(self, id):
        return self.chunks.get(id, (0, 0, 0))[0]

    def chunk_incref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        self.chunks[id] = (count + 1, size, csize)
        stats.update(size, csize, False)
        return id, size, csize

    def chunk_decref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        if count == 1:
            del self.chunks[id]
            self.repository.delete(id, wait=False)
            stats.update(-size, -csize, True)
        else:
            self.chunks[id] = (count - 1, size, csize)
            stats.update(-size, -csize, False)

    def file_known_and_unchanged(self, path_hash, st):
        if self.files is None:
            self._read_files()
        entry = self.files.get(path_hash)
        if (entry and entry[3] == st_mtime_ns(st)
            and entry[2] == st.st_size and entry[1] == st.st_ino):
            # reset entry age
            if entry[0] != 0:
                self.files[path_hash][0] = 0
            return entry[4]
        else:
            return None

    def memorize_file(self, path_hash, st, ids):
        if st.st_size > self.FILE_MIN_SIZE:
            # Entry: Age, inode, size, mtime, chunk ids
            mtime_ns = st_mtime_ns(st)
            self.files[path_hash] = 0, st.st_ino, st.st_size, mtime_ns, ids
            self._newest_mtime = max(self._newest_mtime, mtime_ns)
Code example #25
File: settings.py Project: synthead/pimostat
# FIXME: Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!

TEMPLATE_DEBUG = True
# FIXME
# ALLOWED_HOSTS = config.get("django", "allowed_hosts").replace(" ", "").split(
#     ",")


# Pimostat settings.

PIMOSTAT_TESTING_WITHOUT_HARDWARE = config.getboolean(
    "pimostat", "testing_without_hardware")
PIMOSTAT_SENSOR_UPDATE_FREQUENCY = config.getint(
    "pimostat", "sensor_update_frequency")


# Celery settings.

# Hack: disable Django debug mode when running under celery.
import sys
if "/usr/bin/celery" in sys.argv:
  DEBUG = False
else:
  DEBUG = True

CELERYBEAT_SCHEDULE = {
  "UpdateEnabledSensors": {
    "task": "pimostat.hardware_controller.UpdateEnabledSensors",
    # FIXME: Race condition if this is 1 second.
Code example #26
File: jd_OpenCard.py Project: WYEEE/JD-Script
# Read account parameters
try:
    configinfo = RawConfigParser()
    try:
        configinfo.read(pwd + "OpenCardConfig.ini", encoding="UTF-8")
    except Exception as e:
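        # The config file may carry a UTF-8 BOM that trips up the parser;
        # strip it by round-tripping through utf-8-sig, then re-read.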
        with open(pwd + "OpenCardConfig.ini", "r", encoding="UTF-8") as config:
            getConfig = config.read().encode('utf-8').decode('utf-8-sig')
        with open(pwd + "OpenCardConfig.ini", "w", encoding="UTF-8") as config:
            config.write(getConfig)
        try:
            configinfo.read(pwd + "OpenCardConfig.ini", encoding="UTF-8")
        except:
            configinfo.read(pwd + "OpenCardConfig.ini", encoding="gbk")
    cookies = configinfo.get('main', 'JD_COOKIE')
    openCardBean = configinfo.getint('main', 'openCardBean')
    sleepNum = configinfo.getfloat('main', 'sleepNum')
    record = configinfo.getboolean('main', 'record')
    onlyRecord = configinfo.getboolean('main', 'onlyRecord')
    memory = configinfo.getboolean('main', 'memory')
    printlog = configinfo.getboolean('main', 'printlog')
    isRemoteSid = configinfo.getboolean('main', 'isRemoteSid')
    TG_BOT_TOKEN = configinfo.get('main', 'TG_BOT_TOKEN')
    TG_USER_ID = configinfo.get('main', 'TG_USER_ID')
    PUSH_PLUS_TOKEN = configinfo.get('main', 'PUSH_PLUS_TOKEN')
    TG_PROXY_IP = configinfo.get('main', 'TG_PROXY_IP')
    TG_PROXY_PORT = configinfo.get('main', 'TG_PROXY_PORT')
    TG_API_HOST = configinfo.get('main', 'TG_API_HOST')
    QYWX_AM = configinfo.get('main', 'QYWX_AM')
    Concurrent = configinfo.getboolean('main', 'Concurrent')
    BARK = configinfo.get('main', 'BARK')
Code example #27
File: tag_path.py Project: stemid/devops
                    path.pop(0)
                    vc_node = _entity
                    break

    return vc_node


args = parser.parse_args()

if args.config_file:
    config.readfp(args.config_file)

if args.verbose > 1:
    print('Connecting to https://{user}@{host}:{port}/sdk/vimServiceVersions.xml'.format(
        user=config.get('vcenter', 'username'),
        port=config.getint('vcenter', 'port'),
        host=config.get('vcenter', 'hostname')
    ))

# Disable warnings for insecure certificates
requests.packages.urllib3.disable_warnings()

# Workaround for GH issue #235, self-signed cert
try:
    import ssl
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.verify_mode = ssl.CERT_NONE
except:
    import ssl
    context = ssl.create_default_context()
    context.verify_mode = ssl.CERT_NONE
Code example #28
File: endtoend_test.py Project: danBLA/fuglu
class DKIMTestCase(unittest.TestCase):

    """DKIM Sig Test"""

    FUGLU_HOST = "127.0.0.1"
    FUGLU_PORT = 7731
    DUMMY_PORT = 7732
    FUGLUCONTROL_PORT = 7733

    def setUp(self):

        k = ''
        for line in open(TESTDATADIR + '/dkim/testfuglu.org.public'):
            if line.startswith('---'):
                continue
            k = k + line.strip()
        record = "v=DKIM1; k=rsa; p=%s" % k
        fuglu.lib.patcheddkimlib.dnstxt = mock.Mock(return_value=record)

        self.config = RawConfigParser()
        self.config.read([TESTDATADIR + '/endtoendtest.conf'])
        self.config.set('main', 'incomingport', str(DKIMTestCase.FUGLU_PORT))
        self.config.set('main', 'outgoinghost', str(DKIMTestCase.FUGLU_HOST))
        self.config.set('main', 'outgoingport', str(DKIMTestCase.DUMMY_PORT))
        self.config.set(
            'main', 'controlport', str(DKIMTestCase.FUGLUCONTROL_PORT))
        guess_clamav_socket(self.config)

        # init core
        self.mc = MainController(self.config)

        # start listening smtp dummy server to get fuglus answer
        self.smtp = DummySMTPServer(self.config, self.config.getint(
            'main', 'outgoingport'), DKIMTestCase.FUGLU_HOST)
        dkdss = threading.Thread(target = self.smtp.serve, args = ())
        dkdss.daemon = True
        dkdss.start()

        # start fuglu's listening server
        fls = threading.Thread(target = self.mc.startup, args = ())
        fls.daemon = True
        fls.start()

    def tearDown(self):
        self.mc.shutdown()
        self.smtp.shutdown()

    def testDKIM(self):
        # give fuglu time to start listener
        time.sleep(1)
        inputfile = TESTDATADIR + '/helloworld.eml'
        msgstring = open(inputfile, 'r').read()

        dkimheader = sign(msgstring, 'whatever', 'testfuglu.org', open(
            TESTDATADIR + '/dkim/testfuglu.org.private').read(), include_headers=['From', 'To'])
        signedcontent = dkimheader + msgstring
        logbuffer = StringIO()
        self.assertTrue(verify(signedcontent, debuglog=logbuffer),
                        "Failed DKIM verification immediately after signing %s" % logbuffer.getvalue())

        # send test message
        try:
            smtpclient = smtplib.SMTP('127.0.0.1', DKIMTestCase.FUGLU_PORT)
        except Exception as e:
            self.fail("Could not connect to fuglu on port %s : %s" %
                      (DKIMTestCase.FUGLU_PORT, str(e)))
        # smtpServer.set_debuglevel(1)
        smtpclient.helo('test.dkim')

        smtpclient.sendmail(
            '*****@*****.**', '*****@*****.**', signedcontent)

        smtpclient.quit()

        # verify the smtp server stored the file correctly
        tmpfile = self.smtp.tempfilename
        self.assertTrue(tmpfile is not None, 'Send to dummy smtp server failed')

        result = open(tmpfile, 'r').read()
        logbuffer = StringIO()
        verify_ok = verify(result, debuglog=logbuffer)
        self.assertTrue(
            verify_ok, "Failed DKIM verification: %s" % logbuffer.getvalue())
Code example #29
    def bootstrap(self,
                  config_file: Optional[str] = None,
                  force_config: bool = False,
                  ns: Optional[Dict[str, Any]] = None) -> None:
        if config_file is None:
            config_file = self.default_config_file
        collect: List[Manager.Module] = []
        remotes: List[Tuple[Callable[..., Any], Tuple[Any, ...]]] = []
        self.info(
            'Bootstrap for paths {paths}'.format(paths=';'.join(self.paths)))
        for path in self.paths:
            if not os.path.isdir(path):
                self.verbose(f'{path}: not a directory')
                continue
            seen: Set[str] = set()
            files = os.listdir(path)
            cfg: Optional[RawConfigParser]
            if config_file != '-' and config_file in files:
                cfg = RawConfigParser()
                cfg.read(os.path.join(path, config_file))
                self.verbose(f'{path}: Using configuration file {config_file}')
            else:
                cfg = None
                self.verbose(f'{path}: No separate configuration file found')
                if force_config:
                    continue
            sources = {}
            binaries = {}
            nfiles = []
            for fname in [_f for _f in files if not _f.startswith('_')]:
                (base, ext) = os.path.splitext(fname)
                if ext == '.py':
                    sources[base] = fname
                    nfiles.append(fname)
                elif ext in ('.pyc', '.pyo'):
                    binaries[base] = fname
            for binary in binaries:
                if binary not in sources:
                    nfiles.append(binaries[binary])
            for fname in nfiles:
                (base, ext) = os.path.splitext(fname)
                if base not in seen and (not force_config or cast(
                        RawConfigParser, cfg).has_section(base)):
                    m = self.__import(fname, [path], ns)
                    magic = '__aps__'
                    if m is None:
                        self.warning(
                            f'{path}: {base} skipped as it is unparsable')
                    elif magic not in m:
                        self.verbose(
                            f'{path}: Skip {base} as no {magic} attribute is found'
                        )
                    else:
                        ctrl = m[magic]
                        if cfg is not None and cfg.has_section(base):
                            self.debug(
                                f'{path}: {base} configuration is modified through config file'
                            )
                            for opt in cfg.options(base):
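                                # Coerce the raw string using the type of the
                                # module's default value for this option.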
                                try:
                                    t = type(ctrl[opt])
                                    val: Any
                                    if t is int:
                                        val = cfg.getint(base, opt)
                                    elif t is float:
                                        val = cfg.getfloat(base, opt)
                                    elif t is bool:
                                        val = cfg.getboolean(base, opt)
                                    else:
                                        val = cfg.get(base, opt)
                                except KeyError:
                                    val = cfg.get(base, opt)
                                ctrl[opt] = val
                        md = self.Module(base, os.path.join(path, fname), ctrl,
                                         m)
                        if not self.__valid(md.api):
                            self.info(
                                f'{path}: Version conflict for module {base}, disabled'
                            )
                        elif md.active:
                            collect.append(md)
                            self.info(f'{path}: Loaded module {base}')
                        else:
                            self.info(
                                f'{path}: module {base} is marked as inactive')
                    seen.add(base)
            if cfg is not None and cfg.has_section(self.remote_section):
                for opt in cfg.options(self.remote_section):
                    parts = opt.split('.')
                    if len(parts) == 2:
                        val = cfg.get(self.remote_section, opt)
                        if parts[0].lower() in ('xmlrpc', 'xml-rpc'):
                            mtch = self.remote_xmlrpc_parse.match(val)
                            if mtch is not None:
                                (protocol, host, port, path) = mtch.groups()
                                remotes.append(
                                    (self.register_remote_xmlrpc,
                                     (host, int(port[1:]) if port is not None
                                      else None, protocol, path)))
                            else:
                                self.warning(
                                    '{path}: invalid value {value} for option {option} in section {section}'
                                    .format(path=os.path.join(
                                        path, config_file),
                                            value=val,
                                            option=opt,
                                            section=self.remote_section))
                    else:
                        self.warning(
                            '{path}: invalid option for section {section}'.
                            format(path=os.path.join(path, config_file),
                                   section=self.remote_section))
        backlog: List[Manager.Module] = []
        seen = set()

        def resolve(md: Manager.Module) -> None:
            incomplete = False
            if md.depend is not None:
                for d in md.depend:
                    if d not in seen:
                        incomplete = True
                        break
            if not incomplete:
                self.modules.append(md)
                seen.add(md.name)
                if md in backlog:
                    backlog.remove(md)
            elif md not in backlog:
                backlog.append(md)

        for module in sorted(collect, key=lambda m: (m.order, m.name)):
            resolve(module)
            for module in backlog[:]:
                resolve(module)
        cnt = len(backlog) + 1
        while cnt > len(backlog):
            cnt = len(backlog)
            for module in backlog[:]:
                resolve(module)
        self.modules += backlog
        for module in self.modules:
            self.info(f'Adding module {module.name}')
            module.bootstrap()
            self.__load(module.m, module.load, module.path)
        for (method, args) in remotes:
            rem = method(*args)
            self.info(f'Registered remote module {rem.address}')
        self.info('Bootstrapping finished')
Code example #30
    def getint(self, section, option, default=None, *args, **kwargs):
        if self.has_option(section, option) or not isinstance(default, int):
            return RawConfigParser.getint(self, section, option)
        else:
            return default
Code example #31
File: helpers.py Project: rongz609/cloud-init
    def getint(self, section, option):
        if not self.has_option(section, option):
            return self.DEF_INT
        return RawConfigParser.getint(self, section, option)
Code example #32
        log.info("输出日志到:" + colored(os.path.abspath(log_file), attrs=['bold', ]))
        log_file_handler = logging.FileHandler(log_file, mode='a', encoding='utf-8')
        log_file_handler.setFormatter(utils.TrimColorFormatter(config.get('LOG', 'FORMAT')))
        log.addHandler(log_file_handler)
except Exception as exception:
    print(colored(text="程序初始化错误:" + str(exception), color='red', on_color='on_green', attrs=('bold', )))
    os.system("PAUSE")
    sys.exit(1)

"""
**************************************************************************
********** 初始化:读取配置文件,初始化全局变量,做连接到直播间前的准备 *********
**************************************************************************
"""
# 初始化全局变量
room_id = config.getint('LIVE', 'ROOM_ID')
verify = Verify(sessdata=config.get('USER', 'SESSDATA'), csrf=config.get('USER', 'BILIBILI_JCT'))
room = live.LiveDanmaku(room_display_id=room_id, verify=verify)
ds = danmaku_sender.DanmakuSender(room_id=room_id, verify=verify, enable=config.getboolean('DANMAKU', 'ENABLE'))
scheduler = BackgroundScheduler()
turing = TuringAI(api_url=config.get('TURING_AI', 'API_URL'),
                  api_keys=config.get('TURING_AI', 'API_KEYS').split(','),
                  request_body=config.get('TURING_AI', 'REQUEST_FORMAT'),
                  enable=config.getboolean('TURING_AI', 'ENABLE'))
# Query the API for the data needed during initialization
# TODO: logged-in user xxx
room_info = live.get_room_info(room_id, verify)
isStreaming = room_info['room_info']['live_status'] == 1
up_name = room_info['anchor_info']['base_info']['uname']
fan_medal = room_info['anchor_info']['medal_info']['medal_name']
log.info("连接到 [" + up_name + "] 的直播间 [" + str(room_id) + "] ,当前直播状态:" + colored("直播中" if isStreaming else "未开播", attrs=['bold', ]))
Code example #33
File: cache.py Project: tgharold/borg
class Cache:
    """Client Side cache
    """
    class RepositoryReplay(Error):
        """Cache is newer than repository, refusing to continue"""

    class CacheInitAbortedError(Error):
        """Cache initialization aborted"""

    class RepositoryAccessAborted(Error):
        """Repository access aborted"""

    class EncryptionMethodMismatch(Error):
        """Repository encryption method changed since last acccess, refusing to continue
        """

    def __init__(self, repository, key, manifest, path=None, sync=True, do_files=False, warn_if_unencrypted=True):
        self.lock = None
        self.timestamp = None
        self.txn_active = False
        self.repository = repository
        self.key = key
        self.manifest = manifest
        self.path = path or os.path.join(get_cache_dir(), hexlify(repository.id).decode('ascii'))
        self.do_files = do_files
        # Warn user before sending data to a never seen before unencrypted repository
        if not os.path.exists(self.path):
            if warn_if_unencrypted and isinstance(key, PlaintextKey):
                if not self._confirm('Warning: Attempting to access a previously unknown unencrypted repository',
                                     'BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK'):
                    raise self.CacheInitAbortedError()
            self.create()
        self.open()
        # Warn user before sending data to a relocated repository
        if self.previous_location and self.previous_location != repository._location.canonical_path():
            msg = 'Warning: The repository at location {} was previously located at {}'.format(repository._location.canonical_path(), self.previous_location)
            if not self._confirm(msg, 'BORG_RELOCATED_REPO_ACCESS_IS_OK'):
                raise self.RepositoryAccessAborted()

        if sync and self.manifest.id != self.manifest_id:
            # If repository is older than the cache something fishy is going on
            if self.timestamp and self.timestamp > manifest.timestamp:
                raise self.RepositoryReplay()
            # Make sure an encrypted repository has not been swapped for an unencrypted repository
            if self.key_type is not None and self.key_type != str(key.TYPE):
                raise self.EncryptionMethodMismatch()
            self.sync()
            self.commit()

    def __del__(self):
        self.close()

    def _confirm(self, message, env_var_override=None):
        print(message, file=sys.stderr)
        if env_var_override and os.environ.get(env_var_override):
            print("Yes (From {})".format(env_var_override))
            return True
        if not sys.stdin.isatty():
            return False
        try:
            answer = input('Do you want to continue? [yN] ')
        except EOFError:
            return False
        return answer and answer in 'Yy'

    def create(self):
        """Create a new empty cache at `self.path`
        """
        os.makedirs(self.path)
        with open(os.path.join(self.path, 'README'), 'w') as fd:
            fd.write('This is a Borg cache')
        config = RawConfigParser()
        config.add_section('cache')
        config.set('cache', 'version', '1')
        config.set('cache', 'repository', hexlify(self.repository.id).decode('ascii'))
        config.set('cache', 'manifest', '')
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            config.write(fd)
        ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8'))
        os.makedirs(os.path.join(self.path, 'chunks.archive.d'))
        with open(os.path.join(self.path, 'files'), 'wb') as fd:
            pass  # empty file

    def destroy(self):
        """destroy the cache at `self.path`
        """
        self.close()
        os.remove(os.path.join(self.path, 'config'))  # kill config first
        shutil.rmtree(self.path)

    def _do_open(self):
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if self.config.getint('cache', 'version') != 1:
            raise Exception('%s does not look like a Borg cache' % self.path)
        self.id = self.config.get('cache', 'repository')
        self.manifest_id = unhexlify(self.config.get('cache', 'manifest'))
        self.timestamp = self.config.get('cache', 'timestamp', fallback=None)
        self.key_type = self.config.get('cache', 'key_type', fallback=None)
        self.previous_location = self.config.get('cache', 'previous_location', fallback=None)
        self.chunks = ChunkIndex.read(os.path.join(self.path, 'chunks').encode('utf-8'))
        self.files = None

    def open(self):
        if not os.path.isdir(self.path):
            raise Exception('%s Does not look like a Borg cache' % self.path)
        self.lock = UpgradableLock(os.path.join(self.path, 'lock'), exclusive=True).acquire()
        self.rollback()

    def close(self):
        if self.lock:
            self.lock.release()
            self.lock = None

    def _read_files(self):
        self.files = {}
        self._newest_mtime = 0
        with open(os.path.join(self.path, 'files'), 'rb') as fd:
            u = msgpack.Unpacker(use_list=True)
            while True:
                data = fd.read(64 * 1024)
                if not data:
                    break
                u.feed(data)
                for path_hash, item in u:
                    item[0] += 1
                    # in the end, this takes about 240 Bytes per file
                    self.files[path_hash] = msgpack.packb(item)

    def begin_txn(self):
        # Initialize transaction snapshot
        txn_dir = os.path.join(self.path, 'txn.tmp')
        os.mkdir(txn_dir)
        shutil.copy(os.path.join(self.path, 'config'), txn_dir)
        shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
        shutil.copy(os.path.join(self.path, 'files'), txn_dir)
        os.rename(os.path.join(self.path, 'txn.tmp'),
                  os.path.join(self.path, 'txn.active'))
        self.txn_active = True

    def commit(self):
        """Commit transaction
        """
        if not self.txn_active:
            return
        if self.files is not None:
            with open(os.path.join(self.path, 'files'), 'wb') as fd:
                for path_hash, item in self.files.items():
                    # Discard cached files with the newest mtime to avoid
                    # issues with filesystem snapshots and mtime precision
                    item = msgpack.unpackb(item)
                    if item[0] < 10 and bigint_to_int(item[3]) < self._newest_mtime:
                        msgpack.pack((path_hash, item), fd)
        self.config.set('cache', 'manifest', hexlify(self.manifest.id).decode('ascii'))
        self.config.set('cache', 'timestamp', self.manifest.timestamp)
        self.config.set('cache', 'key_type', str(self.key.TYPE))
        self.config.set('cache', 'previous_location', self.repository._location.canonical_path())
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            self.config.write(fd)
        self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8'))
        os.rename(os.path.join(self.path, 'txn.active'),
                  os.path.join(self.path, 'txn.tmp'))
        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def rollback(self):
        """Roll back partial and aborted transactions
        """
        # Remove partial transaction
        if os.path.exists(os.path.join(self.path, 'txn.tmp')):
            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        # Roll back active transaction
        txn_dir = os.path.join(self.path, 'txn.active')
        if os.path.exists(txn_dir):
            shutil.copy(os.path.join(txn_dir, 'config'), self.path)
            shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
            shutil.copy(os.path.join(txn_dir, 'files'), self.path)
            os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
            if os.path.exists(os.path.join(self.path, 'txn.tmp')):
                shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False
        self._do_open()

    def sync(self):
        """Re-synchronize chunks cache with repository.

        Maintains a directory with known backup archive indexes, so it only
        needs to fetch infos from repo and build a chunk index once per backup
        archive.
        If out of sync, missing archive indexes get added, outdated indexes
        get removed and a new master chunks index is built by merging all
        archive indexes.
        """
        archive_path = os.path.join(self.path, 'chunks.archive.d')

        def mkpath(id, suffix=''):
            id_hex = hexlify(id).decode('ascii')
            path = os.path.join(archive_path, id_hex + suffix)
            return path.encode('utf-8')

        def cached_archives():
            fns = os.listdir(archive_path)
            # filenames with 64 hex digits == 256bit
            return set(unhexlify(fn) for fn in fns if len(fn) == 64)

        def repo_archives():
            return set(info[b'id'] for info in self.manifest.archives.values())

        def cleanup_outdated(ids):
            for id in ids:
                os.unlink(mkpath(id))

        def add(chunk_idx, id, size, csize, incr=1):
            try:
                count, size, csize = chunk_idx[id]
                chunk_idx[id] = count + incr, size, csize
            except KeyError:
                chunk_idx[id] = incr, size, csize

        def fetch_and_build_idx(archive_id, repository, key):
            chunk_idx = ChunkIndex()
            cdata = repository.get(archive_id)
            data = key.decrypt(archive_id, cdata)
            add(chunk_idx, archive_id, len(data), len(cdata))
            archive = msgpack.unpackb(data)
            if archive[b'version'] != 1:
                raise Exception('Unknown archive metadata version')
            decode_dict(archive, (b'name',))
            unpacker = msgpack.Unpacker()
            for item_id, chunk in zip(archive[b'items'], repository.get_many(archive[b'items'])):
                data = key.decrypt(item_id, chunk)
                add(chunk_idx, item_id, len(data), len(chunk))
                unpacker.feed(data)
                for item in unpacker:
                    if not isinstance(item, dict):
                        print('Error: Did not get expected metadata dict - archive corrupted!')
                        continue
                    if b'chunks' in item:
                        for chunk_id, size, csize in item[b'chunks']:
                            add(chunk_idx, chunk_id, size, csize)
            fn = mkpath(archive_id)
            fn_tmp = mkpath(archive_id, suffix='.tmp')
            try:
                chunk_idx.write(fn_tmp)
            except Exception:
                os.unlink(fn_tmp)
            else:
                os.rename(fn_tmp, fn)
            return chunk_idx

        def lookup_name(archive_id):
            for name, info in self.manifest.archives.items():
                if info[b'id'] == archive_id:
                    return name

        def create_master_idx(chunk_idx):
            print('Synchronizing chunks cache...')
            cached_ids = cached_archives()
            archive_ids = repo_archives()
            print('Archives: %d, w/ cached Idx: %d, w/ outdated Idx: %d, w/o cached Idx: %d.' % (
                len(archive_ids), len(cached_ids),
                len(cached_ids - archive_ids), len(archive_ids - cached_ids), ))
            # deallocates old hashindex, creates empty hashindex:
            chunk_idx.clear()
            cleanup_outdated(cached_ids - archive_ids)
            if archive_ids:
                chunk_idx = None
                for archive_id in archive_ids:
                    archive_name = lookup_name(archive_id)
                    if archive_id in cached_ids:
                        archive_chunk_idx_path = mkpath(archive_id)
                        print("Reading cached archive chunk index for %s ..." % archive_name)
                        archive_chunk_idx = ChunkIndex.read(archive_chunk_idx_path)
                    else:
                        print('Fetching and building archive index for %s ...' % archive_name)
                        archive_chunk_idx = fetch_and_build_idx(archive_id, repository, self.key)
                    print("Merging into master chunks index ...")
                    if chunk_idx is None:
                        # we just use the first archive's idx as starting point,
                        # to avoid growing the hash table from 0 size and also
                        # to save 1 merge call.
                        chunk_idx = archive_chunk_idx
                    else:
                        chunk_idx.merge(archive_chunk_idx)
            print('Done.')
            return chunk_idx

        def legacy_cleanup():
            """bring old cache dirs into the desired state (cleanup and adapt)"""
            try:
                os.unlink(os.path.join(self.path, 'chunks.archive'))
            except:
                pass
            try:
                os.unlink(os.path.join(self.path, 'chunks.archive.tmp'))
            except:
                pass
            try:
                os.mkdir(archive_path)
            except:
                pass

        self.begin_txn()
        repository = cache_if_remote(self.repository)
        legacy_cleanup()
        self.chunks = create_master_idx(self.chunks)

    def add_chunk(self, id, data, stats):
        if not self.txn_active:
            self.begin_txn()
        size = len(data)
        if self.seen_chunk(id, size):
            return self.chunk_incref(id, stats)
        data = self.key.encrypt(data)
        csize = len(data)
        self.repository.put(id, data, wait=False)
        self.chunks[id] = (1, size, csize)
        stats.update(size, csize, True)
        return id, size, csize

    def seen_chunk(self, id, size=None):
        refcount, stored_size, _ = self.chunks.get(id, (0, None, None))
        if size is not None and stored_size is not None and size != stored_size:
            # we already have a chunk with that id, but different size.
            # this is either a hash collision (unlikely) or corruption or a bug.
            raise Exception("chunk has same id [%r], but different size (stored: %d new: %d)!" % (
                            id, stored_size, size))
        return refcount

    def chunk_incref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        self.chunks[id] = (count + 1, size, csize)
        stats.update(size, csize, False)
        return id, size, csize

    def chunk_decref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        if count == 1:
            del self.chunks[id]
            self.repository.delete(id, wait=False)
            stats.update(-size, -csize, True)
        else:
            self.chunks[id] = (count - 1, size, csize)
            stats.update(-size, -csize, False)

    def file_known_and_unchanged(self, path_hash, st):
        if not (self.do_files and stat.S_ISREG(st.st_mode)):
            return None
        if self.files is None:
            self._read_files()
        entry = self.files.get(path_hash)
        if not entry:
            return None
        entry = msgpack.unpackb(entry)
        if entry[2] == st.st_size and bigint_to_int(entry[3]) == st_mtime_ns(st) and entry[1] == st.st_ino:
            # reset entry age
            entry[0] = 0
            self.files[path_hash] = msgpack.packb(entry)
            return entry[4]
        else:
            return None

    def memorize_file(self, path_hash, st, ids):
        if not (self.do_files and stat.S_ISREG(st.st_mode)):
            return
        # Entry: Age, inode, size, mtime, chunk ids
        mtime_ns = st_mtime_ns(st)
        self.files[path_hash] = msgpack.packb((0, st.st_ino, st.st_size, int_to_bigint(mtime_ns), ids))
        self._newest_mtime = max(self._newest_mtime, mtime_ns)
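The add_chunk/seen_chunk/chunk_incref/chunk_decref methods above implement refcounted deduplication: the chunk index maps each chunk id to a (refcount, size, csize) triple, and chunk data is only encrypted and stored on the first reference. A minimal stand-alone sketch of that bookkeeping, using a plain dict in place of the real ChunkIndex (all names here are illustrative):

# Illustrative sketch only: a plain dict stands in for ChunkIndex,
# and no repository, encryption or stats are involved.
chunks = {}

def add_chunk(chunk_id, size, csize):
    count, _, _ = chunks.get(chunk_id, (0, None, None))
    if count:
        # Seen before: only the refcount grows, nothing is stored again.
        chunks[chunk_id] = (count + 1, size, csize)
    else:
        # First reference: this is where the data would be stored.
        chunks[chunk_id] = (1, size, csize)

def decref_chunk(chunk_id):
    count, size, csize = chunks[chunk_id]
    if count == 1:
        # Last reference gone: the stored data would be deleted.
        del chunks[chunk_id]
    else:
        chunks[chunk_id] = (count - 1, size, csize)

add_chunk(b'abc', 100, 60)
add_chunk(b'abc', 100, 60)
assert chunks[b'abc'] == (2, 100, 60)
decref_chunk(b'abc')
decref_chunk(b'abc')
assert b'abc' not in chunks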
Code example #34
        wp_post.content = p.body
        wp_post.thumbnail = uploaded_image['id']
        wp_post.post_status = "draft"  # TODO fix it
        wp.call(wp_posts.NewPost(wp_post))

if __name__ == "__main__":
    logger.debug("---------------------------------------------------------------------------------------")
    logger.debug("Running Yelly sniffer")

    logger.debug("Preparing configs from file %s", os.path.abspath(LOG_FILE))

    config = RawConfigParser()
    config.read(CONFIG_FILE)

    sites = config.get("SNIFF", "SITES")
    count = config.getint("SNIFF", "POSTS_PER_SITE")
    publish_method = config.get("SNIFF", "PUBLISH_METHOD")
    dump_folder = config.get("SNIFF", "DUMP_FOLDER")

    logger.debug("Start parsing...")
    posts = yelly.process_sites(sites.split(","), count)
    logger.debug("Parsing finished, Dumping posts to disk %s", os.path.abspath(dump_folder))
    for post in posts:
        if post.title and post.body:
            try:
                logger.debug("Dumping %s", post.title)
                tools.dump_to_file(os.path.join(dump_folder, u'{}.html'.format(post.title)), post.body)
                image_path = os.path.join(dump_folder, u'{}.jpeg'.format(post.title))
                network.download_file(post.image, image_path)
                post.image = image_path
            except BaseException as e:
Code example #35
class PiRelaysControl(BaseApplication):
    """
    Configuration file :
    - ./pi_relays_control.conf
    - /etc/pi_relays_control.conf
    - /etc/pi_relays_control/pi_relays_control.conf
    """
    _CONF_FILE_NAME = f'{pi_relays_control.__name__}.conf'

    def __init__(self):
        self._flask_app: Flask = None
        self._conf = RawConfigParser()
        self._db_engine = None
        self._session_factory = None
        RelaysBoard.create()
        super().__init__()

    @property
    def db_engine(self) -> Engine:
        return self._db_engine

    def init(self, parser, opts, args):
        pass

    def load_config(self):
        """
        Called by Gunicorn for configuration init
        """
        read_files = self._conf.read([
            self._CONF_FILE_NAME,
            os.path.join('/etc', self._CONF_FILE_NAME),
            os.path.join('/etc', pi_relays_control.__name__,
                         self._CONF_FILE_NAME),
        ])
        if not read_files:
            print('Failed to read any configuration file!', file=sys.stderr)

        gunicorn_conf = {
            'bind': '{}:{}'.format(
                self._conf.get('app', 'listen', fallback='127.0.0.1'),
                self._conf.get('app', 'port', fallback=8080)),
            'workers': self._conf.get('api', 'workers', fallback=1),
            'threads': self._conf.get('api', 'threads', fallback=16),
            'worker_class': self._conf.get('api', 'worker_class',
                                           fallback='gthread'),
            'timeout': self._conf.get('api', 'timeout', fallback=30),
            'on_exit': self._on_exit,
        }
        for key, value in gunicorn_conf.items():
            if key in self.cfg.settings and value is not None:
                self.cfg.set(key.lower(), value)

        for section in self._conf.sections():
            search = re.search(r'^relay\.(\w+)$', section)
            if search:
                relay_id = search.group(1)
                if not relay_id:
                    continue

                relay_conf_data = dict(self._conf[section])
                if not relay_conf_data['name'] or not relay_conf_data[
                        'gpio_channel']:
                    continue

                RelaysBoard.instance().add_relay(
                    Relay(id=relay_id, **relay_conf_data))

        RelaysBoard.instance().init()

        db_engine = self._conf.get('database',
                                   'engine',
                                   fallback='mysql+pymysql')
        db_host = self._conf.get('database', 'host', fallback='localhost')
        db_port = self._conf.getint('database', 'port', fallback=3306)
        db_name = self._conf.get('database', 'name')
        db_user = self._conf.get('database', 'user')
        db_password = self._conf.get('database', 'password')
        self._db_engine = sqlalchemy.create_engine(
            f'{db_engine}://{db_user}:{db_password}@{db_host}:{db_port}/{db_name}',
            echo=self._conf.getboolean('database', 'echo', fallback=False))
        init_database(self._db_engine)
        self._session_factory = sessionmaker(bind=self._db_engine)

    def load(self):
        """
        Called on app loaded, initializes WSGI application
        """
        if not self._flask_app:
            self._flask_app = Flask(pi_relays_control.__name__)
            self._flask_app.wsgi_app = ProxyFix(self._flask_app.wsgi_app)

            self._flask_app.add_url_rule('/',
                                         view_func=self.index,
                                         methods=['GET'])
            self._flask_app.add_url_rule('/on_relay_clicked/<relay_id>',
                                         view_func=self.on_relay_clicked,
                                         methods=['PUT'])
            self._flask_app.add_url_rule('/auth',
                                         view_func=self.auth,
                                         methods=['GET', 'POST'])
            self._flask_app.add_url_rule('/users',
                                         view_func=self.users,
                                         methods=['GET', 'POST'])

            self._flask_app.register_error_handler(AuthenticationException,
                                                   self._handle_auth_exception)

            self._flask_app.jinja_env.globals.update({
                'title':
                self._conf.get('app', 'title'),
                'version':
                pi_relays_control.__version__
            })

        return self._flask_app

    @staticmethod
    def _on_exit(__):
        """
        Triggered when Gunicorn app is shut down, we cleanup the relays board
        """
        RelaysBoard.instance().cleanup()

    def index(self):
        """
        Home page
        """
        user = self._check_user_auth()
        return render_template(
            'index.html',
            user=user,
            waiting_users=User.get_waiting_count(self._session_factory)
            if user.admin else None,
            relays=RelaysBoard.instance().relays.values())

    def auth(self):
        """
        Authentication page
        """
        new_auth_token = None
        auth_token = request.cookies.get('auth-token')
        user = None

        # User registration
        if request.method == 'POST':
            user = User.register(self._session_factory,
                                 request.headers.get('User-Agent'),
                                 request.remote_addr)
            new_auth_token = user.auth_token
            time.sleep(1)  # TODO enhance DDoS protection (captcha ?)

        # User loading
        elif auth_token:
            user = User.get_by_token(auth_token, self._session_factory)
            if user and user.access_granted:
                return redirect('/', code=302)

        response = make_response(render_template('auth.html', user=user))
        if new_auth_token:
            response.set_cookie('auth-token',
                                new_auth_token,
                                secure=request.scheme.lower() == 'https',
                                httponly=True)
        return response

    def users(self):
        user = self._check_user_auth()
        if not user.admin:
            return 'Forbidden', 403

        if request.method == 'POST':
            user_id = int(request.form.get('user_id'))
            if not user_id:
                return 'Unknown user id', 400

            action = request.form.get('action')
            if action == 'grant':
                User.grant_access(self._session_factory, user_id)
            elif action == 'revoke':
                User.revoke_access(self._session_factory, user_id)
            elif action == 'upgrade':
                User.upgrade(self._session_factory, user_id)
            elif action == 'downgrade':
                User.downgrade(self._session_factory, user_id)
            elif action == 'editName':
                User.set_name(self._session_factory, user_id,
                              request.form.get('user_name'))
            else:
                return 'Unknown action', 400

        session = self._session_factory()
        users = session.query(User).all()
        session.close()
        return render_template('users.html', user=user, users=users)

    def on_relay_clicked(self, relay_id):
        """
        User clicked on a relay button
        """
        session = self._session_factory()
        user = self._check_user_auth(session)
        user.last_access = datetime.now()
        session.commit()
        session.close()

        # TODO display and handle errors
        RelaysBoard.instance().trigger_relay(relay_id)
        return '', 204

    def _check_user_auth(self, session=None):
        """
        Ensures user is authenticated
        """
        auth_token = request.cookies.get('auth-token')
        if auth_token:
            kwargs = {}
            if session:
                kwargs['session'] = session
            else:
                kwargs['session_factory'] = self._session_factory

            user = User.get_by_token(auth_token, **kwargs)
            if user and user.access_granted:
                return user

        raise AuthenticationException()

    @staticmethod
    def _handle_auth_exception(__):
        return redirect('/auth', code=302)
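PiRelaysControl follows Gunicorn's custom-application pattern: subclass gunicorn.app.base.BaseApplication, override load_config() to push values into self.cfg, and override load() to return the WSGI callable. A minimal sketch of just that pattern, assuming gunicorn is installed; the option values and the trivial WSGI app are illustrative:

from gunicorn.app.base import BaseApplication

def hello_app(environ, start_response):
    # Trivial WSGI callable standing in for the Flask app above.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello\n']

class MiniApp(BaseApplication):
    def __init__(self, options=None):
        self.options = options or {}
        super().__init__()

    def load_config(self):
        # Gunicorn only accepts settings it already knows about.
        for key, value in self.options.items():
            if key in self.cfg.settings and value is not None:
                self.cfg.set(key.lower(), value)

    def load(self):
        return hello_app

if __name__ == '__main__':
    MiniApp({'bind': '127.0.0.1:8080', 'workers': 1}).run()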
Code example #36
def ini_to_dict(fname, section):
    """Convert *section* of .ini *config* to dictionary."""
    from configparser import RawConfigParser, NoOptionError

    config = RawConfigParser()
    config.read(fname)

    conf = {}
    conf['posttroll'] = {}
    posttroll = conf['posttroll']
    posttroll['topics'] = config.get(section, 'topics').split()
    try:
        nameservers = config.get(section, 'nameservers')
        nameservers = nameservers.split()
    except (NoOptionError, ValueError):
        nameservers = None
    posttroll['nameservers'] = nameservers

    try:
        addresses = config.get(section, 'addresses')
        addresses = addresses.split()
    except (NoOptionError, ValueError):
        addresses = None
    posttroll['addresses'] = addresses

    try:
        services = config.get(section, 'services')
        services = services.split()
    except (NoOptionError, ValueError):
        services = ""
    posttroll['services'] = services

    try:
        publish_port = config.get(section, 'publish_port')
    except NoOptionError:
        publish_port = 0
    posttroll['publish_port'] = publish_port

    posttroll['publish_topic'] = config.get(section, "publish_topic")

    conf['patterns'] = {section: {}}
    patterns = conf['patterns'][section]
    patterns['pattern'] = config.get(section, 'pattern')
    patterns['critical_files'] = config.get(section, 'critical_files')
    patterns['wanted_files'] = config.get(section, 'wanted_files')
    patterns['all_files'] = config.get(section, 'all_files')
    patterns['is_critical_set'] = False
    try:
        patterns['variable_tags'] = config.get(section,
                                               'variable_tags').split(',')
    except NoOptionError:
        patterns['variable_tags'] = []

    try:
        conf['time_tolerance'] = config.getint(section, "time_tolerance")
    except NoOptionError:
        conf['time_tolerance'] = 30
    try:
        # Seconds
        conf['timeliness'] = config.getint(section, "timeliness")
    except (NoOptionError, ValueError):
        conf['timeliness'] = 1200

    try:
        conf['num_files_premature_publish'] = \
            config.getint(section, "num_files_premature_publish")
    except (NoOptionError, ValueError):
        conf['num_files_premature_publish'] = -1

    try:
        conf['group_by_minutes'] = config.getint(section, 'group_by_minutes')
    except (NoOptionError, ValueError):
        pass

    try:
        kps = config.get(section, 'keep_parsed_keys')
        conf['keep_parsed_keys'] = kps.split()
    except NoOptionError:
        pass

    try:
        conf['providing_server'] = config.get(section, "providing_server")
    except (NoOptionError, ValueError):
        conf['providing_server'] = None

    try:
        conf['time_name'] = config.get(section, "time_name")
    except (NoOptionError, ValueError):
        conf['time_name'] = 'start_time'

    try:
        conf['check_existing_files_after_start'] = config.getboolean(
            section, "check_existing_files_after_start")
    except (NoOptionError, ValueError):
        conf['check_existing_files_after_start'] = False

    return conf
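A short usage sketch for ini_to_dict(), assuming the function above is in scope; the section name and option values are hypothetical, but they exercise both the required options and the defaulted ones:

# Hypothetical configuration; RawConfigParser does no interpolation,
# so the '%' characters in the pattern are safe.
with open('example.ini', 'w') as fh:
    fh.write("[hrpt]\n"
             "topics = /hrpt/raw\n"
             "publish_topic = /hrpt/gathered\n"
             "pattern = hrpt_{start_time:%Y%m%d_%H%M}.l0\n"
             "critical_files =\n"
             "wanted_files = :\n"
             "all_files = :\n"
             "timeliness = 600\n")

conf = ini_to_dict('example.ini', 'hrpt')
assert conf['posttroll']['topics'] == ['/hrpt/raw']
assert conf['timeliness'] == 600     # read from the file
assert conf['time_tolerance'] == 30  # default, option not present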
Code example #37
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'negativeentropy.wsgi.application'

# Email Server Setup
EMAIL_HOST = config.get('email', 'EMAIL_HOST')
EMAIL_PORT = config.getint('email', 'EMAIL_PORT')
EMAIL_HOST_USER = config.get('email', 'EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config.get('email', 'EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = config.getboolean('email', 'EMAIL_USE_TLS')

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
if config.get('db', 'ENGINE') != 'sqlite3':
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.' + config.get('db', 'ENGINE'),
            'NAME': config.get('db', 'NAME'),
            'USER': config.get('db', 'USER'),
            'PASSWORD': config.get('db', 'PASSWORD'),
            'HOST': config.get('db', 'HOST'),
            'PORT': config.get('db', 'PORT')
Code example #38
File: eds.py Project: christiansandberg/canopen
def import_eds(source, node_id):
    eds = RawConfigParser()
    eds.optionxform = str
    if hasattr(source, "read"):
        fp = source
    else:
        fp = open(source)
    try:
        # Python 3
        eds.read_file(fp)
    except AttributeError:
        # Python 2
        eds.readfp(fp)
    fp.close()
    od = objectdictionary.ObjectDictionary()

    if eds.has_section("FileInfo"):
        od.__edsFileInfo = {
            opt: eds.get("FileInfo", opt)
            for opt in eds.options("FileInfo")
        }

    if eds.has_section("Comments"):
        linecount = int(eds.get("Comments", "Lines"), 0)
        od.comments = '\n'.join([
            eds.get("Comments", "Line%i" % line)
            for line in range(1, linecount + 1)
        ])

    if not eds.has_section("DeviceInfo"):
        logger.warning(
            "eds file does not have a DeviceInfo section. This section is mandatory"
        )
    else:
        for rate in [10, 20, 50, 125, 250, 500, 800, 1000]:
            baudPossible = int(
                eds.get("DeviceInfo", "BaudRate_%i" % rate, fallback='0'), 0)
            if baudPossible != 0:
                od.device_information.allowed_baudrates.add(rate * 1000)

        for t, eprop, odprop in [
            (str, "VendorName", "vendor_name"),
            (int, "VendorNumber", "vendor_number"),
            (str, "ProductName", "product_name"),
            (int, "ProductNumber", "product_number"),
            (int, "RevisionNumber", "revision_number"),
            (str, "OrderCode", "order_code"),
            (bool, "SimpleBootUpMaster", "simple_boot_up_master"),
            (bool, "SimpleBootUpSlave", "simple_boot_up_slave"),
            (bool, "Granularity", "granularity"),
            (bool, "DynamicChannelsSupported", "dynamic_channels_supported"),
            (bool, "GroupMessaging", "group_messaging"),
            (int, "NrOfRXPDO", "nr_of_RXPDO"),
            (int, "NrOfTXPDO", "nr_of_TXPDO"),
            (bool, "LSS_Supported", "LSS_supported"),
        ]:
            try:
                if t in (int, bool):
                    setattr(od.device_information, odprop,
                            t(int(eds.get("DeviceInfo", eprop), 0)))
                elif t is str:
                    setattr(od.device_information, odprop,
                            eds.get("DeviceInfo", eprop))
            except NoOptionError:
                pass

    if eds.has_section("DeviceComissioning"):
        od.bitrate = int(eds.get("DeviceComissioning", "BaudRate")) * 1000
        od.node_id = int(eds.get("DeviceComissioning", "NodeID"), 0)

    for section in eds.sections():
        # Match dummy definitions
        match = re.match(r"^[Dd]ummy[Uu]sage$", section)
        if match is not None:
            for i in range(1, 8):
                key = "Dummy%04d" % i
                if eds.getint(section, key) == 1:
                    var = objectdictionary.Variable(key, i, 0)
                    var.data_type = i
                    var.access_type = "const"
                    od.add_object(var)

        # Match indexes
        match = re.match(r"^[0-9A-Fa-f]{4}$", section)
        if match is not None:
            index = int(section, 16)
            name = eds.get(section, "ParameterName")
            try:
                object_type = int(eds.get(section, "ObjectType"), 0)
            except NoOptionError:
                # DS306 4.6.3.2 object description
                # If the keyword ObjectType is missing, this is regarded as
                # "ObjectType=0x7" (=VAR).
                object_type = VAR
            try:
                storage_location = eds.get(section, "StorageLocation")
            except NoOptionError:
                storage_location = None

            if object_type in (VAR, DOMAIN):
                var = build_variable(eds, section, node_id, index)
                od.add_object(var)
            elif object_type == ARR and eds.has_option(section,
                                                       "CompactSubObj"):
                arr = objectdictionary.Array(name, index)
                last_subindex = objectdictionary.Variable(
                    "Number of entries", index, 0)
                last_subindex.data_type = objectdictionary.UNSIGNED8
                arr.add_member(last_subindex)
                arr.add_member(build_variable(eds, section, node_id, index, 1))
                arr.storage_location = storage_location
                od.add_object(arr)
            elif object_type == ARR:
                arr = objectdictionary.Array(name, index)
                arr.storage_location = storage_location
                od.add_object(arr)
            elif object_type == RECORD:
                record = objectdictionary.Record(name, index)
                record.storage_location = storage_location
                od.add_object(record)

            continue

        # Match subindexes
        match = re.match(r"^([0-9A-Fa-f]{4})[S|s]ub([0-9A-Fa-f]+)$", section)
        if match is not None:
            index = int(match.group(1), 16)
            subindex = int(match.group(2), 16)
            entry = od[index]
            if isinstance(entry,
                          (objectdictionary.Record, objectdictionary.Array)):
                var = build_variable(eds, section, node_id, index, subindex)
                entry.add_member(var)

        # Match [index]Name
        match = re.match(r"^([0-9A-Fa-f]{4})Name", section)
        if match is not None:
            index = int(match.group(1), 16)
            num_of_entries = int(eds.get(section, "NrOfEntries"))
            entry = od[index]
            # For CompactSubObj, index 1 is where we find the variable
            src_var = od[index][1]
            for subindex in range(1, num_of_entries + 1):
                var = copy_variable(eds, section, subindex, src_var)
                if var is not None:
                    entry.add_member(var)

    return od
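A short usage sketch for import_eds(); the file name and node id are illustrative:

# 'slave.eds' is a hypothetical file; node_id is used when building
# variables from the EDS (e.g. for $NODEID-based default values).
od = import_eds('slave.eds', node_id=2)
print(od.device_information.vendor_name,
      od.device_information.product_name)
# od.bitrate and od.node_id are only set when the file contains a
# [DeviceComissioning] section.
print(od.bitrate, od.node_id)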
Code example #39
class MrxsFile(object):
    def __init__(self, filename):
        # Split filename
        dirname, ext = os.path.splitext(filename)
        if ext != '.mrxs':
            raise UnrecognizedFile

        # Parse slidedat
        self._slidedatfile = os.path.join(dirname, 'Slidedat.ini')
        self._dat = RawConfigParser()
        self._dat.optionxform = str
        try:
            with open(self._slidedatfile, 'rb') as fh:
                self._have_bom = (fh.read(len(UTF8_BOM)) == UTF8_BOM)
                if not self._have_bom:
                    fh.seek(0)
                self._dat.read_string(fh.read().decode('utf-8'))
        except IOError:
            raise UnrecognizedFile

        # Get file paths
        self._indexfile = os.path.join(
            dirname, self._dat.get(MRXS_HIERARCHICAL, 'INDEXFILE'))
        self._datafiles = [
            os.path.join(dirname, self._dat.get('DATAFILE', 'FILE_%d' % i))
            for i in range(self._dat.getint('DATAFILE', 'FILE_COUNT'))
        ]

        # Build levels
        self._make_levels()

    def _make_levels(self):
        self._levels = {}
        self._level_list = []
        layer_count = self._dat.getint(MRXS_HIERARCHICAL, 'NONHIER_COUNT')
        for layer_id in range(layer_count):
            level_count = self._dat.getint(MRXS_HIERARCHICAL,
                                           'NONHIER_%d_COUNT' % layer_id)
            for level_id in range(level_count):
                level = MrxsNonHierLevel(self._dat, layer_id, level_id,
                                         len(self._level_list))
                self._levels[(level.layer_name, level.name)] = level
                self._level_list.append(level)

    @classmethod
    def _read_int32(cls, f):
        buf = f.read(4)
        if len(buf) != 4:
            raise IOError('Short read')
        return struct.unpack('<i', buf)[0]

    @classmethod
    def _assert_int32(cls, f, value):
        v = cls._read_int32(f)
        if v != value:
            raise ValueError('%d != %d' % (v, value))

    def _get_data_location(self, record):
        with open(self._indexfile, 'rb') as fh:
            fh.seek(MRXS_NONHIER_ROOT_OFFSET)
            # seek to record
            table_base = self._read_int32(fh)
            fh.seek(table_base + record * 4)
            # seek to list head
            list_head = self._read_int32(fh)
            fh.seek(list_head)
            # seek to data page
            self._assert_int32(fh, 0)
            page = self._read_int32(fh)
            fh.seek(page)
            # check pagesize
            self._assert_int32(fh, 1)
            # read rest of prologue
            self._read_int32(fh)
            self._assert_int32(fh, 0)
            self._assert_int32(fh, 0)
            # read values
            position = self._read_int32(fh)
            size = self._read_int32(fh)
            fileno = self._read_int32(fh)
            return (self._datafiles[fileno], position, size)

    def _zero_record(self, record):
        path, offset, length = self._get_data_location(record)
        with open(path, 'r+b') as fh:
            fh.seek(0, 2)
            do_truncate = (fh.tell() == offset + length)
            if DEBUG:
                if do_truncate:
                    print('Truncating', path, 'to', offset)
                else:
                    print('Zeroing', path, 'at', offset, 'for', length)
            fh.seek(offset)
            buf = fh.read(len(JPEG_SOI))
            if buf != JPEG_SOI:
                raise IOError('Unexpected data in nonhier image')
            if do_truncate:
                fh.truncate(offset)
            else:
                fh.seek(offset)
                fh.write(b'\0' * length)

    def _delete_index_record(self, record):
        if DEBUG:
            print('Deleting record', record)
        with open(self._indexfile, 'r+b') as fh:
            entries_to_move = len(self._level_list) - record - 1
            if entries_to_move == 0:
                return
            # get base of table
            fh.seek(MRXS_NONHIER_ROOT_OFFSET)
            table_base = self._read_int32(fh)
            # read tail of table
            fh.seek(table_base + (record + 1) * 4)
            buf = fh.read(entries_to_move * 4)
            if len(buf) != entries_to_move * 4:
                raise IOError('Short read')
            # overwrite the target record
            fh.seek(table_base + record * 4)
            fh.write(buf)

    def _hier_keys_for_level(self, level):
        ret = []
        for k, _ in self._dat.items(MRXS_HIERARCHICAL):
            if k == level.key_prefix or k.startswith(level.key_prefix + '_'):
                ret.append(k)
        return ret

    def _rename_section(self, old, new):
        if self._dat.has_section(old):
            if DEBUG:
                print('[%s] -> [%s]' % (old, new))
            self._dat.add_section(new)
            for k, v in self._dat.items(old):
                self._dat.set(new, k, v)
            self._dat.remove_section(old)
        elif DEBUG:
            print('[%s] does not exist' % old)

    def _delete_section(self, section):
        if DEBUG:
            print('Deleting [%s]' % section)
        self._dat.remove_section(section)

    def _set_key(self, section, key, value):
        if DEBUG:
            prev = self._dat.get(section, key)
            print('[%s] %s: %s -> %s' % (section, key, prev, value))
        self._dat.set(section, key, value)

    def _rename_key(self, section, old, new):
        if DEBUG:
            print('[%s] %s -> %s' % (section, old, new))
        v = self._dat.get(section, old)
        self._dat.remove_option(section, old)
        self._dat.set(section, new, v)

    def _delete_key(self, section, key):
        if DEBUG:
            print('Deleting [%s] %s' % (section, key))
        self._dat.remove_option(section, key)

    def _write(self):
        buf = io.StringIO()
        self._dat.write(buf)
        with open(self._slidedatfile, 'wb') as fh:
            if self._have_bom:
                fh.write(UTF8_BOM)
            fh.write(buf.getvalue().replace('\n', '\r\n').encode('utf-8'))

    def delete_level(self, layer_name, level_name):
        level = self._levels[(layer_name, level_name)]
        record = level.record

        # Zero image data
        self._zero_record(record)

        # Delete pointer from nonhier table in index
        self._delete_index_record(record)

        # Remove slidedat keys
        for k in self._hier_keys_for_level(level):
            self._delete_key(MRXS_HIERARCHICAL, k)

        # Remove slidedat section
        self._delete_section(level.section)

        # Rename section and keys for subsequent levels in the layer
        prev_level = level
        for cur_level in self._level_list[record + 1:]:
            if cur_level.layer_id != prev_level.layer_id:
                break
            for k in self._hier_keys_for_level(cur_level):
                new_k = k.replace(cur_level.key_prefix, prev_level.key_prefix,
                                  1)
                self._rename_key(MRXS_HIERARCHICAL, k, new_k)
            self._set_key(MRXS_HIERARCHICAL, prev_level.section_key,
                          prev_level.section)
            self._rename_section(cur_level.section, prev_level.section)
            prev_level = cur_level

        # Update level count within layer
        count_k = 'NONHIER_%d_COUNT' % level.layer_id
        count_v = self._dat.getint(MRXS_HIERARCHICAL, count_k)
        self._set_key(MRXS_HIERARCHICAL, count_k, count_v - 1)

        # Write slidedat
        self._write()

        # Refresh metadata
        self._make_levels()
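A usage sketch for MrxsFile; the slide path and the layer/level names are illustrative and must match entries in the slide's Slidedat.ini:

# delete_level() zeroes the level's image data, drops its index record
# and rewrites Slidedat.ini, as implemented above.
mrxs = MrxsFile('slide.mrxs')
mrxs.delete_level('Scan data layer', 'ScanDataLayer_SlideBarcode')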
Code example #40
File: cache.py Project: AaronWebster/attic
class Cache(object):
  """Client Side cache"""

  class RepositoryReplay(Error):
    """Cache is newer than repository, refusing to continue"""

  class CacheInitAbortedError(Error):
    """Cache initialization aborted"""

  class RepositoryAccessAborted(Error):
    """Repository access aborted"""

  class EncryptionMethodMismatch(Error):
    """Repository encryption method changed since last acccess, refusing to continue"""

  def __init__(self,
               repository,
               key,
               manifest,
               path=None,
               sync=True,
               warn_if_unencrypted=True):
    self.lock = None
    self.timestamp = None
    self.txn_active = False
    self.repository = repository
    self.key = key
    self.manifest = manifest
    self.path = path or os.path.join(get_cache_dir(),
                                     hexlify(repository.id).decode('ascii'))
    # Warn user before sending data to a previously unseen, unencrypted repository
    if not os.path.exists(self.path):
      if warn_if_unencrypted and isinstance(key, PlaintextKey):
        if not self._confirm(
            'Warning: Attempting to access a previously unknown unencrypted repository',
            'ATTIC_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK'):
          raise self.CacheInitAbortedError()
      self.create()
    self.open()
    # Warn user before sending data to a relocated repository
    if self.previous_location and self.previous_location != repository._location.canonical_path(
    ):
      msg = ('Warning: The repository at location {} was previously located at '
             '{}').format(
          repository._location.canonical_path(), self.previous_location)
      if not self._confirm(msg, 'ATTIC_RELOCATED_REPO_ACCESS_IS_OK'):
        raise self.RepositoryAccessAborted()

    if sync and self.manifest.id != self.manifest_id:
      # If repository is older than the cache something fishy is going on
      if self.timestamp and self.timestamp > manifest.timestamp:
        raise self.RepositoryReplay()
      # Make sure an encrypted repository has not been swapped for an unencrypted repository
      if self.key_type is not None and self.key_type != str(key.TYPE):
        raise self.EncryptionMethodMismatch()
      self.sync()
      self.commit()

  def __del__(self):
    self.close()

  def _confirm(self, message, env_var_override=None):
    print(message, file=sys.stderr)
    if env_var_override and os.environ.get(env_var_override):
      print('Yes (From {})'.format(env_var_override))
      return True
    if not sys.stdin.isatty():
      return False
    try:
      answer = input('Do you want to continue? [yN] ')
    except EOFError:
      return False
    return answer and answer in 'Yy'

  def create(self):
    """Create a new empty cache at `path`"""
    os.makedirs(self.path)
    with open(os.path.join(self.path, 'README'), 'w') as fd:
      fd.write('This is an Attic cache')
    config = RawConfigParser()
    config.add_section('cache')
    config.set('cache', 'version', '1')
    config.set('cache', 'repository',
               hexlify(self.repository.id).decode('ascii'))
    config.set('cache', 'manifest', '')
    with open(os.path.join(self.path, 'config'), 'w') as fd:
      config.write(fd)
    ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8'))
    with open(os.path.join(self.path, 'files'), 'w') as fd:
      pass  # empty file

  def _do_open(self):
    self.config = RawConfigParser()
    self.config.read(os.path.join(self.path, 'config'))
    if self.config.getint('cache', 'version') != 1:
      raise Exception('%s does not look like an Attic cache' % self.path)
    self.id = self.config.get('cache', 'repository')
    self.manifest_id = unhexlify(self.config.get('cache', 'manifest'))
    self.timestamp = self.config.get('cache', 'timestamp', fallback=None)
    self.key_type = self.config.get('cache', 'key_type', fallback=None)
    self.previous_location = self.config.get(
        'cache', 'previous_location', fallback=None)
    self.chunks = ChunkIndex.read(
        os.path.join(self.path, 'chunks').encode('utf-8'))
    self.files = None

  def open(self):
    if not os.path.isdir(self.path):
      raise Exception('%s does not look like an Attic cache' % self.path)
    self.lock = UpgradableLock(
        os.path.join(self.path, 'config'), exclusive=True)
    self.rollback()

  def close(self):
    if self.lock:
      self.lock.release()
      self.lock = None

  def _read_files(self):
    self.files = {}
    self._newest_mtime = 0
    with open(os.path.join(self.path, 'files'), 'rb') as fd:
      u = msgpack.Unpacker(use_list=True)
      while True:
        data = fd.read(64 * 1024)
        if not data:
          break
        u.feed(data)
        for path_hash, item in u:
          item[0] += 1
          self.files[path_hash] = msgpack.packb(item)

  def begin_txn(self):
    # Initialize transaction snapshot
    txn_dir = os.path.join(self.path, 'txn.tmp')
    os.mkdir(txn_dir)
    shutil.copy(os.path.join(self.path, 'config'), txn_dir)
    shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
    shutil.copy(os.path.join(self.path, 'files'), txn_dir)
    os.rename(
        os.path.join(self.path, 'txn.tmp'),
        os.path.join(self.path, 'txn.active'))
    self.txn_active = True

  def commit(self):
    """Commit transaction"""
    if not self.txn_active:
      return
    if self.files is not None:
      with open(os.path.join(self.path, 'files'), 'wb') as fd:
        for path_hash, item in self.files.items():
          # Discard cached files with the newest mtime to avoid
          # issues with filesystem snapshots and mtime precision
          item = msgpack.unpackb(item)
          if item[0] < 10 and bigint_to_int(item[3]) < self._newest_mtime:
            msgpack.pack((path_hash, item), fd)
    self.config.set('cache', 'manifest',
                    hexlify(self.manifest.id).decode('ascii'))
    self.config.set('cache', 'timestamp', self.manifest.timestamp)
    self.config.set('cache', 'key_type', str(self.key.TYPE))
    self.config.set('cache', 'previous_location',
                    self.repository._location.canonical_path())
    with open(os.path.join(self.path, 'config'), 'w') as fd:
      self.config.write(fd)
    self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8'))
    os.rename(
        os.path.join(self.path, 'txn.active'),
        os.path.join(self.path, 'txn.tmp'))
    shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
    self.txn_active = False

  def rollback(self):
    """Roll back partial and aborted transactions"""
    # Remove partial transaction
    if os.path.exists(os.path.join(self.path, 'txn.tmp')):
      shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
    # Roll back active transaction
    txn_dir = os.path.join(self.path, 'txn.active')
    if os.path.exists(txn_dir):
      shutil.copy(os.path.join(txn_dir, 'config'), self.path)
      shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
      shutil.copy(os.path.join(txn_dir, 'files'), self.path)
      os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
      if os.path.exists(os.path.join(self.path, 'txn.tmp')):
        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
    self.txn_active = False
    self._do_open()

  def sync(self):
    """Initializes cache by fetching and reading all archive indicies"""

    def add(id, size, csize):
      try:
        count, size, csize = self.chunks[id]
        self.chunks[id] = count + 1, size, csize
      except KeyError:
        self.chunks[id] = 1, size, csize

    self.begin_txn()
    print('Initializing cache...')
    self.chunks.clear()
    unpacker = msgpack.Unpacker()
    repository = cache_if_remote(self.repository)
    for name, info in self.manifest.archives.items():
      archive_id = info[b'id']
      cdata = repository.get(archive_id)
      data = self.key.decrypt(archive_id, cdata)
      add(archive_id, len(data), len(cdata))
      archive = msgpack.unpackb(data)
      if archive[b'version'] != 1:
        raise Exception('Unknown archive metadata version')
      decode_dict(archive, (b'name',))
      print('Analyzing archive:', archive[b'name'])
      for key, chunk in zip(archive[b'items'],
                            repository.get_many(archive[b'items'])):
        data = self.key.decrypt(key, chunk)
        add(key, len(data), len(chunk))
        unpacker.feed(data)
        for item in unpacker:
          if b'chunks' in item:
            for chunk_id, size, csize in item[b'chunks']:
              add(chunk_id, size, csize)

  def add_chunk(self, id, data, stats):
    if not self.txn_active:
      self.begin_txn()
    if self.seen_chunk(id):
      return self.chunk_incref(id, stats)
    size = len(data)
    data = self.key.encrypt(data)
    csize = len(data)
    self.repository.put(id, data, wait=False)
    self.chunks[id] = (1, size, csize)
    stats.update(size, csize, True)
    return id, size, csize

  def seen_chunk(self, id):
    return self.chunks.get(id, (0, 0, 0))[0]

  def chunk_incref(self, id, stats):
    if not self.txn_active:
      self.begin_txn()
    count, size, csize = self.chunks[id]
    self.chunks[id] = (count + 1, size, csize)
    stats.update(size, csize, False)
    return id, size, csize

  def chunk_decref(self, id, stats):
    if not self.txn_active:
      self.begin_txn()
    count, size, csize = self.chunks[id]
    if count == 1:
      del self.chunks[id]
      self.repository.delete(id, wait=False)
      stats.update(-size, -csize, True)
    else:
      self.chunks[id] = (count - 1, size, csize)
      stats.update(-size, -csize, False)

  def file_known_and_unchanged(self, path_hash, st):
    if self.files is None:
      self._read_files()
    entry = self.files.get(path_hash)
    if not entry:
      return None
    entry = msgpack.unpackb(entry)
    if entry[2] == st.st_size and bigint_to_int(
        entry[3]) == st_mtime_ns(st) and entry[1] == st.st_ino:
      # reset entry age
      entry[0] = 0
      self.files[path_hash] = msgpack.packb(entry)
      return entry[4]
    else:
      return None

  def memorize_file(self, path_hash, st, ids):
    # Entry: Age, inode, size, mtime, chunk ids
    mtime_ns = st_mtime_ns(st)
    self.files[path_hash] = msgpack.packb(
        (0, st.st_ino, st.st_size, int_to_bigint(mtime_ns), ids))
    self._newest_mtime = max(self._newest_mtime, mtime_ns)
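begin_txn(), commit() and rollback() above implement a simple snapshot transaction: copy the state files into txn.tmp, atomically rename that directory to txn.active, then either discard the snapshot on commit or copy it back on rollback. A stripped-down sketch of the same pattern for a single hypothetical 'state' file:

import os
import shutil

def begin_txn(path):
    txn_dir = os.path.join(path, 'txn.tmp')
    os.mkdir(txn_dir)
    shutil.copy(os.path.join(path, 'state'), txn_dir)
    # The rename is atomic, so after a crash there is either no snapshot
    # at all or a complete 'txn.active' directory to roll back from.
    os.rename(txn_dir, os.path.join(path, 'txn.active'))

def commit(path):
    # The new state has already been written in place; discarding the
    # snapshot is what makes the transaction final.
    os.rename(os.path.join(path, 'txn.active'),
              os.path.join(path, 'txn.tmp'))
    shutil.rmtree(os.path.join(path, 'txn.tmp'))

def rollback(path):
    txn_dir = os.path.join(path, 'txn.active')
    if os.path.exists(txn_dir):
        # Restore the snapshotted state, then remove the snapshot.
        shutil.copy(os.path.join(txn_dir, 'state'), path)
        os.rename(txn_dir, os.path.join(path, 'txn.tmp'))
        shutil.rmtree(os.path.join(path, 'txn.tmp'))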
Code example #41
File: config.py Project: bgol/EDMarketConnector
class Config(object):

    OUT_MKT_EDDN = 1
    # OUT_MKT_BPC     = 2	# No longer supported
    OUT_MKT_TD = 4
    OUT_MKT_CSV = 8
    OUT_SHIP = 16
    # OUT_SHIP_EDS    = 16	# Replaced by OUT_SHIP
    # OUT_SYS_FILE    = 32	# No longer supported
    # OUT_STAT        = 64	# No longer available
    # OUT_SHIP_CORIOLIS = 128	# Replaced by OUT_SHIP
    OUT_STATION_ANY = OUT_MKT_EDDN | OUT_MKT_TD | OUT_MKT_CSV
    # OUT_SYS_EDSM      = 256	# Now a plugin
    # OUT_SYS_AUTO    = 512	# Now always automatic
    OUT_MKT_MANUAL = 1024
    OUT_SYS_EDDN = 2048
    OUT_SYS_DELAY = 4096

    if platform == 'darwin':

        def __init__(self):
            self.app_dir = join(
                NSSearchPathForDirectoriesInDomains(
                    NSApplicationSupportDirectory, NSUserDomainMask, True)[0],
                appname)
            if not isdir(self.app_dir):
                mkdir(self.app_dir)

            self.plugin_dir = join(self.app_dir, 'plugins')
            if not isdir(self.plugin_dir):
                mkdir(self.plugin_dir)

            self.internal_plugin_dir = getattr(
                sys, 'frozen', False) and normpath(
                    join(dirname(sys.executable), pardir, 'Library',
                         'plugins')) or join(dirname(__file__), 'plugins')

            self.default_journal_dir = join(
                NSSearchPathForDirectoriesInDomains(
                    NSApplicationSupportDirectory, NSUserDomainMask, True)[0],
                'Frontier Developments', 'Elite Dangerous')

            self.home = expanduser('~')

            self.respath = getattr(sys, 'frozen', False) and normpath(
                join(dirname(sys.executable), pardir,
                     'Resources')) or dirname(__file__)

            if not getattr(sys, 'frozen', False):
                # Don't use Python's settings if interactive
                self.identifier = 'uk.org.marginal.%s' % appname.lower()
                NSBundle.mainBundle().infoDictionary(
                )['CFBundleIdentifier'] = self.identifier
            else:
                self.identifier = NSBundle.mainBundle().bundleIdentifier()
            self.defaults = NSUserDefaults.standardUserDefaults()
            self.settings = dict(
                self.defaults.persistentDomainForName_(self.identifier)
                or {})  # make writeable

            # Check out_dir exists
            if not self.get('outdir') or not isdir(self.get('outdir')):
                self.set(
                    'outdir',
                    NSSearchPathForDirectoriesInDomains(
                        NSDocumentDirectory, NSUserDomainMask, True)[0])

        def get(self, key):
            val = self.settings.get(key)
            if val is None:
                return None
            elif isinstance(val, str):
                return str(val)
            elif hasattr(val, '__iter__'):
                return list(val)  # make writeable
            else:
                return None

        def getint(self, key):
            try:
                return int(self.settings.get(
                    key, 0))  # should already be int, but check by casting
            except:
                return 0

        def set(self, key, val):
            self.settings[key] = val

        def delete(self, key):
            self.settings.pop(key, None)

        def save(self):
            self.defaults.setPersistentDomain_forName_(self.settings,
                                                       self.identifier)
            self.defaults.synchronize()

        def close(self):
            self.save()
            self.defaults = None

    elif platform == 'win32':

        def __init__(self):

            self.app_dir = join(KnownFolderPath(FOLDERID_LocalAppData),
                                appname)
            if not isdir(self.app_dir):
                mkdir(self.app_dir)

            self.plugin_dir = join(self.app_dir, 'plugins')
            if not isdir(self.plugin_dir):
                mkdir(self.plugin_dir)

            self.internal_plugin_dir = join(
                dirname(
                    getattr(sys, 'frozen', False) and sys.executable
                    or __file__), u'plugins')

            # expanduser in Python 2 on Windows doesn't handle non-ASCII - http://bugs.python.org/issue13207
            self.home = KnownFolderPath(FOLDERID_Profile) or u'\\'

            journaldir = KnownFolderPath(FOLDERID_SavedGames)
            self.default_journal_dir = journaldir and join(
                journaldir, 'Frontier Developments', 'Elite Dangerous') or None

            self.respath = dirname(
                getattr(sys, 'frozen', False) and sys.executable or __file__)

            self.identifier = applongname

            self.hkey = HKEY()
            disposition = DWORD()
            if RegCreateKeyEx(HKEY_CURRENT_USER,
                              r'Software\Marginal\EDMarketConnector', 0, None,
                              0, KEY_ALL_ACCESS, None, ctypes.byref(self.hkey),
                              ctypes.byref(disposition)):
                raise Exception()

            # set WinSparkle defaults - https://github.com/vslavik/winsparkle/wiki/Registry-Settings
            edcdhkey = HKEY()
            if RegCreateKeyEx(HKEY_CURRENT_USER,
                              r'Software\EDCD\EDMarketConnector', 0, None, 0,
                              KEY_ALL_ACCESS, None, ctypes.byref(edcdhkey),
                              ctypes.byref(disposition)):
                raise Exception()

            sparklekey = HKEY()
            if not RegCreateKeyEx(
                    edcdhkey, 'WinSparkle', 0, None, 0, KEY_ALL_ACCESS, None,
                    ctypes.byref(sparklekey), ctypes.byref(disposition)):
                if disposition.value == REG_CREATED_NEW_KEY:
                    buf = ctypes.create_unicode_buffer('1')
                    RegSetValueEx(sparklekey, 'CheckForUpdates', 0, 1, buf,
                                  len(buf) * 2)
                buf = ctypes.create_unicode_buffer(str(update_interval))
                RegSetValueEx(sparklekey, 'UpdateInterval', 0, 1, buf,
                              len(buf) * 2)
                RegCloseKey(sparklekey)

            if not self.get('outdir') or not isdir(self.get('outdir')):
                self.set('outdir',
                         KnownFolderPath(FOLDERID_Documents) or self.home)

        def get(self, key):
            typ = DWORD()
            size = DWORD()
            if RegQueryValueEx(self.hkey, key, 0, ctypes.byref(typ), None,
                               ctypes.byref(size)) or typ.value not in [
                                   REG_SZ, REG_MULTI_SZ
                               ]:
                return None
            buf = ctypes.create_unicode_buffer(int(size.value / 2))
            if RegQueryValueEx(self.hkey, key, 0, ctypes.byref(typ), buf,
                               ctypes.byref(size)):
                return None
            elif typ.value == REG_MULTI_SZ:
                return [
                    x for x in ctypes.wstring_at(buf,
                                                 len(buf) - 2).split(u'\x00')
                ]
            else:
                return str(buf.value)

        def getint(self, key):
            typ = DWORD()
            size = DWORD(4)
            val = DWORD()
            if RegQueryValueEx(self.hkey, key, 0, ctypes.byref(typ),
                               ctypes.byref(val),
                               ctypes.byref(size)) or typ.value != REG_DWORD:
                return 0
            else:
                return val.value

        def set(self, key, val):
            if isinstance(val, str):
                buf = ctypes.create_unicode_buffer(val)
                RegSetValueEx(self.hkey, key, 0, REG_SZ, buf, len(buf) * 2)
            elif isinstance(val, numbers.Integral):
                RegSetValueEx(self.hkey, key, 0, REG_DWORD,
                              ctypes.byref(DWORD(val)), 4)
            elif hasattr(val, '__iter__'):  # iterable
                stringval = u'\x00'.join(
                    [str(x) or u' ' for x in val] +
                    [u''])  # null terminated non-empty strings
                buf = ctypes.create_unicode_buffer(stringval)
                RegSetValueEx(self.hkey, key, 0, REG_MULTI_SZ, buf,
                              len(buf) * 2)
            else:
                raise NotImplementedError()

        def delete(self, key):
            RegDeleteValue(self.hkey, key)

        def save(self):
            pass  # Redundant since registry keys are written immediately

        def close(self):
            RegCloseKey(self.hkey)
            self.hkey = None

    elif platform == 'linux':

        SECTION = 'config'

        def __init__(self):

            # http://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
            self.app_dir = join(
                getenv('XDG_DATA_HOME', expanduser('~/.local/share')), appname)
            if not isdir(self.app_dir):
                makedirs(self.app_dir)

            self.plugin_dir = join(self.app_dir, 'plugins')
            if not isdir(self.plugin_dir):
                mkdir(self.plugin_dir)

            self.internal_plugin_dir = join(dirname(__file__), 'plugins')

            self.default_journal_dir = None

            self.home = expanduser('~')

            self.respath = dirname(__file__)

            self.identifier = 'uk.org.marginal.%s' % appname.lower()

            self.filename = join(
                getenv('XDG_CONFIG_HOME', expanduser('~/.config')), appname,
                '%s.ini' % appname)
            if not isdir(dirname(self.filename)):
                makedirs(dirname(self.filename))

            self.config = RawConfigParser(comment_prefixes=('#', ))
            try:
                with codecs.open(self.filename, 'r') as h:
                    self.config.read_file(h)
            except:
                self.config.add_section(self.SECTION)

            if not self.get('outdir') or not isdir(self.get('outdir')):
                self.set('outdir', expanduser('~'))

        def get(self, key):
            try:
                val = self.config.get(self.SECTION, key)
                if u'\n' in val:  # list
                    # ConfigParser drops the last entry if blank, so we add a spurious ';' entry in set() and remove it here
                    assert val.split('\n')[-1] == ';', val.split('\n')
                    return [self._unescape(x) for x in val.split(u'\n')[:-1]]
                else:
                    return self._unescape(val)
            except:
                return None

        def getint(self, key):
            try:
                return self.config.getint(self.SECTION, key)
            except:
                return 0

        def set(self, key, val):
            if isinstance(val, bool):
                self.config.set(self.SECTION, key, val and '1' or '0')
            elif isinstance(val, str) or isinstance(val, numbers.Integral):
                self.config.set(self.SECTION, key, self._escape(val))
            elif hasattr(val, '__iter__'):  # iterable
                self.config.set(
                    self.SECTION, key,
                    u'\n'.join([self._escape(x) for x in val] + [u';']))
            else:
                raise NotImplementedError()

        def delete(self, key):
            self.config.remove_option(self.SECTION, key)

        def save(self):
            with codecs.open(self.filename, 'w', 'utf-8') as h:
                self.config.write(h)

        def close(self):
            self.save()
            self.config = None

        def _escape(self, val):
            return str(val).replace(u'\\', u'\\\\').replace(u'\n',
                                                            u'\\n').replace(
                                                                u';', u'\\;')

        def _unescape(self, val):
            chars = list(val)
            i = 0
            while i < len(chars):
                if chars[i] == '\\':
                    chars.pop(i)
                    if chars[i] == 'n':
                        chars[i] = '\n'
                i += 1
            return u''.join(chars)

    else:  # ???

        def __init__(self):
            raise NotImplementedError('Implement me')

    # Common

    def get_password(self, account):
        warnings.warn("password subsystem is no longer supported",
                      DeprecationWarning)

    def set_password(self, account, password):
        warnings.warn("password subsystem is no longer supported",
                      DeprecationWarning)

    def delete_password(self, account):
        warnings.warn("password subsystem is no longer supported",
                      DeprecationWarning)
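On Linux the Config class stores lists as newline-joined values with a trailing ';' sentinel, escaping backslashes, newlines and semicolons on the way in. A stand-alone round trip of that scheme; the two helpers mirror _escape()/_unescape() above:

def _escape(val):
    return str(val).replace('\\', '\\\\').replace('\n', '\\n').replace(';', '\\;')

def _unescape(val):
    chars = list(val)
    i = 0
    while i < len(chars):
        if chars[i] == '\\':
            chars.pop(i)          # drop the escape character
            if chars[i] == 'n':
                chars[i] = '\n'   # '\\n' encodes a newline
        i += 1
    return ''.join(chars)

for original in ['plain', 'semi;colon', 'multi\nline', 'back\\slash']:
    assert _unescape(_escape(original)) == original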
Code example #42
File: repository.py Project: swipswaps/attic
class Repository(object):
    """Filesystem based transactional key value store

    On disk layout:
    dir/README
    dir/config
    dir/data/<X / SEGMENTS_PER_DIR>/<X>
    dir/index.X
    dir/hints.X
    """
    DEFAULT_MAX_SEGMENT_SIZE = 5 * 1024 * 1024
    DEFAULT_SEGMENTS_PER_DIR = 10000

    class DoesNotExist(Error):
        """Repository {} does not exist"""

    class AlreadyExists(Error):
        """Repository {} already exists"""

    class InvalidRepository(Error):
        """{} is not a valid repository"""

    class CheckNeeded(Error):
        '''Inconsistency detected. Please run "attic check {}"'''

    def __init__(self, path, create=False):
        self.path = path
        self.io = None
        self.lock = None
        self.index = None
        self._active_txn = False
        if create:
            self.create(path)
        self.open(path)

    def __del__(self):
        self.close()

    def create(self, path):
        """Create a new empty repository at `path`
        """
        if os.path.exists(path) and (not os.path.isdir(path)
                                     or os.listdir(path)):
            raise self.AlreadyExists(path)
        if not os.path.exists(path):
            os.mkdir(path)
        with open(os.path.join(path, 'README'), 'w') as fd:
            fd.write('This is an Attic repository\n')
        os.mkdir(os.path.join(path, 'data'))
        config = RawConfigParser()
        config.add_section('repository')
        config.set('repository', 'version', '1')
        config.set('repository', 'segments_per_dir',
                   self.DEFAULT_SEGMENTS_PER_DIR)
        config.set('repository', 'max_segment_size',
                   self.DEFAULT_MAX_SEGMENT_SIZE)
        config.set('repository', 'id', hexlify(os.urandom(32)).decode('ascii'))
        with open(os.path.join(path, 'config'), 'w') as fd:
            config.write(fd)

    def get_index_transaction_id(self):
        indices = sorted(
            (int(name[6:]) for name in os.listdir(self.path)
             if name.startswith('index.') and name[6:].isdigit()))
        if indices:
            return indices[-1]
        else:
            return None

    def get_transaction_id(self):
        index_transaction_id = self.get_index_transaction_id()
        segments_transaction_id = self.io.get_segments_transaction_id()
        if index_transaction_id is not None and segments_transaction_id is None:
            raise self.CheckNeeded(self.path)
        # Attempt to automatically rebuild index if we crashed between commit
        # tag write and index save
        if index_transaction_id != segments_transaction_id:
            if index_transaction_id is not None and index_transaction_id > segments_transaction_id:
                replay_from = None
            else:
                replay_from = index_transaction_id
            self.replay_segments(replay_from, segments_transaction_id)
        return self.get_index_transaction_id()

    def open(self, path):
        self.path = path
        if not os.path.isdir(path):
            raise self.DoesNotExist(path)
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if 'repository' not in self.config.sections() or self.config.getint(
                'repository', 'version') != 1:
            raise self.InvalidRepository(path)
        self.lock = UpgradableLock(os.path.join(path, 'config'))
        self.max_segment_size = self.config.getint('repository',
                                                   'max_segment_size')
        self.segments_per_dir = self.config.getint('repository',
                                                   'segments_per_dir')
        self.id = unhexlify(self.config.get('repository', 'id').strip())
        self.io = LoggedIO(self.path, self.max_segment_size,
                           self.segments_per_dir)

    def close(self):
        if self.lock:
            if self.io:
                self.io.close()
            self.io = None
            self.lock.release()
            self.lock = None

    def commit(self):
        """Commit transaction
        """
        self.io.write_commit()
        self.compact_segments()
        self.write_index()
        self.rollback()

    def get_read_only_index(self, transaction_id):
        if transaction_id is None:
            return {}
        return NSIndex(
            os.path.join(self.path,
                         'index.%d' % transaction_id).encode('utf-8'),
            readonly=True)

    def get_index(self, transaction_id, do_cleanup=True):
        self._active_txn = True
        self.lock.upgrade()
        if transaction_id is None:
            self.index = NSIndex.create(
                os.path.join(self.path, 'index.tmp').encode('utf-8'))
            self.segments = {}
            self.compact = set()
        else:
            if do_cleanup:
                self.io.cleanup(transaction_id)
            shutil.copy(os.path.join(self.path, 'index.%d' % transaction_id),
                        os.path.join(self.path, 'index.tmp'))
            self.index = NSIndex(
                os.path.join(self.path, 'index.tmp').encode('utf-8'))
            hints = read_msgpack(
                os.path.join(self.path, 'hints.%d' % transaction_id))
            if hints[b'version'] != 1:
                raise ValueError('Unknown hints file version: %d' %
                                 hints[b'version'])
            self.segments = hints[b'segments']
            self.compact = set(hints[b'compact'])

    def write_index(self):
        hints = {
            b'version': 1,
            b'segments': self.segments,
            b'compact': list(self.compact)
        }
        transaction_id = self.io.get_segments_transaction_id()
        write_msgpack(os.path.join(self.path, 'hints.%d' % transaction_id),
                      hints)
        self.index.flush()
        os.rename(os.path.join(self.path, 'index.tmp'),
                  os.path.join(self.path, 'index.%d' % transaction_id))
        # Remove old indices
        current = '.%d' % transaction_id
        for name in os.listdir(self.path):
            if not name.startswith('index.') and not name.startswith('hints.'):
                continue
            if name.endswith(current):
                continue
            os.unlink(os.path.join(self.path, name))
        self.index = None

    def compact_segments(self):
        """Compact sparse segments by copying data into new segments
        """
        if not self.compact:
            return
        index_transaction_id = self.get_index_transaction_id()
        segments = self.segments
        for segment in sorted(self.compact):
            if self.io.segment_exists(segment):
                for tag, key, offset, data in self.io.iter_objects(
                        segment, include_data=True):
                    if tag == TAG_PUT and self.index.get(
                            key, (-1, -1)) == (segment, offset):
                        new_segment, offset = self.io.write_put(key, data)
                        self.index[key] = new_segment, offset
                        segments.setdefault(new_segment, 0)
                        segments[new_segment] += 1
                        segments[segment] -= 1
                    elif tag == TAG_DELETE:
                        if index_transaction_id is None or segment > index_transaction_id:
                            self.io.write_delete(key)
                assert segments[segment] == 0

        self.io.write_commit()
        for segment in sorted(self.compact):
            assert self.segments.pop(segment) == 0
            self.io.delete_segment(segment)
        self.compact = set()

    def replay_segments(self, index_transaction_id, segments_transaction_id):
        self.get_index(index_transaction_id, do_cleanup=False)
        for segment, filename in self.io.segment_iterator():
            if index_transaction_id is not None and segment <= index_transaction_id:
                continue
            if segment > segments_transaction_id:
                break
            self.segments[segment] = 0
            for tag, key, offset in self.io.iter_objects(segment):
                if tag == TAG_PUT:
                    try:
                        s, _ = self.index[key]
                        self.compact.add(s)
                        self.segments[s] -= 1
                    except KeyError:
                        pass
                    self.index[key] = segment, offset
                    self.segments[segment] += 1
                elif tag == TAG_DELETE:
                    try:
                        s, _ = self.index.pop(key)
                        self.segments[s] -= 1
                        self.compact.add(s)
                    except KeyError:
                        pass
                    self.compact.add(segment)
                elif tag == TAG_COMMIT:
                    continue
                else:
                    raise self.CheckNeeded(self.path)
            if self.segments[segment] == 0:
                self.compact.add(segment)
        self.write_index()
        self.rollback()

    def check(self, repair=False):
        """Check repository consistency

        This method verifies all segment checksums and makes sure
        the index is consistent with the data stored in the segments.
        """
        error_found = False

        def report_error(msg):
            nonlocal error_found
            error_found = True
            print(msg, file=sys.stderr)

        assert not self._active_txn
        try:
            transaction_id = self.get_transaction_id()
            current_index = self.get_read_only_index(transaction_id)
        except Exception:
            transaction_id = self.io.get_segments_transaction_id()
            current_index = None
        if transaction_id is None:
            transaction_id = self.get_index_transaction_id()
        if transaction_id is None:
            transaction_id = self.io.get_latest_segment()
        if repair:
            self.io.cleanup(transaction_id)
        segments_transaction_id = self.io.get_segments_transaction_id()
        self.get_index(None)
        for segment, filename in self.io.segment_iterator():
            if segment > transaction_id:
                continue
            try:
                objects = list(self.io.iter_objects(segment))
            except (IntegrityError, struct.error):
                report_error('Error reading segment {}'.format(segment))
                objects = []
                if repair:
                    self.io.recover_segment(segment, filename)
                    objects = list(self.io.iter_objects(segment))
            self.segments[segment] = 0
            for tag, key, offset in objects:
                if tag == TAG_PUT:
                    try:
                        s, _ = self.index[key]
                        self.compact.add(s)
                        self.segments[s] -= 1
                    except KeyError:
                        pass
                    self.index[key] = segment, offset
                    self.segments[segment] += 1
                elif tag == TAG_DELETE:
                    try:
                        s, _ = self.index.pop(key)
                        self.segments[s] -= 1
                        self.compact.add(s)
                    except KeyError:
                        pass
                    self.compact.add(segment)
                elif tag == TAG_COMMIT:
                    continue
                else:
                    report_error('Unexpected tag {} in segment {}'.format(
                        tag, segment))
        # We might need to add a commit tag if no committed segment is found
        if repair and segments_transaction_id is None:
            report_error(
                'Adding commit tag to segment {}'.format(transaction_id))
            self.io.segment = transaction_id + 1
            self.io.write_commit()
            self.io.close_segment()
        if current_index and not repair:
            if len(current_index) != len(self.index):
                report_error('Index object count mismatch. {} != {}'.format(
                    len(current_index), len(self.index)))
            elif current_index:
                for key, value in self.index.iteritems():
                    if current_index.get(key, (-1, -1)) != value:
                        report_error(
                            'Index mismatch for key {}. {} != {}'.format(
                                key, value, current_index.get(key, (-1, -1))))
        if repair:
            self.compact_segments()
            self.write_index()
        else:
            os.unlink(os.path.join(self.path, 'index.tmp'))
        self.rollback()
        return not error_found or repair

    def rollback(self):
        """
        """
        self.index = None
        self._active_txn = False

    def __len__(self):
        if not self.index:
            self.index = self.get_read_only_index(self.get_transaction_id())
        return len(self.index)

    def list(self, limit=None, marker=None):
        if not self.index:
            self.index = self.get_read_only_index(self.get_transaction_id())
        return [
            id_
            for id_, _ in islice(self.index.iteritems(marker=marker), limit)
        ]

    def get(self, id_):
        if not self.index:
            self.index = self.get_read_only_index(self.get_transaction_id())
        try:
            segment, offset = self.index[id_]
            return self.io.read(segment, offset, id_)
        except KeyError:
            raise self.DoesNotExist(self.path)

    def get_many(self, ids, is_preloaded=False):
        for id_ in ids:
            yield self.get(id_)

    def put(self, id, data, wait=True):
        if not self._active_txn:
            self.get_index(self.get_transaction_id())
        try:
            segment, _ = self.index[id]
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.segments.setdefault(segment, 0)
            self.compact.add(segment)
        except KeyError:
            pass
        segment, offset = self.io.write_put(id, data)
        self.segments.setdefault(segment, 0)
        self.segments[segment] += 1
        self.index[id] = segment, offset

    def delete(self, id, wait=True):
        if not self._active_txn:
            self.get_index(self.get_transaction_id())
        try:
            segment, offset = self.index.pop(id)
        except KeyError:
            raise self.DoesNotExist(self.path)
        self.segments[segment] -= 1
        self.compact.add(segment)
        segment = self.io.write_delete(id)
        self.compact.add(segment)
        self.segments.setdefault(segment, 0)

    def preload(self, ids):
        """Preload objects (only applies to remote repositories
Code example #43
        "mean" : config.getfloat('Bubbles', 'small_bubble.mean'),
        "deviation" : config.getfloat('Bubbles', 'small_bubble.deviation'),
        "stroke" : small_bubble_stroke,
        "fill" : small_bubble_fill
    }
    large_bubble_stroke = config.get('Bubbles', 'large_bubble.stroke')
    large_bubble_fill = config.get('Bubbles', 'large_bubble.fill')
    large_bubble = {
        "mean" : config.getfloat('Bubbles', 'large_bubble.mean'),
        "deviation" : config.getfloat('Bubbles', 'large_bubble.deviation'),
        "stroke" : large_bubble_stroke,
        "fill" : large_bubble_fill
    }
    min_bubble_size = config.getfloat('Bubbles', 'min_size')
    max_bubble_size = config.getfloat('Bubbles', 'max_size')
    max_large_bubbles = config.getfloat('Bubbles', 'max_large_bubbles')
    max_small_bubbles = config.getfloat('Bubbles', 'max_small_bubbles')
    timeout = config.getint('Bubbles', 'timeout')
    coaster_center = (coaster_radius, coaster_radius)
    drawing_size = (2*coaster_radius*mm, 2*coaster_radius*mm)

    outfile.write('<?xml version="1.0"?>')
    outfile.write('<svg width="%s" height="%s" version="%s">\n' % (drawing_size[0], drawing_size[1], svg_version))

    main()

    outfile.write('</svg>\n')
    outfile.close()
    sys.stdout.write("File written to %s\n" % filename)
    os.system("xdg-open %s &" % filename)
Code example #44
#!/usr/bin/env python3

import time
import os

from configparser import RawConfigParser
from influxdb import InfluxDBClient
from pymodbus.client.sync import ModbusSerialClient as ModbusClient

from growatt import Growatt

settings = RawConfigParser()
settings.read(os.path.dirname(os.path.realpath(__file__)) + '/solarmon.cfg')

interval = settings.getint('query', 'interval', fallback=1)
offline_interval = settings.getint('query', 'offline_interval', fallback=60)
error_interval = settings.getint('query', 'error_interval', fallback=60)

db_name = settings.get('influx', 'db_name', fallback='inverter')
measurement = settings.get('influx', 'measurement', fallback='inverter')

# Clients
print('Setup InfluxDB Client... ', end='')
influx = InfluxDBClient(host=settings.get('influx',
                                          'host',
                                          fallback='localhost'),
                        port=settings.getint('influx', 'port', fallback=8086),
                        username=settings.get('influx',
                                              'username',
                                              fallback=None),
                        password=settings.get('influx',
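
Every settings lookup above supplies a fallback, so solarmon.cfg only needs the keys that differ from the defaults. A small sketch of that behaviour with invented values:

# Invented solarmon.cfg fragment; missing keys use the fallbacks above.
from configparser import RawConfigParser

settings = RawConfigParser()
settings.read_string("""
[query]
interval = 5

[influx]
db_name = inverter
""")
assert settings.getint('query', 'interval', fallback=1) == 5
assert settings.getint('query', 'offline_interval', fallback=60) == 60
assert settings.get('influx', 'host', fallback='localhost') == 'localhost'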
Code example #45
File: regexbot.py Project: cequencer/ircbots
try:
	config.readfp(open(argv[1]))
except:
	try:
		config.readfp(open('regexbot.ini'))
	except Exception:
		print "Syntax:"
		print "  %s [config]" % argv[0]
		print ""
		print "If no configuration file is specified or there was an error, it will default to `regexbot.ini'."
		print "If there was a failure reading the configuration, it will display this message."
		exit(1)

# read config
SERVER = config.get('regexbot', 'server')
PORT = config.getint('regexbot', 'port')
IPV6 = config.getboolean('regexbot', 'ipv6')
NICK = str(config.get('regexbot', 'nick'))
CHANNELS = str(config.get('regexbot', 'channels')).split()
VERSION = str(config.get('regexbot', 'version')) + '; %s'
try: VERSION = VERSION % Popen(["git","branch","-v","--contains"], stdout=PIPE).communicate()[0].strip()
except: VERSION = VERSION % 'unknown'
del Popen, PIPE
TRANSLATE_ENABLED = config.getboolean('regexbot','translate_enabled')
RECONNECT_TO_SERVER = config.getboolean('regexbot', 'reconnect_to_server')
FORCE_ENDING_SLASH = config.getboolean('regexbot', 'force_ending_slash')

CHANNEL_FLOOD_COOLDOWN = timedelta(seconds=config.getint('regexbot', 'channel_flood_cooldown'))
GLOBAL_FLOOD_COOLDOWN = timedelta(seconds=config.getint('regexbot', 'global_flood_cooldown'))
MAX_MESSAGES = config.getint('regexbot', 'max_messages')
MAX_MESSAGE_SIZE = config.getint('regexbot', 'max_message_size')
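
Pieced together from the get/getint/getboolean calls above, a regexbot.ini skeleton would look like this; all values are invented:

# Invented regexbot.ini skeleton covering the options read above.
from configparser import RawConfigParser

config = RawConfigParser()
config.read_string("""
[regexbot]
server = irc.example.net
port = 6667
ipv6 = false
nick = regexbot
channels = #chan1 #chan2
version = regexbot-git
translate_enabled = false
reconnect_to_server = true
force_ending_slash = false
channel_flood_cooldown = 5
global_flood_cooldown = 1
max_messages = 100
max_message_size = 512
""")
assert config.getboolean('regexbot', 'ipv6') is False
assert config.get('regexbot', 'channels').split() == ['#chan1', '#chan2']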
Code example #46
File: enums.py Project: ArslanRafique/dist-packages
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

try:
    from configparser import NoSectionError, RawConfigParser
except ImportError:
    # Python 2
    from ConfigParser import NoSectionError, RawConfigParser
from oneconf.paths import ONECONF_OVERRIDE_FILE

config = RawConfigParser()
try:
    config.read(ONECONF_OVERRIDE_FILE)
    MIN_TIME_WITHOUT_ACTIVITY = config.getint(
        'TestSuite', 'MIN_TIME_WITHOUT_ACTIVITY')
except NoSectionError:
    MIN_TIME_WITHOUT_ACTIVITY = 60 * 5

ONECONF_SERVICE_NAME = "com.ubuntu.OneConf"
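
The override file read above only needs one section and one option to take effect. A sketch with an invented value:

# Invented ONECONF_OVERRIDE_FILE contents.
from configparser import RawConfigParser

config = RawConfigParser()
config.read_string("[TestSuite]\nMIN_TIME_WITHOUT_ACTIVITY = 10\n")
assert config.getint('TestSuite', 'MIN_TIME_WITHOUT_ACTIVITY') == 10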
Code example #47
File: app.py Project: stemid/picam
    return videos


@route('/static/<path:path>')
def server_static(path):
    return static_file(path, root=config.get('picam', 'static_dir'))


@route('/')
def picam_index():
    video_list = get_video_list(config.get('picam', 'video_dir'))
    picam_data = {
        'stream_url': config.get('picam', 'stream_url'),
        'control_url': config.get('picam', 'control_url')
    }
    return template(
        'index',
        picam_config=picam_data,
        videos=video_list
    )


if __name__ == '__main__':
    run(
        host=config.get('picam', 'listen_host'),
        port=config.getint('picam', 'listen_port')
    )
    debug(config.getboolean('picam', 'debug'))
else:
    application = default_app()
Code example #48
class Repository:
    """Filesystem based transactional key value store

    On disk layout:
    dir/README
    dir/config
    dir/data/<X / SEGMENTS_PER_DIR>/<X>
    dir/index.X
    dir/hints.X
    """
    DEFAULT_MAX_SEGMENT_SIZE = 5 * 1024 * 1024
    DEFAULT_SEGMENTS_PER_DIR = 10000

    class DoesNotExist(Error):
        """Repository {} does not exist."""

    class AlreadyExists(Error):
        """Repository {} already exists."""

    class InvalidRepository(Error):
        """{} is not a valid repository."""

    class CheckNeeded(Error):
        """Inconsistency detected. Please run "borg check {}"."""

    class ObjectNotFound(Error):
        """Object with key {} not found in repository {}."""

    def __init__(self, path, create=False, exclusive=False):
        self.path = os.path.abspath(path)
        self.io = None
        self.lock = None
        self.index = None
        self._active_txn = False
        if create:
            self.create(self.path)
        self.open(self.path, exclusive)

    def __del__(self):
        self.close()

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.path)

    def create(self, path):
        """Create a new empty repository at `path`
        """
        if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
            raise self.AlreadyExists(path)
        if not os.path.exists(path):
            os.mkdir(path)
        with open(os.path.join(path, 'README'), 'w') as fd:
            fd.write('This is a Borg repository\n')
        os.mkdir(os.path.join(path, 'data'))
        config = RawConfigParser()
        config.add_section('repository')
        config.set('repository', 'version', '1')
        config.set('repository', 'segments_per_dir', self.DEFAULT_SEGMENTS_PER_DIR)
        config.set('repository', 'max_segment_size', self.DEFAULT_MAX_SEGMENT_SIZE)
        config.set('repository', 'id', hexlify(os.urandom(32)).decode('ascii'))
        self.save_config(path, config)

    def save_config(self, path, config):
        config_path = os.path.join(path, 'config')
        with open(config_path, 'w') as fd:
            config.write(fd)

    def save_key(self, keydata):
        assert self.config
        keydata = keydata.decode('utf-8')  # remote repo: msgpack issue #99, getting bytes
        self.config.set('repository', 'key', keydata)
        self.save_config(self.path, self.config)

    def load_key(self):
        keydata = self.config.get('repository', 'key')
        return keydata.encode('utf-8')  # remote repo: msgpack issue #99, returning bytes

    def destroy(self):
        """Destroy the repository at `self.path`
        """
        self.close()
        os.remove(os.path.join(self.path, 'config'))  # kill config first
        shutil.rmtree(self.path)

    def get_index_transaction_id(self):
        indices = sorted((int(name[6:]) for name in os.listdir(self.path) if name.startswith('index.') and name[6:].isdigit()))
        if indices:
            return indices[-1]
        else:
            return None

    def get_transaction_id(self):
        index_transaction_id = self.get_index_transaction_id()
        segments_transaction_id = self.io.get_segments_transaction_id()
        if index_transaction_id is not None and segments_transaction_id is None:
            raise self.CheckNeeded(self.path)
        # Attempt to automatically rebuild index if we crashed between commit
        # tag write and index save
        if index_transaction_id != segments_transaction_id:
            if index_transaction_id is not None and index_transaction_id > segments_transaction_id:
                replay_from = None
            else:
                replay_from = index_transaction_id
            self.replay_segments(replay_from, segments_transaction_id)
        return self.get_index_transaction_id()

    def open(self, path, exclusive):
        self.path = path
        if not os.path.isdir(path):
            raise self.DoesNotExist(path)
        self.lock = UpgradableLock(os.path.join(path, 'lock'), exclusive).acquire()
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
            raise self.InvalidRepository(path)
        self.max_segment_size = self.config.getint('repository', 'max_segment_size')
        self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
        self.id = unhexlify(self.config.get('repository', 'id').strip())
        self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)

    def close(self):
        if self.lock:
            if self.io:
                self.io.close()
            self.io = None
            self.lock.release()
            self.lock = None

    def commit(self):
        """Commit transaction
        """
        self.io.write_commit()
        self.compact_segments()
        self.write_index()
        self.rollback()

    def open_index(self, transaction_id):
        if transaction_id is None:
            return NSIndex()
        return NSIndex.read((os.path.join(self.path, 'index.%d') % transaction_id).encode('utf-8'))

    def prepare_txn(self, transaction_id, do_cleanup=True):
        self._active_txn = True
        try:
            self.lock.upgrade()
        except UpgradableLock.ExclusiveLockFailed:
            # if upgrading the lock to exclusive fails, we do not have an
            # active transaction. this is important for "serve" mode, where
            # the repository instance lives on - even if exceptions happened.
            self._active_txn = False
            raise
        if not self.index:
            self.index = self.open_index(transaction_id)
        if transaction_id is None:
            self.segments = {}
            self.compact = set()
        else:
            if do_cleanup:
                self.io.cleanup(transaction_id)
            hints = read_msgpack(os.path.join(self.path, 'hints.%d' % transaction_id))
            if hints[b'version'] != 1:
                raise ValueError('Unknown hints file version: %d' % hints[b'version'])
            self.segments = hints[b'segments']
            self.compact = set(hints[b'compact'])

    def write_index(self):
        hints = {b'version': 1,
                 b'segments': self.segments,
                 b'compact': list(self.compact)}
        transaction_id = self.io.get_segments_transaction_id()
        write_msgpack(os.path.join(self.path, 'hints.%d' % transaction_id), hints)
        self.index.write(os.path.join(self.path, 'index.tmp'))
        os.rename(os.path.join(self.path, 'index.tmp'),
                  os.path.join(self.path, 'index.%d' % transaction_id))
        # Remove old indices
        current = '.%d' % transaction_id
        for name in os.listdir(self.path):
            if not name.startswith('index.') and not name.startswith('hints.'):
                continue
            if name.endswith(current):
                continue
            os.unlink(os.path.join(self.path, name))
        self.index = None

    def compact_segments(self):
        """Compact sparse segments by copying data into new segments
        """
        if not self.compact:
            return
        index_transaction_id = self.get_index_transaction_id()
        segments = self.segments
        for segment in sorted(self.compact):
            if self.io.segment_exists(segment):
                for tag, key, offset, data in self.io.iter_objects(segment, include_data=True):
                    if tag == TAG_PUT and self.index.get(key, (-1, -1)) == (segment, offset):
                        new_segment, offset = self.io.write_put(key, data)
                        self.index[key] = new_segment, offset
                        segments.setdefault(new_segment, 0)
                        segments[new_segment] += 1
                        segments[segment] -= 1
                    elif tag == TAG_DELETE:
                        if index_transaction_id is None or segment > index_transaction_id:
                            self.io.write_delete(key)
                assert segments[segment] == 0

        self.io.write_commit()
        for segment in sorted(self.compact):
            assert self.segments.pop(segment) == 0
            self.io.delete_segment(segment)
        self.compact = set()

    def replay_segments(self, index_transaction_id, segments_transaction_id):
        self.prepare_txn(index_transaction_id, do_cleanup=False)
        for segment, filename in self.io.segment_iterator():
            if index_transaction_id is not None and segment <= index_transaction_id:
                continue
            if segment > segments_transaction_id:
                break
            self.segments[segment] = 0
            for tag, key, offset in self.io.iter_objects(segment):
                if tag == TAG_PUT:
                    try:
                        s, _ = self.index[key]
                        self.compact.add(s)
                        self.segments[s] -= 1
                    except KeyError:
                        pass
                    self.index[key] = segment, offset
                    self.segments[segment] += 1
                elif tag == TAG_DELETE:
                    try:
                        s, _ = self.index.pop(key)
                        self.segments[s] -= 1
                        self.compact.add(s)
                    except KeyError:
                        pass
                    self.compact.add(segment)
                elif tag == TAG_COMMIT:
                    continue
                else:
                    raise self.CheckNeeded(self.path)
            if self.segments[segment] == 0:
                self.compact.add(segment)
        self.write_index()
        self.rollback()

    def check(self, repair=False):
        """Check repository consistency

        This method verifies all segment checksums and makes sure
        the index is consistent with the data stored in the segments.
        """
        error_found = False

        def report_error(msg):
            nonlocal error_found
            error_found = True
            print(msg, file=sys.stderr)

        assert not self._active_txn
        try:
            transaction_id = self.get_transaction_id()
            current_index = self.open_index(transaction_id)
        except Exception:
            transaction_id = self.io.get_segments_transaction_id()
            current_index = None
        if transaction_id is None:
            transaction_id = self.get_index_transaction_id()
        if transaction_id is None:
            transaction_id = self.io.get_latest_segment()
        if repair:
            self.io.cleanup(transaction_id)
        segments_transaction_id = self.io.get_segments_transaction_id()
        self.prepare_txn(None)
        for segment, filename in self.io.segment_iterator():
            if segment > transaction_id:
                continue
            try:
                objects = list(self.io.iter_objects(segment))
            except IntegrityError as err:
                report_error(str(err))
                objects = []
                if repair:
                    self.io.recover_segment(segment, filename)
                    objects = list(self.io.iter_objects(segment))
            self.segments[segment] = 0
            for tag, key, offset in objects:
                if tag == TAG_PUT:
                    try:
                        s, _ = self.index[key]
                        self.compact.add(s)
                        self.segments[s] -= 1
                    except KeyError:
                        pass
                    self.index[key] = segment, offset
                    self.segments[segment] += 1
                elif tag == TAG_DELETE:
                    try:
                        s, _ = self.index.pop(key)
                        self.segments[s] -= 1
                        self.compact.add(s)
                    except KeyError:
                        pass
                    self.compact.add(segment)
                elif tag == TAG_COMMIT:
                    continue
                else:
                    report_error('Unexpected tag {} in segment {}'.format(tag, segment))
        # We might need to add a commit tag if no committed segment is found
        if repair and segments_transaction_id is None:
            report_error('Adding commit tag to segment {}'.format(transaction_id))
            self.io.segment = transaction_id + 1
            self.io.write_commit()
        if current_index and not repair:
            if len(current_index) != len(self.index):
                report_error('Index object count mismatch. {} != {}'.format(len(current_index), len(self.index)))
            elif current_index:
                for key, value in self.index.iteritems():
                    if current_index.get(key, (-1, -1)) != value:
                        report_error('Index mismatch for key {}. {} != {}'.format(key, value, current_index.get(key, (-1, -1))))
        if repair:
            self.compact_segments()
            self.write_index()
        self.rollback()
        return not error_found or repair

    def rollback(self):
        """
        """
        self.index = None
        self._active_txn = False

    def __len__(self):
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        return len(self.index)

    def __contains__(self, id):
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        return id in self.index

    def list(self, limit=None, marker=None):
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        return [id_ for id_, _ in islice(self.index.iteritems(marker=marker), limit)]

    def get(self, id_):
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        try:
            segment, offset = self.index[id_]
            return self.io.read(segment, offset, id_)
        except KeyError:
            raise self.ObjectNotFound(id_, self.path)

    def get_many(self, ids, is_preloaded=False):
        for id_ in ids:
            yield self.get(id_)

    def put(self, id, data, wait=True):
        if not self._active_txn:
            self.prepare_txn(self.get_transaction_id())
        try:
            segment, _ = self.index[id]
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.segments.setdefault(segment, 0)
            self.compact.add(segment)
        except KeyError:
            pass
        segment, offset = self.io.write_put(id, data)
        self.segments.setdefault(segment, 0)
        self.segments[segment] += 1
        self.index[id] = segment, offset

    def delete(self, id, wait=True):
        if not self._active_txn:
            self.prepare_txn(self.get_transaction_id())
        try:
            segment, offset = self.index.pop(id)
        except KeyError:
            raise self.ObjectNotFound(id, self.path)
        self.segments[segment] -= 1
        self.compact.add(segment)
        segment = self.io.write_delete(id)
        self.compact.add(segment)
        self.segments.setdefault(segment, 0)

    def preload(self, ids):
        """Preload objects (only applies to remote repositories)
Code example #49
File: crawl.py Project: samularity/dochparasoup
                        default=open(path.join(_file_path, 'config.ini'), 'r'),
                        help='a file path to the config ini',
                        dest="config_file")
args = arg_parser.parse_args()


## configuration
config = RawConfigParser()
config.read(path.join(_file_path, 'config.defaults.ini'))
try:
    config.read_file(args.config_file)  # py3
except AttributeError:
    config.readfp(args.config_file)     # py2
args.config_file.close()

nps_port = config.getint("General", "Port")
nps_bindip = config.get("General", "IP")
min_cache_imgs = config.getint("Cache", "Images")
min_cache_imgs_before_refill = config.getint("Cache", "Images_min_limit")
user_agent = config.get("General", "Useragent")
logverbosity = config.get("Logging", "Verbosity")
logger = logging.getLogger(config.get("Logging", "Log_name"))
hdlr = logging.FileHandler(config.get("Logging", "File"))
hdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(hdlr)
logger.setLevel(logverbosity.upper())

call_flush_timeout = 10  # value in seconds
call_flush_last = time.time() - call_flush_timeout

call_reset_timeout = 10  # value in seconds
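
config.defaults.ini supplies the baseline for the options read above. An invented skeleton:

# Invented config.defaults.ini skeleton for the options read above.
from configparser import RawConfigParser

config = RawConfigParser()
config.read_string("""
[General]
Port = 5100
IP = 0.0.0.0
Useragent = dochparasoup/1.0

[Cache]
Images = 50
Images_min_limit = 10

[Logging]
Verbosity = info
Log_name = dochparasoup
File = crawler.log
""")
assert config.getint('General', 'Port') == 5100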
Code example #50
File: GPIOSim.py Project: ekapujiw2002/GPIOSim
    def updateUI(self):
        self.canvas.delete("all")

        c = RawConfigParser()
        c.read(self.WORK_FILE)

        x = self.START_X
        y = self.START_Y

        for i in range(0,40):
            state = c.getint("pin"+str(i),"state")
            value = c.getint("pin"+str(i),"value")
            ident = 2*state+value

            self.currState[i] = state
            self.currValue[i]  = value

            e_x = x + self.PIN_SIZE
            e_y = y + self.PIN_SIZE

            self.canvas.create_oval(x, y, e_x, e_y, outline="black", fill=self.PIN_COLORS[ident], width=2, tags='pin'+str(i))

            self.canvas.tag_bind('pin'+str(i), '<Button>', self.click_cb(i))

            if i%2==0:  # LEFT COLUMN GPIOS
                self.canvas.create_window(x-70, y+10, window=Label(self.canvas, text=self.GPIO_NAMES[i], fg=self.TEXT_COLOR, bg=self.BG_COLOR))

                if ident==2:   #IN_LOW
                    self.canvas.create_window(x - 20, y+8, window=Label(self.canvas, image=self.phInLeft, bd=0))
                    # arrow and clickable(?)
                elif ident==3: #IN_HIGH
                    self.canvas.create_window(x - 20, y+8, window=Label(self.canvas, image=self.phInLeft, bd=0))
                    # arrow and clickable(?)
                elif state==self.STATE_GPIO_OUT: #OUT
                    self.canvas.create_window(x - 20, y+8, window=Label(self.canvas, image=self.phOutLeft, bd=0))

                x = e_x + self.PIN_DISTANCE
            else:  # RIGHT COLUMN GPIOS
                self.canvas.create_window(e_x + 70, y+10, window=Label(self.canvas, text=self.GPIO_NAMES[i], fg=self.TEXT_COLOR, bg=self.BG_COLOR))

                if ident==2:   #IN_LOW
                    self.canvas.create_window(e_x + 22, y+8, window=Label(self.canvas, image=self.phInRight, bd=0))
                    # arrow and clickable(?)
                elif ident==3: #IN_HIGH
                    self.canvas.create_window(e_x + 22, y+8, window=Label(self.canvas, image=self.phInRight, bd=0))
                    # arrow and clickable(?)
                elif state==self.STATE_GPIO_OUT: #OUT
                    self.canvas.create_window(e_x + 22, y+8, window=Label(self.canvas, image=self.phOutRight, bd=0))

                y = e_y + self.PIN_DISTANCE
                x = self.START_X

        self.canvas.pack(fill=BOTH, expand=1)
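
The work file polled above is plain INI with one [pinN] section per header pin. A two-pin sketch with invented states:

# Invented work-file fragment in the format read by updateUI() above.
from configparser import RawConfigParser

c = RawConfigParser()
c.read_string("""
[pin0]
state = 1
value = 0

[pin1]
state = 1
value = 1
""")
ident = 2 * c.getint('pin1', 'state') + c.getint('pin1', 'value')
assert ident == 3   # IN_HIGH, per the branches above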
Code example #51
File: config.py Project: Perdu/poezio
    def getint(self, option, section=DEFSECTION):
        """
        Get a value and return it as an int.
        """
        return RawConfigParser.getint(self, section, option)
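
Note the deliberately swapped argument order relative to RawConfigParser.getint(section, option): the wrapper puts the option first so the section can default to DEFSECTION. A hypothetical call, with an invented option name:

# 'option_name' is invented for illustration.
# value = config.getint('option_name')            # reads [DEFSECTION] option_name
# value = config.getint('option_name', 'Custom')  # reads [Custom] option_name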
Code example #52
File: app.py Project: stemid/bottle-boilerplate
#        return str(uuid)
#
#    return regexp, to_python, to_url

# End of custom filters


config = RawConfigParser()
config.readfp(open('./app.cfg'))


# Serve static files
@route('/static/<path:path>')
def server_static(path):
    return static_file(path, root=config.get('app', 'static_dir'))


@route('/')
def index():
    return template('index')


if __name__ == '__main__':
    run(
        host=config.get('app', 'listen_host'),
        port=config.getint('app', 'listen_port')
    )
    debug(config.getboolean('app', 'debug'))
else:
    application = default_app()
Code example #53
File: settings.py Project: synthead/pidrator
USE_TZ = True

# FIXME: Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!

TEMPLATE_DEBUG = True
# FIXME
# ALLOWED_HOSTS = config.get("django", "allowed_hosts").replace(" ", "").split(
#     ",")

# Pidrator settings.

PIDRATOR_TESTING_WITHOUT_HARDWARE = config.getboolean(
    "pidrator", "testing_without_hardware")
PIDRATOR_SENSOR_UPDATE_FREQUENCY = config.getint("pidrator",
                                                 "sensor_update_frequency")

# Celery settings.

# Hack to make celery run in debug mode.
import sys
if "/usr/bin/celery" in sys.argv:
    DEBUG = False
else:
    DEBUG = True

CELERYBEAT_SCHEDULE = {
    "UpdateEnabledSensors": {
        "task": "pidrator.hardware_controller.UpdateEnabledSensors",
        # FIXME: Race condition if this is 1 second.
        "schedule": timedelta(seconds=PIDRATOR_SENSOR_UPDATE_FREQUENCY)
Code example #54
    def from_config(cls, config: RawConfigParser):
        """
        Creates a TextImportDescriptor from a ConfigParser.RawConfigParser
        by parsing its content
        """
        def getvalue(section, option, type=str):
            if config.has_option(section, option):
                return type(config.get(section, option))
            else:
                return None

        def config_getlist(section, option):
            if config.has_option(section, option):
                return ast.literal_eval(config.get(section, option))
            else:
                return []

        sections = config.sections()
        if not sections:
            raise IOError('Empty config file')

        # Get the data which shall be checked
        project = getvalue(sections[0], 'project')
        timezone = getvalue(sections[0], 'timezone')

        # Check integrity of the data
        with db.session_scope() as session:

            if project:
                rows = session.query(db.Project)\
                    .filter(db.Project.id == project).count()
                if rows != 1:
                    raise ValueError(
                        'Error in import description: \'%s\' is no'
                        ' valid project identifier' % project)

        if timezone:
            # Search in pytz's "set" cause of the set is faster than the
            # list
            if timezone not in common_timezones_set:
                raise ValueError('Error in import description: \'%s\' is no'
                                 ' valid timezone' % timezone)

        # Create a new TextImportDescriptor from config file
        tid = cls(instrument=config.getint(sections[0],
                                           'instrument',
                                           fallback=0),
                  skiplines=config.getint(sections[0], 'skiplines',
                                          fallback=0),
                  skipfooter=config.getint(sections[0],
                                           'skipfooter',
                                           fallback=0),
                  delimiter=getvalue(sections[0], 'delimiter'),
                  decimalpoint=getvalue(sections[0], 'decimalpoint'),
                  dateformat=getvalue(sections[0], 'dateformat'),
                  datecolumns=config_getlist(sections[0], 'datecolumns'),
                  project=getvalue(sections[0], 'project'),
                  timezone=getvalue(sections[0], 'timezone'),
                  nodata=config_getlist(sections[0], 'nodata'),
                  worksheet=getvalue(sections[0], 'worksheet', int),
                  samplecolumn=getvalue(sections[0], 'samplecolumn', int),
                  encoding=getvalue(sections[0], 'encoding'),
                  sample_mapping=config_getdict(config, sections[0],
                                                'sample_mapping'))

        tid.name = sections[0]
        for section in sections[1:]:
            tid.columns.append(ImportColumn.from_config(config, section))
        return tid
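
The first section of the file carries the global import options parsed above, and its name becomes tid.name; every later section describes one column. An invented skeleton follows; the column options depend on ImportColumn.from_config, which is not shown here:

# Invented import-description skeleton; the first section is parsed by
# from_config() above, later sections by ImportColumn.from_config().
from configparser import RawConfigParser

config = RawConfigParser()
config.read_string("""
[my logger import]
instrument = 1
skiplines = 2
delimiter = ;
decimalpoint = .
dateformat = %d.%m.%Y %H:%M
datecolumns = [0]
timezone = Europe/Berlin

[water level]
column = 1
""")
sections = config.sections()
assert config.getint(sections[0], 'instrument', fallback=0) == 1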
Code example #55
File: endtoend_test.py Project: gryphius/fuglu
class DKIMTestCase(unittest.TestCase):

    """DKIM Sig Test"""

    FUGLU_HOST = "127.0.0.1"
    FUGLU_PORT = 7731
    DUMMY_PORT = 7732
    FUGLUCONTROL_PORT = 7733

    def setUp(self):

        k = ''
        for line in open(TESTDATADIR + '/dkim/testfuglu.org.public'):
            if line.startswith('---'):
                continue
            k = k + line.strip()
        record = "v=DKIM1; k=rsa; p=%s" % k
        fuglu.lib.patcheddkimlib.dnstxt = mock.Mock(return_value=record)

        self.config = RawConfigParser()
        self.config.read([TESTDATADIR + '/endtoendtest.conf'])
        self.config.set('main', 'incomingport', str(DKIMTestCase.FUGLU_PORT))
        self.config.set('main', 'outgoinghost', str(DKIMTestCase.FUGLU_HOST))
        self.config.set('main', 'outgoingport', str(DKIMTestCase.DUMMY_PORT))
        self.config.set(
            'main', 'controlport', str(DKIMTestCase.FUGLUCONTROL_PORT))
        guess_clamav_socket(self.config)

        # init core
        self.mc = MainController(self.config)

        # start listening smtp dummy server to get fuglus answer
        self.smtp = DummySMTPServer(self.config, self.config.getint(
            'main', 'outgoingport'), DKIMTestCase.FUGLU_HOST)
        dkdss = threading.Thread(target = self.smtp.serve, args = ())
        dkdss.daemon = True
        dkdss.start()

        # start fuglu's listening server
        fls = threading.Thread(target = self.mc.startup, args = ())
        fls.daemon = True
        fls.start()

    def tearDown(self):
        self.mc.shutdown()
        self.smtp.shutdown()

    def testDKIM(self):
        # give fuglu time to start listener
        time.sleep(1)
        inputfile = TESTDATADIR + '/helloworld.eml'
        msgstring = open(inputfile, 'r').read()

        dkimheader = sign(msgstring, 'whatever', 'testfuglu.org', open(
            TESTDATADIR + '/dkim/testfuglu.org.private').read(), include_headers=['From', 'To'])
        signedcontent = dkimheader + msgstring
        logbuffer = StringIO()
        self.assertTrue(verify(signedcontent, debuglog=logbuffer),
                        "Failed DKIM verification immediately after signing %s" % logbuffer.getvalue())

        # send test message
        try:
            smtpclient = smtplib.SMTP('127.0.0.1', DKIMTestCase.FUGLU_PORT)
        except Exception as e:
            self.fail("Could not connect to fuglu on port %s : %s" %
                      (DKIMTestCase.FUGLU_PORT, str(e)))
        # smtpServer.set_debuglevel(1)
        smtpclient.helo('test.dkim')

        smtpclient.sendmail(
            '*****@*****.**', '*****@*****.**', signedcontent)

        smtpclient.quit()

        # verify the smtp server stored the file correctly
        tmpfile = self.smtp.tempfilename
        self.assertTrue(tmpfile is not None, 'Send to dummy smtp server failed')

        result = open(tmpfile, 'r').read()
        logbuffer = StringIO()
        verify_ok = verify(result, debuglog=logbuffer)
        self.assertTrue(
            verify_ok, "Failed DKIM verification: %s" % logbuffer.getvalue())
Code example #56
def readConfigFile(debug=False):
    """Read the config file ews2org.cnf"""

    config = RawConfigParser({
        'username': '',
        'email_address': '',
        'password': '',
        'timezone': '',
        'days_past': 7,
        'days_future': 30,
        'max_items': 100,
        'output_file': 'myCalendar.org',
        'calendar_name': 'My calendar',
        'orgmode_labels': ':Org:ews2cal:',  # Org mode labels
        'orgmode_status_future':
        '',  # Default Org mode status future, e.g. TODO
        'orgmode_status_current':
        '',  # Default Org mode status current, e.g. PROGRESS
        'orgmode_status_past': '',  # Default Org mode status past, e.g. DONE
        'orgmode_status_cancel':
        '',  # Default Org mode status cancel, e.g. CANCELLED
        'orgmode_priority_default':
        '',  # Default Org mode priority default, e.g. [#B] or empty
        'orgmode_priority_high':
        '',  # Default Org mode priority high, e.g. [#A]
        'orgmode_priority_low': '',  # Default Org mode priority low, e.g. [#C]
    })
    cfg_dir = os.path.dirname(os.path.realpath(__file__))
    config.read(os.path.join(cfg_dir, 'ews2org.cfg'))

    server = config.get('ews2org', 'server')
    username = config.get('ews2org', 'username')
    emailAddress = config.get('ews2org', 'email_address')
    password = config.get('ews2org', 'password')
    timezone = config.get('ews2org', 'timezone')
    daysPast = config.getint('ews2org', 'days_past')
    daysFuture = config.getint('ews2org', 'days_future')
    maxItems = config.getint('ews2org', 'max_items')
    outFileName = config.get('ews2org', 'output_file')
    calName = config.get('ews2org', 'calendar_name')
    orgLabels = config.get('ews2org', 'orgmode_labels')

    orgStatusFuture = config.get('ews2org', 'orgmode_status_future')
    orgStatusCurrent = config.get('ews2org', 'orgmode_status_current')
    orgStatusPast = config.get('ews2org', 'orgmode_status_past')
    orgStatusCancel = config.get('ews2org', 'orgmode_status_cancel')

    orgPrioDefault = config.get('ews2org', 'orgmode_priority_default')
    orgPrioHigh = config.get('ews2org', 'orgmode_priority_high')
    orgPrioLow = config.get('ews2org', 'orgmode_priority_low')

    # Check settings:
    if debug:
        print("server:      ", server)
        print("username:    ", username)
        print("email:       ", emailAddress)
        print("password:    ", password)
        print("timezone:    ", timezone)
        print()

        print("daysPast:    ", daysPast)
        print("daysFuture:  ", daysFuture)
        print("maxItems:    ", maxItems)

        print()
        print("outFileName: ", outFileName)
        print("calName:     ", calName)
        print("orgLabels:   ", orgLabels)
        print()

        print("orgStatusFuture:   ", orgStatusFuture)
        print("orgStatusCurrent:  ", orgStatusCurrent)
        print("orgStatusPast:     ", orgStatusPast)
        print("orgStatusCancel:   ", orgStatusCancel)
        print()

        print("orgPrioDefault:   ", orgPrioDefault)
        print("orgPrioHigh:      ", orgPrioHigh)
        print("orgPrioLow:       ", orgPrioLow)
        print()


    return server, username, emailAddress, password, timezone, daysPast, daysFuture, \
        maxItems, outFileName, calName, orgLabels, \
        orgStatusFuture, orgStatusCurrent, orgStatusPast, orgStatusCancel, \
        orgPrioDefault, orgPrioHigh, orgPrioLow
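
Only [ews2org] keys that differ from the constructor defaults above need to appear in ews2org.cfg. A sketch with invented account details:

# Invented ews2org.cfg fragment; omitted keys fall back to the defaults
# passed to RawConfigParser above.
from configparser import RawConfigParser

config = RawConfigParser({'days_past': 7, 'output_file': 'myCalendar.org'})
config.read_string("""
[ews2org]
server = outlook.example.com
username = EXAMPLE\\jdoe
email_address = jdoe@example.com
password = secret
timezone = Europe/Berlin
""")
assert config.getint('ews2org', 'days_past') == 7   # default applies
assert config.get('ews2org', 'output_file') == 'myCalendar.org'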
Code example #57
File: settings.py Project: powerswitch/TIMA
DEBUG = config.getboolean('debug','DEBUG')
TEMPLATE_DEBUG = config.getboolean('debug','TEMPLATE_DEBUG')


ALLOWED_HOSTS = config.get('host','ALLOWED_HOSTS').split()
ADMINS = tuple(config.items('admins'))


# Email settings

EMAIL_USE_TLS = config.getboolean('host_email','EMAIL_USE_TLS')
EMAIL_USE_SSL = config.getboolean('host_email','EMAIL_USE_SSL')
DEFAULT_FROM_EMAIL = config.get('host_email','DEFAULT_FROM_EMAIL')
SERVER_EMAIL = config.get('host_email','SERVER_EMAIL')
EMAIL_HOST = config.get('host_email','EMAIL_HOST')
EMAIL_PORT = config.getint('host_email','EMAIL_PORT')
EMAIL_HOST_USER = config.get('host_email','EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config.get('host_email','EMAIL_HOST_PASSWORD')
EMAIL_SUBJECT_PREFIX = '[TIMA] '


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'app',
Code example #58
import pygame
from configparser import RawConfigParser

# ------------------------------GLOBALS---------------------------------------------------------------
pygame.init()
rcp = RawConfigParser()
rcp.read('config.ini')
windowx, windowy = (int(
    k.strip()) for k in (rcp.get('GENERAL', 'window_size')[1:-1].split(',')))
caption = rcp.get('GENERAL', 'name')
scorebar_width = 50
screenx, screeny = windowx, windowy - scorebar_width
win = pygame.display.set_mode((windowx, windowy))
background_image = pygame.image.load('player_assets/background.jpg')
background_image = pygame.transform.scale(background_image, (windowx, windowy))
pygame.display.set_caption(caption)
FPS = rcp.getint('GENERAL', 'FPS')
fpsclock = pygame.time.Clock()
divisions = 5
width_safe = screeny / (3 * divisions + 1)
width_division = 3 * width_safe
width_water = width_division - width_safe
obstacles_positions = list()
obstacles = pygame.sprite.Group()
safe_areas = pygame.sprite.Group()
MAX_OBSTACLES_IN_ANY_LEVEL = 15
MAX_MIN_OBSTACLES_IN_ANY_LEVEL = 4
MAX_OBSTACLES = 2
MIN_OBSTACLES = 0
font_name = rcp.get('FONT', 'font')
small_font = pygame.font.Font(font_name, scorebar_width // 2)
big_font = pygame.font.Font(font_name, windowy // 10)
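
window_size is stored as a literal '(x,y)' tuple and unpacked by hand above, so the INI must use exactly that form. A sketch with invented values:

# Invented config.ini fragment in the shape parsed above.
from configparser import RawConfigParser

rcp = RawConfigParser()
rcp.read_string("""
[GENERAL]
window_size = (800,600)
name = Bubble Dodger
FPS = 60

[FONT]
font = freesansbold.ttf
""")
windowx, windowy = (int(k.strip())
                    for k in rcp.get('GENERAL', 'window_size')[1:-1].split(','))
assert (windowx, windowy) == (800, 600)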
Code example #59
File: cache.py Project: bjenkins100/attic
class Cache(object):
    """Client Side cache
    """
    class RepositoryReplay(Error):
        """Cache is newer than repository, refusing to continue"""

    class CacheInitAbortedError(Error):
        """Cache initialization aborted"""


    class EncryptionMethodMismatch(Error):
        """Repository encryption method changed since last acccess, refusing to continue
        """

    def __init__(self, repository, key, manifest, path=None, sync=True, warn_if_unencrypted=True):
        self.lock = None
        self.timestamp = None
        self.txn_active = False
        self.repository = repository
        self.key = key
        self.manifest = manifest
        self.path = path or os.path.join(get_cache_dir(), hexlify(repository.id).decode('ascii'))
        if not os.path.exists(self.path):
            if warn_if_unencrypted and isinstance(key, PlaintextKey):
                if 'ATTIC_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK' not in os.environ:
                    print("""Warning: Attempting to access a previously unknown unencrypted repository\n""", file=sys.stderr)
                    answer = input('Do you want to continue? [yN] ')
                    if not (answer and answer in 'Yy'):
                        raise self.CacheInitAbortedError()
            self.create()
        self.open()
        if sync and self.manifest.id != self.manifest_id:
            # If repository is older than the cache something fishy is going on
            if self.timestamp and self.timestamp > manifest.timestamp:
                raise self.RepositoryReplay()
            # Make sure an encrypted repository has not been swapped for an unencrypted repository
            if self.key_type is not None and self.key_type != str(key.TYPE):
                raise self.EncryptionMethodMismatch()
            self.sync()
            self.commit()

    def __del__(self):
        self.close()

    def create(self):
        """Create a new empty cache at `path`
        """
        os.makedirs(self.path)
        with open(os.path.join(self.path, 'README'), 'w') as fd:
            fd.write('This is an Attic cache')
        config = RawConfigParser()
        config.add_section('cache')
        config.set('cache', 'version', '1')
        config.set('cache', 'repository', hexlify(self.repository.id).decode('ascii'))
        config.set('cache', 'manifest', '')
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            config.write(fd)
        ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8'))
        with open(os.path.join(self.path, 'files'), 'w') as fd:
            pass  # empty file

    def open(self):
        if not os.path.isdir(self.path):
            raise Exception('%s does not look like an Attic cache' % self.path)
        self.lock = UpgradableLock(os.path.join(self.path, 'config'), exclusive=True)
        self.rollback()
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if self.config.getint('cache', 'version') != 1:
            raise Exception('%s does not look like an Attic cache' % self.path)
        self.id = self.config.get('cache', 'repository')
        self.manifest_id = unhexlify(self.config.get('cache', 'manifest'))
        self.timestamp = self.config.get('cache', 'timestamp', fallback=None)
        self.key_type = self.config.get('cache', 'key_type', fallback=None)
        self.chunks = ChunkIndex.read(os.path.join(self.path, 'chunks').encode('utf-8'))
        self.files = None

    def close(self):
        if self.lock:
            self.lock.release()

    def _read_files(self):
        self.files = {}
        self._newest_mtime = 0
        with open(os.path.join(self.path, 'files'), 'rb') as fd:
            u = msgpack.Unpacker(use_list=True)
            while True:
                data = fd.read(64 * 1024)
                if not data:
                    break
                u.feed(data)
                for path_hash, item in u:
                    item[0] += 1
                    self.files[path_hash] = msgpack.packb(item)

    def begin_txn(self):
        # Initialize transaction snapshot
        txn_dir = os.path.join(self.path, 'txn.tmp')
        os.mkdir(txn_dir)
        shutil.copy(os.path.join(self.path, 'config'), txn_dir)
        shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
        shutil.copy(os.path.join(self.path, 'files'), txn_dir)
        os.rename(os.path.join(self.path, 'txn.tmp'),
                  os.path.join(self.path, 'txn.active'))
        self.txn_active = True

    def commit(self):
        """Commit transaction
        """
        if not self.txn_active:
            return
        if self.files is not None:
            with open(os.path.join(self.path, 'files'), 'wb') as fd:
                for path_hash, item in self.files.items():
                    # Discard cached files with the newest mtime to avoid
                    # issues with filesystem snapshots and mtime precision
                    item = msgpack.unpackb(item)
                    if item[0] < 10 and bigint_to_int(item[3]) < self._newest_mtime:
                        msgpack.pack((path_hash, item), fd)
        self.config.set('cache', 'manifest', hexlify(self.manifest.id).decode('ascii'))
        self.config.set('cache', 'timestamp', self.manifest.timestamp)
        self.config.set('cache', 'key_type', str(self.key.TYPE))
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            self.config.write(fd)
        self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8'))
        os.rename(os.path.join(self.path, 'txn.active'),
                  os.path.join(self.path, 'txn.tmp'))
        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def rollback(self):
        """Roll back partial and aborted transactions
        """
        # Remove partial transaction
        if os.path.exists(os.path.join(self.path, 'txn.tmp')):
            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        # Roll back active transaction
        txn_dir = os.path.join(self.path, 'txn.active')
        if os.path.exists(txn_dir):
            shutil.copy(os.path.join(txn_dir, 'config'), self.path)
            shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
            shutil.copy(os.path.join(txn_dir, 'files'), self.path)
            os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
            if os.path.exists(os.path.join(self.path, 'txn.tmp')):
                shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def sync(self):
        """Initializes cache by fetching and reading all archive indicies
        """
        def add(id, size, csize):
            try:
                count, size, csize = self.chunks[id]
                self.chunks[id] = count + 1, size, csize
            except KeyError:
                self.chunks[id] = 1, size, csize
        self.begin_txn()
        print('Initializing cache...')
        self.chunks.clear()
        unpacker = msgpack.Unpacker()
        repository = cache_if_remote(self.repository)
        for name, info in self.manifest.archives.items():
            archive_id = info[b'id']
            cdata = repository.get(archive_id)
            data = self.key.decrypt(archive_id, cdata)
            add(archive_id, len(data), len(cdata))
            archive = msgpack.unpackb(data)
            if archive[b'version'] != 1:
                raise Exception('Unknown archive metadata version')
            decode_dict(archive, (b'name',))
            print('Analyzing archive:', archive[b'name'])
            for key, chunk in zip(archive[b'items'], repository.get_many(archive[b'items'])):
                data = self.key.decrypt(key, chunk)
                add(key, len(data), len(chunk))
                unpacker.feed(data)
                for item in unpacker:
                    if b'chunks' in item:
                        for chunk_id, size, csize in item[b'chunks']:
                            add(chunk_id, size, csize)

    def add_chunk(self, id, data, stats):
        if not self.txn_active:
            self.begin_txn()
        if self.seen_chunk(id):
            return self.chunk_incref(id, stats)
        size = len(data)
        data = self.key.encrypt(data)
        csize = len(data)
        self.repository.put(id, data, wait=False)
        self.chunks[id] = (1, size, csize)
        stats.update(size, csize, True)
        return id, size, csize

    def seen_chunk(self, id):
        return self.chunks.get(id, (0, 0, 0))[0]

    def chunk_incref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        self.chunks[id] = (count + 1, size, csize)
        stats.update(size, csize, False)
        return id, size, csize

    def chunk_decref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        if count == 1:
            del self.chunks[id]
            self.repository.delete(id, wait=False)
            stats.update(-size, -csize, True)
        else:
            self.chunks[id] = (count - 1, size, csize)
            stats.update(-size, -csize, False)

    def file_known_and_unchanged(self, path_hash, st):
        if self.files is None:
            self._read_files()
        entry = self.files.get(path_hash)
        if not entry:
            return None
        entry = msgpack.unpackb(entry)
        if entry[2] == st.st_size and bigint_to_int(entry[3]) == st_mtime_ns(st) and entry[1] == st.st_ino:
            # reset entry age
            entry[0] = 0
            self.files[path_hash] = msgpack.packb(entry)
            return entry[4]
        else:
            return None

    def memorize_file(self, path_hash, st, ids):
        # Entry: Age, inode, size, mtime, chunk ids
        mtime_ns = st_mtime_ns(st)
        self.files[path_hash] = msgpack.packb((0, st.st_ino, st.st_size, int_to_bigint(mtime_ns), ids))
        self._newest_mtime = max(self._newest_mtime, mtime_ns)
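
The cache's own config file, written by create() and updated by commit() above, stays a small [cache] section. A sketch; the repository id is 64 hex characters in practice, shortened here:

# Sketch of the cache config maintained by create()/commit() above.
from configparser import RawConfigParser

config = RawConfigParser()
config.read_string("""
[cache]
version = 1
repository = 00112233445566778899aabbccddeeff
manifest =
""")
assert config.getint('cache', 'version') == 1
assert config.get('cache', 'timestamp', fallback=None) is None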
Code example #60
File: api.py Project: stemid/vrealize-alerts
        except Exception as e:
            log.exception('Exception while loading "{plugin}": {error}'.format(
                plugin=plugin_name,
                error=str(e)
            ))
            continue

        # Run plugin
        try:
            inst.run()
        except Exception as e:
            log.exception('Exception while running "{plugin}": {error}'.format(
                plugin=plugin_name,
                error=str(e)
            ))
            continue

    return json.dumps({
        'status': True
    })


if __name__ == '__main__':
    app.run(
        host=config.get('api', 'listen_host'),
        port=config.getint('api', 'listen_port')
    )
    debug(config.getboolean('api', 'debug'))
else:
    application = app