Example #1
    def _pull(self, in_seq, packet, out_seq, dry_run):
        """Push file updates/deletions covered by `in_seq` into `packet`.

        :param in_seq:
            sequence of seqno ranges to pull; mutated in place — processed
            seqnos are excluded so the caller can reuse it
        :param packet:
            outgoing packet; receives `files_push`/`files_delete` entries
        :param out_seq:
            sequence collecting the seqnos actually pushed
        :param dry_run:
            when `True`, do not push anything; return `True` iff there is
            at least one entry to sync, `False` otherwise

        Returns `None` unless `dry_run` is set.
        """
        _logger.debug('Start sync: in_seq=%r', in_seq)

        files = 0
        deleted = 0
        pos = 0

        # Iterate over a copy: `in_seq` is mutated (excluded) inside the loop
        for start, end in in_seq[:]:
            # `self._index` is ordered by seqno, so binary search finds the
            # first entry of the range; `pos` serves as a monotonic `lo` hint
            pos = bisect_left(self._index, [start, None, None], pos)
            # Pass `pos` as the enumerate() start so `pos` remains an
            # absolute index into `self._index`; the slice-relative value
            # the plain enumerate() produced made the bisect_left() `lo`
            # hint needlessly small for subsequent ranges (same result,
            # just a wider re-scan)
            for pos, (seqno, path, mtime) in enumerate(self._index[pos:], pos):
                if end is not None and seqno > end:
                    break
                if dry_run:
                    # At least one entry to sync; that is all the caller asked
                    return True

                # Yield to other coroutines on every entry
                coroutine.dispatch()
                if mtime < 0:
                    # Negative mtime marks a deletion in the index
                    packet.push(arcname=join('files', path),
                            cmd='files_delete', directory=self._directory,
                            path=path)
                    deleted += 1
                else:
                    packet.push_file(join(self._files_path, path),
                            arcname=join('files', path), cmd='files_push',
                            directory=self._directory, path=path)
                in_seq.exclude(seqno, seqno)
                out_seq.include(start, seqno)
                start = seqno
                files += 1

        if dry_run:
            # Nothing found to sync in any range
            return False

        _logger.debug('Stop sync: in_seq=%r out_seq=%r updates=%r deletes=%r',
                in_seq, out_seq, files, deleted)
Example #2
            def patch():
                """Generate document diffs, pushing blob data out-of-band.

                Yields ``{'guid': ..., 'diff': ...}`` records for every
                changed document; blob-like properties (carrying a ``path``
                or ``url``) are stripped from the diff and pushed to
                `out_packet` directly as separate ``sn_push`` entries.
                Closure variables (`directory`, `orig_seq`, `in_seq`,
                `push_seq`, `out_packet`, `document`) come from the
                enclosing scope, not visible here.
                """
                for guid, seqno, diff in \
                        directory.diff(orig_seq, limit=_DIFF_CHUNK):
                    # Yield to other coroutines between documents
                    coroutine.dispatch()

                    # NOTE: deleting from `diff` while iterating `.items()`
                    # is safe only because this is Python 2 (`file()` below),
                    # where items() returns a list snapshot
                    for prop, value in diff.items():
                        if 'path' in value:
                            # Blob stored as a local file (py2 `file` builtin)
                            data = file(value.pop('path'), 'rb')
                        elif 'url' in value:
                            data = self._download_blob(value.pop('url'))
                        else:
                            continue
                        # Blob is transferred separately, drop it from diff
                        del diff[prop]
                        arcname = join(document, 'blobs', guid, prop)
                        out_packet.push(data,
                                        arcname=arcname,
                                        cmd='sn_push',
                                        document=document,
                                        guid=guid,
                                        **value)

                    # Everything was blobs; nothing left to yield
                    if not diff:
                        continue

                    yield {'guid': guid, 'diff': diff}

                    # Update `in_seq`, it might be reused by caller
                    in_seq.exclude(seqno, seqno)
                    push_seq.include(seqno, seqno)
Example #3
    def _mount_volume(self, path):
        """Open a `Volume` at `path`, optionally starting a node server.

        Server mode is requested via `client.server_mode` and requires a
        ``node`` entry under `path`. Returns ``(volume, server_mode)``
        where `server_mode` reflects whether a server was actually started.
        """
        lazy_open = client.lazy_open.value
        server_mode = client.server_mode.value and exists(join(path, 'node'))

        if server_mode and self._servers:
            # A server is already running; refuse to start a second one
            _logger.warning(
                'Do not start server for %r, '
                'server already started', path)
            server_mode = False
        elif server_mode:
            # Served volumes are opened eagerly
            lazy_open = False

        volume = Volume(path, lazy_open=lazy_open)
        self._jobs.spawn(volume.populate)

        if not server_mode:
            return volume, server_mode

        _logger.info('Start %r server on %s port', volume.root,
                     node.port.value)
        server = coroutine.WSGIServer(('0.0.0.0', node.port.value),
                                      router.Router(NodeCommands(volume)))
        self._servers.spawn(server.serve_forever)

        # Let servers start before publishing mount event
        coroutine.dispatch()

        return volume, server_mode
Example #4
    def diff(self, in_seq, out_packet):
        """Push diffs for all documents covered by `in_seq` to `out_packet`.

        :param in_seq:
            sequence of seqnos to export; mutated in place (processed
            seqnos get excluded) so the caller may reuse it
        :param out_packet:
            packet receiving ``sn_push`` entries per document plus a final
            ``sn_commit`` entry describing what was actually pushed

        On `DiskFull`, a partial ``sn_commit`` with the non-collapsed
        `push_seq` is flushed (force=True) before re-raising.
        """
        # Since `in_seq` will be changed in `patch()`, original sequence
        # should be passed as-is to every document's `diff()` because
        # seqno handling is common for all documents
        orig_seq = Sequence(in_seq)
        push_seq = Sequence()

        for document, directory in self.items():
            # Yield to other coroutines between documents
            coroutine.dispatch()
            # Flush pending index changes before diffing
            directory.commit()

            def patch():
                # Generator bound to the current `document`/`directory`;
                # consumed by the out_packet.push() call right below
                for guid, seqno, diff in \
                        directory.diff(orig_seq, limit=_DIFF_CHUNK):
                    coroutine.dispatch()

                    # Python 2: items() snapshots, so `del diff[prop]`
                    # during iteration is safe here
                    for prop, value in diff.items():
                        if 'path' in value:
                            data = file(value.pop('path'), 'rb')
                        elif 'url' in value:
                            data = self._download_blob(value.pop('url'))
                        else:
                            continue
                        # Blobs are pushed as separate packet entries
                        del diff[prop]
                        arcname = join(document, 'blobs', guid, prop)
                        out_packet.push(data,
                                        arcname=arcname,
                                        cmd='sn_push',
                                        document=document,
                                        guid=guid,
                                        **value)

                    if not diff:
                        continue

                    yield {'guid': guid, 'diff': diff}

                    # Update `in_seq`, it might be reused by caller
                    in_seq.exclude(seqno, seqno)
                    push_seq.include(seqno, seqno)

            try:
                out_packet.push(patch(),
                                arcname=join(document, 'diff'),
                                cmd='sn_push',
                                document=document)
            except DiskFull:
                # Record what already made it out before propagating
                if push_seq:
                    out_packet.push(force=True,
                                    cmd='sn_commit',
                                    sequence=push_seq)
                raise

        if push_seq:
            # Only here we can collapse `push_seq` since seqno handling
            # is common for all documents; if there was an exception before
            # this place, `push_seq` should contain not-collapsed sequence
            orig_seq.floor(push_seq.last)
            out_packet.push(force=True, cmd='sn_commit', sequence=orig_seq)
Example #5
 def merge(self, record, increment_seqno=True):
     """Apply one sync `record` to the matching document directory.

     The payload comes from ``record['blob']`` for blob records
     (``content_type == 'blob'``), otherwise from ``record['diff']``.
     Returns whatever the directory's ``merge()`` returns.
     """
     coroutine.dispatch()
     is_blob = record.get('content_type') == 'blob'
     payload = record['blob'] if is_blob else record['diff']
     directory = self[record['document']]
     return directory.merge(record['guid'], payload,
                            increment_seqno=increment_seqno)
Example #6
    def __init__(self, root, metadata, commit_cb=None):
        """Open a writable index under `root` with a background committer.

        :param root:
            index location, passed through to `IndexReader`
        :param metadata:
            index metadata, passed through to `IndexReader`
        :param commit_cb:
            optional callback, passed through to `IndexReader`
            (presumably invoked on commit — confirm in `IndexReader`)
        """
        IndexReader.__init__(self, root, metadata, commit_cb)

        # Process default language (from env.default_lang())
        self._lang = env.default_lang()
        # Count of updates not yet committed — assumption; verify against
        # `_commit_handler()`
        self._pending_updates = 0
        # Event used to coordinate with the committer greenlet
        self._commit_cond = coroutine.Event()
        # Background greenlet running the commit loop
        self._commit_job = coroutine.spawn(self._commit_handler)

        # Let `_commit_handler()` call `wait()` to not miss immediate commit
        coroutine.dispatch()

        self._do_open()
Example #7
 def serve_forever(self):
     """Block on the monitored fd and deliver events until closed.

     Each event's callback failure is logged and swallowed so one bad
     callback cannot stop the loop.
     """
     def deliver(filename, event, cb):
         # Best-effort delivery: log and continue on any callback error
         try:
             cb(filename, event)
         except Exception:
             util.exception('Cannot dispatch 0x%X event for %r',
                     event, filename)

     while True:
         # Wait until the descriptor becomes readable
         coroutine.select([self.fileno()], [], [])
         if self.closed:
             break
         for filename, event, cb in self.read():
             deliver(filename, event, cb)
             # Yield after every delivered event
             coroutine.dispatch()
Example #8
    def _sync(self):
        """Bring the in-memory file index in sync with `self._files_path`.

        Scans the tree for new files, refreshes entries for tracked files
        (re-appending changed/deleted ones with fresh seqnos), appends the
        new files, and persists the index if seqnos were committed.
        """
        # Fast path: directory mtime unchanged since the last scan
        if os.stat(self._files_path).st_mtime <= self._stamp:
            return

        new_files = set()
        updates = 0
        deletes = 0

        # Populate list of new files at first
        for root, __, files in os.walk(self._files_path):
            coroutine.dispatch()
            rel_root = relpath(root, self._files_path)
            if rel_root == '.':
                rel_root = ''
            else:
                rel_root += os.sep
            for filename in files:
                coroutine.dispatch()
                path = join(root, filename)
                if os.lstat(path).st_mtime > self._stamp:
                    new_files.add(rel_root + filename)

        # Check for updates for already tracked files
        tail = []
        for pos, (__, rel_path, mtime) in enumerate(self._index[:]):
            coroutine.dispatch()
            path = join(self._files_path, rel_path)
            existing = lexists(path)
            # Entry is still valid when presence matches the sign convention
            # (mtime >= 0 means "exists") and, if present, mtime is unchanged
            if existing == (mtime >= 0) and \
                    (not existing or os.lstat(path).st_mtime == mtime):
                continue
            if existing:
                # Changed tracked file, not a brand new one
                new_files.discard(rel_path)
            # `pos` indexes the pre-loop copy; shift left by the number of
            # entries already moved to `tail`
            pos -= len(tail)
            self._index = self._index[:pos] + self._index[pos + 1:]
            # Re-append with a fresh seqno; -1 mtime marks a deletion
            tail.append([
                self._seqno.next(),
                rel_path,
                int(os.lstat(path).st_mtime) if existing else -1,
                ])
            if existing:
                updates += 1
            else:
                deletes += 1
        self._index.extend(tail)

        # BUGFIX: was len(self._files_path) — the length of the path string —
        # where the `new=%r` placeholder clearly means the new-files count
        _logger.debug('Updated %r index: new=%r updates=%r deletes=%r',
                self._files_path, len(new_files), updates, deletes)

        # Finally, add new files
        for rel_path in sorted(new_files):
            coroutine.dispatch()
            mtime = os.lstat(join(self._files_path, rel_path)).st_mtime
            self._index.append([self._seqno.next(), rel_path, mtime])

        self._stamp = os.stat(self._files_path).st_mtime
        if self._seqno.commit():
            # Persist index + stamp; util.new_file presumably writes via a
            # temp file for atomicity — confirm
            with util.new_file(self._index_path) as f:
                json.dump((self._index, self._stamp), f)
Example #9
 def populate(self):
     """Populate every contained directory, yielding control per step."""
     for directory in self.values():
         steps = directory.populate()
         for _ in steps:
             coroutine.dispatch()
Example #10
 def _notify(self, event):
     """Publish `event` to current waiters and arm a fresh notifier."""
     _logger.debug('Publish event: %r', event)
     waiters = self._notifier
     waiters.set(event)
     # New AsyncResult for the next event; waiters re-subscribe to it
     self._notifier = coroutine.AsyncResult()
     # Give woken coroutines a chance to run
     coroutine.dispatch()
Example #11
 def _populate(self, directory):
     """Drain `directory.populate()`, yielding to other coroutines."""
     steps = directory.populate()
     for _ in steps:
         coroutine.dispatch()