Example No. 1
    def set_blob(self, prop, data=None, size=None, **kwargs):
        if not exists(self._root):
            os.makedirs(self._root)
        path = join(self._root, prop + PropertyMeta.BLOB_SUFFIX)
        meta = PropertyMeta(**kwargs)

        if data is None:
            # No data passed: drop the existing blob file, if any
            if exists(path):
                os.unlink(path)
        elif isinstance(data, PropertyMeta):
            # Meta-only update: merge the keyword meta into the passed one
            data.update(meta)
            meta = data
        else:
            digest = hashlib.sha1()
            if hasattr(data, 'read'):
                # File-like object: stream it to disk, hashing on the way
                if size is None:
                    size = sys.maxint
                self._set_blob_by_stream(digest, data, size, path)
            elif isabs(data) and exists(data):
                # Absolute path to an existing file: copy it
                self._set_blob_by_path(digest, data, path)
            else:
                # Plain string content: write it directly
                with util.new_file(path) as f:
                    f.write(data)
                digest.update(data)
            meta['digest'] = digest.hexdigest()

        self.set(prop, **meta)
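
A hedged usage sketch of set_blob()'s branches; `storage` stands for an instance of whatever class defines the method, and all values are illustrative:

# Hypothetical calls; every name except set_blob() is illustrative
storage.set_blob('preview', data=open('/tmp/preview.png', 'rb'))  # file-like: streamed and hashed
storage.set_blob('preview', data='/usr/share/icons/icon.svg')     # absolute path: copied and hashed
storage.set_blob('preview', data='raw content')                   # plain string: written directly
storage.set_blob('preview')                                       # no data: existing blob is removed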
Example No. 2
def presolve(names):
    for repo in _get_repos(obs_presolve_project.value):
        for arch in repo['arches']:
            dirname = join(obs_presolve_path.value, repo['name'], arch)
            if not exists(dirname):
                os.makedirs(dirname)
            for package in names:
                try:
                    response = _request('GET', ['resolve'], params={
                        'project': obs_presolve_project.value,
                        'repository': repo['name'],
                        'arch': arch,
                        'package': package,
                        'withdeps': '1',
                        'exclude': 'sugar',
                        })
                except Exception:
                    util.exception('Failed to resolve %s:%s:%s for presolving',
                            repo['name'], arch, package)
                    continue
                deps_graph = []
                for pkg in response.findall('binary'):
                    deps_graph.append(dict(pkg.items()))
                with util.new_file(join(dirname, package)) as f:
                    json.dump(deps_graph, f)
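
Each cache file written above, at join(obs_presolve_path.value, repo['name'], arch, package), holds a JSON list with one attribute dict per resolved binary package. A hedged read-back sketch with illustrative path values:

import json
from os.path import join

# Illustrative repository, arch and package names
with open(join('/var/presolve', 'Fedora-14', 'i586', 'gcc')) as f:
    deps_graph = json.load(f)   # list of dicts, one per resolved binary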
Example No. 3
    def _sync(self):
        if os.stat(self._files_path).st_mtime <= self._stamp:
            return

        new_files = set()
        updates = 0
        deletes = 0

        # Populate list of new files at first
        for root, __, files in os.walk(self._files_path):
            coroutine.dispatch()
            rel_root = relpath(root, self._files_path)
            if rel_root == '.':
                rel_root = ''
            else:
                rel_root += os.sep
            for filename in files:
                coroutine.dispatch()
                path = join(root, filename)
                if os.lstat(path).st_mtime > self._stamp:
                    new_files.add(rel_root + filename)

        # Check for updates for already tracked files
        tail = []
        for pos, (__, rel_path, mtime) in enumerate(self._index[:]):
            coroutine.dispatch()
            path = join(self._files_path, rel_path)
            existing = lexists(path)
            if existing == (mtime >= 0) and \
                    (not existing or os.lstat(path).st_mtime == mtime):
                continue
            if existing:
                new_files.discard(rel_path)
            pos -= len(tail)
            self._index = self._index[:pos] + self._index[pos + 1:]
            tail.append([
                self._seqno.next(),
                rel_path,
                int(os.lstat(path).st_mtime) if existing else -1,
                ])
            if existing:
                updates += 1
            else:
                deletes += 1
        self._index.extend(tail)

        _logger.debug('Updated %r index: new=%r updates=%r deletes=%r',
                self._files_path, len(new_files), updates, deletes)

        # Finally, add new files
        for rel_path in sorted(new_files):
            coroutine.dispatch()
            mtime = os.lstat(join(self._files_path, rel_path)).st_mtime
            self._index.append([self._seqno.next(), rel_path, mtime])

        self._stamp = os.stat(self._files_path).st_mtime
        if self._seqno.commit():
            with util.new_file(self._index_path) as f:
                json.dump((self._index, self._stamp), f)
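
For reference, each index entry handled above is a three-item list [seqno, relative path, integer mtime], with mtime == -1 serving as a tombstone for a deleted file; the values below are illustrative:

index = [
    [42, 'icons/activity.svg', 1328112000],  # tracked file with its last mtime
    [43, 'removed/old.txt', -1],             # tombstone for a deleted file
    ]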
Example No. 4
    def commit(self):
        dir_path = dirname(self._path)
        if dir_path and not exists(dir_path):
            os.makedirs(dir_path)
        with util.new_file(self._path) as f:
            json.dump(self, f)
            f.flush()
            os.fsync(f.fileno())
Example No. 5
    def _set_blob_by_stream(self, digest, stream, size, path):
        with util.new_file(path) as f:
            # Copy at most `size` bytes, updating the digest on the way
            while size > 0:
                chunk = stream.read(min(size, BUFFER_SIZE))
                if not chunk:
                    break
                f.write(chunk)
                size -= len(chunk)
                if digest is not None:
                    digest.update(chunk)
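
The same bounded copy-while-hashing technique as a self-contained sketch; the names here are illustrative, not part of the original API:

import hashlib
import io

def copy_bounded(src, dst, size, digest, bufsize=8192):
    # Copy at most `size` bytes from src to dst, feeding the digest as we go
    while size > 0:
        chunk = src.read(min(size, bufsize))
        if not chunk:
            break
        dst.write(chunk)
        size -= len(chunk)
        digest.update(chunk)

digest = hashlib.sha1()
copy_bounded(io.BytesIO(b'x' * 10000), io.BytesIO(), 4096, digest)
print(digest.hexdigest())   # digest of exactly the first 4096 bytes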
Example No. 6
    def set(self, prop, mtime=None, path=None, content=None, **meta):
        if not exists(self._root):
            os.makedirs(self._root)
        meta_path = join(self._root, prop)

        blob_path = join(self._root, prop + PropertyMeta.BLOB_SUFFIX)
        if content is not None:
            with util.new_file(blob_path) as f:
                f.write(b64decode(content))
        elif path and exists(path):
            util.cptree(path, blob_path)

        with util.new_file(meta_path) as f:
            pickle.dump(meta, f)
        if mtime:
            os.utime(meta_path, (mtime, mtime))

        if prop == 'guid':
            if not mtime:
                mtime = time.time()
            # Touch the parent directory to make it possible to crawl it on
            # startup when the index was not previously closed properly
            os.utime(join(self._root, '..'), (mtime, mtime))
Example No. 7
    def commit(self):
        """Store current seqno value in a file.

        :returns:
            `True` if a commit actually happened

        """
        if self._value == self._orig_value:
            return False
        with util.new_file(self._path) as f:
            f.write(str(self._value))
            f.flush()
            os.fsync(f.fileno())
        self._orig_value = self._value
        return True
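
A hedged usage sketch of the commit contract; the class name and path are illustrative, while next() is grounded in Example No. 3's self._seqno.next() calls:

seqno = Seqno('/var/db/seqno')   # hypothetical constructor
value = seqno.next()             # advance the in-memory counter
if seqno.commit():               # value flushed and fsynced to disk
    print('value %s persisted' % value)
assert not seqno.commit()        # unchanged value: commit() is a no-op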
Example No. 8
    def push(self, record):
        cmd = record.get('cmd')
        if cmd == 'files_push':
            # Store the pushed blob under the files root
            blob = record['blob']
            path = join(self._files_path, record['path'])
            if not exists(dirname(path)):
                os.makedirs(dirname(path))
            with util.new_file(path) as f:
                while True:
                    chunk = blob.read(BUFFER_SIZE)
                    if not chunk:
                        break
                    f.write(chunk)
        elif cmd == 'files_delete':
            # Remove the named file if it is still around
            path = join(self._files_path, record['path'])
            if exists(path):
                os.unlink(path)
        elif cmd == 'files_commit':
            # Mark the pushed sequence as processed
            self.sequence.exclude(record['sequence'])
            self.sequence.commit()
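
For reference, the record shapes push() dispatches on, reconstructed from the branches above; the sequence format is an assumption:

import io

records = [
    {'cmd': 'files_push', 'path': 'docs/readme', 'blob': io.BytesIO(b'content')},
    {'cmd': 'files_delete', 'path': 'docs/obsolete'},
    {'cmd': 'files_commit', 'sequence': [[1, 42]]},   # assumed range-list format
    ]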
Example No. 9
    def _save_layout(self):
        path = join(self._root, 'layout')
        with util.new_file(path) as f:
            f.write(str(_LAYOUT_VERSION))
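
Every example funnels writes through util.new_file(). Its implementation is not shown here; judging from the usage (a context manager yielding a writable file, with some callers fsyncing before close), it plausibly writes a temporary sibling file and renames it into place on success. A minimal sketch under that assumption:

import os
from contextlib import contextmanager

@contextmanager
def new_file(path):
    # Assumed behaviour: write to a temp file, rename over `path` on success,
    # so readers never observe a half-written file
    tmp_path = path + '.tmp'
    f = open(tmp_path, 'w')
    try:
        yield f
        f.close()
        os.rename(tmp_path, path)   # atomic on POSIX within one filesystem
    except Exception:
        f.close()
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)
        raise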