Example #1
from operator import attrgetter

# url_for_entrypath, get_pyversion_filetype, get_toxresults_info, sizeof_fmt,
# dist_file_types and log are helpers from the surrounding devpi-web module.
def get_files_info(request, linkstore, show_toxresults=False):
    files = []
    filedata = linkstore.get_links(rel='releasefile')
    if not filedata:
        log.warn("project %r version %r has no files",
                 linkstore.projectname, linkstore.version)
    for link in sorted(filedata, key=attrgetter('basename')):
        url = url_for_entrypath(request, link.entrypath)
        entry = link.entry
        if entry.eggfragment:
            url += "#egg=%s" % entry.eggfragment
        elif entry.md5:
            url += "#md5=%s" % entry.md5
        py_version, file_type = get_pyversion_filetype(link.basename)
        if py_version == 'source':
            py_version = ''
        size = ''
        if entry.file_exists():
            size = "%.0f %s" % sizeof_fmt(entry.file_size())
        fileinfo = dict(
            title=link.basename,
            url=url,
            basename=link.basename,
            md5=entry.md5,
            dist_type=dist_file_types.get(file_type, ''),
            py_version=py_version,
            size=size)
        if show_toxresults:
            toxresults = get_toxresults_info(linkstore, link)
            if toxresults:
                fileinfo['toxresults'] = toxresults
        files.append(fileinfo)
    return files
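A minimal sketch of consuming the returned list, assuming a request and
linkstore as in the signature above; only the dict keys built by the
function are used:

for fileinfo in get_files_info(request, linkstore):
    # py_version is '' for source distributions, size '' for missing files
    print("%s %s %s" % (fileinfo['basename'], fileinfo['md5'], fileinfo['size']))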
Example #2
def ix(self, name):
    schema = getattr(self, '%s_schema' % name)
    if not exists_in(self.index_path, indexname=name):
        return create_in(self.index_path, schema, indexname=name)
    ix = open_dir(self.index_path, indexname=name)
    if ix.schema != schema:
        log.warn("\n".join([
            "The index schema on disk differs from the current code schema.",
            "You need to run devpi-server with the --index-projects option to recreate the index."]))
    return ix
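For context, the open-or-create pattern above maps directly onto plain
Whoosh; a self-contained sketch with an invented schema and directory:

import os
from whoosh.fields import ID, TEXT, Schema
from whoosh.index import create_in, exists_in, open_dir

index_path = "indexdir"  # hypothetical location
os.makedirs(index_path, exist_ok=True)
schema = Schema(name=ID(stored=True), text=TEXT)
if exists_in(index_path, indexname="project"):
    ix = open_dir(index_path, indexname="project")
else:
    ix = create_in(index_path, schema, indexname="project")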
Example #3
def get_files_info(request, linkstore, show_toxresults=False):
    files = []
    filedata = linkstore.get_links(rel='releasefile')
    if not filedata:
        log.warn("project %r version %r has no files", linkstore.project,
                 linkstore.version)
    for link in sorted(filedata, key=attrgetter('basename')):
        url = url_for_entrypath(request, link.entrypath)
        entry = link.entry
        if getattr(entry, 'eggfragment', None):
            # BBB for older devpi-server (<5.0.0)
            # before 5.0.0, eggfragment value was the result of searching for
            # downloads outside PyPI via scraping
            # can be removed once devpi-web requires devpi-server >= 5.0.0
            url += "#egg=%s" % entry.eggfragment
        elif entry.hash_spec:
            url += "#" + entry.hash_spec
        py_version, file_type = get_pyversion_filetype(link.basename)
        if py_version == 'source':
            py_version = ''
        size = ''
        if entry.file_exists():
            size = "%.0f %s" % sizeof_fmt(entry.file_size())
        try:
            history = [
                make_history_view_item(request, x) for x in link.get_logs()
            ]
        except AttributeError:
            history = []
        last_modified = format_timetuple(parsedate(entry.last_modified))
        fileinfo = dict(title=link.basename,
                        url=url,
                        basename=link.basename,
                        hash_spec=entry.hash_spec,
                        dist_type=dist_file_types.get(file_type, ''),
                        py_version=py_version,
                        last_modified=last_modified,
                        history=history,
                        size=size)
        if show_toxresults:
            toxresults = get_toxresults_info(linkstore, link)
            if toxresults:
                fileinfo['toxresults'] = toxresults
        files.append(fileinfo)
    return files
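In devpi-server, hash_spec is a string of the form "<algorithm>=<hexdigest>"
(e.g. "sha256=..."), which is why it can be appended to the URL fragment
as-is. An illustrative round trip with a made-up digest:

hash_spec = "sha256=0f1e2d3c"  # made-up value
hash_type, _, hash_value = hash_spec.partition("=")
print(hash_type, hash_value)   # -> sha256 0f1e2d3c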
Example #4
def get_files_info(request, linkstore, show_toxresults=False):
    files = []
    filedata = linkstore.get_links(rel='releasefile')
    if not filedata:
        log.warn("project %r version %r has no files",
                 linkstore.project, linkstore.version)
    for link in sorted(filedata, key=attrgetter('basename')):
        url = url_for_entrypath(request, link.entrypath)
        entry = link.entry
        if entry.eggfragment:
            url += "#egg=%s" % entry.eggfragment
        elif entry.hash_spec:
            url += "#" + entry.hash_spec
        py_version, file_type = get_pyversion_filetype(link.basename)
        if py_version == 'source':
            py_version = ''
        size = ''
        if entry.file_exists():
            size = "%.0f %s" % sizeof_fmt(entry.file_size())
        try:
            history = [
                make_history_view_item(request, x)
                for x in link.get_logs()]
        except AttributeError:
            history = []
        last_modified = format_timetuple(parsedate(entry.last_modified))
        fileinfo = dict(
            title=link.basename,
            url=url,
            basename=link.basename,
            hash_spec=entry.hash_spec,
            dist_type=dist_file_types.get(file_type, ''),
            py_version=py_version,
            last_modified=last_modified,
            history=history,
            size=size)
        if show_toxresults:
            toxresults = get_toxresults_info(linkstore, link)
            if toxresults:
                fileinfo['toxresults'] = toxresults
        files.append(fileinfo)
    return files
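The last_modified handling parses an RFC 2822 date string into a time tuple
before reformatting. Assuming parsedate is the standard library's
email.utils.parsedate (not confirmed by the excerpt), the round trip looks
like this:

import time
from email.utils import parsedate

timetuple = parsedate("Sun, 06 Nov 1994 08:49:37 GMT")
print(time.strftime("%Y-%m-%d %H:%M", timetuple))  # -> 1994-11-06 08:49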
Example #5
def update_schema(ix, schema):
    if ix.schema == schema:
        return
    existing_names = set(ix.schema.names())
    schema_names = set(schema.names())
    removed_names = existing_names - schema_names
    added_names = schema_names - existing_names
    if not (removed_names or added_names):
        return
    writer = ix.writer()
    for name in removed_names:
        writer.remove_field(name)
    for name in added_names:
        writer.add_field(name, schema[name])
    log.warn(
        "The search index schema has changed. "
        "The update can take a while depending on the size of your index.")
    if removed_names:
        writer.commit(optimize=True)
    else:
        writer.commit()
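A usage sketch, assuming ix is a regular Whoosh index and log is the
module's logger; the two schemas are invented so that one field is removed
and another added:

import tempfile
from whoosh.fields import ID, TEXT, Schema
from whoosh.index import create_in

old = Schema(name=ID(stored=True), summary=TEXT)
new = Schema(name=ID(stored=True), description=TEXT)
ix = create_in(tempfile.mkdtemp(), old)
update_schema(ix, new)  # removes 'summary', adds 'description', commits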
Example #6
def _entries(self):
    if not self.exists():
        # this happens on import, when the metadata is registered, but the
        # docs aren't uploaded yet
        threadlog.warn("Tried to access %s, but it doesn't exist.", self.unpack_path)
        return {}
    html = []
    fjson = []
    for entry in self.unpack_path.visit():
        if entry.basename.endswith('.fjson'):
            fjson.append(entry)
        elif entry.basename.endswith('.html'):
            html.append(entry)
    if fjson:
        entries = dict(
            (x.relto(self.unpack_path)[:-6], x)
            for x in fjson)
    else:
        entries = dict(
            (x.relto(self.unpack_path)[:-5], x)
            for x in html)
    return entries
Example #7
def _entries(self):
    if not self.exists():
        # this happens on import, when the metadata is registered, but the
        # docs aren't uploaded yet
        threadlog.warn("Tried to access %s, but it doesn't exist.",
                       self.unpack_path)
        return {}
    unpack_path = unpack_docs(self.stage, self.name, self.version,
                              self.entry)
    html = []
    fjson = []
    for entry in unpack_path.visit():
        basename = entry.basename
        if basename.endswith('.fjson'):
            fjson.append(entry)
        elif basename.endswith('.html'):
            html.append(entry)
    if fjson:
        # if there is fjson, then we get structured data, see
        # http://www.sphinx-doc.org/en/master/usage/builders/index.html#serialization-builder-details
        return {x.relto(unpack_path)[:-6]: x for x in fjson}
    else:
        return {x.relto(unpack_path)[:-5]: x for x in html}
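visit(), relto() and basename come from py.path.local (the py library), so
the traversal can be tried standalone; the directory name is hypothetical:

import py

root = py.path.local("unpacked-docs")  # hypothetical docs directory
for p in root.visit():                 # recurse over all files below root
    if p.basename.endswith(".fjson"):
        print(p.relto(root)[:-6])      # relative path minus the ".fjson" suffix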
Example #8
def warn(msg):
    threadlog.warn("devpi-rss: %s" % msg)
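Calling it just prefixes the message before handing it to devpi's
thread-local logger:

warn("updating feeds")  # logs: devpi-rss: updating feeds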