Code example #1
0
File: Download.py  Project: vmordan/klever
class MarkArchiveGenerator:
    """Generates a zip archive with the full data of a single mark.

    Iterating the instance yields the archive content chunk by chunk
    (suitable for a streaming response).  The archive contains one json
    entry per mark version plus a common 'markdata' entry.
    """

    def __init__(self, mark):
        """Remember the mark and detect its type.

        mark: a MarkUnsafe, MarkSafe or MarkUnknown instance.  For any
        other object the generator is left inert with ``type``, ``name``
        and ``stream`` set to None (previously these attributes were
        simply never assigned, which caused an AttributeError on first
        use of the half-initialized object).
        """
        self.mark = mark
        self.type = self.name = self.stream = None
        for mark_class, type_name in ((MarkUnsafe, 'unsafe'),
                                      (MarkSafe, 'safe'),
                                      (MarkUnknown, 'unknown')):
            if isinstance(mark, mark_class):
                self.type = type_name
                break
        if self.type is None:
            # Unsupported mark model: keep the object inert instead of
            # half-initialized.
            return
        # Only the first 10 characters of the identifier go into the name.
        self.name = 'Mark-%s-%s.zip' % (self.type, self.mark.identifier[:10])
        self.stream = ZipStream()

    def __iter__(self):
        """Yield the zip archive bytes for all versions of the mark."""
        for markversion in self.mark.versions.all():
            version_data = {
                'status': markversion.status,
                'comment': markversion.comment,
                'description': markversion.description
            }
            if self.type == 'unknown':
                # Unknown marks match by a (possibly regexp) function/problem
                # pattern instead of attributes/tags.
                version_data['function'] = markversion.function
                version_data['problem'] = markversion.problem_pattern
                version_data['is_regexp'] = markversion.is_regexp
                if markversion.link is not None:
                    version_data['link'] = markversion.link
            else:
                # Safe/unsafe marks carry comparison attributes, tags and
                # a verdict.
                version_data['attrs'] = []
                for aname, aval, compare in markversion.attrs.order_by('id')\
                        .values_list('attr__name__name', 'attr__value', 'is_compare'):
                    version_data['attrs'].append({
                        'attr': aname,
                        'value': aval,
                        'is_compare': compare
                    })

                version_data['tags'] = list(
                    tag for tag, in markversion.tags.values_list('tag__tag'))
                version_data['verdict'] = markversion.verdict

                if self.type == 'unsafe':
                    # Unsafe marks additionally store the comparison function
                    # name and the error trace file content.
                    version_data['function'] = markversion.function.name
                    with markversion.error_trace.file.file as fp:
                        version_data['error_trace'] = fp.read().decode('utf8')

            content = json.dumps(version_data,
                                 ensure_ascii=False,
                                 sort_keys=True,
                                 indent=4)
            for data in self.stream.compress_string(
                    'version-%s' % markversion.version, content):
                yield data
        # Mark-level data shared by all versions.
        common_data = {
            'is_modifiable': self.mark.is_modifiable,
            'mark_type': self.type,
            'format': self.mark.format,
            'identifier': self.mark.identifier
        }
        if self.type == 'unknown':
            common_data['component'] = self.mark.component.name
        content = json.dumps(common_data,
                             ensure_ascii=False,
                             sort_keys=True,
                             indent=4)
        for data in self.stream.compress_string('markdata', content):
            yield data
        yield self.stream.close_stream()
Code example #2
0
File: Download.py  Project: ldv-klever/klever
class MarkGeneratorBase:
    type = None
    attrs_model = None
    tags_model = None

    def __init__(self, mark):
        assert self.type is not None, 'Wrong usage'
        self.mark = mark
        self.name = 'Mark-{}-{}.zip'.format(self.type, self.mark.identifier)
        self.stream = ZipStream()

    def common_data(self):
        return {
            'type': self.type,
            'identifier': str(self.mark.identifier),
            'is_modifiable': self.mark.is_modifiable
        }

    def version_data(self, version):
        data = {
            'comment': version.comment,
            'description': version.description,
            'attrs': self.attrs.get(version.id, []),
        }
        if self.tags is not None:
            data['tags'] = self.tags.get(version.id, [])
        return data

    @cached_property
    def attrs(self):
        assert self.attrs_model is not None, 'Wrong usage'
        mark_attrs = {}
        for mattr in self.attrs_model.objects.filter(
                mark_version__mark=self.mark).order_by('id'):
            mark_attrs.setdefault(mattr.mark_version_id, [])
            mark_attrs[mattr.mark_version_id].append({
                'name':
                mattr.name,
                'value':
                mattr.value,
                'is_compare':
                mattr.is_compare
            })
        return mark_attrs

    @cached_property
    def tags(self):
        if not self.tags_model:
            return None
        all_tags = {}
        for version_id, tag_name in self.tags_model.objects.filter(mark_version__mark=self.mark) \
                .values_list('mark_version_id', 'tag__name'):
            all_tags.setdefault(version_id, [])
            all_tags[version_id].append(tag_name)
        return all_tags

    def versions_queryset(self):
        return self.mark.versions.all()

    def __iter__(self):
        # Add main mark data
        content = json.dumps(self.common_data(),
                             ensure_ascii=False,
                             sort_keys=True,
                             indent=4)
        for data in self.stream.compress_string('mark.json', content):
            yield data

        # Add versions data
        for markversion in self.versions_queryset():
            content = json.dumps(self.version_data(markversion),
                                 ensure_ascii=False,
                                 sort_keys=True,
                                 indent=4)
            for data in self.stream.compress_string(
                    'version-{}.json'.format(markversion.version), content):
                yield data

        yield self.stream.close_stream()
Code example #3
0
File: Download.py  Project: naumushv/klever
class JobArchiveGenerator:
    """Streams a zip archive with a job, its decisions, reports and files.

    Iterating the instance yields the archive bytes chunk by chunk.  Json
    entries are emitted first; the generation of those entries collects
    referenced on-disk files into ``self._arch_files``, which are appended
    to the archive at the end.
    """

    def __init__(self, job, decisions_ids=None):
        """job: the job to export.

        decisions_ids: optional iterable of decision ids restricting the
        export; when falsy, all decisions of the job are exported.
        """
        self.job = job
        self._decisions_ids = list(map(
            int, decisions_ids)) if decisions_ids else None
        self.name = 'Job-{}.zip'.format(self.job.identifier)
        # Set of (filesystem path, archive name) pairs; a set deduplicates
        # files referenced by several reports.
        self._arch_files = set()
        self.stream = ZipStream()

    def __iter__(self):
        # Job data
        yield from self.stream.compress_string('job.json',
                                               self.__get_job_data())
        # One json entry per exported model; each __get_*/__add_* call also
        # registers the files it references in self._arch_files.
        yield from self.stream.compress_string(
            '{}.json'.format(Decision.__name__), self.__add_decisions_data())
        yield from self.stream.compress_string(
            '{}.json'.format(DecisionCache.__name__),
            self.__get_decision_cache())
        yield from self.stream.compress_string(
            '{}.json'.format(OriginalSources.__name__),
            self.__get_original_src())
        yield from self.stream.compress_string(
            '{}.json'.format(ReportComponent.__name__),
            self.__get_reports_data())
        yield from self.stream.compress_string(
            '{}.json'.format(ReportSafe.__name__), self.__get_safes_data())
        yield from self.stream.compress_string(
            '{}.json'.format(ReportUnsafe.__name__), self.__get_unsafes_data())
        yield from self.stream.compress_string(
            '{}.json'.format(ReportUnknown.__name__),
            self.__get_unknowns_data())
        yield from self.stream.compress_string(
            '{}.json'.format(ReportAttr.__name__), self.__get_attrs_data())
        yield from self.stream.compress_string(
            '{}.json'.format(CoverageArchive.__name__),
            self.__get_coverage_data())

        # These only collect files; they produce no json entries of their own.
        self.__add_job_files()
        self.__add_additional_sources()

        # Finally stream all collected files and close the archive.
        for file_path, arcname in self._arch_files:
            yield from self.stream.compress_file(file_path, arcname)
        yield self.stream.close_stream()

    @cached_property
    def _decision_filter(self):
        """Q object restricting querysets to the selected decisions
        (or to all decisions of the job when no selection was given)."""
        if self._decisions_ids:
            return Q(decision_id__in=self._decisions_ids)
        return Q(decision__job_id=self.job.id)

    def __get_job_data(self):
        """Serialized data of the job itself."""
        return self.__get_json(DownloadJobSerializer(instance=self.job).data)

    def __add_job_files(self):
        """Collect job file-system files; deduplicated by content hash."""
        job_files = {}
        for fs in FileSystem.objects.filter(
                decision__job=self.job).select_related('file'):
            job_files[fs.file.hash_sum] = (fs.file.file.path,
                                           fs.file.file.name)
        for f_path, arcname in job_files.values():
            self._arch_files.add((f_path, arcname))

    def __add_decisions_data(self):
        """Serialized decisions; also collects configuration files."""
        if self._decisions_ids:
            qs_filter = Q(id__in=self._decisions_ids)
        else:
            qs_filter = Q(job_id=self.job.id)
        decisions_list = []
        for decision in Decision.objects.filter(qs_filter).select_related(
                'scheduler', 'configuration'):
            decisions_list.append(
                DownloadDecisionSerializer(instance=decision).data)
            self._arch_files.add((decision.configuration.file.path,
                                  decision.configuration.file.name))
        return self.__get_json(decisions_list)

    def __get_decision_cache(self):
        """Serialized decision cache rows for the selected decisions."""
        return self.__get_json(
            DecisionCacheSerializer(instance=DecisionCache.objects.filter(
                self._decision_filter),
                                    many=True).data)

    def __get_original_src(self):
        """Map of original-sources identifier -> archive name; collects archives."""
        if self._decisions_ids:
            qs_filter = Q(reportcomponent__decision_id__in=self._decisions_ids)
        else:
            qs_filter = Q(reportcomponent__decision__job_id=self.job.id)
        sources = {}
        for src_arch in OriginalSources.objects.filter(qs_filter):
            sources[src_arch.identifier] = src_arch.archive.name
            self._arch_files.add(
                (src_arch.archive.path, src_arch.archive.name))
        return self.__get_json(sources)

    def __get_reports_data(self):
        """Serialized component reports (parents before children via 'level');
        collects log and verifier files."""
        reports = []
        for report in ReportComponent.objects.filter(self._decision_filter)\
                .select_related('parent', 'computer', 'original_sources', 'additional_sources').order_by('level'):
            report_data = DownloadReportComponentSerializer(
                instance=report).data

            # Add report files
            if report_data['log']:
                self._arch_files.add((report.log.path, report_data['log']))
            if report_data['verifier_files']:
                self._arch_files.add((report.verifier_files.path,
                                      report_data['verifier_files']))
            reports.append(report_data)

        return self.__get_json(reports)

    def __get_safes_data(self):
        """Serialized safe reports (no extra files to collect)."""
        safes_queryset = ReportSafe.objects.filter(
            self._decision_filter).select_related('parent').order_by('id')
        return self.__get_json(
            DownloadReportSafeSerializer(instance=safes_queryset,
                                         many=True).data)

    def __get_unsafes_data(self):
        """Serialized unsafe reports; collects error trace archives."""
        reports = []
        for report in ReportUnsafe.objects.filter(
                self._decision_filter).select_related('parent').order_by('id'):
            report_data = DownloadReportUnsafeSerializer(instance=report).data
            if report_data['error_trace']:
                self._arch_files.add(
                    (report.error_trace.path, report_data['error_trace']))
            reports.append(report_data)
        return self.__get_json(reports)

    def __get_unknowns_data(self):
        """Serialized unknown reports; collects problem description files."""
        reports = []
        for report in ReportUnknown.objects.filter(
                self._decision_filter).select_related('parent').order_by('id'):
            report_data = DownloadReportUnknownSerializer(instance=report).data
            if report_data['problem_description']:
                self._arch_files.add((report.problem_description.path,
                                      report_data['problem_description']))
            reports.append(report_data)
        return self.__get_json(reports)

    def __get_attrs_data(self):
        """Report attributes nested as {decision_id: {report_identifier: [attr, ...]}};
        collects attribute data files."""
        if self._decisions_ids:
            qs_filter = Q(report__decision_id__in=self._decisions_ids)
        else:
            qs_filter = Q(report__decision__job_id=self.job.id)

        attrs_data = {}
        for ra in ReportAttr.objects.filter(qs_filter).select_related(
                'data', 'report').order_by('id'):
            data = DownloadReportAttrSerializer(instance=ra).data
            if data['data_file']:
                self._arch_files.add((ra.data.file.path, data['data_file']))
            attrs_data.setdefault(ra.report.decision_id, {})
            attrs_data[ra.report.decision_id].setdefault(
                ra.report.identifier, [])
            attrs_data[ra.report.decision_id][ra.report.identifier].append(
                data)
        return self.__get_json(attrs_data)

    def __get_coverage_data(self):
        """Coverage archive descriptors; collects the archives themselves."""
        if self._decisions_ids:
            qs_filter = Q(report__decision_id__in=self._decisions_ids)
        else:
            qs_filter = Q(report__decision__job_id=self.job.id)

        coverage_data = []
        for carch in CoverageArchive.objects.filter(qs_filter).select_related(
                'report').order_by('id'):
            coverage_data.append({
                'decision': carch.report.decision_id,
                'report': carch.report.identifier,
                'identifier': carch.identifier,
                'archive': carch.archive.name,
                'name': carch.name
            })
            self._arch_files.add((carch.archive.path, carch.archive.name))
        return self.__get_json(coverage_data)

    def __add_additional_sources(self):
        """Collect additional source archives for the selected decisions."""
        for src_arch in AdditionalSources.objects.filter(
                self._decision_filter):
            self._arch_files.add(
                (src_arch.archive.path, src_arch.archive.name))

    def __get_json(self, data):
        """Shared json serialization settings for all archive entries."""
        return json.dumps(data, ensure_ascii=False, sort_keys=True, indent=2)