Example #1
class SeveralMarksGenerator:
    def __init__(self, marks):
        self.marks = marks
        self.stream = ZipStream()
        self.name = 'KleverMarks.zip'

    def generate_mark(self, markgen):
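        # Coalesce the small chunks emitted by compress_stream into pieces of
        # at least CHUNK_SIZE before yielding them to the consumer.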
        buf = b''
        for data in self.stream.compress_stream(markgen.name, markgen):
            buf += data
            if len(buf) > CHUNK_SIZE:
                yield buf
                buf = b''
        if len(buf) > 0:
            yield buf

    def __iter__(self):
        for mark in self.marks:
            if isinstance(mark, MarkSafe):
                markgen = SafeMarkGenerator(mark)
            elif isinstance(mark, MarkUnsafe):
                markgen = UnsafeMarkGenerator(mark)
            elif isinstance(mark, MarkUnknown):
                markgen = UnknownMarkGenerator(mark)
            else:
                continue
            yield from self.generate_mark(markgen)
        yield self.stream.close_stream()
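Each of these classes is an iterable of byte chunks plus a suggested file name, which is exactly the shape Django's StreamingHttpResponse consumes. A minimal view sketch, assuming that setup (download_marks and selected_marks are hypothetical names, not taken from Klever):

from django.http import StreamingHttpResponse

def download_marks(request):
    # selected_marks: an assumed collection of mark instances.
    generator = SeveralMarksGenerator(selected_marks)
    # Chunks are produced lazily, so the archive never sits in memory whole.
    response = StreamingHttpResponse(generator, content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(generator.name)
    return response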
Example #2
class AllMarksGenerator:
    def __init__(self):
        curr_time = now()
        self.name = 'Marks--%s-%s-%s.zip' % (curr_time.day, curr_time.month,
                                             curr_time.year)
        self.stream = ZipStream()

    def generators(self):
        for mark in MarkSafe.objects.all():
            yield SafeMarkGenerator(mark)
        for mark in MarkUnsafe.objects.all():
            yield UnsafeMarkGenerator(mark)
        for mark in MarkUnknown.objects.all():
            yield UnknownMarkGenerator(mark)

    def __iter__(self):
        for markgen in self.generators():
            buf = b''
            for data in self.stream.compress_stream(markgen.name, markgen):
                buf += data
                if len(buf) > CHUNK_SIZE:
                    yield buf
                    buf = b''
            if len(buf) > 0:
                yield buf
        yield self.stream.close_stream()
Example #3
class KleverCoreArchiveGen:
    def __init__(self, decision):
        self.decision = decision
        self.arcname = 'VJ__{}.zip'.format(decision.identifier)
        self.stream = ZipStream()

    def __iter__(self):
        for file_inst in FileSystem.objects.filter(
                decision=self.decision).select_related('file'):
            arch_name = '/'.join(['root', file_inst.name])
            file_src = '/'.join(
                [settings.MEDIA_ROOT, file_inst.file.file.name])
            for data in self.stream.compress_file(file_src, arch_name):
                yield data
        yield self.stream.close_stream()
Example #4
class AllMarksGen(object):
    def __init__(self):
        curr_time = now()
        self.name = 'Marks--%s-%s-%s.zip' % (curr_time.day, curr_time.month,
                                             curr_time.year)
        self.stream = ZipStream()

    def __iter__(self):
        for table in [MarkSafe, MarkUnsafe, MarkUnknown]:
            for mark in table.objects.filter(~Q(version=0)):
                markgen = MarkArchiveGenerator(mark)
                buf = b''
                for data in self.stream.compress_stream(markgen.name, markgen):
                    buf += data
                    if len(buf) > CHUNK_SIZE:
                        yield buf
                        buf = b''
                if len(buf) > 0:
                    yield buf
        yield self.stream.close_stream()
Example #5
class JobsArchivesGen:
    def __init__(self, jobs_to_download):
        self.jobs = jobs_to_download
        self.stream = ZipStream()
        self.name = 'KleverJobs.zip'

    def generate_job(self, jobgen):
        buf = b''
        for data in self.stream.compress_stream(jobgen.name, jobgen):
            buf += data
            if len(buf) > CHUNK_SIZE:
                yield buf
                buf = b''
        if len(buf) > 0:
            yield buf

    def __iter__(self):
        for job_id in self.jobs:
            jobgen = JobArchiveGenerator(
                self.jobs[job_id]['instance'],
                decisions_ids=self.jobs[job_id]['decisions'])
            yield from self.generate_job(jobgen)
        yield self.stream.close_stream()
Example #6
class MarkGeneratorBase:
    type = None
    attrs_model = None
    tags_model = None

    def __init__(self, mark):
        assert self.type is not None, 'Wrong usage'
        self.mark = mark
        self.name = 'Mark-{}-{}.zip'.format(self.type, self.mark.identifier)
        self.stream = ZipStream()

    def common_data(self):
        return {
            'type': self.type,
            'identifier': str(self.mark.identifier),
            'is_modifiable': self.mark.is_modifiable
        }

    def version_data(self, version):
        data = {
            'comment': version.comment,
            'description': version.description,
            'attrs': self.attrs.get(version.id, []),
        }
        if self.tags is not None:
            data['tags'] = self.tags.get(version.id, [])
        return data

    @cached_property
    def attrs(self):
        assert self.attrs_model is not None, 'Wrong usage'
        mark_attrs = {}
        for mattr in self.attrs_model.objects.filter(
                mark_version__mark=self.mark).order_by('id'):
            mark_attrs.setdefault(mattr.mark_version_id, [])
            mark_attrs[mattr.mark_version_id].append({
                'name': mattr.name,
                'value': mattr.value,
                'is_compare': mattr.is_compare
            })
        return mark_attrs

    @cached_property
    def tags(self):
        if not self.tags_model:
            return None
        all_tags = {}
        for version_id, tag_name in self.tags_model.objects.filter(mark_version__mark=self.mark) \
                .values_list('mark_version_id', 'tag__name'):
            all_tags.setdefault(version_id, [])
            all_tags[version_id].append(tag_name)
        return all_tags

    def versions_queryset(self):
        return self.mark.versions.all()

    def __iter__(self):
        # Add main mark data
        content = json.dumps(self.common_data(),
                             ensure_ascii=False,
                             sort_keys=True,
                             indent=4)
        for data in self.stream.compress_string('mark.json', content):
            yield data

        # Add versions data
        for markversion in self.versions_queryset():
            content = json.dumps(self.version_data(markversion),
                                 ensure_ascii=False,
                                 sort_keys=True,
                                 indent=4)
            for data in self.stream.compress_string(
                    'version-{}.json'.format(markversion.version), content):
                yield data

        yield self.stream.close_stream()
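A concrete subclass only has to pin down the three class attributes the base asserts on, and may extend the per-version payload. A hypothetical sketch (MarkSafeAttr, MarkSafeTag and the verdict field are assumed names for illustration, not confirmed by the source):

class SafeMarkGenerator(MarkGeneratorBase):
    type = 'safe'
    attrs_model = MarkSafeAttr  # assumed: rows reachable via mark_version__mark
    tags_model = MarkSafeTag    # assumed: tag names reachable via tag__name

    def version_data(self, version):
        data = super().version_data(version)
        # Safe marks carry a verdict per version (assumed field name).
        data['verdict'] = version.verdict
        return data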
Example #7
class MarkArchiveGenerator:
    def __init__(self, mark):
        self.mark = mark
        if isinstance(self.mark, MarkUnsafe):
            self.type = 'unsafe'
        elif isinstance(self.mark, MarkSafe):
            self.type = 'safe'
        elif isinstance(self.mark, MarkUnknown):
            self.type = 'unknown'
        else:
            return
        self.name = 'Mark-%s-%s.zip' % (self.type, self.mark.identifier[:10])
        self.stream = ZipStream()

    def __iter__(self):
        for markversion in self.mark.versions.all():
            version_data = {
                'status': markversion.status,
                'comment': markversion.comment,
                'description': markversion.description
            }
            if self.type == 'unknown':
                version_data['function'] = markversion.function
                version_data['problem'] = markversion.problem_pattern
                version_data['is_regexp'] = markversion.is_regexp
                if markversion.link is not None:
                    version_data['link'] = markversion.link
            else:
                version_data['attrs'] = []
                for aname, aval, compare in markversion.attrs.order_by('id')\
                        .values_list('attr__name__name', 'attr__value', 'is_compare'):
                    version_data['attrs'].append({
                        'attr': aname,
                        'value': aval,
                        'is_compare': compare
                    })

                version_data['tags'] = list(
                    tag for tag, in markversion.tags.values_list('tag__tag'))
                version_data['verdict'] = markversion.verdict

                if self.type == 'unsafe':
                    version_data['function'] = markversion.function.name
                    with markversion.error_trace.file.file as fp:
                        version_data['error_trace'] = fp.read().decode('utf8')

            content = json.dumps(version_data,
                                 ensure_ascii=False,
                                 sort_keys=True,
                                 indent=4)
            for data in self.stream.compress_string(
                    'version-%s' % markversion.version, content):
                yield data
        common_data = {
            'is_modifiable': self.mark.is_modifiable,
            'mark_type': self.type,
            'format': self.mark.format,
            'identifier': self.mark.identifier
        }
        if self.type == 'unknown':
            common_data['component'] = self.mark.component.name
        content = json.dumps(common_data,
                             ensure_ascii=False,
                             sort_keys=True,
                             indent=4)
        for data in self.stream.compress_string('markdata', content):
            yield data
        yield self.stream.close_stream()
Example #8
class FilesForCompetitionArchive:
    obj_attr = 'Program fragment'
    requirement_attr = 'Requirements specification'

    def __init__(self, decision, filters):
        self.decision = decision
        self._attrs = self.__get_attrs()
        self._archives = self.__get_archives()
        self._archives_to_upload = []
        self.__get_archives_to_upload(filters)
        self.stream = ZipStream()
        self.name = 'svcomp.zip'

    def __iter__(self):
        cnt = 0
        names_in_use = set()
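        # A repeated name pattern gets a numeric suffix so archive member
        # names stay unique.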
        for arch_path, name_pattern in self._archives_to_upload:
            if name_pattern in names_in_use:
                cnt += 1
                arch_name = '%s_%s.zip' % (name_pattern, cnt)
            else:
                arch_name = '%s.zip' % name_pattern
            names_in_use.add(name_pattern)

            for data in self.stream.compress_file(arch_path, arch_name):
                yield data

        yield self.stream.close_stream()

    def __get_archives(self):
        archives = {}
        for report in ReportComponent.objects.filter(decision=self.decision, verification=True)\
                .exclude(verifier_files='').only('id', 'verifier_files'):
            archives[report.id] = report.verifier_files.path
        return archives

    def __get_attrs(self):
        # Select attributes for all safes, unsafes and unknowns
        attrs = {}
        for report_id, a_name, a_value in ReportAttr.objects\
                .filter(report__decision=self.decision, name__in=[self.obj_attr, self.requirement_attr]) \
                .exclude(report__reportunsafe=None, report__reportsafe=None, report__reportunknown=None) \
                .values_list('report_id', 'name', 'value'):
            if report_id not in attrs:
                attrs[report_id] = {}
            attrs[report_id][a_name] = a_value
        return attrs

    def __add_archive(self, r_type, r_id, p_id):
        if p_id in self._archives and r_id in self._attrs \
                and self.obj_attr in self._attrs[r_id] \
                and self.requirement_attr in self._attrs[r_id]:

            ver_obj = self._attrs[r_id][self.obj_attr].replace('~', 'HOME').replace('/', '---')
            ver_requirement = self._attrs[r_id][self.requirement_attr].replace(':', '-')
            dirname = 'Unknowns' if r_type == 'f' else 'Unsafes' if r_type == 'u' else 'Safes'

            self._archives_to_upload.append(
                (self._archives[p_id], '{0}/{1}__{2}__{3}'.format(dirname, r_type, ver_requirement, ver_obj))
            )

    def __get_archives_to_upload(self, filters):
        common_filters = {'decision': self.decision, 'parent__reportcomponent__verification': True}
        if filters.get('safes'):
            for r_id, p_id in ReportSafe.objects.filter(**common_filters).values_list('id', 'parent_id'):
                self.__add_archive('s', r_id, p_id)
        if filters.get('unsafes'):
            for r_id, p_id in ReportUnsafe.objects.filter(**common_filters).values_list('id', 'parent_id'):
                self.__add_archive('u', r_id, p_id)
        if filters.get('problems'):
            for problem_data in filters['problems']:
                if problem_data.get('component') and problem_data.get('problem'):
                    unknowns_qs = ReportUnknown.objects.filter(
                        markreport_set__problem=problem_data['problem'],
                        component=problem_data['component'], **common_filters
                    )
                else:
                    unknowns_qs = ReportUnknown.objects.filter(cache__marks_total=0, **common_filters)
                for r_id, p_id in unknowns_qs.values_list('id', 'parent_id'):
                    self.__add_archive('f', r_id, p_id)
        elif filters.get('unknowns'):
            for r_id, p_id in ReportUnknown.objects.filter(**common_filters).values_list('id', 'parent_id'):
                self.__add_archive('f', r_id, p_id)
Example #9
class JobArchiveGenerator:
    def __init__(self, job, decisions_ids=None):
        self.job = job
        self._decisions_ids = list(map(
            int, decisions_ids)) if decisions_ids else None
        self.name = 'Job-{}.zip'.format(self.job.identifier)
        self._arch_files = set()
        self.stream = ZipStream()

    def __iter__(self):
        # Job data
        yield from self.stream.compress_string('job.json',
                                               self.__get_job_data())
        yield from self.stream.compress_string(
            '{}.json'.format(Decision.__name__), self.__add_decisions_data())
        yield from self.stream.compress_string(
            '{}.json'.format(DecisionCache.__name__),
            self.__get_decision_cache())
        yield from self.stream.compress_string(
            '{}.json'.format(OriginalSources.__name__),
            self.__get_original_src())
        yield from self.stream.compress_string(
            '{}.json'.format(ReportComponent.__name__),
            self.__get_reports_data())
        yield from self.stream.compress_string(
            '{}.json'.format(ReportSafe.__name__), self.__get_safes_data())
        yield from self.stream.compress_string(
            '{}.json'.format(ReportUnsafe.__name__), self.__get_unsafes_data())
        yield from self.stream.compress_string(
            '{}.json'.format(ReportUnknown.__name__),
            self.__get_unknowns_data())
        yield from self.stream.compress_string(
            '{}.json'.format(ReportAttr.__name__), self.__get_attrs_data())
        yield from self.stream.compress_string(
            '{}.json'.format(CoverageArchive.__name__),
            self.__get_coverage_data())

        self.__add_job_files()
        self.__add_additional_sources()

        for file_path, arcname in self._arch_files:
            yield from self.stream.compress_file(file_path, arcname)
        yield self.stream.close_stream()

    @cached_property
    def _decision_filter(self):
        if self._decisions_ids:
            return Q(decision_id__in=self._decisions_ids)
        return Q(decision__job_id=self.job.id)

    def __get_job_data(self):
        return self.__get_json(DownloadJobSerializer(instance=self.job).data)

    def __add_job_files(self):
        job_files = {}
        for fs in FileSystem.objects.filter(
                decision__job=self.job).select_related('file'):
            job_files[fs.file.hash_sum] = (fs.file.file.path,
                                           fs.file.file.name)
        for f_path, arcname in job_files.values():
            self._arch_files.add((f_path, arcname))

    def __add_decisions_data(self):
        if self._decisions_ids:
            qs_filter = Q(id__in=self._decisions_ids)
        else:
            qs_filter = Q(job_id=self.job.id)
        decisions_list = []
        for decision in Decision.objects.filter(qs_filter).select_related(
                'scheduler', 'configuration'):
            decisions_list.append(
                DownloadDecisionSerializer(instance=decision).data)
            self._arch_files.add((decision.configuration.file.path,
                                  decision.configuration.file.name))
        return self.__get_json(decisions_list)

    def __get_decision_cache(self):
        return self.__get_json(
            DecisionCacheSerializer(
                instance=DecisionCache.objects.filter(self._decision_filter),
                many=True).data)

    def __get_original_src(self):
        if self._decisions_ids:
            qs_filter = Q(reportcomponent__decision_id__in=self._decisions_ids)
        else:
            qs_filter = Q(reportcomponent__decision__job_id=self.job.id)
        sources = {}
        for src_arch in OriginalSources.objects.filter(qs_filter):
            sources[src_arch.identifier] = src_arch.archive.name
            self._arch_files.add(
                (src_arch.archive.path, src_arch.archive.name))
        return self.__get_json(sources)

    def __get_reports_data(self):
        reports = []
        for report in ReportComponent.objects.filter(self._decision_filter)\
                .select_related('parent', 'computer', 'original_sources', 'additional_sources').order_by('level'):
            report_data = DownloadReportComponentSerializer(
                instance=report).data

            # Add report files
            if report_data['log']:
                self._arch_files.add((report.log.path, report_data['log']))
            if report_data['verifier_files']:
                self._arch_files.add((report.verifier_files.path,
                                      report_data['verifier_files']))
            reports.append(report_data)

        return self.__get_json(reports)

    def __get_safes_data(self):
        safes_queryset = ReportSafe.objects.filter(
            self._decision_filter).select_related('parent').order_by('id')
        return self.__get_json(
            DownloadReportSafeSerializer(instance=safes_queryset,
                                         many=True).data)

    def __get_unsafes_data(self):
        reports = []
        for report in ReportUnsafe.objects.filter(
                self._decision_filter).select_related('parent').order_by('id'):
            report_data = DownloadReportUnsafeSerializer(instance=report).data
            if report_data['error_trace']:
                self._arch_files.add(
                    (report.error_trace.path, report_data['error_trace']))
            reports.append(report_data)
        return self.__get_json(reports)

    def __get_unknowns_data(self):
        reports = []
        for report in ReportUnknown.objects.filter(
                self._decision_filter).select_related('parent').order_by('id'):
            report_data = DownloadReportUnknownSerializer(instance=report).data
            if report_data['problem_description']:
                self._arch_files.add((report.problem_description.path,
                                      report_data['problem_description']))
            reports.append(report_data)
        return self.__get_json(reports)

    def __get_attrs_data(self):
        if self._decisions_ids:
            qs_filter = Q(report__decision_id__in=self._decisions_ids)
        else:
            qs_filter = Q(report__decision__job_id=self.job.id)

        attrs_data = {}
        for ra in ReportAttr.objects.filter(qs_filter).select_related(
                'data', 'report').order_by('id'):
            data = DownloadReportAttrSerializer(instance=ra).data
            if data['data_file']:
                self._arch_files.add((ra.data.file.path, data['data_file']))
            attrs_data.setdefault(ra.report.decision_id, {})
            attrs_data[ra.report.decision_id].setdefault(
                ra.report.identifier, [])
            attrs_data[ra.report.decision_id][ra.report.identifier].append(
                data)
        return self.__get_json(attrs_data)

    def __get_coverage_data(self):
        if self._decisions_ids:
            qs_filter = Q(report__decision_id__in=self._decisions_ids)
        else:
            qs_filter = Q(report__decision__job_id=self.job.id)

        coverage_data = []
        for carch in CoverageArchive.objects.filter(qs_filter).select_related(
                'report').order_by('id'):
            coverage_data.append({
                'decision': carch.report.decision_id,
                'report': carch.report.identifier,
                'identifier': carch.identifier,
                'archive': carch.archive.name,
                'name': carch.name
            })
            self._arch_files.add((carch.archive.path, carch.archive.name))
        return self.__get_json(coverage_data)

    def __add_additional_sources(self):
        for src_arch in AdditionalSources.objects.filter(
                self._decision_filter):
            self._arch_files.add(
                (src_arch.archive.path, src_arch.archive.name))

    def __get_json(self, data):
        return json.dumps(data, ensure_ascii=False, sort_keys=True, indent=2)
Example #10
class FilesForCompetitionArchive:
    obj_attr = 'Program fragment'
    requirement_attr = 'Requirement'

    def __init__(self, job, filters):
        try:
            self.root = ReportRoot.objects.get(job=job)
        except ObjectDoesNotExist:
            raise BridgeException(_('The job is not decided'))
        self._attrs = self.__get_attrs()
        self._archives = self.__get_archives()
        self.filters = filters
        self._archives_to_upload = []
        self.__get_archives_to_upload()
        self.stream = ZipStream()

    def __iter__(self):
        cnt = 0
        names_in_use = set()
        for arch_path, name_pattern in self._archives_to_upload:

            # TODO: original extension (currently it's supposed that verification files are zip archives)
            if name_pattern in names_in_use:
                cnt += 1
                arch_name = '%s_%s.zip' % (name_pattern, cnt)
            else:
                arch_name = '%s.zip' % name_pattern

            for data in self.stream.compress_file(arch_path, arch_name):
                yield data

        yield self.stream.close_stream()

    def __get_archives(self):
        archives = {}
        for c in ReportComponent.objects.filter(root=self.root, verification=True).exclude(verifier_input='')\
                .only('id', 'verifier_input'):
            if c.verifier_input:
                archives[c.id] = c.verifier_input.path
        return archives

    def __get_attrs(self):
        names = {}
        for a_name in AttrName.objects.filter(name__in=[self.obj_attr, self.requirement_attr]):
            names[a_name.id] = a_name.name

        attrs = {}
        # Select attributes for all safes, unsafes and unknowns
        for r_id, n_id, a_val in ReportAttr.objects.filter(report__root=self.root, attr__name_id__in=names)\
                .exclude(report__reportunsafe=None, report__reportsafe=None, report__reportunknown=None)\
                .values_list('report_id', 'attr__name_id', 'attr__value'):
            if r_id not in attrs:
                attrs[r_id] = {}
            attrs[r_id][names[n_id]] = a_val

        return attrs

    def __add_archive(self, r_type, r_id, p_id):
        if p_id in self._archives and r_id in self._attrs \
                and self.obj_attr in self._attrs[r_id] \
                and self.requirement_attr in self._attrs[r_id]:

            ver_obj = self._attrs[r_id][self.obj_attr].replace('~', 'HOME').replace('/', '---')
            ver_requirement = self._attrs[r_id][self.requirement_attr].replace(':', '-')
            dirname = 'Unknowns' if r_type == 'f' else 'Unsafes' if r_type == 'u' else 'Safes'

            self._archives_to_upload.append(
                (self._archives[p_id], '{0}/{1}__{2}__{3}'.format(dirname, r_type, ver_requirement, ver_obj))
            )

    def __get_archives_to_upload(self):
        for f_t in self.filters:
            if isinstance(f_t, list) and f_t:
                for problem in f_t:
                    comp_id, problem_id = problem.split('_')[0:2]
                    if comp_id == problem_id == '0':
                        queryset = ReportUnknown.objects.annotate(mr_len=Count('markreport_set'))\
                            .filter(root=self.root, mr_len=0).exclude(parent__parent=None)\
                            .values_list('id', 'parent_id')
                    else:
                        queryset = ReportUnknown.objects \
                            .filter(root=self.root, markreport_set__problem_id=problem_id, component_id=comp_id)\
                            .exclude(parent__parent=None).values_list('id', 'parent_id')
                    for args in queryset:
                        self.__add_archive('f', *args)
            else:
                model = ReportUnsafe if f_t == 'u' else ReportSafe if f_t == 's' else ReportUnknown
                for args in model.objects.filter(root=self.root).exclude(parent__parent=None)\
                        .values_list('id', 'parent_id'):
                    self.__add_archive('f' if isinstance(f_t, list) else f_t, *args)
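All of the examples above assume the same ZipStream interface: compress_string(), compress_stream() and compress_file() each yield compressed byte chunks for one archive member, and close_stream() returns the trailing bytes (the central directory). The stand-in below is only a sketch of that assumed interface built on the standard zipfile module, not the real Klever implementation; it forces zipfile onto its unseekable-output code path so the archive can be emitted incrementally.

import zipfile

# Assumed module-level constant used by the generators above; the real value
# in Klever may differ.
CHUNK_SIZE = 1024 * 1024


class _ChunkSink:
    """Write-only sink handed to ZipFile. It deliberately has no seek()/tell(),
    which makes ZipFile use its streaming code path (data descriptors), and it
    hands out the bytes written so far exactly once via pop()."""

    def __init__(self):
        self._pending = []

    def write(self, data):
        self._pending.append(bytes(data))
        return len(data)

    def flush(self):
        pass

    def pop(self):
        data = b''.join(self._pending)
        self._pending = []
        return data


class ZipStream:
    def __init__(self):
        self._sink = _ChunkSink()
        self._zip = zipfile.ZipFile(self._sink, mode='w',
                                    compression=zipfile.ZIP_DEFLATED)

    def compress_string(self, arcname, content):
        # One archive member from an in-memory string.
        self._zip.writestr(arcname, content)
        yield self._sink.pop()

    def compress_stream(self, arcname, iterable):
        # The real implementation presumably compresses chunk by chunk; this
        # sketch drains the iterable first and writes the member in one go.
        body = b''.join(
            chunk if isinstance(chunk, bytes) else chunk.encode('utf-8')
            for chunk in iterable)
        self._zip.writestr(arcname, body)
        yield self._sink.pop()

    def compress_file(self, path, arcname):
        # One archive member read from a file on disk.
        self._zip.write(path, arcname)
        yield self._sink.pop()

    def close_stream(self):
        # Finish the archive: this writes the central directory.
        self._zip.close()
        return self._sink.pop()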