Example #1
    def analysis_estimation(
            self, worker: Worker,
            options: gui.estimation.Options) -> EstimationResult:

        CHUNK_SIZE = 64
        with Core() as core:
            worker.interrupt = lambda: core.shutdown()  # register interrupt hook

            rows: List[estimation_result.PackedResponse] = []
            worker.set_work_size(len(self.subjects))
            for i in range(0, len(self.subjects), CHUNK_SIZE):
                request = estimation_result.Request(
                    subjects=self.subjects[i:i + CHUNK_SIZE],
                    models=options.models,
                    disable_parallelism=options.disable_parallelism,
                )

                responses = core.call('estimation', estimation_result.RequestC,
                                      estimation_result.PackedResponsesC,
                                      request)
                rows.extend(responses)

                worker.set_progress(len(rows))

            ds = EstimationResult(
                self.name + ' (model est.)',
                self.alternatives,
            )
            ds.subjects = rows
            return ds
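This is the one batched variant of the per-subject analysis pattern used throughout these examples: slice the subject list into fixed-size batches, send one request per batch, and report progress after each batch of responses lands. A minimal self-contained sketch of just that shape, with hypothetical stand-ins (process_batch, report) for the Prest-specific core call and worker:

    from typing import Callable, List, Sequence, TypeVar

    T = TypeVar('T')
    R = TypeVar('R')

    def run_chunked(items: Sequence[T],
                    process_batch: Callable[[Sequence[T]], List[R]],
                    report: Callable[[int], None],
                    chunk_size: int = 64) -> List[R]:
        # hypothetical helper: mirrors the slicing/progress structure above
        results: List[R] = []
        for i in range(0, len(items), chunk_size):
            results.extend(process_batch(items[i:i + chunk_size]))
            report(len(results))  # progress counted in finished items
        return results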
Example #2
    def load_from_file(self, worker: Worker, fname: str) -> None:
        with bz2.open(fname, 'rb') as f:
            f = cast(FileIn, f)  # assert we're doing input

            sig = f.read(len(PREST_SIGNATURE))
            if sig != PREST_SIGNATURE:
                raise PersistenceError('not a Prest workspace file')

            version = intC.decode(f)
            if version >= 3:
                prest_version = strC.decode(f)
            else:
                prest_version = None  # too old

            if version != FILE_FORMAT_VERSION:
                message = 'incompatible PWF version: expected {0}, received {1}'.format(
                    FILE_FORMAT_VERSION,
                    version,
                )

                if prest_version:
                    message += ' (saved by {0})'.format(prest_version)

                raise PersistenceError(message)

            work_size = intC.decode(f)
            worker.set_work_size(work_size)
            datasets = listCP(DatasetCP).decode(worker, f)

        # assign to self only once everything's gone all right
        self.datasets = datasets
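The assignment to self.datasets deliberately sits outside the with block: everything is decoded into locals first and only committed to the workspace once the entire file has parsed, so a truncated or corrupt file cannot leave it half-loaded. A minimal sketch of that commit-only-at-the-end shape, assuming nothing about Prest's codecs (pickle stands in for them here purely for illustration):

    import bz2
    import pickle
    from typing import Any, List

    class WorkspaceSketch:
        # hypothetical, cut-down workspace; only the load pattern matters
        def __init__(self) -> None:
            self.datasets: List[Any] = []

        def load(self, fname: str) -> None:
            with bz2.open(fname, 'rb') as f:
                datasets = pickle.load(f)  # may raise; self untouched so far
            self.datasets = datasets       # commit only after a clean read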
Example #3
    def analysis_integrity_check(self, worker: Worker,
                                 _config: None) -> dataset.AnalysisResult:
        worker.set_work_size(len(self.subjects))

        subjects: List[dataset.integrity_check.Subject] = []

        with Core() as core:
            worker.interrupt = lambda: core.shutdown()

            for i, subject in enumerate(self.subjects):
                subj_issues = core.call('integrity-check', PackedSubjectC,
                                        dataset.integrity_check.SubjectC,
                                        subject)

                if subj_issues.issues:
                    subjects.append(subj_issues)

                worker.set_progress(i + 1)

        if subjects:
            ds = dataset.integrity_check.IntegrityCheck(
                self.name + ' (integrity check)', self.alternatives)
            ds.subjects = subjects
            return ds
        else:
            return dataset.ShowMessageBox(
                type=dataset.MessageBoxType.INFORMATION,
                title='Integrity check',
                message='No integrity issues found.',
            )
Example #4
    def analysis_merge_choices(self, worker: Worker,
                               _config: None) -> 'ExperimentalData':
        subjects: List[PackedSubject] = []
        observ_count: int = 0

        # we group by pairs (menu, default)
        MenuDef = Tuple[FrozenSet[int], Optional[int]]

        worker.set_work_size(len(self.subjects))
        for i, subject_packed in enumerate(self.subjects):
            subject = Subject.unpack(subject_packed)

            choices: List[ChoiceRow] = []
            menu_idx: Dict[MenuDef, int] = {}
            deferrals_seen: Set[MenuDef] = set()

            for cr in subject.choices:
                # deferrals are kept separately
                if not cr.choice:
                    if (cr.menu, cr.default) in deferrals_seen:
                        # this deferral has already been seen, just skip it
                        continue
                    else:
                        # first time we've seen a deferral at this menu:
                        # add it to the output, but don't add it to the index
                        choices.append(cr)
                        deferrals_seen.add((cr.menu, cr.default))
                        observ_count += 1
                        continue

                idx = menu_idx.get((cr.menu, cr.default))
                if idx is None:
                    # not there yet
                    menu_idx[cr.menu, cr.default] = len(choices)
                    choices.append(cr)
                    observ_count += 1
                else:
                    # already there, expand the choice
                    choices[idx] = ChoiceRow(
                        menu=cr.menu,
                        default=cr.default,
                        choice=cr.choice | choices[idx].choice,
                    )

            subjects.append(
                Subject(
                    name=subject.name,
                    alternatives=subject.alternatives,
                    choices=choices,
                ).pack())
            worker.set_progress(i + 1)

        ds = ExperimentalData(name=self.name + ' (merged)',
                              alternatives=self.alternatives)
        ds.subjects = subjects
        ds.observ_count = observ_count
        return ds
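The merge itself is an index-dict pattern: a dict maps each (menu, default) key to the position of its merged row in the output list, and later observations union their choice sets into the row stored there, while deferrals bypass the index entirely. A stripped-down sketch of that pattern over plain tuples (merge_rows, Key and Row are illustrative names, not Prest API):

    from typing import Dict, FrozenSet, List, Optional, Tuple

    Key = Tuple[FrozenSet[int], Optional[int]]                  # (menu, default)
    Row = Tuple[FrozenSet[int], Optional[int], FrozenSet[int]]  # (menu, default, choice)

    def merge_rows(rows: List[Row]) -> List[Row]:
        # hypothetical helper: same shape as the loop above, minus deferrals
        merged: List[Row] = []
        index: Dict[Key, int] = {}
        for menu, default, choice in rows:
            idx = index.get((menu, default))
            if idx is None:
                index[menu, default] = len(merged)
                merged.append((menu, default, choice))
            else:
                m, d, c = merged[idx]
                merged[idx] = (m, d, c | choice)  # union the choice sets
        return merged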
Example #5
    def save_to_file(self, worker: Worker, fname: str) -> None:
        with bz2.open(fname, 'wb') as f:
            f = cast(FileOut, f)  # assert we're doing output

            f.write(PREST_SIGNATURE)
            intC.encode(f, FILE_FORMAT_VERSION)  # version
            strC.encode(f, branding.VERSION)

            work_size = listCP(DatasetCP).get_size(self.datasets)
            intC.encode(f, work_size)

            worker.set_work_size(work_size)
            listCP(DatasetCP).encode(worker, f, self.datasets)
Example #6
    def analysis_summary_stats(self, worker: Worker,
                               _config: None) -> ExperimentStats:
        subjects = []
        worker.set_work_size(len(self.subjects))

        with Core() as core:
            worker.interrupt = lambda: core.shutdown()

            for i, subject in enumerate(self.subjects):
                subjects.append(
                    core.call("summary", PackedSubjectC,
                              dataset.experiment_stats.SubjectC, subject))
                worker.set_progress(i + 1)

        ds = ExperimentStats(
            name=self.name + ' (info)',
            alternatives=self.alternatives,
        )
        ds.subjects = subjects
        return ds
Example #7
    def analysis_tuple_intrans_alts(self, worker: Worker,
                                    _config: None) -> TupleIntransAlts:
        subjects = []
        worker.set_work_size(len(self.subjects))

        with Core() as core:
            worker.interrupt = lambda: core.shutdown()

            for i, subject in enumerate(self.subjects):
                subjects.append(
                    core.call(
                        'tuple-intrans-alts',
                        PackedSubjectC,
                        dataset.tuple_intrans_alts.SubjectC,
                        subject,
                    ))
                worker.set_progress(i + 1)

        ds = TupleIntransAlts(self.name + ' (inconsistent alternative tuples)',
                              self.alternatives)
        ds.subjects = subjects
        return ds
Example #8
    def analysis_consistency(self, worker: Worker,
                             _config: None) -> ConsistencyResult:
        with Core() as core:
            worker.interrupt = lambda: core.shutdown()  # interrupt hook

            rows = []

            worker.set_work_size(len(self.subjects))
            for i, subject in enumerate(self.subjects):
                response = core.call('consistency', PackedSubjectC,
                                     dataset.consistency_result.SubjectRawC,
                                     subject)
                rows.append(response)

                worker.set_progress(i + 1)

        ds = ConsistencyResult(
            self.name + ' (consistency)',
            self.alternatives,
        )
        ds.load_from_core(rows)
        return ds
Example #9
    def analysis_simulation(
            self, worker: Worker,
            options: 'gui.copycat_simulation.Options') -> 'ExperimentalData':
        subjects: List[PackedSubject] = []

        with Core() as core:
            worker.interrupt = lambda: core.shutdown()  # register interrupt hook

            worker.set_work_size(len(self.subjects) * options.multiplicity)
            position = 0
            for subject_packed in self.subjects:
                for j in range(options.multiplicity):
                    response = simulation.run(
                        core,
                        simulation.Request(
                            name='random%d' % (j + 1),
                            alternatives=self.alternatives,  # we don't use subject.alternatives here
                            gen_menus=simulation.GenMenus(
                                generator=simulation.Copycat(subject_packed),
                                defaults=False,  # this will be ignored, anyway
                            ),
                            gen_choices=options.gen_choices,
                            preserve_deferrals=options.preserve_deferrals,
                        ))

                    subjects.append(response.subject_packed)

                    position += 1
                    if position % 1024 == 0:
                        worker.set_progress(position)

        ds = ExperimentalData(name=options.name,
                              alternatives=self.alternatives)
        ds.subjects = subjects
        ds.observ_count = options.multiplicity * self.observ_count
        return ds
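Since this analysis generates options.multiplicity simulated subjects per input subject, the inner loop can run very many iterations, so progress is only pushed every 1024 steps instead of on every iteration. The throttling on its own, as a hedged sketch (with_throttled_progress and report are illustrative, not part of the Worker API):

    from typing import Callable, Iterable, Iterator, TypeVar

    T = TypeVar('T')

    def with_throttled_progress(items: Iterable[T],
                                report: Callable[[int], None],
                                every: int = 1024) -> Iterator[T]:
        # hypothetical wrapper: yield items unchanged, but only call
        # report() once every `every` items to keep progress traffic low
        position = 0
        for item in items:
            yield item
            position += 1
            if position % every == 0:
                report(position)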
Example #10
    def export(self, fname: str, fformat: str, variant: ExportVariant,
               worker: Worker) -> None:
        worker.set_work_size(variant.size)
        position = 0

        if '*.csv' in fformat:
            with open(fname, 'w') as f:
                # Python's CSV module and line endings are a mess.
                #
                # Opening the file in binary mode doesn't work (writerow() fails).
                # In text mode, you get \r\r\n on Windows.
                #
                # Hence we force the line terminator here to be '\n', on all platforms
                # and leave the line ending translation to the underlying /file/ layer.
                #
                w = csv.writer(f, quoting=csv.QUOTE_ALL, lineterminator='\n')
                w.writerow(variant.column_names)
                for row in variant.get_rows():
                    if row:
                        w.writerow(row)
                    else:
                        # progress
                        position += 1
                        worker.set_progress(position)

        elif '*.xlsx' in fformat:
            wb = openpyxl.Workbook()
            wb.properties.creator = branding.PREST_VERSION
            ws = wb.active

            ws.append(variant.column_names)
            for row in variant.get_rows():
                if row:
                    ws.append(row)
                else:
                    # progress
                    position += 1
                    worker.set_progress(position)

            # autosize columns
            # who knows what the units are but it approximately fits
            # furthermore, we fudge the numbers by 1 unit because that looks better
            for column_number, column_cells in enumerate(ws.columns, start=1):
                length = max(
                    (len(str(cell.value or '')) for cell in column_cells),
                    default=5) + 1

                if length < 4:
                    length = 4

                ws.column_dimensions[openpyxl.utils.cell.get_column_letter(
                    column_number)].width = length

            wb.save(fname)

        else:
            raise Exception('unknown file export format: %s' % fformat)
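Both the CSV and the XLSX branch treat a falsy row from variant.get_rows() as a progress tick rather than data, so the row producer decides how often progress is reported and the writers stay oblivious. A hedged sketch of what such a producer might look like (example_rows is purely illustrative; the real ExportVariant generators are not shown in these snippets):

    from typing import Iterator, Optional, Sequence

    def example_rows(data: Sequence[Sequence[object]],
                     tick_every: int = 100) -> Iterator[Optional[Sequence[object]]]:
        # hypothetical producer: yield data rows, plus a falsy None every
        # `tick_every` rows as a "please update the progress bar" marker
        for i, row in enumerate(data, start=1):
            yield row
            if i % tick_every == 0:
                yield None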
Example #11
    def analysis_consistency(self, worker: Worker,
                             _config: None) -> BudgetaryConsistency:
        with Core() as core:
            worker.interrupt = lambda: core.shutdown()  # interrupt hook

            rows = []

            worker.set_work_size(len(self.subjects))
            for i, subject in enumerate(self.subjects):
                response = core.call(
                    'budgetary-consistency',
                    SubjectC,
                    dataset.budgetary_consistency.SubjectC,
                    subject
                )
                rows.append(response)

                worker.set_progress(i + 1)

        ds = BudgetaryConsistency(
            self.name + ' (consistency)',
            self.alternatives,
            rows,
        )
        return ds
Example #12
    def decode(worker: Worker, f: FileIn) -> Any:
        result = dec(f)
        worker.step()
        return result
Example #13
    def encode(worker: Worker, f: FileOut, x: Any) -> None:
        enc(f, x)
        worker.step()
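These last two fragments reference dec and enc that are not defined in the snippet; they read like closures over an underlying codec's decode/encode functions, wrapped so that the Worker is stepped once per item, which would be how listCP(DatasetCP).decode(worker, f) and .encode(worker, f, ...) in the earlier examples drive the progress bar. A hedged reconstruction of such a wrapper (stepping_codec and the loose Any types are invented for illustration, not Prest's actual definitions):

    from typing import Any, Callable, Tuple

    def stepping_codec(
        dec: Callable[[Any], Any],        # underlying decode(f) -> value
        enc: Callable[[Any, Any], None],  # underlying encode(f, value)
    ) -> Tuple[Callable[..., Any], Callable[..., None]]:
        # hypothetical factory: returns a progress-aware decode/encode pair
        def decode(worker: Any, f: Any) -> Any:
            result = dec(f)
            worker.step()  # one unit of progress per decoded item
            return result

        def encode(worker: Any, f: Any, x: Any) -> None:
            enc(f, x)
            worker.step()  # one unit of progress per encoded item

        return decode, encode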