def check_export(tmpdir, ds, variant_name, csv_fname):
    """Export *ds* through the named export variant and compare the
    resulting CSV, column by column, with the expected fixture file.

    When OVERWRITE_EXPECTED_FILES is set, the export overwrites the
    fixture itself (regenerating expected output) instead of writing
    into tmpdir.
    """
    if OVERWRITE_EXPECTED_FILES:
        out_fname = csv_fname
    else:
        out_fname = os.path.join(tmpdir, '%s.csv' % variant_name)

    variant = ds._get_export_variant(variant_name)
    assert variant
    ds.export(out_fname, '*.csv', variant, MockWorker())

    expected_raw = load_raw_csv(csv_fname)
    expected_cols = csv_columns(expected_raw[0], expected_raw[1:])
    actual_raw = load_raw_csv(out_fname)
    cols = csv_columns(actual_raw[0], actual_raw[1:])

    keys_expected = set(expected_cols.keys())
    keys_actual = set(cols.keys())
    common_keys = keys_expected & keys_actual
    all_keys = keys_expected | keys_actual
    assert 2 * len(common_keys) >= len(all_keys)  # at least 1/2 common keys

    # run with pytest -s instead
    #print(common_keys)

    # sort to make error locations deterministic
    for key in ['subject'] + sorted(common_keys):
        assert len(expected_cols[key]) == len(cols[key]), key
        assert expected_cols[key] == cols[key], key
def test_consistency_analysis(tmpdir, name, alts, subj_count):
    """Run the consistency analysis on a documented example dataset and
    check every export variant against its expected CSV fixture."""
    rows = load_raw_csv('docs/src/_static/examples/%s.csv' % name)
    ds = ExperimentalData.from_csv('dataset', rows[1:], (0, 1, None, 2))
    assert ds.alternatives == alts.split()
    assert len(ds.subjects) == subj_count

    dsc = ds.analysis_consistency(MockWorker(), None)
    assert len(dsc.subjects) == len(ds.subjects)

    check_export(tmpdir, dsc, 'summary',
                 'gui/test/expected/%s-cons-summary.csv' % name)
    check_export(tmpdir, dsc, 'congruence violations (wide)',
                 'gui/test/expected/%s-cons-garp.csv' % name)
    check_export(tmpdir, dsc, 'strict general cycles (wide)',
                 'gui/test/expected/%s-cons-sarp.csv' % name)
    check_export(tmpdir, dsc, 'strict binary cycles (wide)',
                 'gui/test/expected/%s-cons-sarp-bin.csv' % name)
    check_export(tmpdir, dsc, 'binary cycles (wide)',
                 'gui/test/expected/%s-cons-garp-bin.csv' % name)

    # the tuple-intransitivity analyses must keep every subject as well
    dst_menus = ds.analysis_tuple_intrans_menus(MockWorker(), None)
    dst_alts = ds.analysis_tuple_intrans_alts(MockWorker(), None)
    assert len(dst_menus.subjects) == len(ds.subjects)
    assert len(dst_alts.subjects) == len(ds.subjects)
def load_from_csv(fname: str) -> Budgetary:
    """Load a budgetary dataset from a CSV file.

    The header is expected to have one subject-name column followed by
    an even number of numeric columns: the first half are prices, the
    second half the corresponding amounts.  Rows belonging to the same
    subject are stacked into one observation matrix per subject.

    Raises:
        BudgetaryError: on an empty file, an odd number of numeric
            columns, a row with the wrong column count, or a cell that
            cannot be parsed as a number.
    """
    lines = dataset.load_raw_csv(fname)
    if not lines:
        raise BudgetaryError("the CSV file is empty")
    header, *rows = lines
    if (len(header) - 1) % 2 != 0:
        raise BudgetaryError("budgetary datasets should have an even number of numeric columns")
    n_alts = (len(header) - 1) // 2
    # alternatives are simply numbered 1..n_alts
    alternatives = [f'{i+1}' for i in range(n_alts)]
    # insertion-ordered: subject name -> (list of price rows, list of amount rows)
    subjects: Dict[str, Tuple[List[np.ndarray], List[np.ndarray]]] = dict()
    for line_no, row in enumerate(rows, start=2):
        if len(row) != len(header):
            raise BudgetaryError(f'{fname}, line {line_no}: incorrect number of columns')
        subj_name, *cols = row
        if subj_name not in subjects:
            subjects[subj_name] = ([], [])
        prices, amounts = subjects[subj_name]
        try:
            prices.append(np.array([float(x) for x in cols[:n_alts]], dtype=np.float32))
            amounts.append(np.array([float(x) for x in cols[n_alts:]], dtype=np.float32))
        except ValueError as e:
            # report malformed numbers as a dataset error with location
            # info, consistent with the other validation branches above
            raise BudgetaryError(f'{fname}, line {line_no}: invalid numeric value') from e
    ds = Budgetary(os.path.basename(fname), alternatives)
    ds.subjects = [
        Subject(name=n, prices=np.vstack(ps), amounts=np.vstack(ams))
        for (n, (ps, ams)) in subjects.items()
    ]
    ds.update_nr_observations()
    return ds
def run(self):
    """Show a file-open dialog for a CSV file, load and preview its rows,
    and create a dataset from them once the import dialog is accepted.
    """
    fname: Optional[str] = None

    def work():
        # Runs only via the `accepted` signal, after fname has been set
        # below -- the closure reads the selected filename at call time.
        assert fname is not None
        ds = self.make_dataset(name=os.path.basename(fname))
        self.main_win.add_dataset(ds)

    fname, _something = QFileDialog.getOpenFileName(
        self, "Import CSV", filter="CSV files (*.csv)")
    if not fname:
        # user cancelled the file dialog
        return

    rows = dataset.load_raw_csv(fname)
    if not rows:
        QMessageBox.warning(
            self, "CSV import",
            "The input file seems to be empty",
        )
        return

    # populate the dialog's preview, then run it modally; `work` fires
    # only if the user accepts (catch_exc presumably wraps exceptions
    # into GUI error reporting -- defined elsewhere)
    self.fill_rows(rows)
    self.accepted.connect(self.catch_exc(work))
    self.exec_()
def test_integrity(tmpdir):
    """The integrity check must flag both issues planted in the
    integrity.csv example: a repeated menu and a choice outside its menu."""
    rows = load_raw_csv('docs/src/_static/examples/integrity.csv')
    ds = ExperimentalData.from_csv('dataset', rows[1:], (0, 1, None, 2))

    nds = ds.analysis_integrity_check(MockWorker(), None)
    assert isinstance(nds, dataset.integrity_check.IntegrityCheck)
    assert len(nds.subjects) == 1

    subject = nds.subjects[0]
    assert subject.name == 'a'
    expected_issues = [
        dataset.integrity_check.RepeatedMenu(menu={0, 1}),
        dataset.integrity_check.ChoiceNotInMenu(menu={0, 1}, choice=2),
    ]
    assert subject.issues == expected_issues
def estimate(args):
    """CLI entry point: run model estimation on args.fname_in and write
    the selected export variant to args.fname_out.

    With no -m argument, print the available models and exit(1).
    'all' selects every model; otherwise only models whose string form
    matches args.models are run.
    """
    rows = load_raw_csv(args.fname_in)
    ds = ExperimentalData.from_csv('dataset', rows[1:], (0, 1, None, 2))

    available = [
        preorder(strict=True, total=True),
        preorder(strict=False, total=True),
        unattractive(strict=True, total=True),
        unattractive(strict=False, total=True),
        preorder(strict=True, total=False),
        preorder(strict=False, total=False),
        UndominatedChoice(strict=True),
        UndominatedChoice(strict=False),
        PartiallyDominantChoice(fc=True),
        PartiallyDominantChoice(fc=False),
        Overload(PreorderParams(strict=True, total=True)),
        Overload(PreorderParams(strict=False, total=True)),
        StatusQuoUndominatedChoice(),
        TopTwo(),
        SequentiallyRationalizableChoice(),
        Swaps(),
    ]

    if not args.models:
        print('Please specify a model using -m:')
        for model in available:
            print(' ' + str(model))
        sys.exit(1)

    if args.models == 'all':
        models = available
    else:
        models = [model for model in available if str(model) in args.models]
        if not models:
            raise Exception('bad model spec')

    dsm = ds.analysis_estimation(
        ProgressWorker(),
        EstimationOpts(
            models=models,
            disable_parallelism=args.sequential,
            disregard_deferrals=args.disregard_deferrals,
        ))
    variant = dsm._get_export_variant(args.export_variant)
    dsm.export(args.fname_out, '*.csv', variant, MockWorker())
def test_model_estimation(tmpdir, name, alts, subj_count):
    """Estimate the standard model battery on an example dataset and
    compare both export variants against their expected fixtures."""
    # the status-quo example carries a default-alternative column
    indices: Tuple[Optional[int], ...]
    indices = (0, 1, 2, 3) if name in ('status-quo', ) else (0, 1, None, 2)

    rows = load_raw_csv('docs/src/_static/examples/%s.csv' % name)
    ds = ExperimentalData.from_csv('aug', rows[1:], indices)

    models = [
        preorder(strict=True, total=True),
        preorder(strict=False, total=True),
        unattractive(strict=True, total=True),
        unattractive(strict=False, total=True),
        preorder(strict=True, total=False),
        preorder(strict=False, total=False),
        UndominatedChoice(strict=True),
        UndominatedChoice(strict=False),
        PartiallyDominantChoice(fc=True),
        PartiallyDominantChoice(fc=False),
        Overload(PreorderParams(strict=True, total=True)),
        Overload(PreorderParams(strict=False, total=True)),
    ]

    # the status-quo model only applies when every choice has a default
    decoded = map(SubjectC.decode_from_memory, ds.subjects)
    has_defaults = all(
        cr.default is not None
        for subj in decoded
        for cr in subj.choices
    )
    if has_defaults:
        models.append(StatusQuoUndominatedChoice())

    dsm = ds.analysis_estimation(
        MockWorker(),
        EstimationOpts(models, disable_parallelism=False,
                       disregard_deferrals=False))

    check_export(tmpdir, dsm, 'compact (human-friendly)',
                 'gui/test/expected/%s-models-compact.csv' % name)
    check_export(tmpdir, dsm, 'detailed (machine-friendly)',
                 'gui/test/expected/%s-models-detailed.csv' % name)
def consistency(args):
    """CLI entry point: run the consistency analysis on args.fname_in
    and write the selected export variant to args.fname_out."""
    rows = load_raw_csv(args.fname_in)
    ds = ExperimentalData.from_csv('dataset', rows[1:], (0, 1, None, 2))

    analysis = ds.analysis_consistency(ProgressWorker(), None)
    variant = analysis._get_export_variant(args.export_variant)
    analysis.export(args.fname_out, '*.csv', variant, MockWorker())