# "evaluated": False
            }
        },
        {"$sort": {"dataset_name": +1, "test_temperature": -1}}
    ]
)
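
# For reference, a minimal sketch of the aggregate call this fragment was cut
# from, assuming a pymongo collection handle named `db_records` (the $match
# stage is inferred from the '"evaluated": False' remnant above and is an
# assumption, not the original code):
#
#     result = db_records.aggregate([
#         {"$match": {"evaluated": False}},
#         {"$sort": {"dataset_name": +1, "test_temperature": -1}}
#     ])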

result = list(result)
for r in result:
    # Drop bookkeeping timestamps; only the identifying fields are kept.
    del r["evaluated_model_created_at"]
    del r["evaluated_model_updated_at"]

model_identifier_dicts = [SimpleNamespace(**record) for record in result]

print(len(model_identifier_dicts))

for md in model_identifier_dicts:
    print(md)

input("continue?")

EvaluatorClass = RealWorldEvaluator
ev, tst = None, None
for model_identifier in model_identifier_dicts:
    # Reload the dataset and rebuild the evaluator only when the dataset changes.
    if ev is None or ev.dm_name != model_identifier.dataset_name:
        _, _, tst, TEXT = load_real_dataset(model_identifier.dataset_name)
        ev = EvaluatorClass(None, None, tst, parser=TEXT,
                            mode="eval", dm_name=model_identifier.dataset_name)
    print('********************* evaluating {} *********************'.format(model_identifier))
    ev.final_evaluate(model_identifier)
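
# A minimal sketch of the identifier records this loop consumes; the field
# names mirror the attribute accesses above and the SimpleNamespace
# construction in Example #3 below, but the concrete values here are purely
# illustrative:
#
#     md = SimpleNamespace(dataset_name="ptb", model_name="vae", run=0,
#                          train_temperature=1.0, test_temperature=1.0,
#                          restore_type="last_iter")
#     ev.final_evaluate(md)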
Example #2

    def get_persample_nll(self, samples, samples_loc, temperature):
        # Stub: per-sample NLL is not available for this model, so report +inf.
        return np.ones(len(samples)) * np.inf

    def get_saving_path(self):
        return self.saving_path

    def get_name(self):
        return 'vae'

    def load(self):
        # Imported lazily so the rvae package is only required when used.
        from previous_works.rvae.train import load as lo
        lo(self.model)

    def reset_model(self):
        from previous_works.rvae.train import create_model as crm
        self.model = crm(self.parameters)


if __name__ == "__main__":
    from data_management.data_manager import load_real_dataset

    train_ds, valid_ds, test_ds, parser = load_real_dataset('ptb')
    print(train_ds[0].text)

    m = VAE(parser)
    m.delete_saved_model()
    m.init_model((train_ds.text, valid_ds.text))
    m.train()
    m.load()
    m.delete()
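
    # Sketch of how the per-sample NLL hook above could be exercised; for
    # this stub it always reports +inf (argument values are illustrative):
    #
    #     nlls = m.get_persample_nll(test_ds.text, None, temperature=1.0)
    #     print(nlls.mean())  # inf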
Example #3

model_identifier_dicts = [
    SimpleNamespace(
        model_name=args.model_names[i],
        run=args.runs[i],
        train_temperature=args.train_temperatures[i],
        test_temperature=args.test_temperatures[i],
        restore_type=args.restore_types[i],
    )
    for i in range(len(args.model_names))
]
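
# The parallel-list indexing above implies argparse flags roughly like the
# following sketch (the exact definitions, types, and defaults are
# assumptions, not the original code):
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--model_names', nargs='+')
#     parser.add_argument('--runs', nargs='+', type=int)
#     parser.add_argument('--train_temperatures', nargs='+', type=float)
#     parser.add_argument('--test_temperatures', nargs='+', type=float)
#     parser.add_argument('--restore_types', nargs='+')
#     args = parser.parse_args()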


Evaluator.update_config(args.data)

if args.mode == 'real':
    from evaluators.real_evaluator import RealWorldEvaluator
    EvaluatorClass = RealWorldEvaluator
    trn, vld, tst, TEXT = load_real_dataset(args.data)
elif args.mode == 'oracle':
    from evaluators.oracle_evaluator import OracleEvaluator
    EvaluatorClass = OracleEvaluator
    trn, vld, tst, TEXT = load_oracle_dataset()


if args.runs is not None:
    # Every requested run index must be within the configured number of runs.
    assert all(run < Evaluator.TOTAL_RUNS for run in args.runs)


if args.action == 'train':
    del tst  # the test split is not needed for training; free the memory
    for model_identifier in model_identifier_dicts:
        print('********************* training {} *********************'.format(model_identifier))
        print(len(trn), len(vld))
Example #4

    def delete_saved_model(self):
        # Wipe any previous checkpoint directory and recreate it empty.
        if os.path.exists(self.get_saving_path()):
            shutil.rmtree(self.get_saving_path())
            print("saved model at %s deleted!" % self.get_saving_path())
        os.mkdir(self.get_saving_path())
        print("saving path created at %s!" % self.get_saving_path())

    def get_saving_path(self):
        # Overridden by concrete model subclasses.
        pass

    def load(self):
        # Overridden by concrete model subclasses.
        pass

    def get_name(self):
        return self.__class__.__name__


class DummyModel(BaseModel):
    def __init__(self, model_identifier: SimpleNamespace,
                 parser: ReversibleField):
        super().__init__(model_identifier, parser)
        self.model_name = model_identifier.model_name.lower()

    def get_name(self):
        return self.model_name


if __name__ == '__main__':
    train_ds, valid_ds, test_ds, TEXT = load_real_dataset('coco')
    print(train_ds[0].text)
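
    # Hypothetical smoke test: build an identifier shaped like the ones
    # constructed earlier on this page (the field values are illustrative):
    from types import SimpleNamespace
    md = SimpleNamespace(model_name='Dummy', run=0, train_temperature=1.0,
                         test_temperature=1.0, restore_type='last')
    dummy = DummyModel(md, TEXT)
    print(dummy.get_name())  # -> 'dummy' (lower-cased model_name)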