def infer(self, model: BaseSpace, dataset, mask="train"):
    """
    Train ``model`` from scratch on ``dataset``, then evaluate on ``mask``.

    Parameters
    ----------
    model : BaseSpace
        The architecture (search-space instance); it is wrapped before
        training via ``model.wrap()``.
    dataset
        The dataset used for both training and evaluation.
    mask : str
        Which split to evaluate on (default ``"train"``).

    Returns
    -------
    tuple
        ``(metrics, loss)`` as produced by ``self.estimator.infer``.  On a
        CUDA runtime error (presumably out-of-memory — the architecture is
        scored as worst-possible so a search can continue) a sentinel list
        of +/-100 per metric and a loss of ``0`` is returned instead.

    Raises
    ------
    RuntimeError
        Any runtime error whose message does not mention CUDA is re-raised.
    """
    boxmodel = model.wrap()
    # A fresh full-batch trainer per call: the model is trained from
    # scratch, so no state leaks between successive evaluations.
    self.trainer = NodeClassificationFullTrainer(
        model=boxmodel,
        optimizer=torch.optim.Adam,
        lr=0.005,
        max_epoch=300,
        early_stopping_round=30,
        weight_decay=5e-4,
        device="auto",
        init=False,
        feval=self.evaluation,
        loss=self.loss_f,
        lr_scheduler_type=None,
    )
    try:
        self.trainer.train(dataset)
        with torch.no_grad():
            return self.estimator.infer(boxmodel.model, dataset, mask)
    except RuntimeError as e:
        # Single case-insensitive check replaces the duplicated
        # `"cuda" in str(e) or "CUDA" in str(e)` test.
        if "cuda" in str(e).lower():
            INF = 100
            fin = [
                -INF if eva.is_higher_better else INF
                for eva in self.evaluation
            ]
            return fin, 0
        # Bare `raise` preserves the original traceback (raise e rewrites it).
        raise
class TrainEstimator(BaseEstimator):
    """
    An estimator which trains the given architecture from scratch.

    Parameters
    ----------
    loss_f : str
        The name of a loss function in PyTorch.
    evaluation : list of Evaluation, optional
        The evaluation metrics in module/train/evaluation.  Defaults to
        ``[Acc()]`` when not given.
    """

    def __init__(self, loss_f="nll_loss", evaluation=None):
        # Avoid the shared mutable-default-argument pitfall: the previous
        # default `evaluation=[Acc()]` was created once at definition time
        # and shared by every instance.  Build it per instance instead.
        if evaluation is None:
            evaluation = [Acc()]
        super().__init__(loss_f, evaluation)
        self.evaluation = evaluation
        # Delegate the actual evaluation pass to a one-shot estimator.
        self.estimator = OneShotEstimator(self.loss_f, self.evaluation)

    def infer(self, model: BaseSpace, dataset, mask="train"):
        """
        Train ``model`` from scratch on ``dataset``, then evaluate on ``mask``.

        Returns ``(metrics, loss)`` from the underlying one-shot estimator.
        On a CUDA runtime error (presumably out-of-memory) a sentinel list
        of +/-100 per metric and a loss of ``0`` is returned so a search
        loop can continue; other runtime errors are re-raised.
        """
        boxmodel = model.wrap()
        # A fresh full-batch trainer per call: the model is trained from
        # scratch, so no state leaks between successive evaluations.
        self.trainer = NodeClassificationFullTrainer(
            model=boxmodel,
            optimizer=torch.optim.Adam,
            lr=0.005,
            max_epoch=300,
            early_stopping_round=30,
            weight_decay=5e-4,
            device="auto",
            init=False,
            feval=self.evaluation,
            loss=self.loss_f,
            lr_scheduler_type=None,
        )
        try:
            self.trainer.train(dataset)
            with torch.no_grad():
                return self.estimator.infer(boxmodel.model, dataset, mask)
        except RuntimeError as e:
            # Single case-insensitive check replaces the duplicated
            # `"cuda" in str(e) or "CUDA" in str(e)` test.
            if "cuda" in str(e).lower():
                INF = 100
                fin = [
                    -INF if eva.is_higher_better else INF
                    for eva in self.evaluation
                ]
                return fin, 0
            # Bare `raise` preserves the original traceback.
            raise
def test_node_trainer():
    """Smoke-test NodeClassificationFullTrainer end-to-end on Cora."""
    pyg_dataset = to_pyg_dataset(build_dataset_from_name("cora"))
    graph = pyg_dataset[0]

    trainer = NodeClassificationFullTrainer(
        model='gcn',
        init=False,
        lr=1e-2,
        weight_decay=5e-4,
        max_epoch=200,
        early_stopping_round=200,
    )
    # Derive the input/output dimensions from the data, then initialize.
    trainer.num_features = graph.x.size(1)
    trainer.num_classes = graph.y.max().item() + 1
    trainer.initialize()

    # Show the instantiated encoder/decoder modules for inspection.
    print(trainer.encoder.encoder)
    print(trainer.decoder.decoder)

    trainer.train(pyg_dataset, True)
    result = trainer.evaluate(pyg_dataset, "test", "acc")
    print("Acc:", result)
# Repeat training with different seeds and report mean/std test accuracy.
accs = []
model_hp, decoder_hp = get_encoder_decoder_hp(args.model)
for seed in tqdm(range(args.repeat)):
    set_seed(seed)
    # Assemble the hyper-parameter configuration up front so the trainer
    # construction below stays readable.
    hyper_parameters = {
        "trainer": {
            "max_epoch": args.epoch,
            "early_stopping_round": args.epoch + 1,
            "lr": args.lr,
            "weight_decay": args.weight_decay,
        },
        "encoder": model_hp,
        "decoder": decoder_hp,
    }
    base_trainer = NodeClassificationFullTrainer(
        model=args.model,
        num_features=num_features,
        num_classes=num_classes,
        device=args.device,
        init=False,
        feval=['acc'],
        loss="nll_loss",
    )
    trainer = base_trainer.duplicate_from_hyper_parameter(hyper_parameters)
    trainer.train(dataset, False)
    output = trainer.predict(dataset, 'test')
    acc = (output == label[test_mask]).float().mean().item()
    accs.append(acc)
print('{:.4f} ~ {:.4f}'.format(np.mean(accs), np.std(accs)))
import os

# Must be set BEFORE any autogl import so the DGL backend is selected.
os.environ["AUTOGL_BACKEND"] = "dgl"

from autogl.datasets import build_dataset_from_name
from autogl.solver import AutoNodeClassifier
from autogl.module.train import NodeClassificationFullTrainer
from autogl.backend import DependentBackend

# Label field name differs between the PyG and DGL data representations.
key = "y" if DependentBackend.is_pyg() else "label"
cora = build_dataset_from_name("cora")

# Solver with a single GIN model and a fixed-hyper-parameter trainer;
# early_stopping_round > max_epoch means early stopping never triggers.
solver = AutoNodeClassifier(
    graph_models=("gin", ),
    default_trainer=NodeClassificationFullTrainer(
        decoder=None,
        init=False,
        max_epoch=200,
        early_stopping_round=201,
        lr=0.01,
        weight_decay=0.0,
    ),
    hpo_module=None,  # no hyper-parameter optimization
    device="auto")
solver.fit(cora, evaluation_method=["acc"])
result = solver.predict(cora)
# Compare predictions against ground-truth labels on the test mask and
# print the resulting accuracy.
print((result == cora[0].nodes.data[key][
    cora[0].nodes.data["test_mask"]].cpu().numpy()).astype('float').mean())