def init_model(self, model_type="mpad", lr=0.1, **kwargs):
    # Store model type
    self.model_type = model_type.lower()
    # Initialize model; only MPAD is supported for now
    if model_type.lower() == "mpad":
        self.model = MPAD(**kwargs)
    else:
        raise AssertionError("Currently only MPAD is supported as model")
    self.model_args = kwargs
    # Adam optimizer, a schedule that halves the learning rate every
    # 50 scheduler steps, and cross-entropy loss
    self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
    self.scheduler = optim.lr_scheduler.StepLR(
        self.optimizer, step_size=50, gamma=0.5
    )
    self.criterion = torch.nn.CrossEntropyLoss()
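# Minimal usage sketch. The wrapper class name (Trainer) and the shape of
# the batch variables are placeholders for illustration; torch, optim and
# MPAD are assumed to be imported at the top of the enclosing module:
#
#   trainer = Trainer()
#   trainer.init_model(model_type="mpad", lr=0.01)
#   trainer.optimizer.zero_grad()
#   output = trainer.model(features_batch, adj_batch, batch_n_graphs)
#   loss = trainer.criterion(output, y_batch)
#   loss.backward()
#   trainer.optimizer.step()
#   trainer.scheduler.step()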
y_val = [y[i] for i in val_index]
adj_test = [adj[i] for i in test_index]
features_test = [features[i] for i in test_index]
y_test = [y[i] for i in test_index]

adj_train, features_train, batch_n_graphs_train, y_train = generate_batches(
    adj_train, features_train, y_train, args.batch_size, args.use_master_node)
adj_val, features_val, batch_n_graphs_val, y_val = generate_batches(
    adj_val, features_val, y_val, args.batch_size, args.use_master_node)
adj_test, features_test, batch_n_graphs_test, y_test = generate_batches(
    adj_test, features_test, y_test, args.batch_size, args.use_master_node)

n_train_batches = ceil(n_train / args.batch_size)
n_val_batches = ceil(n_val / args.batch_size)
n_test_batches = ceil(n_test / args.batch_size)

# Model and optimizer
model = MPAD(embeddings.shape[1], args.message_passing_layers, args.hidden,
             args.penultimate, nclass, args.dropout, embeddings,
             args.use_master_node)

# Only optimize parameters that require gradients (e.g. frozen embeddings
# are skipped), with the same step schedule as above
parameters = filter(lambda p: p.requires_grad, model.parameters())
optimizer = optim.Adam(parameters, lr=args.lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.5)

# Move the model and all batched tensors to the GPU if requested
if args.cuda:
    model.cuda()
    adj_train = [x.cuda() for x in adj_train]
    features_train = [x.cuda() for x in features_train]
    batch_n_graphs_train = [x.cuda() for x in batch_n_graphs_train]
    y_train = [x.cuda() for x in y_train]
    adj_val = [x.cuda() for x in adj_val]
    features_val = [x.cuda() for x in features_val]
    batch_n_graphs_val = [x.cuda() for x in batch_n_graphs_val]
    y_val = [x.cuda() for x in y_val]
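# Hedged sketch of the per-epoch training step these batches feed into.
# The argument order of model(...) and the use of F.cross_entropy are
# assumptions, not necessarily the repository's exact code.
import torch.nn.functional as F

def train_epoch():
    model.train()
    total_loss = 0.0
    for i in range(n_train_batches):
        optimizer.zero_grad()
        output = model(features_train[i], adj_train[i], batch_n_graphs_train[i])
        loss = F.cross_entropy(output, y_train[i])
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    # Advance the LR schedule once per epoch
    scheduler.step()
    return total_loss / n_train_batches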