def make_cluster_dataloader(self, X, y, shuffle):
    # return a data loader based on ClusterGCN: the dataset partitions the graph
    # into clusters and yields one ready-made batch per cluster
    dataset = ClusterDataset(X, y,
                             self.edge_index,
                             self.edge_weight,
                             num_nodes=self.hparams.num_nodes,
                             batch_size=self.hparams.batch_size,  # per-cluster batch size (assumed to live in hparams)
                             shuffle=shuffle)
    # batch_size=None disables the loader's own batching; the dataset already batches
    return DataLoader(dataset, batch_size=None)
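# Why `batch_size=None` above: with automatic batching disabled, the DataLoader
# simply passes through whatever the dataset yields, so a ClusterGCN-style
# dataset that already emits one batch per graph cluster keeps full control of
# batching. A minimal, self-contained sketch of that pattern follows;
# `TinyClusterBatches` is a hypothetical stand-in for the ClusterDataset above.
import torch
from torch.utils.data import DataLoader, IterableDataset


class TinyClusterBatches(IterableDataset):
    """Yields one pre-formed (features, labels) batch per cluster."""

    def __init__(self, X, y, cluster_ids):
        self.X, self.y, self.cluster_ids = X, y, cluster_ids

    def __iter__(self):
        for c in self.cluster_ids.unique():
            mask = self.cluster_ids == c
            yield self.X[mask], self.y[mask]  # already a full batch


X = torch.randn(10, 4)
y = torch.randint(0, 2, (10,))
cluster_ids = torch.tensor([0, 0, 0, 1, 1, 1, 1, 2, 2, 2])

# batch_size=None: take each yielded item as-is, exactly like the method above
loader = DataLoader(TinyClusterBatches(X, y, cluster_ids), batch_size=None)
for x_batch, y_batch in loader:
    print(x_batch.shape, y_batch.shape)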
opt = parser.parse_args()

if opt.clusters < 0:
    # === pre-clustered environment according to environment file ===========
    from room_dataset import RoomDataset
    testset = RoomDataset("test", training=False)
    ensemble = ExpertEnsemble(testset.num_experts)
else:
    # === large, connected environment, perform clustering ==================
    from cluster_dataset import ClusterDataset
    testset = ClusterDataset("test", num_clusters=opt.clusters, training=False)
    ensemble = ExpertEnsemble(
        testset.num_experts, gating_capacity=2
    )  # for clustering environments we use a gating network with higher capacity

testset_loader = torch.utils.data.DataLoader(testset, shuffle=False, num_workers=6)

if opt.testrefined:
    # load individual, refined experts
    ensemble.load_experts(opt.session, True)
elif opt.testinit:
                    help='robust square root loss after this threshold')
# (see the robust square-root loss sketch after this snippet)

opt = parser.parse_args()

if opt.clusters < 0:
    # === pre-clustered environment according to environment file ===========
    from room_dataset import RoomDataset
    trainset = RoomDataset("training", scene=opt.expert)
else:
    # === large, connected environment, perform clustering ==================
    from cluster_dataset import ClusterDataset
    trainset = ClusterDataset("training", num_clusters=opt.clusters, cluster=opt.expert)

trainset_loader = torch.utils.data.DataLoader(trainset, shuffle=True, num_workers=6)

# restore a previously trained expert and continue training it
model = Expert(torch.zeros((3,)))
model.load_state_dict(
    torch.load('expert_e%d_%s.net' % (opt.expert, opt.session)))
print("Successfully loaded model.")

model.cuda()
model.train()
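# The option registered above ('robust square root loss after this threshold')
# suggests that large errors are soft-clamped: below the threshold the error is
# used as-is, above it the loss grows only with the square root of the error.
# A minimal sketch of one common formulation, with `robust_sqrt_loss` and `tau`
# as illustrative names; the exact clamping used by this training script is an
# assumption here, not its definitive loss.
import torch


def robust_sqrt_loss(err, tau):
    """Linear below `tau`, sqrt(tau * err) above it (continuous at `tau`)."""
    clamped = torch.sqrt(tau * err.clamp(min=1e-12))  # clamp avoids an infinite gradient at 0
    return torch.where(err <= tau, err, clamped)


errors = torch.tensor([0.5, 2.0, 50.0])
print(robust_sqrt_loss(errors, tau=10.0))  # the 50.0 outlier contributes only sqrt(500) ~ 22.4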
if opt.clusters < 0:
    # === pre-clustered environment according to environment file ===========
    from room_dataset import RoomDataset
    trainset = RoomDataset("training", training=True)
    ensemble = ExpertEnsemble(trainset.num_experts, lr=opt.learningrate)
else:
    # === large, connected environment, perform clustering ==================
    from cluster_dataset import ClusterDataset
    trainset = ClusterDataset("training", num_clusters=opt.clusters, training=True)
    ensemble = ExpertEnsemble(
        trainset.num_experts, lr=opt.learningrate, gating_capacity=2
    )  # for clustering environments we use a gating network with higher capacity

trainset_loader = torch.utils.data.DataLoader(trainset, shuffle=True, num_workers=6)

# load pre-trained experts (refined or initial, depending on opt.refined)
ensemble.load_experts(opt.session, opt.refined)

if opt.expertselection:
    model_file = 'es_%s.net' % (opt.session)
    train_log = open('log_es_%s.txt' % (opt.session), 'w', 1)
else:
    model_file = 'esac_%s.net' % (opt.session)