def test_dataloader(self):
     if self.testset is not None:
         return GeoLoader(self.testset, batch_size=1, num_workers=1)
     else:
         return None
 def val_dataloader(self):
     if self.valset is not None:
         num_val_workers = 0 if ("gpus" in self.hparams.keys() and self.hparams["gpus"] > 0) else 8
         return GeoLoader(self.valset, batch_size=1, num_workers=num_val_workers)
     else:
         return None
 def val_dataloader(self):
     print(self.valset)
     if self.valset is not None:
         return GeoLoader(self.valset, batch_size=1, num_workers=1)
     else:
         return None
 def train_dataloader(self):
     if self.trainset is not None:
         return GeoLoader(self.trainset, batch_size=1, num_workers=8)
     else:
         return None
# Ejemplo n.º 5  (scraped example separator — "Example no. 5", vote count 0;
# the following fragment is truncated: its enclosing function header is missing)
            "acc_top1": [],
            "acc_top5": [],
        }
    }
    ce = nn.CrossEntropyLoss()

    batch_sampler = data_torch.get_directory_batch_sampler(train_set, batch_size)

    from pprint import pprint as pp
    update = 0
    for batch in batch_sampler:
        net.train()
        batch_data = train_set.get_multiple(batch)
        goals = list(map(lambda x: x[0]['goal_ids'], batch_data))
        tactics = list(map(lambda x: x[1]['tac_id'], batch_data))
        geo_loader = GeoLoader(goals, batch_size=batch_size)
        tactic_tensor = torch.cat(tactics, 0)
        # print(tactic_tensor)
        for graphs in geo_loader:
            graphs.to(device)
            tactic_tensor = tactic_tensor.to(device)
            update += 1
            optimizer.zero_grad()
            out = net(graphs)
            loss = ce(out, tactic_tensor)
            loss.backward()
            optimizer.step()

        if update % 10 == 1:
            print("*"* 100)
            print("Train loss", loss.cpu().item())