Example #1
import os

import torch
import torch.nn as nn

# BaseModel is assumed to be provided by the surrounding project,
# e.g. `from models.base_model import BaseModel`.


class ModelWrapper(object):
    def __init__(self, config, data):
        self.config = config
        self.data = data  # data loader
        self.model = BaseModel(config).to(config.device)
        self.loss = nn.BCEWithLogitsLoss(reduction="sum")

    # save function that saves the checkpoint in the path defined in the config file
    def save(self, best: bool, epoch: int, optimizer: torch.optim.Optimizer):
        filename = 'best.tar' if best else 'last.tar'
        print("Saving model as {}...".format(filename), end=' ')
        torch.save({'epoch': epoch,
                    'model_state_dict': self.model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict()},
                   os.path.join(self.config.checkpoint_dir, filename))
        print("Model saved.")

    # load the best or the last checkpoint from the experiment path defined in the config file
    def load(self, best: bool):
        """
        :param best: boolean, to load best model or last checkpoint
        :return: tuple of optimizer_state_dict, epoch
        """
        # self.model = models.base_model.BaseModel(self.config).to('cuda')
        filename = 'best.tar' if best else 'last.tar'
        print("Loading {}...".format(filename), end=' ')
        # map_location keeps the load working even if the checkpoint was saved on a
        # different device; config.device is the same device used in __init__.
        checkpoint = torch.load(os.path.join(self.config.checkpoint_dir, filename),
                                map_location=self.config.device)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.model.to(self.config.device)
        print("Model loaded.")

        return checkpoint['optimizer_state_dict'], checkpoint['epoch']

    def loss_and_results(self, scores, labels):
        loss = self.loss(scores, labels.float())
        pred_scores = torch.sigmoid(scores).detach().cpu()
        # correct_predictions = torch.eq(torch.argmax(scores, dim=1), labels).sum().cpu().item()
        correct_predictions = torch.eq((pred_scores > 0.5).float(), labels.cpu()).sum().item()

        return loss, correct_predictions, pred_scores.numpy()

    def run_model_get_loss_and_results(self, inputs, labels):
        return self.loss_and_results(self.model(inputs), labels)

    def train(self):
        self.model.train()

    def eval(self):
        self.model.eval()
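
A minimal usage sketch for the wrapper above, showing one way to drive training and checkpointing through run_model_get_loss_and_results() and save(); the config.epochs, config.lr and data.train_loader names are assumptions made for this sketch, not part of the example:

import torch

wrapper = ModelWrapper(config, data)
optimizer = torch.optim.Adam(wrapper.model.parameters(), lr=config.lr)

best_accuracy = 0.0
for epoch in range(config.epochs):
    wrapper.train()
    correct, seen = 0, 0
    for inputs, labels in data.train_loader:
        inputs, labels = inputs.to(config.device), labels.to(config.device)
        optimizer.zero_grad()
        loss, batch_correct, _ = wrapper.run_model_get_loss_and_results(inputs, labels)
        loss.backward()
        optimizer.step()
        correct += batch_correct
        seen += labels.size(0)

    accuracy = correct / seen
    # 'best.tar' is only written when this epoch improves on the best accuracy so far;
    # 'last.tar' is written otherwise, so both checkpoints stay available for load().
    wrapper.save(best=accuracy > best_accuracy, epoch=epoch, optimizer=optimizer)
    best_accuracy = max(best_accuracy, accuracy)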
Example #2
# Run a full pass over the test split: every batch is fed through the model under
# torch.no_grad(), the tracker accumulates metrics, and the checkpoint object keeps
# the best weights under the published metrics. Ctq is assumed to be a tqdm-style
# progress-bar context manager provided by the surrounding project.
def test(model: BaseModel, dataset, device, tracker: BaseTracker, checkpoint: ModelCheckpoint, log):
    model.eval()
    tracker.reset("test")
    loader = dataset.test_dataloader()
    with Ctq(loader) as tq_test_loader:
        for data in tq_test_loader:
            data = data.to(device)
            with torch.no_grad():
                model.set_input(data)
                model.forward()

            tracker.track(model)
            tq_test_loader.set_postfix(**tracker.get_metrics(), color=COLORS.TEST_COLOR)

    metrics = tracker.publish()
    tracker.print_summary()
    checkpoint.save_best_models_under_current_metrics(model, metrics)
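
The loop above only relies on a small tracker surface: reset(), track(), get_metrics(), publish() and print_summary(). A minimal stand-in with exactly that surface can be useful for dry-running test() without the real project classes; this is a hypothetical sketch, not the actual BaseTracker API, and it assumes the model stores its latest loss in model.loss:

class MinimalTracker:
    """Hypothetical stand-in implementing only the methods test() calls."""

    def __init__(self):
        self._stage = ""
        self._losses = []

    def reset(self, stage):
        self._stage = stage
        self._losses = []

    def track(self, model):
        # Assumes the model keeps its latest loss in `model.loss`; adapt to your model.
        self._losses.append(float(model.loss))

    def get_metrics(self):
        mean_loss = sum(self._losses) / max(len(self._losses), 1)
        return {"{}_loss".format(self._stage): mean_loss}

    def publish(self):
        return self.get_metrics()

    def print_summary(self):
        print(self.get_metrics())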
Example #3
    opts.render_ids = test_ops.render_ids
    opts.gpu_ids = test_ops.gpu_ids

    torch_devices = [int(gpu_id.strip()) for gpu_id in opts.gpu_ids.split(",")]
    print(torch_devices)
    device = "cuda:" + str(torch_devices[0])

    if "sync" in opts.norm_G:
        model = convert_model(model)
        model = nn.DataParallel(model, torch_devices).to(device)
    else:
        model = nn.DataParallel(model, torch_devices).to(device)

    #  Load the original model to be tested
    model_to_test = BaseModel(model, opts)
    model_to_test.eval()

    # Allow for different image sizes: keep the current model's size-dependent
    # buffers (keys containing "xyzs" or "ones") and take every other entry
    # from the pretrained checkpoint.
    state_dict = model_to_test.state_dict()
    pretrained_dict = {
        k: v
        for k, v in torch.load(MODEL_PATH)["state_dict"].items()
        if not ("xyzs" in k) and not ("ones" in k)
    }
    state_dict.update(pretrained_dict)

    model_to_test.load_state_dict(state_dict)

    print(opts)
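
The filtering above is a general pattern for partially loading a checkpoint: drop the size-dependent entries, merge the remaining weights into the model's current state_dict, and load the merged dict. A self-contained sketch of that pattern, with a hypothetical helper name and the same exclusion list as the example:

import torch

def load_partial_state_dict(model, checkpoint_path, exclude_substrings=("xyzs", "ones")):
    """Load a checkpoint while skipping entries whose keys contain an excluded substring."""
    current = model.state_dict()
    pretrained = torch.load(checkpoint_path, map_location="cpu")["state_dict"]
    kept = {
        k: v
        for k, v in pretrained.items()
        if not any(sub in k for sub in exclude_substrings)
    }
    # Excluded keys keep the values the freshly built model already has, so buffers
    # whose shapes depend on the current image size are not overwritten.
    current.update(kept)
    model.load_state_dict(current)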
Example #4
import os

import torch
import torch.nn.functional as F

# BaseModel is assumed to be provided by the surrounding project.


class ModelWrapper(object):
    def __init__(self, config, data):
        self.config = config
        self.data = data
        self.model = BaseModel(config).cuda()

    # save function that saves the checkpoint in the path defined in the config file
    def save(self, best: bool, epoch: int, optimizer: torch.optim.Optimizer):
        filename = 'best.tar' if best else 'last.tar'
        print("Saving model as {}...".format(filename), end=' ')
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': self.model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()
            }, os.path.join(self.config.checkpoint_dir, filename))
        print("Model saved.")

    # load the best or the last checkpoint from the experiment path defined in the config file
    def load(self, best: bool):
        """
        :param best: boolean, to load best model or last checkpoint
        :return: tuple of optimizer_state_dict, epoch
        """
        # self.model = models.base_model.BaseModel(self.config).to('cuda')
        filename = 'best.tar' if best else 'last.tar'
        print("Loading {}...".format(filename), end=' ')
        checkpoint = torch.load(
            os.path.join(self.config.checkpoint_dir, filename))
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.model.to(torch.device('cuda'))
        print("Model loaded.")

        return checkpoint['optimizer_state_dict'], checkpoint['epoch']

    def loss_and_results(self, scores, labels):
        """
        :param scores: shape NxC
        :param labels: shape Nx1 for classification, shape NxC for regression (QM9)
        :return: tuple of (loss tensor, dists numpy array) for QM9
                          (loss tensor, number of correct predictions) for classification graphs
        """
        if self.config.dataset_name == 'QM9':
            differences = (scores - labels).abs().sum(dim=0)
            loss = differences.sum()
            dists = differences.detach().cpu().numpy()
            return loss, dists
        else:
            loss = F.cross_entropy(scores, labels, reduction='sum')
            correct_predictions = torch.eq(torch.argmax(scores, dim=1),
                                           labels).sum().cpu().item()
            return loss, correct_predictions

    def run_model_get_loss_and_results(self, input, labels):
        return self.loss_and_results(self.model(input), labels)

    def train(self):
        self.model.train()

    def eval(self):
        self.model.eval()
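
A minimal resume sketch for the wrapper above, restoring the optimizer state and starting epoch returned by load(); config.epochs, config.lr and data.train_loader are again names assumed for the sketch:

import torch

wrapper = ModelWrapper(config, data)
optimizer = torch.optim.Adam(wrapper.model.parameters(), lr=config.lr)

# best=False loads 'last.tar'; best=True would load 'best.tar' instead.
optimizer_state_dict, start_epoch = wrapper.load(best=False)
optimizer.load_state_dict(optimizer_state_dict)

for epoch in range(start_epoch + 1, config.epochs):
    wrapper.train()
    for inputs, labels in data.train_loader:
        optimizer.zero_grad()
        # loss_and_results() returns (loss, dists) for QM9 and
        # (loss, correct_predictions) for the classification datasets.
        loss, _ = wrapper.run_model_get_loss_and_results(inputs.cuda(), labels.cuda())
        loss.backward()
        optimizer.step()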