import numpy as np
import torch

# DataframeDataLoader, DilatedNet and the data objects below are project-local helpers.
def test_rmse(model, data_obj=None, device="cpu"):
    if torch.cuda.is_available():
        device = torch.device(torch.cuda.current_device())

    dset_test = data_obj.load_test_data()

    test_loader = DataframeDataLoader(
        dset_test,
        batch_size=4,
        shuffle=False,
    )

    model = model.to(device)
    model.eval()

    SSE = 0
    total = 0
    with torch.no_grad():
        for inputs, targets in test_loader:
            # The model expects (batch, channels, sequence) input, so permute
            # the (batch, sequence, channels) batches from the loader.
            inputs = inputs.permute(0, 2, 1).contiguous().to(device)
            targets = targets.to(device)

            outputs = model(inputs).squeeze()
            total += targets.size(0)
            SSE += np.sum(np.power((outputs - targets).cpu().numpy(), 2))

    return np.sqrt(SSE / total)
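
# Usage sketch: the data object, layer sizes, and checkpoint path below are
# hypothetical stand-ins; the (model_state, optimizer_state) tuple format
# matches what train_cgm saves further down.
data_obj = DataObject()  # hypothetical stand-in for the project's data object
model = DilatedNet(h1=32, h2=16)
model_state, _ = torch.load('model_state_tmp/checkpoint')
model.load_state_dict(model_state)
print('Test RMSE:', test_rmse(model, data_obj=data_obj))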
Example #2
def predict_cgm(data_obj, model: nn.Module) -> np.ndarray:

    dset_test = data_obj.load_test_data()

    test_loader = DataframeDataLoader(
        dset_test,
        batch_size=8,
        shuffle=False,
    )

    # Run on GPU(s) when available
    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda:0"
        if torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
    model = model.to(device)
    model.eval()

    outputs = []
    with torch.no_grad():
        for data, _target in test_loader:
            # Reorder to (batch, channels, sequence) before the forward pass
            data = data.permute(0, 2, 1).contiguous().to(device)
            output = model(data)
            outputs.append(output.detach().cpu())

    return np.concatenate(outputs).squeeze()
# The source shows only this __init__; the enclosing class name is not given,
# so `TestSetEvaluator` is a placeholder.
class TestSetEvaluator:
    def __init__(self, dataObject, model: nn.Module):

        # Extract test data from the data object
        self.test_set = dataObject.load_test_data()

        # Load the whole test set as a single batch
        self.n_elements = count_iterable(self.test_set)
        self.test_loader = DataframeDataLoader(
            self.test_set,
            batch_size=self.n_elements,
            shuffle=False,
        )
        self.test_df = self.test_loader.sample_dataframe

        # The model predicts CGM deltas; adding the observed CGM values
        # yields absolute predictions.
        self.test_predictions_delta = predict_cgm(dataObject, model)
        self.test_predictions_absolute = self.test_predictions_delta + self.test_df[
            'CGM']
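
# Usage sketch for the evaluator above (placeholder class name; data_obj and
# model are assumed to exist as in the earlier snippets):
evaluator = TestSetEvaluator(data_obj, model)
print(evaluator.test_predictions_absolute[:5])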
def train_cgm(config: dict,
              data_obj=None,
              max_epochs=10,
              n_epochs_stop=5,
              grace_period=5,
              useRayTune=True,
              checkpoint_dir=None):
    '''
    max_epochs : Maximum allowed number of epochs
    n_epochs_stop : Number of epochs without improvement in validation error before training terminates
    grace_period : Number of epochs before termination is allowed
    '''
    # Build network
    model = DilatedNet(h1=config["h1"], h2=config["h2"])

    # Move the model to GPU(s) if available
    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda:0"
        if torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
    model.to(device)

    # Optimizer and loss criterion
    criterion = nn.MSELoss(reduction='sum')
    optimizer = optim.RMSprop(model.parameters(),
                              lr=config['lr'],
                              weight_decay=config['wd'])

    if checkpoint_dir:
        model_state, optimizer_state = torch.load(
            os.path.join(checkpoint_dir, "checkpoint"))
        model.load_state_dict(model_state)
        optimizer.load_state_dict(optimizer_state)

    # Load data
    trainset, valset = data_obj.load_train_and_val()

    train_loader = DataframeDataLoader(
        trainset,
        batch_size=int(config['batch_size']),
        shuffle=True,
        drop_last=True,
    )

    val_loader = DataframeDataLoader(
        valset,
        batch_size=int(config['batch_size']),
        shuffle=False,
    )

    min_val_loss = np.inf
    epochs_no_improve = 0

    try:
        for epoch in range(max_epochs):  # loop over the dataset multiple times
            epoch_loss = 0.0
            running_loss = 0.0
            epoch_steps = 0
            for i, data in enumerate(train_loader, 0):

                # get the inputs; data is a pair of (inputs, targets)
                inputs, targets = data
                inputs = inputs.permute(0, 2, 1).contiguous()

                if targets.size(0) == int(config['batch_size']):

                    inputs, targets = inputs.to(device), targets.to(device)

                    # zero the parameter gradients
                    optimizer.zero_grad()

                    # forward + backward + optimize
                    outputs = model(inputs)
                    loss = criterion(outputs, targets.reshape(-1, 1))
                    loss.backward()
                    optimizer.step()

                    # print statistics
                    running_loss += loss.item()
                    epoch_loss += loss.item()
                    epoch_steps += 1

                    print_every = 50
                    if i % print_every == (print_every -
                                           1):  # print every nth mini-batch
                        print(
                            "[%d, %5d] Avg loss per element in mini-batch: %.3f"
                            % (epoch + 1, i + 1, running_loss /
                               (print_every * int(config['batch_size']))))
                        running_loss = 0.0

            # Validation loss (gradients disabled; eval mode for the pass)
            val_loss = 0.0
            val_steps = 0
            model.eval()
            with torch.no_grad():
                for i, data in enumerate(val_loader, 0):
                    inputs, targets = data
                    inputs = inputs.permute(0, 2, 1).contiguous()

                    if targets.size(0) == int(config['batch_size']):
                        inputs, targets = inputs.to(device), targets.to(device)

                        outputs = model(inputs)

                        loss = criterion(outputs, targets.reshape(-1, 1))
                        val_loss += loss.item()
                        val_steps += 1
            model.train()

            if useRayTune:
                with tune.checkpoint_dir(epoch) as checkpoint_dir:
                    path = os.path.join(checkpoint_dir, "checkpoint")
                    torch.save((model.state_dict(), optimizer.state_dict()),
                               path)

                    tune.report(loss=(val_loss / val_steps))

            if (val_loss / val_steps) < min_val_loss:
                epochs_no_improve = 0
                min_val_loss = val_loss / val_steps

                if not useRayTune:
                    path = code_path / 'src' / 'model_state_tmp'
                    path.mkdir(exist_ok=True, parents=True)
                    path = path / 'checkpoint'
                    torch.save((model.state_dict(), optimizer.state_dict()),
                               path)
                    print("Saved better model!")

            else:
                epochs_no_improve += 1

            if not useRayTune:
                print('Epoch {0}'.format(epoch + 1), end='')
                print(f', Training loss: {(epoch_loss/epoch_steps):1.2E}',
                      end='')
                print(f', Validation loss: {(val_loss/val_steps):1.2E}')

            if epoch > grace_period and epochs_no_improve >= n_epochs_stop:
                print('Early stopping!')
                break

    except KeyboardInterrupt:
        print('-' * 89)
        print('Forced early training exit')

    print("Finished Training")
Example #6
    "wd": 4e-3,
}

model = DilatedNet(
    h1=config["h1"],
    h2=config["h2"],
    h3=config["h3"],
    h4=config["h4"],
)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
# Load training data
trainset, valset = data_obj.load_train_and_val()

train_loader = DataframeDataLoader(
    trainset,
    batch_size=int(config['batch_size']),
    shuffle=True,
    drop_last=True,
)

# Perform a single prediction
data = next(iter(train_loader))
inputs, targets = data
inputs, targets = inputs.to(device), targets.to(device)
# It is important to permute the input to (batch, channels, sequence)!
inputs = inputs.permute(0, 2, 1).contiguous()

output = model(inputs)

# %%
# ---------------------------------------------------------------------
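# Sanity check on the single prediction above: the network should return one
# regression value per sample (shape assumed from train_cgm's reshape(-1, 1)).
print(inputs.shape)   # (batch_size, channels, sequence_length)
print(output.shape)   # expected: (batch_size, 1)
assert output.shape[0] == targets.shape[0]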