Example 1
import numpy as np
import torch
import torch.nn as nn

# spec_utils is the project's spectrogram utility module; crop_center(h1, h2, concat)
# trims h2 along the time axis to match h1 and optionally concatenates the pair.

def train_inner_epoch(X_train, y_train, model, optimizer, batchsize):
    sum_loss = 0
    model.train()
    aux_crit = nn.L1Loss()
    # Per-element L1 so that each training instance's loss can be tracked.
    criterion = nn.L1Loss(reduction='none')
    perm = np.random.permutation(len(X_train))
    instance_loss = np.zeros(len(X_train), dtype=np.float32)
    for i in range(0, len(X_train), batchsize):
        local_perm = perm[i:i + batchsize]
        X_batch = torch.from_numpy(X_train[local_perm]).cuda()
        y_batch = torch.from_numpy(y_train[local_perm]).cuda()

        model.zero_grad()
        mask, aux = model(X_batch)

        # Crop input and target to the mask's time span
        # (concat=False returns only the cropped tensor).
        X_batch = spec_utils.crop_center(mask, X_batch, False)
        y_batch = spec_utils.crop_center(mask, y_batch, False)
        base_loss = criterion(X_batch * mask, y_batch)
        aux_loss = aux_crit(X_batch * aux, y_batch)

        # Weighted sum of the main and auxiliary (deep-supervision) losses.
        loss = base_loss.mean() * 0.9 + aux_loss * 0.1
        loss.backward()
        optimizer.step()

        # Record the mean absolute error of each instance in the batch.
        abs_diff_np = base_loss.detach().cpu().numpy()
        instance_loss[local_perm] = abs_diff_np.mean(axis=(1, 2, 3))
        sum_loss += loss.item() * len(X_batch)

    return sum_loss / len(X_train), instance_loss
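
For context, a minimal sketch of how train_inner_epoch might be driven. The stand-in network, data shapes, and optimizer settings below are assumptions for illustration only; the real project supplies its own model and spec_utils module (and a CUDA device, since the loop calls .cuda()).

import numpy as np
import torch
import torch.nn as nn

class TinyMaskNet(nn.Module):
    # Stand-in for the project's mask-estimation network: returns a main
    # mask and an auxiliary mask, each the same shape as the input.
    def __init__(self):
        super().__init__()
        self.main = nn.Conv2d(2, 2, 3, padding=1)
        self.aux = nn.Conv2d(2, 2, 3, padding=1)

    def forward(self, x):
        return torch.sigmoid(self.main(x)), torch.sigmoid(self.aux(x))

X_train = np.random.randn(64, 2, 512, 256).astype(np.float32)  # (N, ch, bins, frames)
y_train = np.random.randn(64, 2, 512, 256).astype(np.float32)
model = TinyMaskNet().cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(10):
    train_loss, instance_loss = train_inner_epoch(
        X_train, y_train, model, optimizer, batchsize=8)
    print(f'epoch {epoch}: train loss {train_loss:.6f}')
    # instance_loss can be used to reweight or mine hard training examples.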
Example 2
import torch
import torch.nn as nn

def val_inner_epoch(dataloader, model):
    sum_loss = 0
    model.eval()
    criterion = nn.L1Loss()
    with torch.no_grad():
        for X_batch, y_batch in dataloader:
            X_batch = X_batch.cuda()
            y_batch = y_batch.cuda()
            mask = model.predict(X_batch)
            # Align input and target with the predicted mask's time span.
            X_batch = spec_utils.crop_center(mask, X_batch, False)
            y_batch = spec_utils.crop_center(mask, y_batch, False)

            loss = criterion(X_batch * mask, y_batch)
            sum_loss += loss.item() * len(X_batch)

    return sum_loss / len(dataloader.dataset)
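
A sketch of how this validation loop might be invoked. The tensor shapes are assumptions, and model is assumed to expose the project's predict method returning a mask; on a CUDA machine this runs as-is.

import torch
from torch.utils.data import DataLoader, TensorDataset

X_val = torch.randn(16, 2, 512, 256)  # (N, ch, bins, frames), assumed shape
y_val = torch.randn(16, 2, 512, 256)
val_loader = DataLoader(TensorDataset(X_val, y_val), batch_size=4, shuffle=False)

val_loss = val_inner_epoch(val_loader, model)
print(f'validation loss: {val_loss:.6f}')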
Example 3
    def __call__(self, x, skip=None):
        # Double the spatial resolution of the decoder feature map.
        x = F.interpolate(x,
                          scale_factor=2,
                          mode='bilinear',
                          align_corners=True)
        if skip is not None:
            # Crop the encoder skip connection to x's time span and, with
            # concat defaulting to True, concatenate it along the channel axis.
            x = spec_utils.crop_center(x, skip)
        h = self.conv(x)

        if self.dropout is not None:
            h = self.dropout(h)

        return h
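
The decoder relies on spec_utils.crop_center to reconcile the time spans of the upsampled features and the encoder skip. A minimal sketch consistent with how it is called in Examples 1 through 3 (trim h2's last axis to h1's, optionally concatenating along channels); the project's actual implementation may add shape checks or differ in detail.

import torch

def crop_center(h1, h2, concat=True):
    # Trim h2 symmetrically along the time (last) axis so it matches h1,
    # then optionally concatenate the pair along the channel axis.
    s = (h2.size(3) - h1.size(3)) // 2
    e = s + h1.size(3)
    h2 = h2[:, :, :, s:e]
    if concat:
        return torch.cat([h1, h2], dim=1)
    return h2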
Example 4
import torch
import torch.nn as nn

def val_inner_epoch(dataloader, model, device):
    model.eval()
    sum_loss = 0
    crit = nn.L1Loss()

    with torch.no_grad():
        for X_batch, y_batch in dataloader:
            X_batch = X_batch.to(device)
            y_batch = y_batch.to(device)

            pred = model.predict(X_batch)

            # Here crop_center takes only two arguments: the target is trimmed
            # to the prediction's time span before computing the loss.
            y_batch = spec_utils.crop_center(y_batch, pred)
            loss = crit(pred, y_batch)

            sum_loss += loss.item() * len(X_batch)

    return sum_loss / len(dataloader.dataset)
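
Unlike Examples 1 and 2, this loop calls crop_center with two arguments and keeps the returned tensor as the target, which suggests a later variant of the helper that crops its first argument to the second and never concatenates. A sketch of that variant, inferred from the call site rather than taken from the project:

import torch

def crop_center(h1, h2):
    # Variant implied by this example: trim h1's time axis to match h2's
    # and return the cropped tensor (no channel concatenation).
    s = (h1.size(3) - h2.size(3)) // 2
    e = s + h2.size(3)
    return h1[:, :, :, s:e]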