def validate(loader, num_classes, device, net, criterion):
    """Run one validation pass over `loader`.

    Returns a dict with the average loss per sample ("loss") and the mean
    intersection-over-union over all seen masks ("iou").
    """
    num_samples = 0
    running_loss = 0

    iou = MeanIoU(range(num_classes))

    net.eval()

    for images, masks, tiles in tqdm(loader, desc="Validate", unit="batch", ascii=True):
        images = images.to(device)
        masks = masks.to(device)

        assert images.size()[2:] == masks.size()[1:], "resolutions for images and masks are in sync"

        num_samples += int(images.size(0))

        outputs = net(images)

        assert outputs.size()[2:] == masks.size()[1:], "resolutions for predictions and masks are in sync"
        assert outputs.size()[1] == num_classes, "classes for predictions and dataset are in sync"

        loss = criterion(outputs, masks)
        running_loss += loss.item()

        for mask, output in zip(masks, outputs):
            # Consistent with the other metric loops in this file: move the
            # ground-truth mask and the argmax prediction to host memory as
            # flat numpy arrays before feeding the MeanIoU accumulator,
            # rather than handing it 2-D device tensors.
            mask = mask.data.cpu().numpy()
            prediction = output.data.max(0)[1].cpu().numpy()
            iou.add(mask.ravel(), prediction.ravel())

    assert num_samples > 0, "dataset contains validation images and labels"

    return {"loss": running_loss / num_samples, "iou": iou.get()}
def validate(loader, num_classes, device, net, criterion):
    """Evaluate `net` on every batch of `loader`.

    Accumulates the criterion loss and a mean-IoU metric, then reports the
    average loss per sample and the final IoU as a dict.
    """
    seen = 0
    total_loss = 0

    metric = MeanIoU(range(num_classes))

    net.eval()

    progress = tqdm(loader, desc="Validate", unit="batch", ascii=True)

    for batch_images, batch_masks, _tiles in progress:
        batch_images = batch_images.to(device)
        batch_masks = batch_masks.to(device)

        assert batch_images.size()[2:] == batch_masks.size()[1:], "resolutions for images and masks are in sync"

        seen += int(batch_images.size(0))

        predictions = net(batch_images)

        assert predictions.size()[2:] == batch_masks.size()[1:], "resolutions for predictions and masks are in sync"
        assert predictions.size()[1] == num_classes, "classes for predictions and dataset are in sync"

        batch_loss = criterion(predictions, batch_masks)
        total_loss += batch_loss.item()

        # Per-image metric update: flatten ground truth and argmax prediction
        # to 1-D host arrays before feeding the accumulator.
        for truth, scores in zip(batch_masks, predictions):
            truth_flat = truth.data.cpu().numpy().ravel()
            predicted_flat = scores.data.max(0)[1].cpu().numpy().ravel()
            metric.add(truth_flat, predicted_flat)

    assert seen > 0, "dataset contains validation images and labels"

    return {"loss": total_loss / seen, "iou": metric.get()}
def validate(loader, num_classes, device, net, criterion):
    """Run one validation pass over `loader`.

    Returns a dict with the average loss per sample ("loss") and the mean
    intersection-over-union over all seen masks ("iou").
    """
    num_samples = 0
    running_loss = 0

    iou = MeanIoU(range(num_classes))

    net.eval()

    for images, masks, tiles in tqdm(loader, desc="Validate", unit="batch", ascii=True):
        images = images.to(device)
        masks = masks.to(device)

        assert images.size()[2:] == masks.size()[1:], "resolutions for images and masks are in sync"

        num_samples += int(images.size(0))

        outputs = net(images)

        assert outputs.size()[2:] == masks.size()[1:], "resolutions for predictions and masks are in sync"
        assert outputs.size()[1] == num_classes, "classes for predictions and dataset are in sync"

        loss = criterion(outputs, masks)
        running_loss += loss.item()

        for mask, output in zip(masks, outputs):
            mask = mask.data.cpu().numpy()
            prediction = output.data.max(0)[1].cpu().numpy()
            iou.add(mask.ravel(), prediction.ravel())

    # Guard missing from this variant but present in its siblings: an empty
    # loader would otherwise raise a bare ZeroDivisionError below instead of
    # this diagnostic message.
    assert num_samples > 0, "dataset contains validation images and labels"

    return {"loss": running_loss / num_samples, "iou": iou.get()}
def train(loader, num_classes, device, net, optimizer, scheduler, criterion):
    """Train `net` for one epoch over `loader`.

    Steps the optimizer per batch and the LR scheduler once per epoch.
    Returns a dict with the average loss per sample ("loss") and the mean
    intersection-over-union over all seen masks ("iou").
    """
    num_samples = 0
    running_loss = 0

    iou = MeanIoU(range(num_classes))

    net.train()

    for images, masks, tiles in tqdm(loader, desc="Train", unit="batch", ascii=True):
        images = images.to(device)
        masks = masks.to(device)

        assert images.size()[2:] == masks.size()[1:], "resolutions for images and masks are in sync"

        num_samples += int(images.size(0))

        optimizer.zero_grad()

        outputs = net(images)

        assert outputs.size()[2:] == masks.size()[1:], "resolutions for predictions and masks are in sync"
        assert outputs.size()[1] == num_classes, "classes for predictions and dataset are in sync"

        loss = criterion(outputs, masks)
        loss.backward()

        optimizer.step()

        running_loss += loss.item()

        for mask, output in zip(masks, outputs):
            mask = mask.data.cpu().numpy()
            prediction = output.data.max(0)[1].cpu().numpy()
            iou.add(mask.ravel(), prediction.ravel())

    # PyTorch >= 1.1 requires the LR scheduler to be stepped *after* the
    # epoch's optimizer updates; stepping it first (as the old code did)
    # skips the initial learning rate and triggers a UserWarning.
    scheduler.step()

    assert num_samples > 0, "dataset contains training images and labels"

    return {"loss": running_loss / num_samples, "iou": iou.get()}