Example #1
def val_seg_latency(model,
                    dataset_loader,
                    criterion=None,
                    num_classes=21,
                    device='cuda'):
    model.eval()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()
    batch_time = AverageMeter()
    end = time.time()

    total_batches = 100  # limit the latency benchmark to ~100 batches
    miou_class = MIOU(num_classes=num_classes)

    if criterion:
        losses = AverageMeter()

    with torch.no_grad():
        for i, (inputs, target) in enumerate(dataset_loader):
            inputs = inputs.to(device=device)
            target = target.to(device=device)
            end = time.time()
            outputs = model(inputs)
            if device == 'cuda':
                torch.cuda.synchronize()  # wait for the GPU so the timing is accurate
            batch_time.update(time.time() - end)

            if criterion:
                if device == 'cuda':
                    loss = criterion(outputs, target).mean()
                    if isinstance(outputs, (list, tuple)):
                        target_dev = outputs[0].device
                        outputs = gather(outputs, target_device=target_dev)
                else:
                    loss = criterion(outputs, target)
                losses.update(loss.item(), inputs.size(0))

            inter, union = miou_class.get_iou(outputs, target)
            inter_meter.update(inter)
            union_meter.update(union)

            if i % 10 == 0:  # log every 10 batches
                iou = inter_meter.sum / (union_meter.sum + 1e-10)
                miou = iou.mean() * 100
                loss_ = losses.avg if criterion is not None else 0
                print_log_message(
                    "[%d/%d]\t\tBatch Time:%.4f\t\tLoss:%.4f\t\tmiou:%.4f" %
                    (i, len(dataset_loader), batch_time.avg, loss_, miou))
            if i >= total_batches:
                break
    iou = inter_meter.sum / (union_meter.sum + 1e-10)
    miou = iou.mean() * 100

    print_info_message('Mean IoU: {0:.2f}'.format(miou))
    if criterion:
        return miou, losses.avg
    else:
        return miou, 0
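All of these examples lean on an AverageMeter helper for running statistics. Its definition is not part of this page; a minimal sketch consistent with how it is used above (update(), .sum, .avg, with both scalar and tensor values) might look like this:

# Minimal sketch of the AverageMeter helper assumed by these examples.
# The real class may track more state; this version supports only what
# the callers above use: update(val, n), .sum, and .avg.
class AverageMeter(object):
    def __init__(self):
        self.sum = 0.0   # scalar for losses/timings, tensor for inter/union
        self.count = 0

    def update(self, val, n=1):
        self.sum = self.sum + val * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)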
Example #2
def train_seg(model,
              dataset_loader,
              optimizer,
              criterion,
              num_classes,
              epoch,
              device='cuda'):
    losses = AverageMeter()
    batch_time = AverageMeter()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()
    end = time.time()
    model.train()

    miou_class = MIOU(num_classes=num_classes)

    for i, (inputs, target) in enumerate(dataset_loader):
        inputs = inputs.to(device=device)
        target = target.to(device=device)

        outputs = model(inputs)

        if device == 'cuda':
            loss = criterion(outputs, target).mean()
            if isinstance(outputs, (list, tuple)):
                target_dev = outputs[0].device
                outputs = gather(outputs, target_device=target_dev)
        else:
            loss = criterion(outputs, target)

        inter, union = miou_class.get_iou(outputs, target)

        inter_meter.update(inter)
        union_meter.update(union)

        losses.update(loss.item(), inputs.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % 10 == 0:  # log every 10 batches
            iou = inter_meter.sum / (union_meter.sum + 1e-10)
            miou = iou.mean() * 100
            print_log_message(
                "Epoch: %d[%d/%d]\t\tBatch Time:%.4f\t\tLoss:%.4f\t\tmiou:%.4f"
                % (epoch, i, len(dataset_loader), batch_time.avg, losses.avg,
                   miou))

    iou = inter_meter.sum / (union_meter.sum + 1e-10)
    miou = iou.mean() * 100
    return miou, losses.avg
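For context, a driver that wires train_seg and val_seg_latency together could look as follows. This is a hypothetical sketch, not part of the original source: the loss, optimizer settings, and epoch count are placeholder assumptions.

# Hypothetical training driver (assumes train_seg / val_seg_latency from
# Examples 1 and 2 are importable in this scope).
import torch
import torch.nn as nn
import torch.optim as optim

def run_training(model, train_loader, val_loader, num_classes, epochs=50):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = model.to(device)
    criterion = nn.CrossEntropyLoss(ignore_index=255)  # assumed loss
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

    best_miou = 0.0
    for epoch in range(epochs):
        train_miou, train_loss = train_seg(
            model, train_loader, optimizer, criterion,
            num_classes=num_classes, epoch=epoch, device=device)
        val_miou, val_loss = val_seg_latency(
            model, val_loader, criterion=criterion,
            num_classes=num_classes, device=device)
        best_miou = max(best_miou, val_miou)
    return best_miou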
Example #3
def evaluate(args, model, image_list, seg_classes, device):
    im_size = tuple(args.im_size)

    # get color map for pascal dataset
    if args.dataset == 'pascal':
        from utilities.color_map import VOCColormap
        cmap = VOCColormap().get_color_map_voc()
    else:
        cmap = None

    model.eval()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()

    miou_class = MIOU(num_classes=seg_classes)

    for i, imgName in tqdm(enumerate(image_list), total=len(image_list)):
        img = Image.open(imgName).convert('RGB')
        w, h = img.size

        img = data_transform(img, im_size)
        img = img.unsqueeze(0)  # add a batch dimension
        img = img.to(device)
        img_out = model(img)
        img_out = img_out.squeeze(0)  # remove the batch dimension
        img_out = img_out.max(0)[1].byte()  # get the label map
        img_out = img_out.to(device='cpu').numpy()

        if args.dataset == 'city':
            # Cityscapes uses different label IDs for training and
            # evaluation, so map train IDs back to the original IDs
            img_out = relabel(img_out)

        img_out = Image.fromarray(img_out)
        # resize to original size
        img_out = img_out.resize((w, h), Image.NEAREST)

        # the PASCAL VOC benchmark expects paletted (colored) masks
        if args.dataset == 'pascal':
            img_out.putpalette(cmap)

        # save the segmentation mask, replacing the extension with .png
        name = imgName.split('/')[-1]
        name = '{}/{}.png'.format(args.savedir, name.rsplit('.', 1)[0])
        img_out.save(name)
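data_transform is not shown on this page. A plausible minimal implementation, assuming standard ImageNet normalization statistics (an assumption, not taken from the original code), would be:

# Hypothetical sketch of the data_transform helper used in Example 3.
from PIL import Image
import torchvision.transforms.functional as F

def data_transform(img, im_size):
    # resize to the evaluation resolution (PIL expects (width, height)),
    # convert to a CHW float tensor in [0, 1], then normalize with the
    # common ImageNet statistics (assumed values)
    img = img.resize(im_size, Image.BILINEAR)
    tensor = F.to_tensor(img)
    tensor = F.normalize(tensor,
                         mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
    return tensor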
Example #4
def val_seg_cls(model, dataset_loader, criterion_seg=None, criterion_cls=None, num_classes=21, cls_loss_weight=1.0, device='cuda', use_depth=False):
    model.eval()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()
    batch_time = AverageMeter()
    end = time.time()

    miou_class = MIOU(num_classes=num_classes)

    if criterion_seg:
        losses = AverageMeter()
        cls_losses = AverageMeter()
        seg_losses = AverageMeter()

    with torch.no_grad():
        for i, batch in enumerate(dataset_loader):
            inputs = batch[0].to(device=device)
            target = batch[1].to(device=device)
            
            if use_depth:
                depth = batch[2].to(device=device)
                outputs_seg, outputs_cls = model(inputs, depth)
            else:
                outputs_seg, outputs_cls = model(inputs)

            cls_ids = batch[3].to(device=device)

            if criterion_seg and criterion_cls:
                if device == 'cuda':
                    loss_seg = criterion_seg(outputs_seg, target).mean()
                    loss_cls = criterion_cls(outputs_cls, cls_ids).mean()

                    loss = loss_seg + cls_loss_weight * loss_cls

                    if isinstance(outputs_seg, (list, tuple)):
                        target_dev = outputs_seg[0].device
                        outputs_seg = gather(outputs_seg, target_device=target_dev)
                else:
                    loss_seg = criterion_seg(outputs_seg, target)
                    loss_cls = criterion_cls(outputs_cls, cls_ids)

                    loss = loss_seg + cls_loss_weight * loss_cls

                losses.update(loss.item(), inputs.size(0))
                seg_losses.update(loss_seg.item(), inputs.size(0))
                cls_losses.update(loss_cls.item(), inputs.size(0))

            inter, union = miou_class.get_iou(outputs_seg, target)
            inter_meter.update(inter)
            union_meter.update(union)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % 10 == 0:  # log every 10 batches
                iou = inter_meter.sum / (union_meter.sum + 1e-10)
                miou = iou.mean() * 100
                loss_ = losses.avg if criterion_seg is not None else 0
                print_log_message("[%d/%d]\t\tBatch Time:%.4f\t\tLoss:%.4f\t\tmiou:%.4f" %
                      (i, len(dataset_loader), batch_time.avg, loss_, miou))

    iou = inter_meter.sum / (union_meter.sum + 1e-10)
    miou = iou.mean() * 100

    print_info_message('Mean IoU: {0:.2f}'.format(miou))
    if criterion_seg and criterion_cls:
        return miou, losses.avg, seg_losses.avg, cls_losses.avg
    else:
        return miou, 0, 0, 0
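Note that val_seg_cls reads batch[3] for the class IDs even when use_depth is False, so the loader is expected to always yield four items per batch. A small helper makes the assumed layout explicit (the shapes are inferred from the indexing above, not stated in the original):

# Assumed batch layout for val_seg_cls / train_seg_cls:
#   batch[0] -> RGB input             (B, 3, H, W)
#   batch[1] -> segmentation target   (B, H, W)
#   batch[2] -> depth map             (B, 1, H, W); consumed only if use_depth
#   batch[3] -> image-level class IDs (B,) for the classification head
def unpack_seg_cls_batch(batch, use_depth=False):
    inputs, target = batch[0], batch[1]
    depth = batch[2] if use_depth else None
    cls_ids = batch[3]
    return inputs, target, depth, cls_ids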
Example #5
def train_seg_cls(model, dataset_loader, optimizer, criterion_seg, num_classes, epoch, criterion_cls, cls_loss_weight=1.0, device='cuda', use_depth=False):
    losses = AverageMeter()
    cls_losses = AverageMeter()
    seg_losses = AverageMeter()
    batch_time = AverageMeter()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()
    end = time.time()
    model.train()

    miou_class = MIOU(num_classes=num_classes)

    for i, batch in enumerate(dataset_loader):
        inputs = batch[0].to(device=device)
        target = batch[1].to(device=device)

        if use_depth:
            depth = batch[2].to(device=device)
            outputs_seg, outputs_cls = model(inputs, depth)
        else:
            outputs_seg, outputs_cls = model(inputs)

        cls_ids = batch[3].to(device=device)

        if device == 'cuda':
            loss_seg = criterion_seg(outputs_seg, target).mean()

            loss_cls = criterion_cls(outputs_cls, cls_ids).mean()
            loss = loss_seg + cls_loss_weight * loss_cls

            if isinstance(outputs_seg, (list, tuple)):
                target_dev = outputs_seg[0].device
                outputs_seg = gather(outputs_seg, target_device=target_dev)
        else:
            loss_seg = criterion_seg(outputs_seg, target)

            loss_cls = criterion_cls(outputs_cls, cls_ids)
            loss = loss_seg + cls_loss_weight * loss_cls

        inter, union = miou_class.get_iou(outputs_seg, target)

        inter_meter.update(inter)
        union_meter.update(union)

        losses.update(loss.item(), inputs.size(0))
        seg_losses.update(loss_seg.item(), inputs.size(0))
        cls_losses.update(loss_cls.item(), inputs.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % 10 == 0:  # log every 10 batches
            iou = inter_meter.sum / (union_meter.sum + 1e-10)
            miou = iou.mean() * 100
            print_log_message("Epoch: %d[%d/%d]\t\tBatch Time:%.4f\t\tLoss:%.4f\t\tmiou:%.4f" %
                  (epoch, i, len(dataset_loader), batch_time.avg, losses.avg, miou))

    iou = inter_meter.sum / (union_meter.sum + 1e-10)
    miou = iou.mean() * 100

    return miou, losses.avg, seg_losses.avg, cls_losses.avg
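MIOU.get_iou is assumed to return per-class intersection and union counts, which the callers accumulate before dividing. A minimal sketch consistent with that usage (the real class may handle ignore labels and multi-device outputs differently):

import torch

class MIOU(object):
    # Sketch of the MIOU helper assumed by these examples: per-class
    # intersection and union histograms for one batch.
    def __init__(self, num_classes=21):
        self.num_classes = num_classes

    def get_iou(self, outputs, target):
        if isinstance(outputs, (list, tuple)):
            outputs = outputs[0]
        pred = outputs.argmax(dim=1).view(-1)  # hard label map, flattened
        target = target.view(-1)
        inter = torch.zeros(self.num_classes)
        union = torch.zeros(self.num_classes)
        for c in range(self.num_classes):
            p, t = pred == c, target == c
            inter[c] = (p & t).sum()
            union[c] = (p | t).sum()
        return inter, union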
Example #6
def val_seg_per_image(model,
                      dataset_loader,
                      criterion=None,
                      num_classes=21,
                      device='cuda',
                      use_depth=False):
    model.eval()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()
    batch_time = AverageMeter()
    end = time.time()

    miou_class = MIOU(num_classes=num_classes - 1)

    if criterion:
        losses = AverageMeter()

    accuracy_list = {}
    with torch.no_grad():
        for i, batch in enumerate(dataset_loader):
            inputs = batch[0].to(device=device)
            target = batch[1].to(device=device)

            if use_depth:
                depth = batch[2].to(device=device)
                outputs = model(inputs, depth)
            else:
                outputs = model(inputs)

            if criterion:
                if device == 'cuda':
                    loss = criterion(outputs, target).mean()
                    if isinstance(outputs, (list, tuple)):
                        target_dev = outputs[0].device
                        outputs = gather(outputs, target_device=target_dev)
                else:
                    loss = criterion(outputs, target)

                losses.update(loss.item(), inputs.size(0))

            inter, union = miou_class.get_iou(outputs, target)
            inter_meter.update(inter)
            union_meter.update(union)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # running (cumulative) mIoU over images 0..i, not the score
            # of image i alone
            iou = inter_meter.sum / (union_meter.sum + 1e-10)
            miou = iou.mean() * 100

            accuracy_list[i] = miou

    iou = inter_meter.sum / (union_meter.sum + 1e-10)
    miou = iou.mean() * 100

    print_info_message('Mean IoU: {0:.2f}'.format(miou))

    return accuracy_list
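Because the meters above are never reset, accuracy_list[i] holds the running mIoU over images 0..i rather than the score of image i alone. If a truly per-image score is wanted, the counts have to be used before accumulation, e.g.:

# Hypothetical per-image variant (assumes MIOU.get_iou returns per-class
# intersection/union tensors, as in the sketch after Example 5).
def per_image_miou(miou_class, outputs, target):
    inter, union = miou_class.get_iou(outputs, target)
    iou = inter / (union + 1e-10)
    return iou.mean() * 100  # score of this single image only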
Example #7
def main(args):
    crop_size = args.crop_size
    assert isinstance(crop_size, tuple)
    print_info_message('Running Model at image resolution {}x{} with batch size {}'.format(crop_size[0], crop_size[1],
                                                                                           args.batch_size))
    if not os.path.isdir(args.savedir):
        os.makedirs(args.savedir)

    writer = SummaryWriter(log_dir=args.savedir)

    num_gpus = torch.cuda.device_count()
    device = 'cuda' if num_gpus > 0 else 'cpu'

    from data_loader.segmentation.greenhouse import color_encoding as color_encoding_greenhouse
    from data_loader.segmentation.greenhouse import color_palette
    from data_loader.segmentation.camvid import color_encoding as color_encoding_camvid

    # Outsourced models trained on other (source) datasets
    os_model_name_list = [args.os_model1, args.os_model2, args.os_model3]
    os_weights_name_list = [args.os_weights1, args.os_weights2, args.os_weights3]
    os_data_name_list = [args.outsource1, args.outsource2, args.outsource3]
    os_model_name_list = [x for x in os_model_name_list if x is not None]
    os_weights_name_list = [x for x in os_weights_name_list if x is not None] 
    os_data_name_list = [x for x in os_data_name_list if x is not None]
    os_model_list = []
    print(os_model_name_list)
    print(os_weights_name_list)
    print(os_data_name_list)
    for os_m, os_w, os_d in zip(os_model_name_list, os_weights_name_list, os_data_name_list):
        if os_d == 'camvid':
            os_seg_classes = 13
        elif os_d == 'cityscapes':
            os_seg_classes = 20
        elif os_d == 'forest' or os_d == 'greenhouse':
            os_seg_classes = 5
        else:
            raise ValueError('unsupported outsource dataset: {}'.format(os_d))

        os_model = import_os_model(args, os_model=os_m, os_weights=os_w, os_seg_classes=os_seg_classes)
        os_model_list.append(os_model)

    from data_loader.segmentation.greenhouse import GreenhouseRGBDSegmentation, GREENHOUSE_CLASS_LIST
    seg_classes = len(GREENHOUSE_CLASS_LIST)
    val_dataset = GreenhouseRGBDSegmentation(root='./vision_datasets/greenhouse/', list_name=args.val_list, use_traversable=False, 
                                             train=False, size=crop_size, use_depth=args.use_depth,
                                             normalize=args.normalize)

    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False,
                                             pin_memory=True, num_workers=args.workers)

    start_epoch = 0
    best_miou = 0.0
    losses = AverageMeter()
    ce_losses = AverageMeter()
    nid_losses = AverageMeter()
    batch_time = AverageMeter()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()
    miou_class = MIOU(num_classes=seg_classes)
    with torch.no_grad():
        for i, batch in enumerate(val_loader):
            inputs = batch[0].to(device=device)
            target = batch[1].to(device=device)
            name = batch[2]
            
            output_list = []
            for m, os_data in zip(os_model_list, os_data_name_list):
                # Output: Numpy, KLD: Numpy
                output, _ = get_output(m, inputs) 

                output = output.transpose(1, 2, 0)  # CHW -> HWC
                amax_output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)

                # remap the predicted labels to the greenhouse label set
                if os_data == 'camvid':
                    amax_output = id_camvid_to_greenhouse[amax_output]
                elif os_data == 'cityscapes':
                    amax_output = id_cityscapes_to_greenhouse[amax_output]
                elif os_data == 'forest':
                    amax_output = id_forest_to_greenhouse[amax_output]

                output_list.append(amax_output)

            amax_output = merge_outputs(np.array(output_list),
                                        seg_classes=5, thresh='all')
            
            # Output the generated label images
            if args.output_image:
                for path_name in name:
                    image_name = path_name.split('/')[-1]
                    image_name = image_name.rsplit('.', 1)[0]
                    amax_output_img_color = colorize_mask(amax_output, color_palette)
                    amax_output_img_color.save('%s/%s_color.png' % (args.savedir, image_name))

                    for output_i, name_i in zip(output_list, os_data_name_list):
                        amax_output_img_color = colorize_mask(output_i, color_palette)
                        amax_output_img_color.save('%s/%s_color_%s.png' % (args.savedir, image_name, name_i))

            outputs_argmax = torch.from_numpy(amax_output)
            
            inter, union = miou_class.get_iou(outputs_argmax, target)
            inter_meter.update(inter)
            union_meter.update(union)

            print("Batch {}/{} finished".format(i + 1, len(val_loader)))
    
    iou = inter_meter.sum / (union_meter.sum + 1e-10) * 100
    miou = iou[[1, 2, 3]].mean()
    writer.add_scalar('label_eval/IoU', miou, 0)
    writer.add_scalar('label_eval/plant', iou[1], 0)
    writer.add_scalar('label_eval/artificial_object', iou[2], 0)
    writer.add_scalar('label_eval/ground', iou[3], 0)

    writer.close()
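merge_outputs fuses the hard label maps predicted by the outsourced models. Its implementation is not shown here; a plausible sketch, assuming thresh='all' means a pixel keeps its label only when every model agrees and that disagreements map to an unknown/ignore id (both assumptions), is:

import numpy as np

def merge_outputs(label_maps, seg_classes=5, thresh='all'):
    # Hypothetical sketch; label_maps is a (num_models, H, W) array of
    # hard labels. Pixels on which all models agree keep their label; the
    # rest get an assumed unknown/ignore id of seg_classes - 1.
    # (thresh is accepted for signature compatibility; only the 'all'
    # behavior is sketched here.)
    merged = label_maps[0].copy()
    agree = np.all(label_maps == label_maps[0], axis=0)
    merged[~agree] = seg_classes - 1
    return merged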
Example #8
def test(testloader,
         model,
         criterion,
         device,
         optimizer,
         class_encoding,
         writer_idx,
         class_weights=None,
         writer=None):
    """Create the model and start the evaluation process."""
    ## scorer
    h, w = map(int, args.input_size.split(','))
    test_image_size = (h, w)

    # For logging the training status
    losses = AverageMeter()
    batch_time = AverageMeter()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()

    miou_class = MIOU(num_classes=4)

    kld_layer = PixelwiseKLD()

    from data_loader.segmentation.greenhouse import GreenhouseRGBDSegmentation
    ds = GreenhouseRGBDSegmentation(list_name=args.data_test_list,
                                    train=False,
                                    use_traversable=True,
                                    use_depth=args.use_depth)

    # note: the testloader argument is shadowed by the loader built here
    testloader = data.DataLoader(ds,
                                 batch_size=32,
                                 shuffle=False,
                                 pin_memory=args.pin_memory)

    ## model for evaluation
    model.eval()

    ## upsampling layer
    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=test_image_size,
                             mode='bilinear',
                             align_corners=True)
    else:
        interp = nn.Upsample(size=test_image_size, mode='bilinear')

    ## output of deeplab is logits, not probability
    softmax2d = nn.Softmax2d()

    ## evaluation process
    start_eval = time.time()
    total_loss = 0
    ious = 0

    # TODO: Change this (implement the same function in 'utility/utils.py', or uncomment the code below with slight modification)
    #    total_loss, (iou, miou) = util.run_validation(model, testloader, criterion, metric, device, writer, interp)
    with torch.no_grad():
        for index, batch in enumerate(testloader):
            #image, label, depth, name, reg_weights = batch

            images = batch[0].to(device)
            labels = batch[1].to(device)
            if args.use_depth:
                depths = batch[2].to(device)
                reg_weights = batch[4]
            else:
                reg_weights = batch[3]

            # Upsample the output of the model to the input size
            # TODO: Change the input according to the model type
            # NOTE: interp is set to None here, which disables the
            # upsampling branch below
            interp = None
            if interp is not None:
                if args.use_depth:
                    if args.model == 'espdnet':
                        pred = interp(model(images, depths))
                    elif args.model == 'espdnetue':
                        (pred, pred_aux) = interp(model(images, depths))
                else:
                    if args.model == 'espdnet':
                        pred = interp(model(images))
                    elif args.model == 'espdnetue':
                        (pred, pred_aux) = interp(model(images))
                    elif args.model == 'deeplabv3':
                        output = model(images)
                        pred = interp(output['out'])
                        pred_aux = interp(output['aux'])
            else:
                if args.use_depth:
                    if args.model == 'espdnet':
                        pred = model(images, depths)
                    elif args.model == 'espdnetue':
                        (pred, pred_aux) = model(images, depths)
                else:
                    if args.model == 'espdnet':
                        pred = model(images)
                    elif args.model == 'espdnetue':
                        (pred, pred_aux) = model(images)
                    elif args.model == 'deeplabv3':
                        output = model(images)
                        pred = output['out']
                        pred_aux = output['aux']

            loss = criterion(pred, labels)

            inter, union = miou_class.get_iou(pred, labels)

            inter_meter.update(inter)
            union_meter.update(union)

            losses.update(loss.item(), images.size(0))

    iou = inter_meter.sum / (union_meter.sum + 1e-10)
    miou = iou.mean() * 100

    writer.add_scalar('traversability_mask/test/mean_IoU', miou, writer_idx)
    writer.add_scalar('traversability_mask/test/loss', losses.avg, writer_idx)
    writer.add_scalar('traversability_mask/test/traversable_plant_IoU', iou[0],
                      writer_idx)
    writer.add_scalar('traversability_mask/test/other_plant_mean_IoU', iou[1],
                      writer_idx)
    writer.add_scalar('traversability_mask/test/artificial_object_mean_IoU',
                      iou[2], writer_idx)
    writer.add_scalar('traversability_mask/test/ground_mean_IoU', iou[3],
                      writer_idx)

    if args.use_depth:
        # model, images, depths=None, labels=None, predictions=None, class_encoding=None, writer=None, epoch=None, data=None, device=None
        in_training_visualization_img(model,
                                      images=images,
                                      depths=depths,
                                      labels=labels,
                                      class_encoding=class_encoding,
                                      writer=writer,
                                      epoch=writer_idx,
                                      data='traversability_mask/test',
                                      device=device)
    else:
        in_training_visualization_img(model,
                                      images=images,
                                      labels=labels,
                                      class_encoding=class_encoding,
                                      writer=writer,
                                      epoch=writer_idx,
                                      data='traversability_mask/test',
                                      device=device)

    return miou
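PixelwiseKLD (also used in the next example) is assumed to compute, per pixel, the KL divergence between the class distributions of the main and auxiliary heads, which serves as an uncertainty estimate. A minimal sketch:

import torch
import torch.nn as nn
import torch.nn.functional as F

class PixelwiseKLD(nn.Module):
    # Sketch: per-pixel KL(p_main || p_aux) between the softmax
    # distributions of the two heads; output shape (B, H, W).
    def forward(self, logits_main, logits_aux):
        log_p = F.log_softmax(logits_main, dim=1)
        log_q = F.log_softmax(logits_aux, dim=1)
        return torch.sum(log_p.exp() * (log_p - log_q), dim=1)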
Example #9
def train(trainloader,
          model,
          criterion,
          device,
          optimizer,
          class_encoding,
          writer_idx,
          class_weights=None,
          writer=None):
    """Create the model and start the training."""
    epoch_loss = 0

    # For logging the training status
    losses = AverageMeter()
    nid_losses = AverageMeter()
    kld_losses = AverageMeter()
    batch_time = AverageMeter()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()

    miou_class = MIOU(num_classes=4)

    model.train()

    kld_layer = PixelwiseKLD()
    with tqdm(total=len(trainloader)) as pbar:
        for i_iter, batch in enumerate(trainloader):
            images = batch[0].to(device)
            labels = batch[1].to(device)
            if args.use_depth:
                depths = batch[2].to(device)

            optimizer.zero_grad()

            # Upsample the output of the model to the input size
            # TODO: Change the input according to the model type
            interp = None
            if interp is not None:
                if args.use_depth:
                    if args.model == 'espdnet':
                        pred = interp(model(images, depths))
                    elif args.model == 'espdnetue':
                        (pred, pred_aux) = interp(model(images, depths))
                else:
                    if args.model == 'espdnet':
                        pred = interp(model(images))
                    elif args.model == 'espdnetue':
                        (pred, pred_aux) = interp(model(images))
                    elif args.model == 'deeplabv3':
                        output = model(images)
                        pred = interp(output['out'])
                        pred_aux = interp(output['aux'])
            else:
                if args.use_depth:
                    if args.model == 'espdnet':
                        pred = model(images, depths)
                    elif args.model == 'espdnetue':
                        (pred, pred_aux) = model(images, depths)
                else:
                    if args.model == 'espdnet':
                        pred = model(images)
                    elif args.model == 'espdnetue':
                        (pred, pred_aux) = model(images)
                    elif args.model == 'deeplabv3':
                        output = model(images)
                        pred = output['out']
                        pred_aux = output['aux']

            # Model regularizer: KL divergence between the main and
            # auxiliary predictions
            kld = kld_layer(pred, pred_aux)
            kld_losses.update(kld.mean().item(), 1)
            if args.use_uncertainty:
                loss = criterion(pred + 0.5 * pred_aux, labels,
                                 kld) * 20 + kld.mean()
            else:
                loss = criterion(pred + 0.5 * pred_aux, labels)  # + kld.mean()

            inter, union = miou_class.get_iou(pred, labels)

            inter_meter.update(inter)
            union_meter.update(union)

            losses.update(loss.item(), images.size(0))

            # Optimise
            loss.backward()
            optimizer.step()

            pbar.update(1)

    iou = inter_meter.sum / (union_meter.sum + 1e-10)
    miou = iou.mean() * 100

    # Write summary
    writer.add_scalar('traversability_mask/train/loss', losses.avg, writer_idx)
    writer.add_scalar('traversability_mask/train/nid_loss', nid_losses.avg,
                      writer_idx)
    writer.add_scalar('traversability_mask/train/mean_IoU', miou, writer_idx)
    writer.add_scalar('traversability_mask/train/traversable_plant_IoU',
                      iou[0], writer_idx)
    writer.add_scalar('traversability_mask/train/other_plant_mean_IoU', iou[1],
                      writer_idx)
    writer.add_scalar('traversability_mask/train/artificial_object_mean_IoU',
                      iou[2], writer_idx)
    writer.add_scalar('traversability_mask/train/ground_mean_IoU', iou[3],
                      writer_idx)
    writer.add_scalar('traversability_mask/train/learning_rate',
                      optimizer.param_groups[0]['lr'], writer_idx)
    #    writer.add_scalar('uest/train/kld', kld_losses.avg, writer_idx)

    #
    # Investigation of labels
    #

    # Before label conversion
    if args.use_depth:
        # model, images, depths=None, labels=None, predictions=None, class_encoding=None, writer=None, epoch=None, data=None, device=None
        in_training_visualization_img(model,
                                      images=images,
                                      depths=depths,
                                      labels=labels.long(),
                                      class_encoding=class_encoding,
                                      writer=writer,
                                      epoch=writer_idx,
                                      data='traversability_mask/train',
                                      device=device)
    else:
        in_training_visualization_img(model,
                                      images=images,
                                      labels=labels.long(),
                                      class_encoding=class_encoding,
                                      writer=writer,
                                      epoch=writer_idx,
                                      data='traversability_mask/train',
                                      device=device)

    writer_idx += 1

    print('taking snapshot ...')

    return writer_idx
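A hypothetical outer loop wiring train and test together (both functions also rely on the module-level args) might look like this; the log directory and epoch count are placeholders:

from torch.utils.tensorboard import SummaryWriter

def fit(model, trainloader, testloader, criterion, optimizer,
        class_encoding, device, epochs=30):
    writer = SummaryWriter(log_dir='./runs/traversability')  # assumed path
    writer_idx = 0
    best_miou = 0.0
    for _ in range(epochs):
        writer_idx = train(trainloader, model, criterion, device,
                           optimizer, class_encoding, writer_idx,
                           writer=writer)
        miou = test(testloader, model, criterion, device, optimizer,
                    class_encoding, writer_idx, writer=writer)
        best_miou = max(best_miou, miou)
    writer.close()
    return best_miou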
Example #10
def val_seg_ue(model,
               dataset_loader,
               criterion=None,
               num_classes=21,
               device='cuda',
               use_depth=False,
               add_criterion=None,
               greenhouse_use_trav=False):
    model.eval()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()
    batch_time = AverageMeter()
    end = time.time()

    miou_class = MIOU(num_classes=num_classes - 1)

    if criterion:
        losses = AverageMeter()

    with torch.no_grad():
        for i, batch in enumerate(dataset_loader):
            inputs = batch[0].to(device=device)
            target = batch[1].to(device=device)

            if use_depth:
                depth = batch[2].to(device=device)
                outputs = model(inputs, depth)
            else:
                outputs = model(inputs)

            if isinstance(outputs, OrderedDict):
                out_aux = outputs['aux']
                outputs = outputs['out']
            else:
                out_aux = outputs[1]
                outputs = outputs[0]

            outputs = outputs + 0.5 * out_aux

            if criterion:
                if device == 'cuda':
                    loss = criterion(outputs, target).mean()
                    if add_criterion is not None:
                        loss += add_criterion(inputs, outputs)
                    if isinstance(outputs, (list, tuple)):
                        target_dev = outputs[0].device
                        outputs = gather(outputs, target_device=target_dev)
                else:
                    loss = criterion(outputs, target)
                    if add_criterion is not None:
                        loss += add_criterion(inputs, outputs)

                losses.update(loss.item(), inputs.size(0))

            inter, union = miou_class.get_iou(outputs, target)
            inter_meter.update(inter)
            union_meter.update(union)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % 10 == 0:  # log every 10 batches
                iou = inter_meter.sum / (union_meter.sum + 1e-10)
                miou = iou.mean() * 100
                loss_ = losses.avg if criterion is not None else 0
                print_log_message(
                    "[%d/%d]\t\tBatch Time:%.4f\t\tLoss:%.4f\t\tmiou:%.4f" %
                    (i, len(dataset_loader), batch_time.avg, loss_, miou))

    iou = inter_meter.sum / (union_meter.sum + 1e-10)
    if greenhouse_use_trav:
        miou = iou.mean() * 100
    else:
        # average over plant, artificial object, and ground (indices 1-3),
        # skipping index 0
        miou = iou[[1, 2, 3]].mean() * 100

    print_info_message('Mean IoU: {0:.2f}'.format(miou))
    if criterion:
        return iou, losses.avg
    else:
        return iou, 0
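Unlike the earlier validators, val_seg_ue returns the full per-class IoU vector rather than a scalar mIoU. A usage sketch (the class names at indices 1-3 match the writer tags in Example 7; the loader and criterion are placeholders):

def report_val_seg_ue(model, val_loader, criterion):
    # iou is a per-class vector in [0, 1]
    iou, val_loss = val_seg_ue(model, val_loader, criterion=criterion,
                               num_classes=5, device='cuda')
    names = {1: 'plant', 2: 'artificial_object', 3: 'ground'}
    for idx, cls_name in names.items():
        print('{}: {:.2f}'.format(cls_name, float(iou[idx]) * 100))
    return val_loss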
Example #11
def train_seg_ue(model,
                 dataset_loader,
                 optimizer,
                 criterion,
                 num_classes,
                 epoch,
                 device='cuda',
                 use_depth=False,
                 add_criterion=None,
                 weight=1.0,
                 greenhouse_use_trav=False):
    losses = AverageMeter()
    ce_losses = AverageMeter()
    nid_losses = AverageMeter()
    batch_time = AverageMeter()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()
    end = time.time()
    model.train()

    miou_class = MIOU(num_classes=num_classes - 1)
    kld_layer = PixelwiseKLD()
    print("train_seg_ue()")
    b = 0.015
    for i, batch in enumerate(dataset_loader):
        inputs = batch[0].to(device=device)
        target = batch[1].to(device=device)

        if use_depth:
            depth = batch[2].to(device=device)
            outputs = model(inputs, depth)
        else:
            outputs = model(inputs)

        if isinstance(outputs, OrderedDict):
            out_aux = outputs['aux']
            outputs = outputs['out']
        else:
            out_aux = outputs[1]
            outputs = outputs[0]

        kld = kld_layer(outputs, out_aux)
        outputs = outputs + 0.5 * out_aux

        if device == 'cuda':
            loss = criterion(outputs, target).mean()  # + kld.mean()
            if add_criterion is not None:
                loss2 = add_criterion(inputs, outputs.to(device)) * weight
                loss += loss2

            if isinstance(outputs, (list, tuple)):
                target_dev = outputs[0].device
                outputs = gather(outputs, target_device=target_dev)
        else:
            loss = criterion(outputs, target)  # + kld.mean()
            if add_criterion is not None:
                loss2 = add_criterion(inputs, outputs) * weight
                loss += loss2

        inter, union = miou_class.get_iou(outputs, target)

        inter_meter.update(inter)
        union_meter.update(union)

        # loss "flooding": keep the training loss hovering around b
        loss = (loss - b).abs() + b
        losses.update(loss.item(), inputs.size(0))
        if add_criterion is not None:
            nid_losses.update(loss2.item(), 1)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % 10 == 0:  # log every 10 batches
            iou = inter_meter.sum / (union_meter.sum + 1e-10)
            miou = iou.mean() * 100
            print_log_message(
                "Epoch: %d[%d/%d]\t\tBatch Time:%.4f\t\tLoss:%.4f\t\tmiou:%.4f\t\tNID loss:%.4f"
                % (epoch, i, len(dataset_loader), batch_time.avg, losses.avg,
                   miou, nid_losses.avg))

    iou = inter_meter.sum / (union_meter.sum + 1e-10)
    if greenhouse_use_trav:
        miou = iou.mean() * 100
    else:
        # average over indices 1-3, skipping index 0
        miou = iou[[1, 2, 3]].mean() * 100

    return iou, losses.avg
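The loss = (loss - b).abs() + b line above is the "flooding" regularizer (Ishida et al., 2020): while the loss is above the flood level b the update is ordinary gradient descent, but once the loss drops below b the gradient direction flips, keeping the training loss hovering around b. A tiny numeric illustration:

# Flooding: |loss - b| + b with flood level b = 0.015 (as above).
b = 0.015

def flood(loss):
    return abs(loss - b) + b

print(flood(0.5))    # 0.5   -> above b: loss unchanged, normal descent
print(flood(0.015))  # 0.015 -> exactly at the flood level
print(flood(0.005))  # 0.025 -> below b: loss grows, gradient flips sign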