Example #1
def test(test_loader, data_list, model, cod_folder, coee_folder):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    check_makedirs(cod_folder)
    check_makedirs(coee_folder)
    for i, (input, _, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        with torch.no_grad():
            cod_pred, coee_pred = model(input)
        cod_pred, coee_pred = torch.sigmoid(cod_pred), torch.sigmoid(coee_pred)

        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        cod = np.uint8(cod_pred.squeeze().detach().cpu().numpy() * 255)
        coee = np.uint8(coee_pred.squeeze().detach().cpu().numpy() * 255)

        image_path, _, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        cod_path = os.path.join(cod_folder, image_name + '.png')
        coee_path = os.path.join(coee_folder, image_name + '.png')
        cv2.imwrite(cod_path, cod)
        cv2.imwrite(coee_path, coee)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
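Every example in this listing leans on an AverageMeter helper that the snippets never define. A minimal sketch in the common PSPNet/semseg style these examples appear to assume:

class AverageMeter(object):
    """Tracks the most recent value, running sum, count, and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

The same class also works when update receives a numpy array (as with the intersection/union meters in later examples), since += and / broadcast elementwise.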
Example #2
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h,
         crop_w, scales, gray_folder, colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        prediction = predict_whole_img(model, input)
        # prediction=prediction[0].numpy()
        prediction = np.argmax(prediction, axis=3)
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))

        for g in range(0, prediction.shape[0]):
            check_makedirs(gray_folder)
            gray = np.uint8(prediction[g])
            image_path, _ = data_list[i * args.batch_size_gen + g]
            image_name = image_path.split('/')[-1].split('.')[0]
            gray_path = os.path.join(gray_folder, image_name + '.png')
            cv2.imwrite(gray_path, gray)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
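predict_whole_img is not shown anywhere in this listing. A plausible sketch, assuming it runs the model on the full uncropped batch and returns softmax scores in N x H x W x C layout, which is what the np.argmax(prediction, axis=3) call above expects; the helper's exact behavior is an assumption:

import torch
import torch.nn.functional as F


def predict_whole_img(model, input):
    # Hypothetical whole-image inference: no gradients, softmax over classes,
    # channels moved last so callers can argmax over axis=3.
    with torch.no_grad():
        output = model(input.cuda(non_blocking=True))
    output = F.softmax(output, dim=1)                 # N x C x H x W
    return output.permute(0, 2, 3, 1).cpu().numpy()  # N x H x W x C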
Example #3
def test_epoch(test_loader, model, epoch, criterion):
    test_loss = 0.0
    count = 0.0
    model.eval()

    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    for data, label in test_loader:
        data, label = data.cuda(non_blocking=True), label.cuda(
            non_blocking=True).squeeze(1)
        data = data.permute(0, 2, 1)
        batch_size = data.size(0)
        logits = model(data)

        # Loss
        loss = criterion(logits, label)  # here use model's output directly
        if args.multiprocessing_distributed:
            loss = loss * batch_size
            _count = label.new_tensor([batch_size],
                                      dtype=torch.long).cuda(non_blocking=True)
            dist.all_reduce(loss)
            dist.all_reduce(_count)
            n = _count.item()
            loss = loss / n
        else:
            loss = torch.mean(loss)

        preds = logits.max(dim=1)[1]
        count += batch_size
        test_loss += loss.item() * batch_size

        intersection, union, target = intersectionAndUnionGPU(
            preds, label, args.classes)
        if args.multiprocessing_distributed:
            dist.all_reduce(intersection)
            dist.all_reduce(union)
            dist.all_reduce(target)
        intersection = intersection.cpu().numpy()
        union = union.cpu().numpy()
        target = target.cpu().numpy()
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)

    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    outstr = 'Test %d, loss: %.6f, test acc: %.6f, ' \
             'test avg acc: %.6f' % (epoch + 1,
                                     test_loss * 1.0 / count,
                                     allAcc,
                                     mAcc)

    if main_process():
        logger.info(outstr)
        # Write to tensorboard
        writer.add_scalar('loss_test', test_loss * 1.0 / count, epoch + 1)
        writer.add_scalar('mAcc_test', mAcc, epoch + 1)
        writer.add_scalar('allAcc_test', allAcc, epoch + 1)

    return allAcc
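intersectionAndUnionGPU is presumably the widely copied semseg helper: per-class intersection and union pixel counts built with torch.histc, returned as float tensors so they can be dist.all_reduce'd as above. A sketch under that assumption:

import torch


def intersectionAndUnionGPU(output, target, K, ignore_index=255):
    # output/target: integer label tensors with values in [0, K-1].
    output = output.view(-1)
    target = target.view(-1)
    output[target == ignore_index] = ignore_index
    intersection = output[output == target]
    # histc over K bins yields per-class pixel counts
    area_intersection = torch.histc(intersection.float(), bins=K, min=0, max=K - 1)
    area_output = torch.histc(output.float(), bins=K, min=0, max=K - 1)
    area_target = torch.histc(target.float(), bins=K, min=0, max=K - 1)
    area_union = area_output + area_target - area_intersection
    return area_intersection, area_union, area_target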
Example #4
def calc_acc(data_list, pred_folder, edge_folder):
    r_mae = AverageMeter()
    e_mae = AverageMeter()

    for i, (image_path, target1_path, target2_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        pred1 = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                           cv2.IMREAD_GRAYSCALE)
        pred2 = cv2.imread(os.path.join(edge_folder, image_name + '.png'),
                           cv2.IMREAD_GRAYSCALE)

        target1 = cv2.imread(target1_path, cv2.IMREAD_GRAYSCALE)
        target2 = cv2.imread(target2_path, cv2.IMREAD_GRAYSCALE)

        if pred1.shape[1] != target1.shape[1] or pred1.shape[
                0] != target1.shape[0]:
            pred1 = cv2.resize(pred1, (target1.shape[1], target1.shape[0]))
            pred2 = cv2.resize(pred2, (target2.shape[1], target2.shape[0]))

        r_mae.update(calc_mae(pred1, target1))
        e_mae.update(calc_mae(pred2, target2))

        logger.info('Evaluating {0}/{1} on image {2}, mae {3:.4f}.'.format(
            i + 1, len(data_list), image_name + '.png', r_mae.avg))

    logger.info('Test result: r_mae / e_mae: {0:.3f}/{1:.3f}'.format(
        r_mae.avg, e_mae.avg))
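calc_mae is the usual mean absolute error between a prediction map and its ground truth. A minimal sketch, assuming both maps are uint8 images in [0, 255]:

import numpy as np


def calc_mae(pred, target):
    # Normalize both maps to [0, 1] and average the absolute difference.
    pred = pred.astype(np.float64) / 255.0
    target = target.astype(np.float64) / 255.0
    return np.mean(np.abs(pred - target))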
Example #5
def cal_acc(data_list, pred_folder, derain_folder, classes, names,
            result_txt_path):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    psnr_meter = AverageMeter()
    ssim_meter = AverageMeter()

    for i, (image_path, rain_image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        derain_pred = cv2.imread(
            os.path.join(derain_folder, image_name + '.png'))
        clear_target = cv2.imread(image_path)
        psnr, ssim = caculate_psnr_ssim(derain_pred, clear_target)

        seg_pred = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                              cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        intersection, union, target = intersectionAndUnion(
            seg_pred, target, classes)

        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        psnr_meter.update(psnr)
        ssim_meter.update(ssim)

        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        logger.info(
            'Evaluating {0}/{1} on image {2}, psnr {3:.4f}, ssim {4:.4f}, accuracy {5:.4f}.'
            .format(i + 1, len(data_list), image_name + '.png', psnr, ssim,
                    accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
    PSNR = psnr_meter.avg
    SSIM = ssim_meter.avg

    logger.info('Eval result: PSNR/SSIM {:.4f}/{:.4f}.'.format(PSNR, SSIM))
    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))

    with open(result_txt_path, 'w') as result_file:
        result_file.write(
            'Eval result: PSNR/SSIM {:.4f}/{:.4f}.\n'.format(PSNR, SSIM))
        result_file.write(
            'Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.\n'.format(
                mIoU, mAcc, allAcc))
        for i in range(classes):
            result_file.write(
                'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.\n'.
                format(i, iou_class[i], accuracy_class[i], names[i]))
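caculate_psnr_ssim (the typo is the original identifier) is undefined here. A sketch using scikit-image, assuming both inputs are uint8 BGR images of identical shape; the channel_axis keyword requires scikit-image >= 0.19 (older releases use multichannel=True):

from skimage.metrics import peak_signal_noise_ratio, structural_similarity


def caculate_psnr_ssim(pred, target):
    psnr = peak_signal_noise_ratio(target, pred, data_range=255)
    ssim = structural_similarity(target, pred, channel_axis=2, data_range=255)
    return psnr, ssim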
Example #6
def validate(val_loader, model, criterion):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    model.eval()
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        data_time.update(time.time() - end)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        if target.shape[-1] == 1:
            target = target[:, 0]  # for cls
        output = model(input)
        loss = criterion(output, target)

        output = output.max(1)[1]
        intersection, union, target = intersectionAndUnionGPU(
            output, target, args.classes, args.ignore_label)
        intersection = intersection.cpu().numpy()
        union = union.cpu().numpy()
        target = target.cpu().numpy()
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)

        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        loss_meter.update(loss.item(), input.size(0))
        batch_time.update(time.time() - end)
        end = time.time()
        if (i + 1) % args.print_freq == 0:
            logger.info('Test: [{}/{}] '
                        'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                        'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                        'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) '
                        'Accuracy {accuracy:.4f}.'.format(
                            i + 1,
                            len(val_loader),
                            data_time=data_time,
                            batch_time=batch_time,
                            loss_meter=loss_meter,
                            accuracy=accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info('Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(args.classes):
        logger.info('Class_{} Result: iou/accuracy {:.4f}/{:.4f}.'.format(
            i, iou_class[i], accuracy_class[i]))
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return loss_meter.avg, mIoU, mAcc, allAcc
Example #7
def validate(test_loader, query_loader, model, score_fun):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    iter_time = AverageMeter()
    data_time = AverageMeter()
    mAP = RetrievalMAP(compute_on_step=False)
    mRR = RetrievalMRR(compute_on_step=False)
    pAt10 = RetrievalPrecision(compute_on_step=False, k=10)

    model.eval()
    end = time.time()

    # To process each query only once, we keep the query embeddings in memory;
    # queries are few compared to the catalog, so this costs little memory.
    processed_queries = []
    for (query_input, query_target, query_index) in query_loader:
        query_input = query_input.cuda(non_blocking=True)
        query_target = query_target.cuda(non_blocking=True)
        query_index = query_index.cuda(non_blocking=True)
        query_embeding = model(query_input, getFeatVec=True)
        processed_queries.append((query_embeding, query_target, query_index))

    for i, (test_input, test_target) in enumerate(test_loader):
        data_time.update(time.time() - end)
        test_input = test_input.cuda(non_blocking=True)
        test_target = test_target.cuda(non_blocking=True)
        test_embeding = model(test_input, getFeatVec=True)

        for query_embeding, query_target, query_index in processed_queries:
            scores = score_fun(test_embeding, query_embeding)
            indices = torch.broadcast_to(query_index.unsqueeze(0),
                                         scores.size())
            target = test_target.unsqueeze(1) == query_target.unsqueeze(0)

            mAP(scores, target, indices)
            mRR(scores, target, indices)
            pAt10(scores, target, indices)

        iter_time.update(time.time() - end)
        end = time.time()

        if (i + 1) % args.print_freq == 0:
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Iter {iter_time.val:.3f} ({iter_time.avg:.3f})'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    iter_time=iter_time))
    map_value = mAP.compute()
    mrr_value = mRR.compute()
    pAt10_value = pAt10.compute()

    logger.info('Val result: mAP/mRR/P@10 {:.4f}/{:.4f}/{:.4f}.'.format(
        map_value, mrr_value, pAt10_value))
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return map_value, mrr_value, pAt10_value
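The retrieval metrics come from torchmetrics; compute_on_step=False is the older constructor argument (removed in torchmetrics 0.8+). A minimal standalone usage sketch against a recent torchmetrics, where indexes tells the metric which query each score belongs to:

import torch
from torchmetrics.retrieval import RetrievalMAP

mAP = RetrievalMAP()
scores = torch.tensor([0.9, 0.2, 0.8, 0.1])        # score of each (item, query) pair
target = torch.tensor([True, False, False, True])  # relevance labels
indexes = torch.tensor([0, 0, 1, 1])               # query id of each pair
mAP.update(scores, target, indexes)
print(mAP.compute())  # mean over queries of average precision -> 0.75 here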
Example #8
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h,
         crop_w, scales, gray_folder, color_folder, colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, _, image_paths) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        # print(np.amax(input), np.amin(input), np.median(input))
        # print(np.amax(image), np.amin(image), np.median(image))
        h, w, _ = image.shape
        prediction = np.zeros((h, w, classes), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            prediction += scale_process(model, image_scale, classes, crop_h,
                                        crop_w, h, w, mean, std)
        prediction /= len(scales)
        prediction = np.argmax(prediction, axis=2)
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        gray = np.uint8(prediction)
        color = colorize(gray, colors)
        image_path, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')
        cv2.imwrite(gray_path, gray)
        color.save(color_path)
        cv2.imwrite(color_path.replace('.png', '_RGB_scale.png'), image_scale)
        # os.system('cp -r %s %s'%(image_path, color_path.replace('.png', '_RGB.png')))
        image_RGB = args.read_image(image_path)
        cv2.imwrite(color_path.replace('.png', '_RGB.png'), image_RGB)
        print('Result saved to %s; originally from %s' %
              (color_path, image_path))
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
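colorize turns a gray label map into a palette image; note the result is saved with color.save(...), i.e. it is a PIL image rather than a numpy array. A sketch in the semseg style, where palette holds one RGB triple per class:

import numpy as np
from PIL import Image


def colorize(gray, palette):
    # gray: uint8 H x W label map; palette: array-like of RGB triples.
    color = Image.fromarray(gray.astype(np.uint8)).convert('P')
    color.putpalette(np.asarray(palette, dtype=np.uint8).flatten().tolist())
    return color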
Example #9
def validate(epoch, isEval=True):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    dict_losses = DictAverageMeter()
    # switch to eval mode (unless isEval is False)
    evalStr = 'NoEval'
    if isEval:
        model.eval()
        evalStr = ''

    end = time.time()
    for i, data_raw in enumerate(val_dataloader):
        if i == opt.val_iters: break
        data = data_raw
        if opt.gpu_ids:
            data = map_data(lambda x: Variable(x.cuda(), volatile=True), data)
        else:
            data = map_data(lambda x: Variable(x, volatile=True), data)
        data = Data(*data)
        data_time.update(time.time() - end)
        output = model.forward(data)
        warpped = output.warpped
        loss = criterion(output, data)

        # measure accuracy and record loss
        losses.update(loss.data[0], opt.batch_size)
        dict_losses.update(criterion.summary(), opt.batch_size)

    all_loss = dict_losses.avg
    print('{evalStr}Validation: Epoch: [{0}]\t'
          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
          'Total Time {1:.3f}\n\t'
          'All Loss {all_loss}'.format(epoch,
                                       time.time() - end,
                                       loss=losses,
                                       all_loss=all_loss,
                                       evalStr=evalStr))
    for sid in range(data.fm[0].shape[0]):
        visualize(data,
                  warpped,
                  global_step,
                  sid,
                  opt,
                  mode='both',
                  name='{}val'.format(evalStr))
    tl.log_value('{}val/Loss'.format(evalStr), losses.val, global_step)
    tl.log_value('{}val/Learning Rate'.format(evalStr),
                 scheduler.get_lr()[0], global_step)
    # tl.log_value('val/Batch Time', batch_time.val, global_step)
    tl.log_value('{}val/Data Time'.format(evalStr), data_time.val, global_step)
    for k, v in all_loss.items():
        tl.log_value('{}val/loss/'.format(evalStr) + k, v, global_step)
    model.train()
    return losses.val
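DictAverageMeter is not shown; a hypothetical sketch that keeps one running average per key of the dict returned by criterion.summary(), matching the dict_losses.avg usage above:

class DictAverageMeter(object):
    # Hypothetical helper: per-key running averages for dict-valued losses.

    def __init__(self):
        self.sums = {}
        self.count = 0

    def update(self, val_dict, n=1):
        for k, v in val_dict.items():
            self.sums[k] = self.sums.get(k, 0.0) + float(v) * n
        self.count += n

    @property
    def avg(self):
        return {k: s / max(self.count, 1) for k, s in self.sums.items()}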
Example #10
def validate(val_loader, model, criterion):
    print('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    fscore_meter = AverageMeter()

    model.eval()
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        data_time.update(time.time() - end)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        output = model(input)
        loss = criterion(output, target)

        n = input.size(0)
        loss = torch.mean(loss)

        # metric
        accuracy, precision, recall, f_score, max_threshold = accuracy_metrics(
            output.detach().cpu().numpy(),
            target.detach().cpu().numpy(),
            threshold=args.binary_threshold,
            training=False)
        accuracy_meter.update(accuracy)
        fscore_meter.update(f_score)

        loss_meter.update(loss.item(), input.size(0))
        batch_time.update(time.time() - end)
        end = time.time()
        if (i + 1) % args.print_freq == 0:
            print('Test: [{}/{}] '
                  'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                  'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                  'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) '
                  'Accuracy {accuracy_meter.val:.4f} '
                  'Fscore {fscore_meter.val:.4f} '
                  'max_threshold {max_threshold:.2f}.'.format(
                      i + 1,
                      len(val_loader),
                      data_time=data_time,
                      batch_time=batch_time,
                      loss_meter=loss_meter,
                      accuracy_meter=accuracy_meter,
                      fscore_meter=fscore_meter,
                      max_threshold=max_threshold))

    mAcc = accuracy_meter.avg
    mFscore = fscore_meter.avg
    print('Val result: mAcc/mFscore {:.4f}/{:.4f}.'.format(mAcc, mFscore))
    print('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return loss_meter.avg, mAcc, mFscore, max_threshold
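accuracy_metrics is undefined in this listing. From the call site above it takes raw predictions and binary targets, sweeps thresholds when training=False, and returns accuracy, precision, recall, F-score and the best threshold (Example #14 below uses a four-value variant). A hypothetical sketch under those assumptions:

import numpy as np


def accuracy_metrics(pred, target, threshold=0.5, training=True):
    # pred: raw scores; target: binary {0, 1} array of the same shape.
    thresholds = [threshold] if training else np.linspace(0.1, 0.9, 9)
    best = (0.0, 0.0, 0.0, 0.0, thresholds[0])  # acc, prec, rec, f1, thr
    for thr in thresholds:
        binary = (pred > thr).astype(np.uint8)
        tp = float(np.sum((binary == 1) & (target == 1)))
        tn = float(np.sum((binary == 0) & (target == 0)))
        fp = float(np.sum((binary == 1) & (target == 0)))
        fn = float(np.sum((binary == 0) & (target == 1)))
        acc = (tp + tn) / max(tp + tn + fp + fn, 1e-10)
        prec = tp / max(tp + fp, 1e-10)
        rec = tp / max(tp + fn, 1e-10)
        f1 = 2 * prec * rec / max(prec + rec, 1e-10)
        if f1 >= best[3]:
            best = (acc, prec, rec, f1, thr)
    return best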
Example #11
    def train(self):
        bce_losses = AverageMeter()
        image_gradient_losses = AverageMeter()
        image_gradient_criterion = ImageGradientLoss().to(self.device)
        bce_criterion = nn.CrossEntropyLoss().to(self.device)

        for epoch in range(self.epoch, self.num_epoch):
            bce_losses.reset()
            image_gradient_losses.reset()
            for step, (image, gray_image, mask) in enumerate(self.data_loader):
                image = image.to(self.device)
                mask = mask.to(self.device)
                gray_image = gray_image.to(self.device)

                pred = self.net(image)

                pred_flat = pred.permute(0, 2, 3, 1).contiguous().view(
                    -1, self.num_classes)
                mask_flat = mask.squeeze(1).view(-1).long()

                # preds_flat.shape (N*224*224, 2)
                # masks_flat.shape (N*224*224, 1)
                image_gradient_loss = image_gradient_criterion(
                    pred, gray_image)
                bce_loss = bce_criterion(pred_flat, mask_flat)

                loss = bce_loss + self.gradient_loss_weight * image_gradient_loss

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                bce_losses.update(bce_loss.item(), self.batch_size)
                image_gradient_losses.update(
                    (self.gradient_loss_weight * image_gradient_loss).item(),
                    self.batch_size)
                iou = iou_loss(pred, mask)

                # save sample images
                if step % 10 == 0:
                    print(
                        f"Epoch: [{epoch}/{self.num_epoch}] | Step: [{step}/{self.image_len}] | "
                        f"Bce Loss: {bce_losses.avg:.4f} | Image Gradient Loss: {image_gradient_losses.avg:.4f} | "
                        f"IOU: {iou:.4f}")
                if step % self.sample_step == 0:
                    self.save_sample_imgs(image[0], mask[0],
                                          torch.argmax(pred[0], 0),
                                          self.sample_dir, epoch, step)
                    print('[*] Saved sample images')

            torch.save(
                self.net.state_dict(),
                f'{self.checkpoint_dir}/MobileHairNet_epoch-{epoch}.pth')
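iou_loss is used here as a metric on the two-channel mask logits. A hypothetical sketch computing foreground intersection-over-union after an argmax over the class dimension:

import torch


def iou_loss(pred, mask, eps=1e-6):
    # pred: N x 2 x H x W logits; mask: N x 1 x H x W binary ground truth.
    pred_fg = torch.argmax(pred, dim=1).float()
    mask_fg = mask.squeeze(1).float()
    inter = (pred_fg * mask_fg).sum()
    union = pred_fg.sum() + mask_fg.sum() - inter
    return (inter + eps) / (union + eps)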
Example #12
def retrieval_validate(image_loader, sketch_loader, model, score_fun):
    if main_process():
        logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    iter_time = AverageMeter()
    data_time = AverageMeter()
    mAP = RetrievalMAP(compute_on_step=False)

    model.eval()
    end = time.time()

    # To process each sketch only once, we keep the sketch embeddings in memory;
    # sketches are few compared to the image catalog, so this costs little memory.
    processed_queries = []
    for (sketch_input, sketch_target, sketch_index) in sketch_loader:
        sketch_input = sketch_input.cuda(non_blocking=True)
        sketch_target = sketch_target.cuda(non_blocking=True)
        sketch_index = sketch_index.cuda(non_blocking=True)
        sketchFeatVec, _ = model(sketch_input, mode='sketch')
        processed_queries.append((sketchFeatVec, sketch_target, sketch_index))

    for i, (image_input, image_target) in enumerate(image_loader):
        data_time.update(time.time() - end)
        image_input = image_input.cuda(non_blocking=True)
        image_target = image_target.cuda(non_blocking=True)
        imageFeatVec, _ = model(image_input, mode='image')

        for sketchFeatVec, sketch_target, sketch_index in processed_queries:
            scores = score_fun(imageFeatVec, sketchFeatVec)
            indices = torch.broadcast_to(sketch_index.unsqueeze(0),
                                         scores.size())
            target = image_target.unsqueeze(1) == sketch_target.unsqueeze(0)

            mAP(scores, target, indices)

        iter_time.update(time.time() - end)
        end = time.time()

        if (i + 1) % args.test_print_freq == 0 and main_process():
            logger.info(
                'Test batch: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Iter {iter_time.val:.3f} ({iter_time.avg:.3f})'.format(
                    i + 1,
                    len(image_loader),
                    data_time=data_time,
                    iter_time=iter_time))
    map_value = mAP.compute()  # computes across all distributed processes
    if main_process():
        logger.info('Val result: mAP {:.4f}.'.format(map_value))
        logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return map_value
Example #13
def test(test_loader, data_list, model, classes, base_size, crop_h, crop_w,
         scales, binary_folder):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        h, w, _ = image.shape
        prediction = np.zeros((h, w), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            prediction += scale_process(model, image_scale, classes, crop_h,
                                        crop_w, h, w)

        prediction /= len(scales)

        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(binary_folder)
        image_path, _ = data_list[i]
        image_name = os.path.split(image_path)[-1]
        image_name = image_name.replace('.png', '.npy')
        save_path = os.path.join(binary_folder, image_name)
        np.save(save_path, prediction)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
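check_makedirs, called by several of the test functions, is just a guard around os.makedirs; a sketch:

import os


def check_makedirs(dir_name):
    # Create the directory (including parents) if it does not exist yet.
    if not os.path.isdir(dir_name):
        os.makedirs(dir_name)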
Example #14
def cal_acc(data_list, pred_folder, classes):
    accuracy_meter = AverageMeter()
    fscore_meter = AverageMeter()

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = os.path.split(image_path)[-1]
        pred = cv2.imread(os.path.join(pred_folder, image_name),
                          cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        accuracy, precision, recall, f_score = accuracy_metrics(pred, target)
        accuracy_meter.update(accuracy)
        fscore_meter.update(f_score)

        logger.info('Evaluating {0}/{1} on image {2}, fscore {3:.4f}.'.format(
            i + 1, len(data_list), image_name, fscore_meter.val))

    mAcc = accuracy_meter.avg
    mFscore = fscore_meter.avg
    logger.info('Eval result: mAcc/mFscore {:.4f}/{:.4f}.'.format(
        mAcc, mFscore))
Example #15
def cal_acc(data_list, pred_folder, classes, names):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    colors = np.loadtxt(args.colors_path).astype('uint8')
    names = [line.rstrip('\n') for line in open(args.names_path)]

    for i, (image_path, target_path) in enumerate(data_list):
        fd_name = image_path.split('/')[-3]
        image_name = image_path.split('/')[-1].split('.')[0]
        pred = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                          cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)

        color = colorize(target, colors)
        color_path = os.path.join(
            '/local/xjqi/2ddata/tasks/semseg_50_bn/exp/ade20k/pspnet50/result/epoch_100/val/ss/gt/',
            fd_name + "_" + image_name + '.png')
        color.save(color_path)

        intersection, union, target = intersectionAndUnion(
            pred, target, classes)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        # logger.info('Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(
        #     i + 1, len(data_list), image_name + '.png', accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
Example #16
def cal_acc(data_list, pred_folder, classes, names):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        pred = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                          cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        intersection, union, target = intersectionAndUnion(
            pred, target, classes)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        logger.info(
            'Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(
                i + 1, len(data_list), image_name + '.png', accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
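The CPU-side intersectionAndUnion mirrors the GPU variant sketched after Example #3, with np.histogram in place of torch.histc; a sketch in the same semseg style:

import numpy as np


def intersectionAndUnion(output, target, K, ignore_index=255):
    # output/target: integer label arrays of identical shape.
    output = output.reshape(output.size).copy()
    target = target.reshape(target.size)
    output[np.where(target == ignore_index)[0]] = ignore_index
    intersection = output[np.where(output == target)[0]]
    area_intersection, _ = np.histogram(intersection, bins=np.arange(K + 1))
    area_output, _ = np.histogram(output, bins=np.arange(K + 1))
    area_target, _ = np.histogram(target, bins=np.arange(K + 1))
    area_union = area_output + area_target - area_intersection
    return area_intersection, area_union, area_target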
Example #17
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h,
         crop_w, scales, gray_folder, color_folder, colors):
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        h, w, _ = image.shape
        prediction = np.zeros((h, w, classes), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            prediction += scale_process(model, image_scale, classes, crop_h,
                                        crop_w, h, w, mean, std)
        prediction /= len(scales)
        prediction = np.argmax(prediction, axis=2)
        batch_time.update(time.time() - end)
        end = time.time()

        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        gray = np.uint8(prediction)
        color = colorize(gray, colors)
        image_path, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')
        cv2.imwrite(gray_path, gray)
        color.save(color_path)
Example #18
def train_warmup(train_loader, model, optimizer, criterion, epoch, writer,
                 count):
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    phase = 'train'
    model.train()
    for i, (input, target) in enumerate(train_loader):
        ### Adjust learning rate
        lr = adjust_lr(optimizer,
                       epoch,
                       count,
                       train_cfg['init_lr'],
                       data_cfg['iterations_per_epoch'],
                       method='warmup')
        if train_cfg['cuda']:
            target = target.cuda()
            input = input.cuda()

        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # compute output
        with torch.set_grad_enabled(phase == 'train'):
            output = model(input_var)
            loss = criterion(output, target_var)
            acc1, acc5 = accu(output.data, target_var, topk=(1, 5))

        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        count += 1
        writer.add_scalar('lr', lr, global_step=count)
        writer = add_summery(writer, 'train', loss.data, acc1, acc5, count)

        if i % train_cfg['print_que'] == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'lr: [{3}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      lr,
                      loss=losses,
                      top1=top1,
                      top5=top5))

    return count
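accu is the standard top-k accuracy helper from the classic ImageNet training script; a sketch:

import torch


def accu(output, target, topk=(1,)):
    # Returns the top-k accuracies (in percent), one tensor per k.
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res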
Example #19
def train(train_loader, model, optimizer, epoch, lr_schedule, summary_writer):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    model.train()

    end = time.time()
    for it, (inputs, targets) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # update learning rate
        iteration = epoch * len(train_loader) + it
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr_schedule[iteration]

        # ============ forward passes ... ============
        outputs = model(inputs.to(device))
        loss, loss_vars = model.calculate_loss(outputs)

        # ============ backward and optim step ... ============
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # ============ misc ... ============
        losses.update(loss.item(), inputs[0].size(0))
        batch_time.update(time.time() - end)
        end = time.time()
        if it % 50 == 0:
            # update the tensorboard
            summary_writer.add_scalar('lr', lr_schedule[iteration], iteration)
            for k, v in loss_vars.items():
                summary_writer.add_scalar(k, v, iteration)

            if it % 500 == 0:
                visuals_dict = model.get_current_visuals()
                for k, v in visuals_dict.items():
                    grid = torchvision.utils.make_grid(v)
                    summary_writer.add_image(k, grid, iteration)
            summary_writer.flush()

            # update the logger
            logger.info("Epoch: [{0}][{1}]\t"
                        "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
                        "Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
                        "Loss {loss.val:.4f} ({loss.avg:.4f})\t"
                        "Lr: {lr:.4f}".format(
                            epoch,
                            it,
                            batch_time=batch_time,
                            data_time=data_time,
                            loss=losses,
                            lr=optimizer.param_groups[0]["lr"],
                        ))
    return (epoch, losses.avg)
Example #20
def train(trainDataLoader, pfldNet, auxiliaryNet, optimizer, epoch, criterion,
          train_batchsize):
    print("===> Train:")
    losses = AverageMeter()
    for img, landmark_gt, attribute_gt, euler_angle_gt in trainDataLoader:
        img = img.to(device)
        landmark_gt = landmark_gt.to(device)
        attribute_gt = attribute_gt.to(device)
        euler_angle_gt = euler_angle_gt.to(device)
        pfldNet = pfldNet.to(device)
        auxiliaryNet = auxiliaryNet.to(device)
        features, landmarks = pfldNet(img)
        angle = auxiliaryNet(features)

        weighted_loss, loss = criterion(attribute_gt, landmark_gt,
                                        euler_angle_gt, angle, landmarks,
                                        opt.train_batchsize)

        optimizer.zero_grad()
        weighted_loss.backward()
        optimizer.step()
        losses.update(loss.item())

    return weighted_loss, loss
Example #21
def cal_acc(data_list, pred_folder, classes, names, datasetname):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        scene_name = image_path.split('/')[-3]
        pred = cv2.imread(
            os.path.join(pred_folder, scene_name + '_' + image_name + '.png'),
            cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        intersection, union, target = intersectionAndUnion(
            pred, target, classes)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        logger.info(
            'Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(
                i + 1, len(data_list), image_name + '.png', accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)

    if datasetname == 'scannet':
        # ScanNet scores only 20 valid categories, so mIoU/mAcc are averaged
        # over those class ids alone.
        mIoU = 0.0
        mAcc = 0.0
        category_cnt = 0
        for i in range(iou_class.shape[0]):
            if i in [
                    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33,
                    34, 36, 39
            ]:
                mIoU += iou_class[i]
                mAcc += accuracy_class[i]
                category_cnt += 1
        mIoU /= category_cnt
        mAcc /= category_cnt

    else:
        # For datasets other than ScanNet, average over all classes.
        mIoU = np.mean(iou_class)
        mAcc = np.mean(accuracy_class)

    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
Example #22
def validate(val_loader, model, criterion, if_print=False):
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    top3 = AverageMeter()
    model.eval()
    for i, (input, target) in enumerate(val_loader):
        input_var, target_var = input, target
        if train_cfg['cuda']:
            input_var = input_var.cuda()
            target_var = target_var.cuda()

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        acc1, acc3, acc5 = accu(output, target_var, topk=(1, 3, 5))

        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))
        top3.update(acc3[0], input.size(0))

        if if_print:
            print('Test: [{0}/{1}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@3 {top3.val:.3f} ({top3.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      i,
                      len(val_loader),
                      loss=losses,
                      top1=top1,
                      top3=top3,
                      top5=top5))

    print(
        ' * Acc@1 {top1.avg:.3f} Acc@3 {top3.avg:.3f} Acc@5 {top5.avg:.3f} Loss {loss.avg:.3f}'
        .format(top1=top1, top3=top3, top5=top5, loss=losses))

    return top1.avg, top3.avg, top5.avg, losses.avg
Example #23
def val(dataset, model, optimizer, criterion, writer, count, epoch):
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to eval mode
    phase = 'test'
    iter_len = dataset.__len__()
    model.eval()
    # build the iterator once instead of re-creating the DataLoader every step
    batch_iterator = iter(
        DataLoader(dataset,
                   train_cfg['per_batch_size'],
                   shuffle=True,
                   num_workers=args.num_workers,
                   collate_fn=detection_collate))
    for i in range(0, iter_len // train_cfg['per_batch_size']):
        # lr = adjust_lr_test(optimizer, count, train_cfg['init_lr'])
        input, target = next(batch_iterator)
        if train_cfg['cuda']:
            target = target.cuda()
            input = input.cuda()

        # input_var = torch.autograd.Variable(input)
        # target_var = torch.autograd.Variable(target)

        # compute output (no gradients needed during validation)
        with torch.set_grad_enabled(phase == 'train'):
            output = model(input)
            loss = criterion(output, target.long())
            acc1, acc5 = accu(output.data, target, topk=(1, 5))

        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))
        count += 1
    writer.add_scalar('val/loss', losses.avg, global_step=count)
    writer.add_scalar('val/acc_top1', top1.avg, global_step=count)
    writer.add_scalar('val/acc_top5', top5.avg, global_step=count)

    print('Epoch: [{0}]\t'
          'lr: [{1}]\t'
          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
          'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
          'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
              epoch,
              optimizer.param_groups[0]['lr'],
              loss=losses,
              top1=top1,
              top5=top5))

    return losses.avg, top1.avg, top5.avg
Example #24
def cal_acc(data_list,
            pred_folder,
            classes,
            names,
            is_med=False,
            label_mapping=None):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = get_image_name(target_path, is_med)
        pred_fp = os.path.join(pred_folder, image_name)
        # print('pred : %s \n target : %s' %(pred_fp, target_path))
        pred = cv2.imread(pred_fp, cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        if label_mapping is not None:
            target = convert_label(target, label_mapping)
        # print('pred: %s target: %s' %(np.unique(pred), np.unique(target)))

        intersection, union, target = intersectionAndUnion(
            pred, target, classes)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        dice = sum(intersection_meter.val[1:]) * 2 / (
            sum(intersection_meter.val[1:]) + sum(union_meter.val[1:]) + 1e-10)
        logger.info(
            'Evaluating {0}/{1} on image {2}, accuracy {3:.4f}, dice {4:.4f}.'.
            format(i + 1, len(data_list), image_name, accuracy, dice))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    dice_class = intersection_meter.sum * 2 / (union_meter.sum +
                                               intersection_meter.sum + 1e-10)
    mDice = np.mean(dice_class[1:])
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info(
        'Eval result: mIoU/mAcc/allAcc/mDice {:.4f}/{:.4f}/{:.4f}/{:.4f}.'.
        format(mIoU, mAcc, allAcc, mDice))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy/dice {:.4f}/{:.4f}/{:.4f}, name: {}.'
            .format(i, iou_class[i], accuracy_class[i], dice_class[i],
                    names[i]))
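A quick sanity check on the Dice expression used above: intersectionAndUnion returns union = |P| + |T| - I, so |P| + |T| = union + intersection, and Dice = 2I / (|P| + |T|) is exactly the intersection * 2 / (union + intersection) form in the code. A toy example with assumed per-class areas:

import numpy as np

I = np.array([50.0, 30.0, 10.0])   # per-class intersection areas
U = np.array([100.0, 50.0, 40.0])  # per-class union areas
dice_class = I * 2 / (U + I + 1e-10)
print(dice_class)               # [0.6667 0.75   0.4   ]
print(np.mean(dice_class[1:]))  # mDice excludes class 0 (background)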
Example #25
def validate(val_loader, model, criterion):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    loss_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    model.eval()
    for i, (input, target) in tqdm(enumerate(val_loader),
                                   total=len(val_loader)):
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        output = model(input)
        if args.zoom_factor != 8:
            output = F.interpolate(output,
                                   size=target.size()[1:],
                                   mode='bilinear',
                                   align_corners=True)
        loss = criterion(output, target)

        loss = torch.mean(loss)

        output = F.softmax(output, dim=1).max(1)[1]
        intersection, union, target = intersectionAndUnionGPU(
            output, target, args.classes, args.ignore_label)
        intersection = intersection.cpu().numpy()
        union = union.cpu().numpy()
        target = target.cpu().numpy()
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)

        loss_meter.update(loss.item(), input.size(0))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
    logger.info('Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(args.classes):
        logger.info('Class_{} Result: iou/accuracy {:.4f}/{:.4f}.'.format(
            i, iou_class[i], accuracy_class[i]))
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return loss_meter.avg, mIoU, mAcc, allAcc
Example #26
def cal_acc(data_list, pred_folder, classes, names):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    from sotabencheval.semantic_segmentation import ADE20KEvaluator
    evaluator = ADE20KEvaluator(model_name='PSPNet (ResNet-50)',
                                paper_arxiv_id='1612.01105')

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        pred = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                          cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        intersection, union, target = intersectionAndUnion(
            pred, target, classes)
        cache_exists = evalintersectionAndUnion(pred, target, classes,
                                                evaluator)

        if cache_exists:
            break

        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)

    if cache_exists:
        evaluator.save()
        return

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    print('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        print('Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
            i, iou_class[i], accuracy_class[i], names[i]))
Example #27
def cal_acc(data_list, pred_folder, classes, names, pred_path_list=None,
            target_path_list=None):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    if args.test_in_nyu_label_space:
        classes = 41

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        if pred_path_list is not None:
            pred_path = pred_path_list[i]
        else:
            pred_path = os.path.join(pred_folder, image_name + '.png')
        pred = cv2.imread(pred_path, cv2.IMREAD_GRAYSCALE)
        if target_path_list is not None:
            target_path = target_path_list[i]
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        if i < 10:
            print(pred_path, target_path)

        intersection, union, target = intersectionAndUnion(
            pred, target, classes, args.ignore_label)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        logger.info(
            'Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(
                i + 1, len(data_list), image_name + '.png', accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
Example #28
def main():
    opt = TestOptions().parse()
    # preprocess data
    all_stable_frames, fps = get_images(opt.video_root + 'stable/' +
                                        str(opt.video_index) + '.avi')
    all_unstable_frames, fps = get_images(opt.video_root + 'unstable/' +
                                          str(opt.video_index) + '.avi')

    # generate data flow
    pred_frames_for_input = []
    singleVideoData = PreprocessDataSet(all_stable_frames, all_unstable_frames,
                                        pred_frames_for_input, opt)
    eval_data_loader = torch.utils.data.DataLoader(singleVideoData)
    model, criterion = create_model(opt)
    checkpoint = torch.load(opt.checkpoint_path,
                            map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])
    data_time = AverageMeter()
    end = time.time()
    # go through model to get output
    idx = 0
    pred_frames = []
    if opt.instnorm:
        model.train()
    else:
        model.eval()
    if opt.fake_test:
        print("fake test")
        pred_frames = []
        for i in range(50):
            for j, data in enumerate(eval_data_loader):
                if opt.gpu_ids:
                    data = map_data(
                        lambda x: Variable(x.cuda(), volatile=True), data)
                else:
                    data = map_data(lambda x: Variable(x, volatile=True), data)
                data_time.update(time.time() - end)
                data = Data(*data)
                output = model.forward(data)
                warpped = output.warpped
                pred_frames += data.prefix
                for u, w, t in zip(data.unstable, warpped, data.target):
                    pred_frames += (u, w, torch.abs(w - t))
                # visualize(data, warpped, i, 0, opt, 'save')

        pred_frames = list(map(lambda x: tensor2im(x.data), pred_frames))

    else:
        for i in range(0, len(all_stable_frames) - 1):
            if i % 100 == 0:
                print("=====> %d/%d" % (i, len(all_stable_frames)))
            for j, data in enumerate(eval_data_loader):
                if opt.gpu_ids:
                    data = map_data(
                        lambda x: Variable(x.cuda(), volatile=True), data)
                else:
                    data = map_data(lambda x: Variable(x, volatile=True), data)
                data_time.update(time.time() - end)
                data = Data(*data)
                # print(data)
                output = model.forward(data)
                warpped = output.warpped
                # save outputs
                # if (i < opt.prefix[0]):
                #     last_frame = all_stable_frames[0]
                # else:
                #     last_frame = pred_frames_for_input[len(pred_frames_for_input) + 1 - opt.prefix[0]]
                # print(data.prefix[-1][0].data.shape)
                last_frame = output_to_input([data.prefix[-1]], opt)
                pred_frames.append(
                    draw_imgs(output_to_input(warpped,
                                              opt), all_stable_frames[i],
                              all_unstable_frames[i], last_frame))
                pred_frames_for_input.append(output_to_input(warpped, opt))
                eval_data_loader = torch.utils.data.DataLoader(
                    PreprocessDataSet(all_stable_frames, all_unstable_frames,
                                      pred_frames_for_input, opt))
                # if i < 100: visualize(data, warpped, i, 0, opt, 'save')

    # print video
    generate_video(pred_frames, fps, opt)
Example #29
def test(test_loader, data_list, model, classes, mean, std, gray_folder,
         color_folder, derain_folder, edge_folder, colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (_, input, _, _) in enumerate(test_loader):
        data_time.update(time.time() - end)

        with torch.no_grad():
            derain_outs, seg_outs, edge_outs = model(input)

        derain_outs = derain_outs.cpu().numpy()
        seg_outs = seg_outs.cpu().numpy()
        edge_outs = edge_outs.cpu().numpy()

        # process derain img
        derain_outs = np.transpose(derain_outs, (0, 2, 3, 1)).squeeze(axis=0)
        derain_outs *= std
        derain_outs += mean
        derain_outs = np.clip(derain_outs, a_max=255, a_min=0)
        derain_outs = derain_outs.astype('uint8')
        derain_outs = cv2.cvtColor(derain_outs, cv2.COLOR_RGB2BGR)

        # process seg pred
        seg_outs = np.transpose(seg_outs, (0, 2, 3, 1))
        seg_outs = np.argmax(seg_outs, axis=3).squeeze(axis=0)

        # process edge pred
        edge_outs = np.transpose(edge_outs,
                                 (0, 2, 3, 1)).squeeze(axis=3).squeeze(axis=0)
        edge_outs = np.clip(edge_outs, a_max=1, a_min=0)
        edge_outs = (edge_outs * 255).astype('uint8')

        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        check_makedirs(derain_folder)
        check_makedirs(edge_folder)

        gray = np.uint8(seg_outs)
        color = colorize(gray, colors)
        image_path, _, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')
        derain_path = os.path.join(derain_folder, image_name + '.png')
        edge_path = os.path.join(edge_folder, image_name + '.png')
        cv2.imwrite(gray_path, gray)
        cv2.imwrite(derain_path, derain_outs)
        cv2.imwrite(edge_path, edge_outs)
        color.save(color_path)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
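
Example #29 saves the derained prediction by undoing the dataset normalization: transpose CHW to HWC, multiply by std, add mean, clip to the 8-bit range, and convert RGB to BGR for OpenCV. A standalone sketch of that post-processing, assuming per-channel mean/std given in 0-255 pixel space as the code above implies:

import cv2
import numpy as np

def save_denormalized(pred_chw, mean, std, path):
    # CHW float prediction -> HWC image in the original pixel range
    img = np.transpose(pred_chw, (1, 2, 0))
    img = np.clip(img * std + mean, a_min=0, a_max=255).astype('uint8')
    # OpenCV writes images in BGR channel order
    cv2.imwrite(path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))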
Example #30
0
def triplet_train(train_loader, model, criterion, optimizer, epoch,
                  classif_criterion):
    iter_time = AverageMeter()
    data_time = AverageMeter()
    transfer_time = AverageMeter()
    batch_time = AverageMeter()
    metric_time = AverageMeter()
    loss_meter = AverageMeter()
    loss_sketch_meter = AverageMeter()
    loss_positive_meter = AverageMeter()
    loss_negative_meter = AverageMeter()
    loss_triplet_meter = AverageMeter()
    top1_meter_sketch = AverageMeter()
    top1_meter_positive = AverageMeter()
    top1_meter_negative = AverageMeter()

    model.train()
    end = time.time()
    sum_epochs = args.contras_epochs + args.triplet_epochs
    max_iter = sum_epochs * len(train_loader)
    for i, (sketch_input, positive_input, negative_input, sketch_target,
            negative_target) in enumerate(train_loader):
        data_time.update(time.time() - end)
        if args.time_breakdown:
            last = time.time()

        sketch_input = sketch_input.cuda(non_blocking=True)
        positive_input = positive_input.cuda(non_blocking=True)
        negative_input = negative_input.cuda(non_blocking=True)
        sketch_target = sketch_target.cuda(non_blocking=True)
        negative_target = negative_target.cuda(non_blocking=True)
        if args.time_breakdown:
            torch.cuda.synchronize()
            transfer_time.update(time.time() - last)
            last = time.time()

        (sketchFeatVec, positiveFeatVec,
         negativeFeatVec), (sketch_output, positive_output,
                            negative_output) = model(sketch_input,
                                                     positive_input,
                                                     negative_input)
        loss_sketch = smooth_loss(
            sketch_output, sketch_target, args.label_smoothing
        ) if args.label_smoothing else classif_criterion(
            sketch_output, sketch_target)
        loss_positive = smooth_loss(
            positive_output, sketch_target, args.label_smoothing
        ) if args.label_smoothing else classif_criterion(
            positive_output, sketch_target)
        loss_negative = smooth_loss(
            negative_output, negative_target, args.label_smoothing
        ) if args.label_smoothing else classif_criterion(
            negative_output, negative_target)
        sketchFeatVec = F.normalize(sketchFeatVec)
        positiveFeatVec = F.normalize(positiveFeatVec)
        negativeFeatVec = F.normalize(negativeFeatVec)
        loss_triplet = criterion(sketchFeatVec, positiveFeatVec,
                                 negativeFeatVec)

        # fixed relative weights on the three classification heads
        loss_sketch = loss_sketch * 0.5
        loss_positive = loss_positive * 0.25
        loss_negative = loss_negative * 0.25
        # cosine-anneal the classification terms while the triplet term
        # is ramped up linearly with the epoch index
        classif_decay = 0.5 * (1 + math.cos(epoch * math.pi / sum_epochs))
        loss = 2.0 * loss_triplet * epoch + classif_decay * (
            loss_sketch + loss_positive + loss_negative)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if args.time_breakdown:
            torch.cuda.synchronize()
            batch_time.update(time.time() - last)
            last = time.time()

        n = sketch_input.size(0)
        if args.multiprocessing_distributed:
            # scale each loss by the local batch size, sum across workers,
            # then divide by the global sample count
            with torch.no_grad():
                loss = loss.detach() * n
                loss_triplet = loss_triplet.detach() * n
                loss_sketch = loss_sketch.detach() * n
                loss_positive = loss_positive.detach() * n
                loss_negative = loss_negative.detach() * n
                count = sketch_target.new_tensor([n], dtype=torch.long)
                for t in (loss, count, loss_triplet, loss_sketch,
                          loss_positive, loss_negative):
                    dist.all_reduce(t)
                n = count.item()
                loss = loss / n
                loss_triplet = loss_triplet / n
                loss_sketch = loss_sketch / n
                loss_positive = loss_positive / n
                loss_negative = loss_negative / n
        loss_meter.update(loss.item(), n)
        loss_triplet_meter.update(loss_triplet.item(), n)
        loss_sketch_meter.update(loss_sketch.item(), n)
        loss_positive_meter.update(loss_positive.item(), n)
        loss_negative_meter.update(loss_negative.item(), n)

        # classification metrics
        top1_s, = cal_accuracy(sketch_output, sketch_target, topk=(1, ))
        top1_p, = cal_accuracy(positive_output, sketch_target, topk=(1, ))
        top1_n, = cal_accuracy(negative_output, negative_target, topk=(1, ))
        n = sketch_input.size(0)
        if args.multiprocessing_distributed:
            with torch.no_grad():
                top1_s = top1_s * n
                top1_p = top1_p * n
                top1_n = top1_n * n
                count = sketch_target.new_tensor([n], dtype=torch.long)
                for t in (top1_s, top1_p, top1_n, count):
                    dist.all_reduce(t)
                n = count.item()
                top1_s = top1_s / n
                top1_p = top1_p / n
                top1_n = top1_n / n
        top1_meter_sketch.update(top1_s.item(), n)
        top1_meter_positive.update(top1_p.item(), n)
        top1_meter_negative.update(top1_n.item(), n)

        if args.time_breakdown:
            torch.cuda.synchronize()
            metric_time.update(time.time() - last)
        iter_time.update(time.time() - end)
        end = time.time()

        # estimate the remaining training time
        current_iter = epoch * len(train_loader) + i + 1
        remain_iter = max_iter - current_iter
        remain_time = remain_iter * iter_time.avg
        t_m, t_s = divmod(remain_time, 60)
        t_h, t_m = divmod(t_m, 60)
        remain_time = '{:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m),
                                                    int(t_s))

        if ((i + 1) % args.print_freq == 0) and main_process():
            logger.info((
                'Epoch: [{}/{}][{}/{}] '
                'Time {iter_time.val:.3f} ({iter_time.avg:.3f}) '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) ' +
                ('Transfer {transfer_time.val:.3f} ({transfer_time.avg:.3f}) '
                 'Batch {batch_time.val:.4f} ({batch_time.avg:.4f}) '
                 'Metric {metric_time.val:.3f} ({metric_time.avg:.3f}) '
                 if args.time_breakdown else '') + 'Remain {remain_time} '
                'Loss total/triplet/classifSketch/classifPos/classifNeg '
                '{loss_meter.val:.4f}/{triplet_loss_meter.val:.4f}/{sketch_loss_meter.val:.4f}/{positive_loss_meter.val:.4f}/{negative_loss_meter.val:.4f} '
                'Top1classif sketch/positive/negative {top1_s.val:.3f}/{top1_p.val:.3f}/{top1_n.val:.3f}'
            ).format(epoch + 1,
                     sum_epochs,
                     i + 1,
                     len(train_loader),
                     iter_time=iter_time,
                     data_time=data_time,
                     transfer_time=transfer_time,
                     batch_time=batch_time,
                     metric_time=metric_time,
                     remain_time=remain_time,
                     triplet_loss_meter=loss_triplet_meter,
                     sketch_loss_meter=loss_sketch_meter,
                     positive_loss_meter=loss_positive_meter,
                     negative_loss_meter=loss_negative_meter,
                     loss_meter=loss_meter,
                     top1_s=top1_meter_sketch,
                     top1_p=top1_meter_positive,
                     top1_n=top1_meter_negative))

    if main_process():
        logger.info(
            'Train result at epoch [{}/{}]: Loss total/triplet/classifSketch/classifPos/classifNeg {:.4f}/{:.4f}/{:.4f}/{:.4f}/{:.4f}. '
            'Top1classif sketch/positive/negative {top1_s.avg:.3f}/{top1_p.avg:.3f}/{top1_n.avg:.3f}'
            .format(epoch + 1,
                    sum_epochs,
                    loss_meter.avg,
                    loss_triplet_meter.avg,
                    loss_sketch_meter.avg,
                    loss_positive_meter.avg,
                    loss_negative_meter.avg,
                    top1_s=top1_meter_sketch,
                    top1_p=top1_meter_positive,
                    top1_n=top1_meter_negative))
    return loss_meter.avg
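
The total loss in Example #30 follows an epoch-dependent schedule: the three classification terms share a weight that cosine-decays from 1 to 0 over contras_epochs + triplet_epochs, while the triplet term's weight grows linearly with the epoch index (so it contributes nothing in epoch 0). Isolated as a sketch for clarity:

import math

def loss_weights(epoch, sum_epochs):
    # classification weight: cosine decay from 1 at epoch 0 toward 0
    classif_decay = 0.5 * (1 + math.cos(epoch * math.pi / sum_epochs))
    # triplet weight: linear ramp with the zero-based epoch index
    triplet_weight = 2.0 * epoch
    return triplet_weight, classif_decay

# total = triplet_weight * loss_triplet
#         + classif_decay * (loss_sketch + loss_positive + loss_negative)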