Code example #1
def test_epoch(test_loader, model, epoch, criterion):
    test_loss = 0.0
    count = 0.0
    model.eval()

    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    for data, label in test_loader:
        data, label = data.cuda(non_blocking=True), label.cuda(
            non_blocking=True).squeeze(1)
        data = data.permute(0, 2, 1)
        batch_size = data.size(0)
        logits = model(data)

        # Loss
        loss = criterion(logits, label)  # here use model's output directly
        if args.multiprocessing_distributed:
            loss = loss * batch_size
            _count = label.new_tensor([batch_size],
                                      dtype=torch.long).cuda(non_blocking=True)
            dist.all_reduce(loss), dist.all_reduce(_count)
            n = _count.item()
            loss = loss / n
        else:
            loss = torch.mean(loss)

        preds = logits.max(dim=1)[1]
        count += batch_size
        test_loss += loss.item() * batch_size

        intersection, union, target = intersectionAndUnionGPU(
            preds, label, args.classes)
        if args.multiprocessing_distributed:
            dist.all_reduce(intersection), dist.all_reduce(
                union), dist.all_reduce(target)
        intersection, union, target = intersection.cpu().numpy(), union.cpu(
        ).numpy(), target.cpu().numpy()
        intersection_meter.update(intersection), union_meter.update(
            union), target_meter.update(target)

    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    outstr = 'Test %d, loss: %.6f, test acc: %.6f, ' \
             'test avg acc: %.6f' % (epoch + 1,
                                     test_loss * 1.0 / count,
                                     allAcc,
                                     mAcc)

    if main_process():
        logger.info(outstr)
        # Write to tensorboard
        writer.add_scalar('loss_test', test_loss * 1.0 / count, epoch + 1)
        writer.add_scalar('mAcc_test', mAcc, epoch + 1)
        writer.add_scalar('allAcc_test', allAcc, epoch + 1)

    return allAcc
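Note: every snippet on this page assumes an AverageMeter helper that the page itself does not show. As a reference, here is a minimal sketch consistent with how the examples use it (an update(val, n=1) method plus val, avg, sum and count attributes); the meters in the individual projects may differ in detail.

class AverageMeter(object):
    """Tracks the latest value and a running average of a quantity."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # 'val' may be a scalar or a NumPy array (e.g. per-class counts);
        # 'n' is the number of samples the value was averaged over.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count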
Code example #2
File: test.py Project: lixin666/MGL
def test(test_loader, data_list, model, cod_folder, coee_folder):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    check_makedirs(cod_folder)
    check_makedirs(coee_folder)
    for i, (input, _, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        with torch.no_grad():
            cod_pred, coee_pred = model(input)
        cod_pred, coee_pred = torch.sigmoid(cod_pred), torch.sigmoid(coee_pred)

        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        cod = np.uint8(cod_pred.squeeze().detach().cpu().numpy() * 255)
        coee = np.uint8(coee_pred.squeeze().detach().cpu().numpy() * 255)

        image_path, _, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        cod_path = os.path.join(cod_folder, image_name + '.png')
        coee_path = os.path.join(coee_folder, image_name + '.png')
        cv2.imwrite(cod_path, cod)
        cv2.imwrite(coee_path, coee)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
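Code example #2 (and many of the later ones) calls check_makedirs before writing results. A minimal sketch matching that usage, i.e. create the output directory only if it does not already exist, would be the following; the projects' own helpers may add error handling, so treat this purely as an illustration.

import os

def check_makedirs(dir_name):
    # Create the output directory on first use; a no-op if it already exists.
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)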
Code example #3
File: gen_video.py Project: aim-uofa/ETC-VideoSeg
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h,
         crop_w, scales, gray_folder, colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        prediction = predict_whole_img(model, input)
        # prediction=prediction[0].numpy()
        prediction = np.argmax(prediction, axis=3)
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))

        for g in range(0, prediction.shape[0]):
            check_makedirs(gray_folder)
            gray = np.uint8(prediction[g])
            image_path, _ = data_list[i * args.batch_size_gen + g]
            image_name = image_path.split('/')[-1].split('.')[0]
            gray_path = os.path.join(gray_folder, image_name + '.png')
            cv2.imwrite(gray_path, gray)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
Code example #4
File: test.py Project: lixin666/MGL
def calc_acc(data_list, pred_folder, edge_folder):
    r_mae = AverageMeter()
    e_mae = AverageMeter()

    for i, (image_path, target1_path, target2_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        pred1 = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                           cv2.IMREAD_GRAYSCALE)
        pred2 = cv2.imread(os.path.join(edge_folder, image_name + '.png'),
                           cv2.IMREAD_GRAYSCALE)

        target1 = cv2.imread(target1_path, cv2.IMREAD_GRAYSCALE)
        target2 = cv2.imread(target2_path, cv2.IMREAD_GRAYSCALE)

        if pred1.shape[1] != target1.shape[1] or pred1.shape[
                0] != target1.shape[0]:
            pred1 = cv2.resize(pred1, (target1.shape[1], target1.shape[0]))
            pred2 = cv2.resize(pred2, (target2.shape[1], target2.shape[0]))

        r_mae.update(calc_mae(pred1, target1))
        e_mae.update(calc_mae(pred2, target2))

        logger.info('Evaluating {0}/{1} on image {2}, mae {3:.4f}.'.format(
            i + 1, len(data_list), image_name + '.png', r_mae.avg))

    logger.info('Test result: r_mae / e_mae: {0:.3f}/{1:.3f}'.format(
        r_mae.avg, e_mae.avg))
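Code example #4 evaluates predictions with a calc_mae helper that is not shown above. A minimal sketch consistent with that usage, computing the mean absolute error between two uint8 grayscale maps rescaled to [0, 1], could be:

import numpy as np

def calc_mae(pred, target):
    # pred and target are uint8 grayscale images of identical size.
    pred = pred.astype(np.float64) / 255.0
    target = target.astype(np.float64) / 255.0
    return float(np.mean(np.abs(pred - target)))

The exact normalization used by the project may differ, so this is only an assumption based on how the result is logged.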
Code example #5
File: train.py Project: yangsuCV/StabNet0.2
def validate(epoch, isEval=True):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    dict_losses = DictAverageMeter()
    # switch to eval mode when isEval is True
    evalStr = 'NoEval'
    if isEval:
        model.eval()
        evalStr = ''

    end = time.time()
    for i, data_raw in enumerate(val_dataloader):
        if i == opt.val_iters: break
        data = data_raw
        if opt.gpu_ids:
            data = map_data(lambda x: Variable(x.cuda(), volatile=True), data)
        else:
            data = map_data(lambda x: Variable(x, volatile=True), data)
        data = Data(*data)
        data_time.update(time.time() - end)
        output = model.forward(data)
        warpped = output.warpped
        loss = criterion(output, data)

        # measure accuracy and record loss
        losses.update(loss.data[0], opt.batch_size)
        dict_losses.update(criterion.summary(), opt.batch_size)

    all_loss = dict_losses.avg
    print('{evalStr}Validation: Epoch: [{0}]\t'
          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
          'Total Time {1:.3f}\n\t'
          'ALl Loss {all_loss}'.format(epoch,
                                       time.time() - end,
                                       loss=losses,
                                       all_loss=all_loss,
                                       evalStr=evalStr))
    for sid in range(data.fm[0].shape[0]):
        visualize(data,
                  warpped,
                  global_step,
                  sid,
                  opt,
                  mode='both',
                  name='{}val'.format(evalStr))
    tl.log_value('{}val/Loss'.format(evalStr), losses.val, global_step)
    tl.log_value('{}val/Learning Rate'.format(evalStr),
                 scheduler.get_lr()[0], global_step)
    # tl.log_value('val/Batch Time', batch_time.val, global_step)
    tl.log_value('{}val/Data Time'.format(evalStr), data_time.val, global_step)
    for k, v in all_loss.items():
        tl.log_value('{}val/loss/'.format(evalStr) + k, v, global_step)
    model.train()
    return losses.val
Code example #6
def validate(test_loader, query_loader, model, score_fun):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    iter_time = AverageMeter()
    data_time = AverageMeter()
    mAP = RetrievalMAP(compute_on_step=False)
    mRR = RetrievalMRR(compute_on_step=False)
    pAt10 = RetrievalPrecision(compute_on_step=False, k=10)

    model.eval()
    end = time.time()

    # To process each image only once, we keep the query embeddings in memory
    # (queries are relatively few compared to the catalog, so the extra memory is small).
    processed_queries = []
    for (query_input, query_target, query_index) in query_loader:
        query_input = query_input.cuda(non_blocking=True)
        query_target = query_target.cuda(non_blocking=True)
        query_index = query_index.cuda(non_blocking=True)
        query_embeding = model(query_input, getFeatVec=True)
        processed_queries.append((query_embeding, query_target, query_index))

    for i, (test_input, test_target) in enumerate(test_loader):
        data_time.update(time.time() - end)
        test_input = test_input.cuda(non_blocking=True)
        test_target = test_target.cuda(non_blocking=True)
        test_embeding = model(test_input, getFeatVec=True)

        for query_embeding, query_target, query_index in processed_queries:
            scores = score_fun(test_embeding, query_embeding)
            indices = torch.broadcast_to(query_index.unsqueeze(0),
                                         scores.size())
            target = test_target.unsqueeze(1) == query_target.unsqueeze(0)

            mAP(scores, target, indices)
            mRR(scores, target, indices)
            pAt10(scores, target, indices)

        iter_time.update(time.time() - end)
        end = time.time()

        if (i + 1) % args.print_freq == 0:
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Iter {iter_time.val:.3f} ({iter_time.avg:.3f})'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    iter_time=iter_time))
    map_value = mAP.compute()
    mrr_value = mRR.compute()
    pAt10_value = pAt10.compute()

    logger.info('Val result: mAP/mRR/P@10 {:.4f}/{:.4f}/{:.4f}.'.format(
        map_value, mrr_value, pAt10_value))
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return map_value, mrr_value, pAt10_value
Code example #7
File: test.py Project: Jerrypiglet/semseg
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h,
         crop_w, scales, gray_folder, color_folder, colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, _, image_paths) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        # print(np.amax(input), np.amin(input), np.median(input))
        # print(np.amax(image), np.amin(image), np.median(image))
        h, w, _ = image.shape
        prediction = np.zeros((h, w, classes), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            prediction += scale_process(model, image_scale, classes, crop_h,
                                        crop_w, h, w, mean, std)
        prediction /= len(scales)
        prediction = np.argmax(prediction, axis=2)
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        gray = np.uint8(prediction)
        color = colorize(gray, colors)
        image_path, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')
        cv2.imwrite(gray_path, gray)
        color.save(color_path)
        cv2.imwrite(color_path.replace('.png', '_RGB_scale.png'), image_scale)
        # os.system('cp -r %s %s'%(image_path, color_path.replace('.png', '_RGB.png')))
        image_RGB = args.read_image(image_path)
        cv2.imwrite(color_path.replace('.png', '_RGB.png'), image_RGB)
        print('Result saved to %s; originally from %s' %
              (color_path, image_path))
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
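Code example #7 (and examples #13, #17 and #19) turns the gray label map into a palette image with colorize(gray, colors) and then calls .save() on the result. A sketch consistent with that usage, returning a PIL image in 'P' mode with the given palette applied, is shown below; the colors argument is assumed to be a flat list of RGB values, which may not match every project's loader.

import numpy as np
from PIL import Image

def colorize(gray, palette):
    # gray: HxW uint8 array of class ids; palette: flat [r0, g0, b0, r1, ...] list.
    color = Image.fromarray(gray.astype(np.uint8)).convert('P')
    color.putpalette(palette)
    return color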
Code example #8
def retrieval_validate(image_loader, sketch_loader, model, score_fun):
    if main_process():
        logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    iter_time = AverageMeter()
    data_time = AverageMeter()
    mAP = RetrievalMAP(compute_on_step=False)

    model.eval()
    end = time.time()

    # To process each image only once, we keep the query embeddings in memory
    # (queries are relatively few compared to the catalog, so the extra memory is small).
    processed_queries = []
    for (sketch_input, sketch_target, sketch_index) in sketch_loader:
        sketch_input = sketch_input.cuda(non_blocking=True)
        sketch_target = sketch_target.cuda(non_blocking=True)
        sketch_index = sketch_index.cuda(non_blocking=True)
        sketchFeatVec, _ = model(sketch_input, mode='sketch')
        processed_queries.append((sketchFeatVec, sketch_target, sketch_index))

    for i, (image_input, image_target) in enumerate(image_loader):
        data_time.update(time.time() - end)
        image_input = image_input.cuda(non_blocking=True)
        image_target = image_target.cuda(non_blocking=True)
        imageFeatVec, _ = model(image_input, mode='image')

        for sketchFeatVec, sketch_target, sketch_index in processed_queries:
            scores = score_fun(imageFeatVec, sketchFeatVec)
            indices = torch.broadcast_to(sketch_index.unsqueeze(0),
                                         scores.size())
            target = image_target.unsqueeze(1) == sketch_target.unsqueeze(0)

            mAP(scores, target, indices)

        iter_time.update(time.time() - end)
        end = time.time()

        if (i + 1) % args.test_print_freq == 0 and main_process():
            logger.info(
                'Test batch: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Iter {iter_time.val:.3f} ({iter_time.avg:.3f})'.format(
                    i + 1,
                    len(image_loader),
                    data_time=data_time,
                    iter_time=iter_time))
    map_value = mAP.compute()  # computes across all distributed processes
    if main_process():
        logger.info('Val result: mAP {:.4f}.'.format(map_value))
        logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return map_value
Code example #9
File: train.py Project: higolel/pfld-lel
def train(trainDataLoader, pfldNet, auxiliaryNet, optimizer, epoch, criterion,
          train_batchsize):
    print("===> Train:")
    losses = AverageMeter()
    for img, landmark_gt, attribute_gt, euler_angle_gt in trainDataLoader:
        img = img.to(device)
        landmark_gt = landmark_gt.to(device)
        attribute_gt = attribute_gt.to(device)
        euler_angle_gt = euler_angle_gt.to(device)
        pfldNet = pfldNet.to(device)
        auxiliaryNet = auxiliaryNet.to(device)
        features, landmarks = pfldNet(img)
        angle = auxiliaryNet(features)

        weighted_loss, loss = criterion(attribute_gt, landmark_gt,
                                        euler_angle_gt, angle, landmarks,
                                        opt.train_batchsize)

        optimizer.zero_grad()
        weighted_loss.backward()
        optimizer.step()
        losses.update(loss.item())

    return weighted_loss, loss
Code example #10
def test(model, criterion, names):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    batch_time = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    model.eval()
    device = torch.device('cuda')
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)

    for data, label in test_loader:

        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc,
                                                             avg_per_class_acc)
    print(outstr)
    """
Code example #11
def test(test_loader, data_list, model, classes, base_size, crop_h, crop_w,
         scales, binary_folder):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        h, w, _ = image.shape
        prediction = np.zeros((h, w), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            prediction += scale_process(model, image_scale, classes, crop_h,
                                        crop_w, h, w)

        prediction /= len(scales)

        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(binary_folder)
        image_path, _ = data_list[i]
        image_name = os.path.split(image_path)[-1]
        image_name = image_name.replace('png', 'npy')
        save_path = os.path.join(binary_folder, image_name)
        np.save(save_path, prediction)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
Code example #12
def cal_acc(data_list, pred_folder, classes):
    accuracy_meter = AverageMeter()
    fscore_meter = AverageMeter()

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = os.path.split(image_path)[-1]
        pred = cv2.imread(os.path.join(pred_folder, image_name),
                          cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        accuracy, precision, recall, f_score = accuracy_metrics(pred, target)
        accuracy_meter.update(accuracy)
        fscore_meter.update(f_score)

        logger.info('Evaluating {0}/{1} on image {2}, fscore {3:.4f}.'.format(
            i + 1, len(data_list), image_name + '.png', fscore_meter.val))

    mAcc = accuracy_meter.avg
    mFscore = fscore_meter.avg
    logger.info('Eval result: mAcc/mFscore {:.4f}/{:.4f}.'.format(
        mAcc, mFscore))
Code example #13
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h,
         crop_w, scales, gray_folder, color_folder, colors):
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        h, w, _ = image.shape
        prediction = np.zeros((h, w, classes), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            prediction += scale_process(model, image_scale, classes, crop_h,
                                        crop_w, h, w, mean, std)
        prediction /= len(scales)
        prediction = np.argmax(prediction, axis=2)
        batch_time.update(time.time() - end)
        end = time.time()

        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        gray = np.uint8(prediction)
        color = colorize(gray, colors)
        image_path, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')
        cv2.imwrite(gray_path, gray)
        color.save(color_path)
Code example #14
File: train.py Project: flowcut/PointWeb
def train(train_loader, model, criterion, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    device = torch.device('cuda')
    best_test_acc = 0

    model.train()
    train_pred = []
    train_true = []
    count = 0
    train_loss = 0.0
    for data, label in train_loader:
        data, label = data.to(device), label.to(device).squeeze()
        #data = data.permute(0, 2, 1)
        #print('data_shape:', data.shape)
        batch_size = data.size()[0]
        optimizer.zero_grad()
        logits = model(data)
        loss = criterion(logits, label)
        loss.backward()
        optimizer.step()
        preds = logits.max(dim=1)[1]
        count += batch_size
        train_loss += loss.item() * batch_size
        train_true.append(label.cpu().numpy())
        train_pred.append(preds.detach().cpu().numpy())
    train_true = np.concatenate(train_true)
    train_pred = np.concatenate(train_pred)
    train_acc = metrics.accuracy_score(train_true, train_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(train_true, train_pred)
    outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (
        epoch, train_loss * 1.0 / count, train_acc, avg_per_class_acc)
    # io.cprint(outstr)
    print(outstr)
    return train_loss * 1.0 / count, train_acc, avg_per_class_acc

    """end = time.time()
Code example #15
File: train.py Project: zots0127/ASGNet
def train(train_loader, model, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    main_loss_meter = AverageMeter()
    aux_loss_meter = AverageMeter()
    loss_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    model.train()
    end = time.time()
    max_iter = args.epochs * len(train_loader)
    vis_key = 0
    print('Warmup: {}'.format(args.warmup))
    for i, (input, target, s_input, s_mask, s_init_seed, subcls) in enumerate(train_loader):
        data_time.update(time.time() - end)
        current_iter = epoch * len(train_loader) + i + 1
        index_split = -1
        if args.base_lr > 1e-6:
            poly_learning_rate(optimizer, args.base_lr, current_iter, max_iter, power=args.power, index_split=index_split, warmup=args.warmup, warmup_step=len(train_loader)//2)

        s_input = s_input.cuda(non_blocking=True)
        s_mask = s_mask.cuda(non_blocking=True)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        s_init_seed = s_init_seed.cuda(non_blocking=True)
        
        output, main_loss, aux_loss = model(s_x=s_input, s_y=s_mask, x=input, y=target, s_seed=s_init_seed)

        if not args.multiprocessing_distributed:
            main_loss, aux_loss = torch.mean(main_loss), torch.mean(aux_loss)
        loss = main_loss + args.aux_weight * aux_loss
        optimizer.zero_grad()

        loss.backward()
        optimizer.step()
        n = input.size(0)
        if args.multiprocessing_distributed:
            main_loss, aux_loss, loss = main_loss.detach() * n, aux_loss * n, loss * n 
            count = target.new_tensor([n], dtype=torch.long)
            dist.all_reduce(main_loss), dist.all_reduce(aux_loss), dist.all_reduce(loss), dist.all_reduce(count)
            n = count.item()
            main_loss, aux_loss, loss = main_loss / n, aux_loss / n, loss / n

        intersection, union, target = intersectionAndUnionGPU(output, target, args.classes, args.ignore_label)
        if args.multiprocessing_distributed:
            dist.all_reduce(intersection), dist.all_reduce(union), dist.all_reduce(target)
        intersection, union, target = intersection.cpu().numpy(), union.cpu().numpy(), target.cpu().numpy()
        intersection_meter.update(intersection), union_meter.update(union), target_meter.update(target)

        accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        main_loss_meter.update(main_loss.item(), n)
        aux_loss_meter.update(aux_loss.item(), n)
        loss_meter.update(loss.item(), n)
        batch_time.update(time.time() - end)
        end = time.time()

        remain_iter = max_iter - current_iter
        remain_time = remain_iter * batch_time.avg
        t_m, t_s = divmod(remain_time, 60)
        t_h, t_m = divmod(t_m, 60)
        remain_time = '{:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m), int(t_s))

        if (i + 1) % args.print_freq == 0 and main_process():
            logger.info('Epoch: [{}/{}][{}/{}] '
                        'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                        'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                        'Remain {remain_time} '
                        'MainLoss {main_loss_meter.val:.4f} '
                        'AuxLoss {aux_loss_meter.val:.4f} '                        
                        'Loss {loss_meter.val:.4f} '
                        'Accuracy {accuracy:.4f}.'.format(epoch+1, args.epochs, i + 1, len(train_loader),
                                                          batch_time=batch_time,
                                                          data_time=data_time,
                                                          remain_time=remain_time,
                                                          main_loss_meter=main_loss_meter,
                                                          aux_loss_meter=aux_loss_meter,
                                                          loss_meter=loss_meter,
                                                          accuracy=accuracy))
        if main_process():
            writer.add_scalar('loss_train_batch', main_loss_meter.val, current_iter)
            writer.add_scalar('aux_loss_train_batch', aux_loss_meter.val, current_iter)
            writer.add_scalar('mIoU_train_batch', np.mean(intersection / (union + 1e-10)), current_iter)
            writer.add_scalar('mAcc_train_batch', np.mean(intersection / (target + 1e-10)), current_iter)
            writer.add_scalar('allAcc_train_batch', accuracy, current_iter)

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    if main_process():
        logger.info('Train result at epoch [{}/{}]: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(epoch, args.epochs, mIoU, mAcc, allAcc))
        for i in range(args.classes):
            logger.info('Class_{} Result: iou/accuracy {:.4f}/{:.4f}.'.format(i, iou_class[i], accuracy_class[i]))        
    return main_loss_meter.avg, aux_loss_meter.avg, mIoU, mAcc, allAcc
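Several examples (#1, #15, #23) accumulate the intersection, union and target histograms returned by intersectionAndUnionGPU. A minimal sketch consistent with how those return values are used (per-class counts computed with torch.histc over flattened predictions and labels, with an optional ignore label) is shown below; treat it as an illustration of the technique rather than the exact function from these repositories.

import torch

def intersectionAndUnionGPU(output, target, K, ignore_index=255):
    # output, target: integer label tensors of identical shape; K: number of classes.
    output = output.view(-1)
    target = target.view(-1)
    # Mark ignored pixels so they fall outside the [0, K-1] histogram range.
    output[target == ignore_index] = ignore_index
    intersection = output[output == target]
    area_intersection = torch.histc(intersection.float(), bins=K, min=0, max=K - 1)
    area_output = torch.histc(output.float(), bins=K, min=0, max=K - 1)
    area_target = torch.histc(target.float(), bins=K, min=0, max=K - 1)
    area_union = area_output + area_target - area_intersection
    return area_intersection, area_union, area_target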
Code example #16
def triplet_train(train_loader, model, criterion, optimizer, epoch,
                  classif_criterion):
    iter_time = AverageMeter()
    data_time = AverageMeter()
    transfer_time = AverageMeter()
    batch_time = AverageMeter()
    metric_time = AverageMeter()
    loss_meter = AverageMeter()
    loss_sketch_meter = AverageMeter()
    loss_positive_meter = AverageMeter()
    loss_negative_meter = AverageMeter()
    loss_triplet_meter = AverageMeter()
    top1_meter_sketch = AverageMeter()
    top1_meter_positive = AverageMeter()
    top1_meter_negative = AverageMeter()

    model.train()
    end = time.time()
    sum_epochs = args.contras_epochs + args.triplet_epochs
    max_iter = sum_epochs * len(train_loader)
    for i, (sketch_input, positive_input, negative_input, sketch_target,
            negative_target) in enumerate(train_loader):
        data_time.update(time.time() - end)
        if args.time_breakdown:
            last = time.time()

        sketch_input = sketch_input.cuda(non_blocking=True)
        positive_input = positive_input.cuda(non_blocking=True)
        negative_input = negative_input.cuda(non_blocking=True)
        sketch_target = sketch_target.cuda(non_blocking=True)
        negative_target = negative_target.cuda(non_blocking=True)
        if args.time_breakdown:
            torch.cuda.synchronize()
            transfer_time.update(time.time() - last)
            last = time.time()

        (sketchFeatVec, positiveFeatVec,
         negativeFeatVec), (sketch_output, positive_output,
                            negative_output) = model(sketch_input,
                                                     positive_input,
                                                     negative_input)
        loss_sketch = smooth_loss(
            sketch_output, sketch_target, args.label_smoothing
        ) if args.label_smoothing else classif_criterion(
            sketch_output, sketch_target)
        loss_positive = smooth_loss(
            positive_output, sketch_target, args.label_smoothing
        ) if args.label_smoothing else classif_criterion(
            positive_output, sketch_target)
        loss_negative = smooth_loss(
            negative_output, negative_target, args.label_smoothing
        ) if args.label_smoothing else classif_criterion(
            negative_output, negative_target)
        sketchFeatVec = F.normalize(sketchFeatVec)
        positiveFeatVec = F.normalize(positiveFeatVec)
        negativeFeatVec = F.normalize(negativeFeatVec)
        loss_triplet = criterion(sketchFeatVec, positiveFeatVec,
                                 negativeFeatVec)

        loss_triplet, loss_sketch, loss_positive, loss_negative = loss_triplet, loss_sketch * 0.5, loss_positive * 0.25, loss_negative * 0.25
        classif_decay = 0.5 * (1 + math.cos(epoch * math.pi / sum_epochs))
        loss = 2.0 * loss_triplet * epoch + classif_decay * (
            loss_sketch + loss_positive + loss_negative)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if args.time_breakdown:
            torch.cuda.synchronize()
            batch_time.update(time.time() - last)
            last = time.time()

        n = sketch_input.size(0)
        if args.multiprocessing_distributed:
            with torch.no_grad():
                loss, loss_triplet, loss_sketch, loss_positive, loss_negative = loss.detach(
                ) * n, loss_triplet.detach() * n, loss_sketch.detach(
                ) * n, loss_positive.detach() * n, loss_negative.detach() * n
                count = sketch_target.new_tensor([n], dtype=torch.long)
                dist.all_reduce(loss), dist.all_reduce(count), dist.all_reduce(
                    loss_triplet), dist.all_reduce(
                        loss_sketch), dist.all_reduce(
                            loss_positive), dist.all_reduce(loss_negative)
                n = count.item()
                loss, loss_triplet, loss_sketch, loss_positive, loss_negative = loss / n, loss_triplet / n, loss_sketch / n, loss_positive / n, loss_negative / n
        loss_meter.update(loss.item(), n)
        loss_triplet_meter.update(loss_triplet.item(), n)
        loss_sketch_meter.update(loss_sketch.item(), n)
        loss_positive_meter.update(loss_positive.item(), n)
        loss_negative_meter.update(loss_negative.item(), n)

        # classification metrics
        top1_s, = cal_accuracy(sketch_output, sketch_target, topk=(1, ))
        top1_p, = cal_accuracy(positive_output, sketch_target, topk=(1, ))
        top1_n, = cal_accuracy(negative_output, negative_target, topk=(1, ))
        n = sketch_input.size(0)
        if args.multiprocessing_distributed:
            with torch.no_grad():
                top1_s = top1_s * n
                top1_p = top1_p * n
                top1_n = top1_n * n
                count = sketch_target.new_tensor([n], dtype=torch.long)
                dist.all_reduce(top1_s), dist.all_reduce(
                    top1_p), dist.all_reduce(top1_n), dist.all_reduce(count)
                n = count.item()
                top1_s = top1_s / n
                top1_p = top1_p / n
                top1_n = top1_n / n
        top1_meter_sketch.update(top1_s.item(), n), top1_meter_positive.update(
            top1_p.item(), n), top1_meter_negative.update(top1_n.item(), n)

        if args.time_breakdown:
            torch.cuda.synchronize()
            metric_time.update(time.time() - last)
        iter_time.update(time.time() - end)
        end = time.time()

        # calculate remain time
        current_iter = epoch * len(train_loader) + i + 1
        remain_iter = max_iter - current_iter
        remain_time = remain_iter * iter_time.avg
        t_m, t_s = divmod(remain_time, 60)
        t_h, t_m = divmod(t_m, 60)
        remain_time = '{:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m),
                                                    int(t_s))

        if ((i + 1) % args.print_freq == 0) and main_process():
            logger.info((
                'Epoch: [{}/{}][{}/{}] '
                'Time {iter_time.val:.3f} ({iter_time.avg:.3f}) '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) ' +
                ('Transfer {transfer_time.val:.3f} ({transfer_time.avg:.3f}) '
                 'Batch {batch_time.val:.4f} ({batch_time.avg:.4f}) '
                 'Metric {metric_time.val:.3f} ({metric_time.avg:.3f}) '
                 if args.time_breakdown else '') + 'Remain {remain_time} '
                'Loss total/triplet/classifSketch/classifPos/classifNeg '
                '{loss_meter.val:.4f}/{triplet_loss_meter.val:.4f}/{sketch_loss_meter.val:.4f}/{positive_loss_meter.val:.4f}/{negative_loss_meter.val:.4f} '
                'Top1classif sketch/positive/negative {top1_s.val:.3f}/{top1_p.val:.3f}/{top1_n.val:.3f}'
            ).format(epoch + 1,
                     sum_epochs,
                     i + 1,
                     len(train_loader),
                     iter_time=iter_time,
                     data_time=data_time,
                     transfer_time=transfer_time,
                     batch_time=batch_time,
                     metric_time=metric_time,
                     remain_time=remain_time,
                     triplet_loss_meter=loss_triplet_meter,
                     sketch_loss_meter=loss_sketch_meter,
                     positive_loss_meter=loss_positive_meter,
                     negative_loss_meter=loss_negative_meter,
                     loss_meter=loss_meter,
                     top1_s=top1_meter_sketch,
                     top1_p=top1_meter_positive,
                     top1_n=top1_meter_negative))

    if main_process():
        logger.info(
            'Train result at epoch [{}/{}]: Loss total/triplet/classifSketch/classifPos/classifNeg {:.4f}/{:.4f}/{:.4f}/{:.4f}/{:.4f}. '
            'Top1classif sketch/positive/negative {top1_s.avg:.3f}/{top1_p.avg:.3f}/{top1_n.avg:.3f}'
            .format(epoch + 1,
                    sum_epochs,
                    loss_meter.avg,
                    loss_triplet_meter.avg,
                    loss_sketch_meter.avg,
                    loss_positive_meter.avg,
                    loss_negative_meter.avg,
                    top1_s=top1_meter_sketch,
                    top1_p=top1_meter_positive,
                    top1_n=top1_meter_negative))
    return loss_meter.avg
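Code example #16 switches between the plain classification criterion and a smooth_loss helper when args.label_smoothing is set. A label-smoothing cross entropy consistent with that call signature, smooth_loss(output, target, eps), could be sketched as follows; it illustrates the technique and is not necessarily the exact helper used by the project.

import torch.nn.functional as F

def smooth_loss(output, target, eps):
    # output: raw logits of shape (N, C); target: class indices of shape (N,).
    n_class = output.size(1)
    log_prob = F.log_softmax(output, dim=1)
    # Mix the one-hot target with a uniform distribution over the classes.
    one_hot = F.one_hot(target, n_class).float()
    smoothed = one_hot * (1.0 - eps) + eps / n_class
    return -(smoothed * log_prob).sum(dim=1).mean()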
Code example #17
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h,
         crop_w, scales, gray_folder, color_folder, derain_folder, edge_folder,
         colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (_, input, _, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        h, w, _ = image.shape
        derain_prediction = np.zeros((h, w, 3), dtype=float)
        seg_prediction = np.zeros((h, w, classes), dtype=float)
        edge_prediction = np.zeros((h, w), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            temp_derain_prediction, temp_seg_prediction, temp_edge_prediction = scale_process(
                model, image_scale, classes, crop_h, crop_w, h, w, mean, std)
            derain_prediction += temp_derain_prediction
            seg_prediction += temp_seg_prediction
            edge_prediction += temp_edge_prediction
        derain_prediction /= len(scales)
        seg_prediction /= len(scales)
        edge_prediction /= len(scales)
        # seg_prediction = np.argmax(seg_prediction, axis=2)

        # process derain img
        derain_outs = derain_prediction
        derain_outs *= std
        derain_outs += mean
        derain_outs = np.clip(derain_outs, a_max=255, a_min=0)
        derain_outs = derain_outs.astype('uint8')
        derain_outs = cv2.cvtColor(derain_outs.astype('uint8'),
                                   cv2.COLOR_RGB2BGR)

        # process seg pred
        seg_outs = seg_prediction
        seg_outs = np.argmax(seg_outs, axis=2).squeeze()

        # process edge pred
        edge_outs = edge_prediction
        edge_outs = np.clip(edge_outs, a_max=1, a_min=0)
        edge_outs = (edge_outs * 255).astype('uint8')

        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        check_makedirs(derain_folder)
        check_makedirs(edge_folder)
        gray = np.uint8(seg_outs)
        color = colorize(gray, colors)
        image_path, _, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')
        derain_path = os.path.join(derain_folder, image_name + '.png')
        edge_path = os.path.join(edge_folder, image_name + '.png')
        cv2.imwrite(gray_path, gray)
        cv2.imwrite(derain_path, derain_outs)
        cv2.imwrite(edge_path, edge_outs)
        color.save(color_path)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
Code example #18
def train(train_loader, model, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    fscore_meter = AverageMeter()

    model.train()
    end = time.time()
    max_iter = args.epochs * len(train_loader)
    for i, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - end)

        # data
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)

        # loss
        logits, output, loss = model(input, target)
        if len(args.train_gpu) > 1:
            loss = torch.mean(loss)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        n = input.size(0)
        loss_meter.update(loss.item(), n)

        # metric
        accuracy, precision, recall, f_score = accuracy_metrics(
            output.detach().cpu().numpy(),
            target.detach().cpu().numpy(),
            threshold=args.binary_threshold,
            training=True)
        accuracy_meter.update(accuracy)
        fscore_meter.update(f_score)

        batch_time.update(time.time() - end)
        end = time.time()

        # learning rate
        current_iter = epoch * len(train_loader) + i + 1
        current_lr = poly_learning_rate(args.base_lr,
                                        current_iter,
                                        max_iter,
                                        power=args.power)
        writer.add_scalar('learning_rate', current_lr, current_iter)
        for param_group in optimizer.param_groups:
            param_group['lr'] = current_lr
        remain_iter = max_iter - current_iter
        remain_time = remain_iter * batch_time.avg
        t_m, t_s = divmod(remain_time, 60)
        t_h, t_m = divmod(t_m, 60)
        remain_time = '{:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m),
                                                    int(t_s))

        if (i + 1) % args.print_freq == 0:
            logger.info('Epoch: [{}/{}][{}/{}] '
                        'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                        'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                        'Remain {remain_time} '
                        'Loss {loss_meter.val:.4f} '
                        'Accuracy {accuracy_meter.val:.4f}. '
                        'f-score {fscore_meter.val:.4f}.'.format(
                            epoch + 1,
                            args.epochs,
                            i + 1,
                            len(train_loader),
                            batch_time=batch_time,
                            data_time=data_time,
                            remain_time=remain_time,
                            loss_meter=loss_meter,
                            accuracy_meter=accuracy_meter,
                            fscore_meter=fscore_meter))

        writer.add_scalar('loss_train_batch', loss_meter.val, current_iter)
        writer.add_scalar('accuracy_train_batch', accuracy_meter.val,
                          current_iter)
        writer.add_scalar('fscore_train_batch', fscore_meter.val, current_iter)

    mAcc = accuracy_meter.avg
    mFscore = fscore_meter.avg
    logger.info(
        'Train result at epoch [{}/{}]: mAcc/mFscore {:.4f}/{:.4f}.'.format(
            epoch + 1, args.epochs, mAcc, mFscore))
    return loss_meter.avg, mAcc, mFscore
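Code examples #15 and #18 both decay the learning rate with a poly_learning_rate helper. The underlying schedule is the usual polynomial decay lr = base_lr * (1 - curr_iter / max_iter) ** power; a minimal sketch matching the call in example #18, which returns the new rate and lets the caller write it into the optimizer's parameter groups, is given below. The variant in example #15 additionally takes the optimizer, an index_split and warmup arguments, which are omitted here.

def poly_learning_rate(base_lr, curr_iter, max_iter, power=0.9):
    # Polynomial decay from base_lr towards 0 over max_iter iterations.
    return base_lr * (1 - float(curr_iter) / max_iter) ** power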
Code example #19
def run_test(test_loader, data_list, model, classes, mean, std, base_size,
             crop_h, crop_w, scales, gray_folder, color_folder, colors,
             is_flip):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    is_test = 'test' in data_list[0][0]
    for i, (input, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        h, w, _ = image.shape
        prediction = np.zeros((h, w, classes), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            prediction += scale_process(model,
                                        image_scale,
                                        classes,
                                        crop_h,
                                        crop_w,
                                        h,
                                        w,
                                        mean,
                                        std,
                                        FLIP=is_flip)
        prediction /= len(scales)
        prediction = np.argmax(prediction, axis=2)
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        gray = np.uint8(prediction)
        color = colorize(gray, colors)
        image_path, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')

        if is_test:
            gray_labelid = trainID2labelID(gray)
            cv2.imwrite(gray_path, gray_labelid)
        else:
            cv2.imwrite(gray_path, gray)
        color.save(color_path)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
Code example #20
File: train.py Project: yangsuCV/StabNet0.2
def train(epoch):
    global global_step, criterion
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, data in enumerate(train_dataloader):
        # measure data loading time
        if opt.gpu_ids:
            data = map_data(lambda x: Variable(x.cuda()), data)
        else:
            data = map_data(lambda x: Variable(x), data)

        data_time.update(time.time() - end)
        data = Data(*data)
        output = model.forward(data)
        loss = criterion(output, data)
        # measure accuracy and record loss
        losses.update(loss.data[0], opt.batch_size)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)

        if (global_step + 1) % opt.print_freq == 0:
            all_loss = criterion.summary()
            util.diagnose_network(model.cnn)
            util.diagnose_network(model.fc_loc)
            visualize(data,
                      output.warpped,
                      global_step,
                      0,
                      opt,
                      mode='save',
                      name='train')

            print('Epoch: [{0}][{1}/{2}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Learning Rate {learning_rate}\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\n\t'
                  'ALl Loss {all_loss}'.format(
                      epoch,
                      i,
                      len(train_dataloader),
                      batch_time=batch_time,
                      learning_rate=scheduler.get_lr(),
                      data_time=data_time,
                      loss=losses,
                      all_loss=all_loss))

        if (global_step + 1) % opt.log_freq == 0:
            all_loss = criterion.summary()
            tl.log_value('train/Loss', losses.val, global_step)
            tl.log_value('train/Learning Rate',
                         scheduler.get_lr()[0], global_step)
            # tl.log_value('train/Batch Time', batch_time.val, global_step)
            tl.log_value('train/Data Time', data_time.val, global_step)
            for k, v in all_loss.items():
                tl.log_value('train/loss/' + k, v, global_step)
            for sid in range(data.fm[0].shape[0]):
                visualize(data,
                          output.warpped,
                          global_step,
                          sid,
                          opt,
                          mode='log',
                          name='train')

        if (global_step + 1) % opt.val_freq == 0:
            validate(epoch)
            validate(epoch, False)

        # if global_step == 500:
        #     opt.id_loss_weight = 0
        #     criterion = sys.modules['loss'].Loss(opt)

        global_step += 1
        end = time.time()
Code example #21
def validate(val_loader, model, criterion):
    print('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    fscore_meter = AverageMeter()

    model.eval()
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        data_time.update(time.time() - end)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        output = model(input)
        loss = criterion(output, target)

        n = input.size(0)
        loss = torch.mean(loss)

        # metric
        accuracy, precision, recall, f_score, max_threshold = accuracy_metrics(
            output.detach().cpu().numpy(),
            target.detach().cpu().numpy(),
            threshold=args.binary_threshold,
            training=False)
        accuracy_meter.update(accuracy)
        fscore_meter.update(f_score)

        loss_meter.update(loss.item(), input.size(0))
        batch_time.update(time.time() - end)
        end = time.time()
        if (i + 1) % args.print_freq == 0:
            print('Test: [{}/{}] '
                  'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                  'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                  'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) '
                  'Accuracy {accuracy_meter.val:.4f}. '
                  'Fscore {fscore_meter.val:.4f}. '
                  'max_threshold {max_threshold:.2f}'.format(
                      i + 1,
                      len(val_loader),
                      data_time=data_time,
                      batch_time=batch_time,
                      loss_meter=loss_meter,
                      accuracy_meter=accuracy_meter,
                      fscore_meter=fscore_meter,
                      max_threshold=max_threshold))

    mAcc = accuracy_meter.avg
    mFscore = fscore_meter.avg
    print('Val result: mAcc/mFscore {:.4f}/{:.4f}.'.format(mAcc, mFscore))
    print('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return loss_meter.avg, mAcc, mFscore, max_threshold
Code example #22
def train(train_loader, model, optimizer, epoch):
    batch_time = AverageMeter()
Code example #23
def validate(val_loader, model, criterion):
    if main_process():
        logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    ## step.1 set up the evaluation meters, which are updated as validation proceeds
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    model.eval()
    end = time.time()
    ## step.2 inner loop over the validation set
    for i, (input, target) in enumerate(val_loader):
        data_time.update(time.time() - end)
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        output = model(input)
        if args.zoom_factor != 8:
            output = F.interpolate(output,
                                   size=target.size()[1:],
                                   mode='bilinear',
                                   align_corners=True)
        loss = criterion(output, target)

        n = input.size(0)
        if args.multiprocessing_distributed:
            loss = loss * n  # not considering ignore pixels
            count = target.new_tensor([n], dtype=torch.long)
            dist.all_reduce(loss), dist.all_reduce(count)
            n = count.item()
            loss = loss / n
        else:
            loss = torch.mean(loss)

        ## step.4 update the evaluation statistics
        output = output.max(1)[1]
        intersection, union, target = intersectionAndUnionGPU(
            output, target, args.classes, args.ignore_label)
        if args.multiprocessing_distributed:
            dist.all_reduce(intersection), dist.all_reduce(
                union), dist.all_reduce(target)
        intersection, union, target = intersection.cpu().numpy(), union.cpu(
        ).numpy(), target.cpu().numpy()
        intersection_meter.update(intersection), union_meter.update(
            union), target_meter.update(target)

        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        loss_meter.update(loss.item(), input.size(0))
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % args.print_freq == 0) and main_process():
            logger.info('Test: [{}/{}] '
                        'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                        'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                        'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) '
                        'Accuracy {accuracy:.4f}.'.format(
                            i + 1,
                            len(val_loader),
                            data_time=data_time,
                            batch_time=batch_time,
                            loss_meter=loss_meter,
                            accuracy=accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
    if main_process():
        logger.info(
            'Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
                mIoU, mAcc, allAcc))
        for i in range(args.classes):
            logger.info('Class_{} Result: iou/accuracy {:.4f}/{:.4f}.'.format(
                i, iou_class[i], accuracy_class[i]))
        logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return loss_meter.avg, mIoU, mAcc, allAcc
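
validate() above calls intersectionAndUnionGPU, which is not reproduced in these excerpts. A minimal sketch of what such a helper typically computes is given below as an assumption, following the common semantic-segmentation recipe: per-class intersection, union and target pixel counts as GPU tensors, with ignore_index pixels excluded so they never count toward any class. The per-class IoU and accuracy reported above then follow directly from the accumulated sums.

import torch

def intersectionAndUnionGPU(output, target, K, ignore_index=255):
    # output/target: integer label tensors of identical shape, e.g. (N, H, W).
    assert output.shape == target.shape
    output = output.view(-1)
    target = target.view(-1)
    # Map ignored pixels onto the ignore label so they never match a real class.
    output[target == ignore_index] = ignore_index
    intersection = output[output == target]
    # histc needs floating point input; bins 0..K-1 give per-class pixel counts,
    # values outside that range (the ignore label) are simply dropped.
    area_intersection = torch.histc(intersection.float(), bins=K, min=0, max=K - 1)
    area_output = torch.histc(output.float(), bins=K, min=0, max=K - 1)
    area_target = torch.histc(target.float(), bins=K, min=0, max=K - 1)
    area_union = area_output + area_target - area_intersection
    return area_intersection, area_union, area_target
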
Code example #24
def main():
    opt = TestOptions().parse()
    # preprocess data
    all_stable_frames, fps = get_images(opt.video_root + 'stable/' +
                                        str(opt.video_index) + '.avi')
    all_unstable_frames, fps = get_images(opt.video_root + 'unstable/' +
                                          str(opt.video_index) + '.avi')

    # generate data flow
    pred_frames_for_input = []
    singleVideoData = PreprocessDataSet(all_stable_frames, all_unstable_frames,
                                        pred_frames_for_input, opt)
    eval_data_loader = torch.utils.data.DataLoader(singleVideoData)
    model, criterion = create_model(opt)
    checkpoint = torch.load(opt.checkpoint_path,
                            map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])
    data_time = AverageMeter()
    end = time.time()
    # go through model to get output
    idx = 0
    pred_frames = []
    if opt.instnorm:
        model.train()
    else:
        model.eval()
    if opt.fake_test:
        print("fake test")
        pred_frames = []
        for i in range(50):
            for j, data in enumerate(eval_data_loader):
                if opt.gpu_ids:
                    data = map_data(
                        lambda x: Variable(x.cuda(), volatile=True), data)
                else:
                    data = map_data(lambda x: Variable(x, volatile=True), data)
                data_time.update(time.time() - end)
                data = Data(*data)
                output = model.forward(data)
                warpped = output.warpped
                pred_frames += data.prefix
                for u, w, t in zip(data.unstable, warpped, data.target):
                    pred_frames += (u, w, torch.abs(w - t))
                # visualize(data, warpped, i, 0, opt, 'save')

        pred_frames = list(map(lambda x: tensor2im(x.data), pred_frames))

    else:
        for i in range(0, len(all_stable_frames) - 1):
            if i % 100 == 0:
                print("=====> %d/%d" % (i, len(all_stable_frames)))
            for j, data in enumerate(eval_data_loader):
                if opt.gpu_ids:
                    data = map_data(
                        lambda x: Variable(x.cuda(), volatile=True), data)
                else:
                    data = map_data(lambda x: Variable(x, volatile=True), data)
                data_time.update(time.time() - end)
                data = Data(*data)
                # print(data)
                output = model.forward(data)
                warpped = output.warpped
                # save outputs
                # if (i < opt.prefix[0]):
                #     last_frame = all_stable_frames[0]
                # else:
                #     last_frame = pred_frames_for_input[len(pred_frames_for_input) + 1 - opt.prefix[0]]
                # print(data.prefix[-1][0].data.shape)
                last_frame = output_to_input([data.prefix[-1]], opt)
                pred_frames.append(
                    draw_imgs(output_to_input(warpped,
                                              opt), all_stable_frames[i],
                              all_unstable_frames[i], last_frame))
                pred_frames_for_input.append(output_to_input(warpped, opt))
                eval_data_loader = torch.utils.data.DataLoader(
                    PreprocessDataSet(all_stable_frames, all_unstable_frames,
                                      pred_frames_for_input, opt))
                # if i < 100: visualize(data, warpped, i, 0, opt, 'save')

    # print video
    generate_video(pred_frames, fps, opt)
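
The inference loops above use Variable(x, volatile=True), the pre-0.4 PyTorch idiom for disabling autograd. On current PyTorch, Variable is an alias for Tensor and volatile is ignored, so the equivalent pattern is to wrap the forward pass in torch.no_grad(); a hypothetical drop-in helper:

import torch

def run_inference(model, batch, device='cuda'):
    # Hypothetical helper: torch.no_grad() replaces Variable(x, volatile=True),
    # so no autograd graph is built during the forward pass.
    with torch.no_grad():
        batch = [x.to(device) for x in batch]
        return model(*batch)
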
Code example #25
def test(test_loader, data_list, model, classes, mean, std, gray_folder,
         color_folder, derain_folder, edge_folder, colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (_, input, _, _) in enumerate(test_loader):
        data_time.update(time.time() - end)

        with torch.no_grad():
            derain_outs, seg_outs, edge_outs = model(input)

        derain_outs = derain_outs.cpu().numpy()
        seg_outs = seg_outs.cpu().numpy()
        edge_outs = edge_outs.cpu().numpy()

        # process derain img
        derain_outs = np.transpose(derain_outs, (0, 2, 3, 1)).squeeze(axis=0)
        derain_outs *= std
        derain_outs += mean
        derain_outs = np.clip(derain_outs, a_max=255, a_min=0)
        derain_outs = derain_outs.astype('uint8')
        derain_outs = cv2.cvtColor(derain_outs, cv2.COLOR_RGB2BGR)

        # process seg pred
        seg_outs = np.transpose(seg_outs, (0, 2, 3, 1))
        seg_outs = np.argmax(seg_outs, axis=3).squeeze(axis=0)

        # process edge pred
        edge_outs = np.transpose(edge_outs,
                                 (0, 2, 3, 1)).squeeze(axis=3).squeeze(axis=0)
        edge_outs = np.clip(edge_outs, a_max=1, a_min=0)
        edge_outs = (edge_outs * 255).astype('uint8')

        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        check_makedirs(derain_folder)
        check_makedirs(edge_folder)

        gray = np.uint8(seg_outs)
        color = colorize(gray, colors)
        image_path, _, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')
        derain_path = os.path.join(derain_folder, image_name + '.png')
        edge_path = os.path.join(edge_folder, image_name + '.png')
        cv2.imwrite(gray_path, gray)
        cv2.imwrite(derain_path, derain_outs)
        cv2.imwrite(edge_path, edge_outs)
        color.save(color_path)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
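
The coloured segmentation map above comes from a colorize() helper that is not included in these excerpts. A common implementation (assumed here, and consistent with the result being saved via color.save()) builds a palette-mode PIL image from the grayscale label map:

import numpy as np
from PIL import Image

def colorize(gray, palette):
    # gray: HxW uint8 label map; palette: flat list of up to 256*3 RGB values.
    color = Image.fromarray(gray.astype(np.uint8)).convert('P')
    color.putpalette(palette)
    return color
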
Code example #26
def cal_acc(data_list, pred_folder, classes, names):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    from sotabencheval.semantic_segmentation import ADE20KEvaluator
    evaluator = ADE20KEvaluator(model_name='PSPNet (ResNet-50)',
                                paper_arxiv_id='1612.01105')

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        pred = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                          cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        intersection, union, target = intersectionAndUnion(
            pred, target, classes)
        cache_exists = evalintersectionAndUnion(pred, target, classes,
                                                evaluator)

        if cache_exists:
            break

        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)

    if cache_exists:
        evaluator.save()
        return

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    print('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        print('Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
            i, iou_class[i], accuracy_class[i], names[i]))
Code example #27
def cal_acc(data_list, pred_folder, derain_folder, classes, names,
            result_txt_path):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    psnr_meter = AverageMeter()
    ssim_meter = AverageMeter()

    for i, (image_path, rain_image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        derain_pred = cv2.imread(
            os.path.join(derain_folder, image_name + '.png'))
        clear_target = cv2.imread(image_path)
        psnr, ssim = caculate_psnr_ssim(derain_pred, clear_target)

        seg_pred = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                              cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        intersection, union, target = intersectionAndUnion(
            seg_pred, target, classes)

        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        psnr_meter.update(psnr)
        ssim_meter.update(ssim)

        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        logger.info(
            'Evaluating {0}/{1} on image {2}, psnr {3:.4f}, ssim {4:.4f}, accuracy {5:.4f}.'
            .format(i + 1, len(data_list), image_name + '.png', psnr, ssim,
                    accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
    PSNR = psnr_meter.avg
    SSIM = ssim_meter.avg

    logger.info('Eval result: PSNR/SSIM {:.4f}/{:.4f}.'.format(PSNR, SSIM))
    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))

    with open(result_txt_path, 'w') as result_file:
        result_file.writelines(
            'Eval result: PSNR/SSIM {:.4f}/{:.4f}.\n'.format(PSNR, SSIM))
        result_file.writelines(
            'Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.\n'.format(
                mIoU, mAcc, allAcc))
        for i in range(classes):
            result_file.writelines(
                'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.\n'.
                format(i, iou_class[i], accuracy_class[i], names[i]))
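
caculate_psnr_ssim() is referenced above but not shown. One plausible implementation, given here only as an assumption, uses scikit-image (>= 0.19 for the channel_axis argument) on the 8-bit BGR images returned by cv2.imread:

from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def caculate_psnr_ssim(pred, target):
    # pred/target: HxWx3 uint8 images loaded with cv2.imread.
    psnr = peak_signal_noise_ratio(target, pred, data_range=255)
    ssim = structural_similarity(target, pred, data_range=255, channel_axis=-1)
    return psnr, ssim
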
Code example #28
File: train.py Project: zots0127/ASGNet
def validate(val_loader, model, criterion):
    if main_process():
        logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    batch_time = AverageMeter()
    model_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    if args.use_coco:
        split_gap = 20
    else:
        split_gap = 5
    class_intersection_meter = [0]*split_gap
    class_union_meter = [0]*split_gap  

    if args.manual_seed is not None and args.fix_random_seed_val:
        torch.cuda.manual_seed(args.manual_seed)
        np.random.seed(args.manual_seed)
        torch.manual_seed(args.manual_seed)
        torch.cuda.manual_seed_all(args.manual_seed)
        random.seed(args.manual_seed)

    model.eval()
    end = time.time()
    if args.split != 999:
        if args.use_coco:
            test_num = 20000
        else:
            test_num = 2000
    else:
        test_num = len(val_loader)
    assert test_num % args.batch_size_val == 0    
    iter_num = 0
    total_time = 0

    for e in range(10):
        for i, (input, target, s_input, s_mask, s_init_seed, subcls, ori_label) in enumerate(val_loader):
            if (iter_num-1) * args.batch_size_val >= test_num:
                break
            iter_num += 1    
            data_time.update(time.time() - end)
            input = input.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            ori_label = ori_label.cuda(non_blocking=True)
            start_time = time.time()
            output = model(s_x=s_input, s_y=s_mask, x=input, y=target, s_seed=s_init_seed)
            total_time = total_time + 1
            model_time.update(time.time() - start_time)

            if args.ori_resize:
                longerside = max(ori_label.size(1), ori_label.size(2))
                backmask = torch.ones(ori_label.size(0), longerside, longerside).cuda()*255
                backmask[0, :ori_label.size(1), :ori_label.size(2)] = ori_label
                target = backmask.clone().long()

            output = F.interpolate(output, size=target.size()[1:], mode='bilinear', align_corners=True)         
            loss = criterion(output, target)    

            n = input.size(0)
            loss = torch.mean(loss)

            output = output.max(1)[1]

            intersection, union, new_target = intersectionAndUnionGPU(output, target, args.classes, args.ignore_label)
            intersection, union, target, new_target = intersection.cpu().numpy(), union.cpu().numpy(), target.cpu().numpy(), new_target.cpu().numpy()
            intersection_meter.update(intersection), union_meter.update(union), target_meter.update(new_target)
                
            subcls = subcls[0].cpu().numpy()[0]
            class_intersection_meter[(subcls-1)%split_gap] += intersection[1]
            class_union_meter[(subcls-1)%split_gap] += union[1] 

            accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
            loss_meter.update(loss.item(), input.size(0))
            batch_time.update(time.time() - end)
            end = time.time()
            if ((i + 1) % (test_num/100) == 0) and main_process():
                logger.info('Test: [{}/{}] '
                            'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                            'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                            'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) '
                            'Accuracy {accuracy:.4f}.'.format(iter_num* args.batch_size_val, test_num,
                                                              data_time=data_time,
                                                              batch_time=batch_time,
                                                              loss_meter=loss_meter,
                                                              accuracy=accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    
    class_iou_class = []
    class_miou = 0
    for i in range(len(class_intersection_meter)):
        class_iou = class_intersection_meter[i] / (class_union_meter[i] + 1e-10)
        class_iou_class.append(class_iou)
        class_miou += class_iou
    class_miou = class_miou * 1.0 / len(class_intersection_meter)
    if main_process():
        logger.info('meanIoU---Val result: mIoU {:.4f}.'.format(class_miou))
        for i in range(split_gap):
            logger.info('Class_{} Result: iou {:.4f}.'.format(i + 1, class_iou_class[i]))

    if main_process():
        logger.info('FBIoU---Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))
        for i in range(args.classes):
            logger.info('Class_{} Result: iou/accuracy {:.4f}/{:.4f}.'.format(i, iou_class[i], accuracy_class[i]))
        logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')

    print('avg inference time: {:.4f}, count: {}'.format(model_time.avg, test_num))
    return loss_meter.avg, mIoU, mAcc, allAcc, class_miou
Code example #29
def cal_acc(data_list, pred_folder, classes, names):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        pred = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                          cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        intersection, union, target = intersectionAndUnion(
            pred, target, classes)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        logger.info(
            'Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(
                i + 1, len(data_list), image_name + '.png', accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
Code example #30
def train(train_loader, model, optimizer, epoch):
    ## step.1 set up the evaluation meters, updated as training proceeds
    batch_time = AverageMeter()
    data_time = AverageMeter()
    main_loss_meter = AverageMeter()
    aux_loss_meter = AverageMeter()
    loss_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    model.train()
    end = time.time()
    max_iter = args.epochs * len(train_loader)

    ## step.2 inner loop over the training batches
    for i, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - end)
        if args.zoom_factor != 8:
            h = int((target.size()[1] - 1) / 8 * args.zoom_factor + 1)
            w = int((target.size()[2] - 1) / 8 * args.zoom_factor + 1)
            # 'nearest' mode doesn't support align_corners mode and 'bilinear' mode is fine for downsampling
            target = F.interpolate(target.unsqueeze(1).float(),
                                   size=(h, w),
                                   mode='bilinear',
                                   align_corners=True).squeeze(1).long()
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        output, main_loss, aux_loss = model(input, target)  # predictions and losses
        if not args.multiprocessing_distributed:
            main_loss, aux_loss = torch.mean(main_loss), torch.mean(aux_loss)
        loss = main_loss + args.aux_weight * aux_loss

        ## step.3 backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        n = input.size(0)  # per-GPU batch size
        if args.multiprocessing_distributed:
            main_loss, aux_loss, loss = main_loss.detach(
            ) * n, aux_loss * n, loss * n  # not considering ignore pixels
            count = target.new_tensor([n], dtype=torch.long)
            dist.all_reduce(main_loss), dist.all_reduce(
                aux_loss), dist.all_reduce(loss), dist.all_reduce(count)
            n = count.item()
            main_loss, aux_loss, loss = main_loss / n, aux_loss / n, loss / n

        ## step.4 update the evaluation statistics
        intersection, union, target = intersectionAndUnionGPU(
            output, target, args.classes, args.ignore_label)
        if args.multiprocessing_distributed:
            dist.all_reduce(intersection), dist.all_reduce(
                union), dist.all_reduce(target)
        intersection, union, target = intersection.cpu().numpy(), union.cpu(
        ).numpy(), target.cpu().numpy()
        intersection_meter.update(intersection), union_meter.update(
            union), target_meter.update(target)

        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        main_loss_meter.update(main_loss.item(), n)
        aux_loss_meter.update(aux_loss.item(), n)
        loss_meter.update(loss.item(), n)
        batch_time.update(time.time() - end)
        end = time.time()

        ## step.5 adjust the learning rate
        current_iter = epoch * len(train_loader) + i + 1
        current_lr = poly_learning_rate(args.base_lr,
                                        current_iter,
                                        max_iter,
                                        power=args.power)
        for index in range(0, args.index_split):
            optimizer.param_groups[index]['lr'] = current_lr  # base learning rate for the backbone
        for index in range(args.index_split, len(optimizer.param_groups)):
            optimizer.param_groups[index]['lr'] = current_lr * 10  # 10x learning rate for the prediction head
        remain_iter = max_iter - current_iter
        remain_time = remain_iter * batch_time.avg
        t_m, t_s = divmod(remain_time, 60)
        t_h, t_m = divmod(t_m, 60)
        remain_time = '{:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m),
                                                    int(t_s))  # estimated remaining time

        ## step.6 logging
        if (i + 1) % args.print_freq == 0 and main_process():
            logger.info('Epoch: [{}/{}][{}/{}] '
                        'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                        'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                        'Remain {remain_time} '
                        'MainLoss {main_loss_meter.val:.4f} '
                        'AuxLoss {aux_loss_meter.val:.4f} '
                        'Loss {loss_meter.val:.4f} '
                        'Accuracy {accuracy:.4f}.'.format(
                            epoch + 1,
                            args.epochs,
                            i + 1,
                            len(train_loader),
                            batch_time=batch_time,
                            data_time=data_time,
                            remain_time=remain_time,
                            main_loss_meter=main_loss_meter,
                            aux_loss_meter=aux_loss_meter,
                            loss_meter=loss_meter,
                            accuracy=accuracy))
        if main_process():
            writer.add_scalar('loss_train_batch', main_loss_meter.val,
                              current_iter)
            writer.add_scalar('mIoU_train_batch',
                              np.mean(intersection / (union + 1e-10)),
                              current_iter)
            writer.add_scalar('mAcc_train_batch',
                              np.mean(intersection / (target + 1e-10)),
                              current_iter)
            writer.add_scalar('allAcc_train_batch', accuracy, current_iter)

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
    if main_process():
        logger.info(
            'Train result at epoch [{}/{}]: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'
            .format(epoch + 1, args.epochs, mIoU, mAcc, allAcc))
    return main_loss_meter.avg, mIoU, mAcc, allAcc
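
The schedule in step.5 comes from a poly_learning_rate helper that is not shown in these excerpts. It is assumed to implement the standard polynomial decay lr = base_lr * (1 - curr_iter / max_iter) ** power used by most PSPNet-style trainers:

def poly_learning_rate(base_lr, curr_iter, max_iter, power=0.9):
    # Polynomial ("poly") decay: starts at base_lr and falls to 0 at max_iter.
    return base_lr * (1 - float(curr_iter) / max_iter) ** power
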