def train(args, train_loader, model, criterion, criterion1, optimizer, epoch):

    # switch to train mode
    model.train()

    iouEvalTrain = iouEval(args.classes)
    iouDiagEvalTrain = iouEval(args.diagClasses)

    epoch_loss = []
    class_loss = []

    total_batches = len(train_loader)
    for i, (input, target, target2) in enumerate(train_loader):
        start_time = time.time()

        if args.onGPU == True:
            input = input.cuda()
            target = target.cuda()
            target2 = target2.cuda()

        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        target2_var = torch.autograd.Variable(target2)

        # run the model
        output, output1 = model(input_var)

        #set the grad to zero
        optimizer.zero_grad()
        loss = criterion(output, target_var)
        loss1 = criterion1(output1, target2_var)

        optimizer.zero_grad()
        loss1.backward(
            retain_graph=True
        )  # keep the computation graph of the classification branch so that it
        # can be reused when backpropagating the segmentation loss
        loss.backward()
        optimizer.step()

        epoch_loss.append(loss.item())
        class_loss.append(loss1.item())
        time_taken = time.time() - start_time

        #compute the confusion matrix
        iouEvalTrain.addBatch(output.max(1)[1].data, target_var.data)
        iouDiagEvalTrain.addBatch(output1.max(1)[1].data, target2_var.data)

        print('[%d/%d] loss: %.3f time:%.2f' %
              (i, total_batches, loss.item(), time_taken))

    average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss)
    average_epoch_class_loss = sum(class_loss) / len(class_loss)

    overall_acc, per_class_acc, per_class_iu, mIOU = iouEvalTrain.getMetric()
    overall_acc1, per_class_acc1, per_class_iu1, mIOU1 = iouDiagEvalTrain.getMetric(
    )

    return average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU, average_epoch_class_loss, overall_acc1, per_class_acc1, per_class_iu1, mIOU1
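An equivalent pattern for the two-branch update above, sketched here for reference only (it is not part of the original snippet), is to sum the two losses and call backward once, which avoids retaining the graph between backward passes:

# Sketch only: single combined backward for the multi-task update above.
# Assumes the same model, criteria and optimizer as in the snippet.
optimizer.zero_grad()
output, output1 = model(input_var)
loss = criterion(output, target_var)      # segmentation branch
loss1 = criterion1(output1, target2_var)  # classification branch
(loss + loss1).backward()                 # one backward through the shared graph
optimizer.step()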
Example #2
def train(args, train_loader, model, criteria, criterion1, optimizer, epoch):
    model.train()
    iouEvalTrain = iouEval(args.classes)
    iouDiagEvalTrain = iouEval(args.attrClasses)
    epoch_loss = []
    class_loss = []
    total_batches = len(train_loader)
    # print(total_batches)
    for i, (input, target, target2) in enumerate(train_loader):
        # print("input: ", input.size()[:], target.size()[:], input.size(0))
        start_time = time.time()
        if args.onGPU == True:
            input = input.cuda()
            target = target.cuda()
            target2 = target2.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        target2_var = torch.autograd.Variable(target2)
        output, output1 = model(input_var)
        # print('==================')
        # print(output.shape)
        # print(target_var.shape)
        # print('==================')
        optimizer.zero_grad()
        loss = criteria(output, target_var)
        loss1 = criterion1(output1, target2_var)
        optimizer.zero_grad()
        loss1.backward(
            retain_graph=True
        )  # keep the computation graph of the classification branch so that it
        # can be reused when backpropagating the segmentation loss
        loss.backward()
        optimizer.step()

        epoch_loss.append(loss.item())  #loss.data[0]
        class_loss.append(loss1.item())  #loss.data[0]
        time_taken = time.time() - start_time

        iouEvalTrain.addBatch(output.max(1)[1].data, target_var.data)
        iouDiagEvalTrain.addBatch(output1.max(1)[1].data, target2_var.data)

        # print('[%d/%d] loss: %.3f time:%.2f' % (i, total_batches, loss.item(), time_taken))

    average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss)
    overall_acc, per_class_acc, per_class_iu, mIOU = iouEvalTrain.getMetric()
    average_epoch_class_loss = sum(class_loss) / len(class_loss)
    overall_acc1, per_class_acc1, per_class_iu1, mIOU1 = iouDiagEvalTrain.getMetric(
    )

    return average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU, average_epoch_class_loss, overall_acc1, per_class_acc1, per_class_iu1, mIOU1
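The iouEval helper used throughout these examples is not shown here. A minimal confusion-matrix-based sketch of the interface the loops assume (an addBatch/add_batch method and a getMetric/get_metric method) could look like the following; it is hypothetical, expects numpy arrays of class indices, and is not the original class:

import numpy as np

class SimpleIouEval:
    """Hypothetical stand-in for iouEval: accumulates a confusion matrix."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.hist = np.zeros((num_classes, num_classes), dtype=np.int64)

    def add_batch(self, pred, gt):
        # pred, gt: integer arrays of the same shape containing class indices
        pred, gt = np.asarray(pred).reshape(-1), np.asarray(gt).reshape(-1)
        mask = (gt >= 0) & (gt < self.num_classes)
        idx = self.num_classes * gt[mask] + pred[mask]
        self.hist += np.bincount(idx, minlength=self.num_classes ** 2).reshape(
            self.num_classes, self.num_classes)

    def get_metric(self):
        tp = np.diag(self.hist)
        with np.errstate(divide='ignore', invalid='ignore'):
            overall_acc = tp.sum() / self.hist.sum()
            per_class_acc = tp / self.hist.sum(axis=1)
            per_class_iu = tp / (self.hist.sum(axis=1) + self.hist.sum(axis=0) - tp)
        return overall_acc, per_class_acc, per_class_iu, np.nanmean(per_class_iu)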
def val(args, val_loader, model, criterion, criterion1):
    #switch to evaluation mode
    model.eval()

    iouEvalVal = iouEval(args.classes)
    iouDiagEvalVal = iouEval(args.diagClasses)

    epoch_loss = []
    class_loss = []

    total_batches = len(val_loader)
    for i, (input, target, target2) in enumerate(val_loader):
        start_time = time.time()

        if args.onGPU == True:
            input = input.cuda()
            target = target.cuda()
            target2 = target2.cuda()

        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        target2_var = torch.autograd.Variable(target2)

        # run the model
        output, output1 = model(input_var)

        # compute the loss
        loss = criterion(output, target_var)
        loss1 = criterion1(output1, target2_var)

        epoch_loss.append(loss.item())
        class_loss.append(loss1.item())

        time_taken = time.time() - start_time

        # compute the confusion matrix
        iouEvalVal.addBatch(output.max(1)[1].data, target_var.data)
        iouDiagEvalVal.addBatch(output1.max(1)[1].data, target2_var.data)

        print('[%d/%d] loss: %.3f time: %.2f' %
              (i, total_batches, loss.item(), time_taken))

    average_epoch_loss_val = sum(epoch_loss) / len(epoch_loss)
    average_epoch_class_loss = sum(class_loss) / len(class_loss)

    overall_acc, per_class_acc, per_class_iu, mIOU = iouEvalVal.getMetric()
    overall_acc1, per_class_acc1, per_class_iu1, mIOU1 = iouDiagEvalVal.getMetric(
    )

    return average_epoch_loss_val, overall_acc, per_class_acc, per_class_iu, mIOU, average_epoch_class_loss, overall_acc1, per_class_acc1, per_class_iu1, mIOU1
Example #4
def train(args,
          train_loader,
          model,
          criterion,
          optimizer,
          epoch,
          max_batches,
          cur_iter=0):
    # switch to train mode
    model.train()
    iou_eval_train = iouEval(args.classes)
    epoch_loss = []

    total_batches = len(train_loader)
    for iter, (input, target) in enumerate(train_loader):
        start_time = time.time()

        # adjust the learning rate
        lr = adjust_learning_rate(args, optimizer, epoch, iter + cur_iter,
                                  max_batches)

        if args.gpu == True:
            input = input.cuda()
            target = target.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # run the model
        output = model(input_var)

        if not args.gpu or torch.cuda.device_count() <= 1:
            pred1, pred2, pred3, pred4 = tuple(output)
            loss = criterion(pred1, pred2, pred3, pred4, target_var)  #
        else:
            loss = criterion(output, target_var)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        epoch_loss.append(loss.data.item())
        time_taken = time.time() - start_time

        # compute the confusion matrix
        if args.gpu and torch.cuda.device_count() > 1:
            output = gather(output, 0, dim=0)[0]
        else:
            output = output[0]
        iou_eval_train.add_batch(
            output.max(1)[1].data.cpu().numpy(),
            target_var.data.cpu().numpy())

        print('[%d/%d] lr: %.7f loss: %.3f time:%.3f' %
              (iter, total_batches, lr, loss.data.item(), time_taken))

    average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss)
    overall_acc, per_class_acc, per_class_iu, mIOU = iou_eval_train.get_metric(
    )

    return average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU, lr
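adjust_learning_rate is defined elsewhere in that script. A common choice for this kind of per-iteration schedule is a polynomial ("poly") decay over the total number of iterations, sketched below; this is hypothetical (it assumes args.lr and args.max_epochs exist) and the original helper may differ:

def adjust_learning_rate(args, optimizer, epoch, cur_iter, max_batches, power=0.9):
    # Hypothetical poly schedule: lr = base_lr * (1 - iter / total_iters) ** power
    total_iters = max_batches * args.max_epochs
    lr = args.lr * (1.0 - float(cur_iter) / total_iters) ** power
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr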
Example #5
def val(args, val_loader, model, criteria):
    model.eval()
    iouEvalVal = iouEval(args.classes)
    epoch_loss = []
    total_batches = len(val_loader)
    for i, (input, target) in enumerate(val_loader):
        start_time = time.time()
        if args.onGPU == True:
            input = input.cuda()
            target = target.cuda()
        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)
        output = model(input_var)

        loss = criteria(output, target_var)

        epoch_loss.append(loss.item())
        time_taken = time.time() - start_time

        iouEvalVal.addBatch(output.max(1)[1].data, target_var.data)
        # print('[%d/%d] loss: %.3f time:%.2f' % (i, total_batches, loss.item(), time_taken))

    average_epoch_loss_val = sum(epoch_loss) / len(epoch_loss)
    overall_acc, per_class_acc, per_class_iu, mIOU = iouEvalVal.getMetric()

    return average_epoch_loss_val, overall_acc, per_class_acc, per_class_iu, mIOU
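Variable(..., volatile=True), used in several of the validation loops in this listing, was removed after PyTorch 0.4. In current versions the same effect is obtained by wrapping the loop in torch.no_grad(); a sketch of the equivalent modern form of the loop above:

with torch.no_grad():  # disables autograd, as volatile=True used to do
    for i, (input, target) in enumerate(val_loader):
        if args.onGPU:
            input, target = input.cuda(), target.cuda()
        output = model(input)
        loss = criteria(output, target)
        epoch_loss.append(loss.item())
        iouEvalVal.addBatch(output.max(1)[1], target)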
Example #6
def validation(epoch, model, criterion, optimizer, val_loader):
    iouEvalVal = iouEval(nclass)
    model.eval()
    step = 0
    epoch_loss = 0
    epoch_mIOU = 0
    epoch_acc = 0.
    dt_size = len(val_loader.dataset)
    with torch.no_grad():
        for x, y in val_loader:
            step += 1
            inputs = x.to(device)
            labels = y.to(device)
            # zero the parameter gradients
            # forward
            #outputs = model(inputs)
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            epoch_loss += loss.item()
            print("%d/%d,val_loss:%0.5f " %
                  (step, len(val_loader), loss.item()))
            #mIOU
            output = torch.softmax(outputs, dim=1)
            iouEvalVal.addBatch(output.max(1)[1].data, labels.data)
        overall_acc, per_class_acc, per_class_iou, mIOU = iouEvalVal.getMetric(
        )
    print("epoch %d val_loss:%0.5f " % (epoch + 1, epoch_loss / step))
    print("overall_acc :", overall_acc)
    print("per_class_acc :", per_class_acc)
    print("per_class_iou :", per_class_iou)
    print("mIOU :", mIOU)
    return epoch_loss / step, overall_acc, per_class_acc, per_class_iou, mIOU
Example #7
def train(args, train_loader, model, criterion, optimizer, epoch):
    '''
    :param args: general arguments
    :param train_loader: loader for the training dataset
    :param model: model
    :param criterion: loss function
    :param optimizer: optimization algorithm, such as Adam or SGD
    :param epoch: epoch number
    :return: average epoch loss, overall pixel-wise accuracy, per class accuracy, per class iu, and mIOU
    '''
    # switch to train mode
    model.train()

    iouEvalTrain = iouEval(args.classes)

    epoch_loss = []

    total_batches = len(train_loader)
    print("training")
    print("gpu id:{}".format(args.gpu_id))
    for i, (input, target) in enumerate(train_loader):
        start_time = time.time()

        if type(args.gpu_id) is int:
            device = "cuda:{}".format(args.gpu_id)
            input = input.to(device)
            target = target.to(device)
        else:
            print("CPU")

        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # run the model
        output = model(input_var)

        #set the grad to zero
        optimizer.zero_grad()
        loss = criterion(output, target_var)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        #epoch_loss.append(loss.data[0])
        epoch_loss.append(loss.data)
        time_taken = time.time() - start_time

        #compute the confusion matrix
        iouEvalTrain.addBatch(output.max(1)[1].data, target_var.data)

        print('[%d/%d] loss: %.3f time:%.2f' %
              (i, total_batches, loss.data, time_taken))

    average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss)

    overall_acc, per_class_acc, per_class_iu, mIOU = iouEvalTrain.getMetric()

    return average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU
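When args.gpu_id is an int, the loop above moves each batch to that device, which assumes the caller has already placed the model there; a one-line sketch of that caller-side setup (not part of the original function):

if type(args.gpu_id) is int:
    model = model.to("cuda:{}".format(args.gpu_id))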
Example #8
def train(args, train_loader, model, Dice, optimizer, epoch):
    # switch to train mode
    model.train()

    iouEvalTrain = iouEval(args.classes)

    epoch_loss = []
    # total_loss =0
    total_batches = len(train_loader)
    for i, (input, target) in enumerate(train_loader):
        start_time = time.time()

        if args.onGPU == True:
            input = input.cuda()
            target = target.cuda()

        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # run the model

        # import matplotlib.pyplot as plt
        # t = target_var.cpu().numpy()
        # plt.imshow(t[0])
        # plt.show()

        target_var = target_var / 255

        output = model(input_var)
        # output = F.sigmoid(output)

        # set the grad to zero
        optimizer.zero_grad()

        # loss = critaria(output, target_var)

        loss = Dice(output, target_var)

        #optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # total_loss += loss
        epoch_loss.append(loss.item())
        # time_taken = time.time() - start_time

        # compute the confusion matrix
        iouEvalTrain.addBatch(output.max(1)[1].data, target_var.data)

        # print('[%d/%d] loss: %.3f time:%.2f' % (i, total_batches, loss.data[0], time_taken))
    #
    average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss)

    overall_acc, per_class_acc, iou, mIOU = iouEvalTrain.getMetric()

    return average_epoch_loss_train, overall_acc, per_class_acc, iou, mIOU
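The Dice criterion passed into this train function (and into the matching val with a Dice argument later in this listing) is not defined in these snippets. A minimal soft Dice loss for the two-class case, with targets scaled to {0, 1} as above, might look like this hypothetical sketch; the original implementation may differ:

import torch
import torch.nn as nn

class SoftDiceLoss(nn.Module):
    """Hypothetical soft Dice loss for 2-class logits; not the original Dice."""

    def __init__(self, smooth=1.0):
        super().__init__()
        self.smooth = smooth

    def forward(self, output, target):
        # output: (N, 2, H, W) logits; target: (N, H, W) with values in {0, 1}
        prob = torch.softmax(output, dim=1)[:, 1]  # foreground probability
        target = target.float()
        intersection = (prob * target).sum(dim=(1, 2))
        union = prob.sum(dim=(1, 2)) + target.sum(dim=(1, 2))
        dice = (2.0 * intersection + self.smooth) / (union + self.smooth)
        return 1.0 - dice.mean()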
    def __init__(self,
                 staining_type,
                 annotation_dir,
                 target_list,
                 detect_list_file,
                 iou_threshold,
                 output_file,
                 output_dir,
                 wsi_dir,
                 gt_png_dir,
                 seg_gt_json_dir,
                 window_size,
                 seg_pred_json_dir,
                 nclasses,
                 no_save=False,
                 start=0,
                 end=0):
        super(Generate_Segmentation_Gt, self).__init__(annotation_dir,
                                                       staining_type)
        self.MARGIN = 20  # 20 micrometre
        self.iou_threshold = iou_threshold
        self.detect_list_file = detect_list_file
        self.output_file = output_file
        self.output_dir = output_dir
        self.image_ext = ['.PNG', '.png']
        self.detected_glomus_list = {}
        self.detected_patient_id = []
        self.image = None
        self.overlap_d = {
        }  # overlap_list  #key: date, value: [{"gt":gt, "pred":found_rect, "iou": iou, "json": json_file_name_l[0]}]
        self.seg_gt_json_dir = seg_gt_json_dir
        self.seg_pred_json_dir = seg_pred_json_dir
        self.wsi_dir = wsi_dir
        self.window_size = window_size
        self.annotation_file_date_pattern = r'^\d{8}_(.+)'
        self.re_annotation_file_date_pattern = re.compile(
            self.annotation_file_date_pattern)
        self.glomus_category = ['glomerulus', 'glomerulus-kana']
        '''Flag indicating that the visualization result should not be saved.'''
        self.no_save = no_save

        self.target_list = target_list
        self.start = start
        self.end = end
        od = OrderedDict()
        od['glomerulus'] = 1
        od['crescent'] = 2
        od['collapsing'] = 3
        od['sclerosis'] = 3
        od['mesangium'] = 4
        od['poler_mesangium'] = 4
        self.target_dic = {'all': od}
        self.nclasses = nclasses
        self.iouEvalVal = iouEval(self.nclasses)
Example #10
def train(args, train_loader, model, criterion, optimizer, epoch):
    '''
    :param args: general arguments
    :param train_loader: loader for the training dataset
    :param model: model
    :param criterion: loss function
    :param optimizer: optimization algorithm, such as Adam or SGD
    :param epoch: epoch number
    :return: average epoch loss, overall pixel-wise accuracy, per class accuracy, per class iu, and mIOU
    '''
    # switch to train mode
    model.train()

    iouEvalTrain = iouEval(args.classes)

    epoch_loss = []

    total_batches = len(train_loader)
    for i, (input, target) in enumerate(train_loader):
        start_time = time.time()

        if args.onGPU:
            input = input.cuda(non_blocking=True) #torch.autograd.Variable(input, volatile=True)
            target = target.cuda(non_blocking=True)

        # run the model
        output1, output2 = model(input)

        #set the grad to zero
        optimizer.zero_grad()
        loss1 = criterion(output1, target)
        loss2 = criterion(output2, target)
        loss = loss1 + loss2

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        epoch_loss.append(loss.item())
        time_taken = time.time() - start_time

        #compute the confusion matrix
        iouEvalTrain.addBatch(output1.max(1)[1].data, target.data)

        print('[%d/%d] loss: %.3f time:%.2f' % (i, total_batches, loss.item(), time_taken))

    average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss)

    overall_acc, per_class_acc, per_class_iu, mIOU = iouEvalTrain.getMetric()

    return average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU
Example #11
def val(args, val_loader, model, criterion):
    '''
    :param args: general arguments
    :param val_loader: loader for the validation dataset
    :param model: model
    :param criterion: loss function
    :return: average epoch loss, overall pixel-wise accuracy, per class accuracy, per class iu, and mIOU
    '''
    #switch to evaluation mode
    model.eval()

    iouEvalVal = iouEval(args.classes)

    epoch_loss = []

    total_batches = len(val_loader)
    for i, (input, target) in enumerate(val_loader):
        start_time = time.time()

        if args.onGPU == True:
            input = input.cuda()
            target = target.cuda()

        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)

        # run the model
        output = model(input_var)

        # compute the loss
        loss = criterion(output, target_var)

        epoch_loss.append(loss.item())

        time_taken = time.time() - start_time

        # compute the confusion matrix
        iouEvalVal.addBatch(output.max(1)[1].data, target_var.data)

        print('validation [%d/%d] loss: %.3f time: %.2f' %
              (i, total_batches, loss.item(), time_taken))
        if i > 4:
            print("{} fits in memory!".format(val_loader))
            # break

    average_epoch_loss_val = sum(epoch_loss) / len(epoch_loss)

    overall_acc, per_class_acc, per_class_iu, mIOU = iouEvalVal.getMetric()

    return average_epoch_loss_val, overall_acc, per_class_acc, per_class_iu, mIOU
Example #12
def validate(args, model, image_list, label_list, crossVal, mean, std):
    iou_eval_val = iouEval(args.classes)
    for idx in range(len(image_list)):
        image = cv2.imread(image_list[idx]) / 255
        image = image[:, :, ::-1]
        label = cv2.imread(label_list[idx], 0) / 255

        img = image.astype(np.float32)
        img = ((img - mean) / std).astype(np.float32)
        img = cv2.resize(img, (args.width, args.height))
        img = img.transpose((2, 0, 1))
        img_variable = Variable(torch.from_numpy(img).unsqueeze(0))
        if args.gpu:
            img_variable = img_variable.cuda()

        start_time = time.time()
        img_out = model(img_variable)[0]

        torch.cuda.synchronize()
        diff_time = time.time() - start_time
        print('Segmentation for {}/{} takes {:.3f}s per image'.format(
            idx, len(image_list), diff_time))

        class_numpy = img_out[0].max(0)[1].data.cpu().numpy()
        label = cv2.resize(label, (512, 512), interpolation=cv2.INTER_NEAREST)
        iou_eval_val.add_batch(class_numpy, label)

        out_numpy = (class_numpy * 255).astype(np.uint8)
        name = image_list[idx].split('/')[-1]
        if not osp.isdir(osp.join(args.savedir, args.data_name)):
            os.mkdir(osp.join(args.savedir, args.data_name))
        if not osp.isdir(
                osp.join(args.savedir, args.data_name, args.model_name)):
            os.mkdir(osp.join(args.savedir, args.data_name, args.model_name))
        if not osp.isdir(
                osp.join(args.savedir, args.data_name, args.model_name,
                         'crossVal' + str(crossVal))):
            os.mkdir(
                osp.join(args.savedir, args.data_name, args.model_name,
                         'crossVal' + str(crossVal)))
        cv2.imwrite(
            osp.join(args.savedir, args.data_name, args.model_name,
                     'crossVal' + str(crossVal), name[:-4] + '.png'),
            out_numpy)

    overall_acc, per_class_acc, per_class_iu, mIOU = iou_eval_val.get_metric()
    print('Overall Acc (Val): %.4f\t mIOU (Val): %.4f' % (overall_acc, mIOU))
    return mIOU
Example #13
def train_model(model, criterion, optimizer, train_loader, scheduler, epoch,
                num_epochs):
    iouEvalTrain = iouEval(nclass)
    #scheduler.step()
    dt_size = len(train_loader.dataset)
    epoch_loss = 0
    step = 0
    #num_correct = 0
    for x, y in train_loader:
        num_correct = 0
        step += 1
        inputs = x.to(device)
        labels = y.to(device)
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward
        #outputs = model(inputs)
        outputs = model(inputs)

        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()

        output = torch.softmax(outputs, dim=1)
        iouEvalTrain.addBatch(output.max(1)[1].data, labels.data)

        print("%d/%d,train_loss:%0.5f " % (step, len(train_loader), loss))
        ####################################################
        print(model.arch_parameters())  #for branch search
        ####################################################
    overall_acc, per_class_acc, per_class_iou, mIOU = iouEvalTrain.getMetric()
    print("overall_acc :", overall_acc)
    print("per_class_acc :", per_class_acc)
    print("per_class_iou :", per_class_iou)
    print("mIOU :", mIOU)
    dirName = "./models/road_BaseDownSample60ep_256_512/"
    if not os.path.exists(dirName):
        os.mkdir(dirName)
        print("Directory ", dirName, " Created ")
    if epoch % 5 == 4:
        torch.save(
            model.state_dict(),
            dirName + 'road_BaseDownSample60ep_256_512_weights_epoch_%d.pth' %
            (epoch + 1))
    return epoch_loss / step, overall_acc, per_class_acc, per_class_iou, mIOU
Example #14
def val(args, val_loader, model, Dice):
    # switch to evaluation mode
    model.eval()

    iouEvalVal = iouEval(args.classes)

    epoch_loss = []

    total_batches = len(val_loader)
    for i, (input, target) in enumerate(val_loader):
        start_time = time.time()

        if args.onGPU == True:
            input = input.cuda()
            target = target.cuda()

        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)

        # run the model
        output = model(input_var)

        # compute the loss
        target_var = target_var / 255

        # criteria = torch.nn.CrossEntropyLoss()
        # Dice = dice.DiceLoss().cuda()

        # loss = criteria(output, target_var)

        loss = Dice(output, target_var)

        epoch_loss.append(loss.item())

        time_taken = time.time() - start_time

        # compute the confusion matrix
        iouEvalVal.addBatch(output.max(1)[1].data, target_var.data)

        # print('epoch:%d  [%d/%d] loss: %.3f  time: %.2f' % (i, i, total_batches, loss.data[0], time_taken))

    average_epoch_loss_val = sum(epoch_loss) / len(epoch_loss)

    overall_acc, per_class_acc, iou, mIOU = iouEvalVal.getMetric()

    return average_epoch_loss_val, overall_acc, per_class_acc, iou, mIOU
Example #15
def val(args, val_loader, model, criterion):
    # switch to evaluation mode
    model.eval()
    iou_eval_val = iouEval(args.classes)
    epoch_loss = []

    total_batches = len(val_loader)
    for iter, (input, target) in enumerate(val_loader):
        start_time = time.time()

        if args.gpu:
            input = input.cuda()
            target = target.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # run the model
        output = model(input_var)

        torch.cuda.synchronize()
        time_taken = time.time() - start_time

        # compute the loss
        if not args.gpu or torch.cuda.device_count() <= 1:
            pred1, pred2, pred3, pred4 = tuple(output)
            loss = criterion(pred1, pred2, pred3, pred4, target_var)  #
        else:
            loss = criterion(output, target_var)
        epoch_loss.append(loss.data.item())

        # compute the confusion matrix
        if args.gpu and torch.cuda.device_count() > 1:
            output = gather(output, 0, dim=0)[0]
        else:
            output = output[0]
        iou_eval_val.add_batch(
            output.max(1)[1].data.cpu().numpy(),
            target_var.data.cpu().numpy())

        print('[%d/%d] loss: %.3f time: %.3f' %
              (iter, total_batches, loss.data.item(), time_taken))

    average_epoch_loss_val = sum(epoch_loss) / len(epoch_loss)
    overall_acc, per_class_acc, per_class_iu, mIOU = iou_eval_val.get_metric()

    return average_epoch_loss_val, overall_acc, per_class_acc, per_class_iu, mIOU
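The gather branch above assumes a multi-GPU setup in which the model (and possibly the criterion) is replicated across devices. A sketch of the assumed imports and wrapping; the original script may configure this differently:

import torch.nn as nn
from torch.nn.parallel import gather  # collects per-GPU outputs onto one device

if args.gpu and torch.cuda.device_count() > 1:
    model = nn.DataParallel(model).cuda()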
def train(args, train_loader, model, criterion, optimizer, epoch):
    # switch to train mode
    model.train()

    iouEvalTrain = iouEval(args.classes)

    epoch_loss = []

    total_batches = len(train_loader)
    for i, (input, target) in enumerate(train_loader):
        start_time = time.time()

        if args.onGPU == True:
            input = input.cuda()
            target = target.cuda()

        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # run the model
        output = model(input_var)

        # set the grad to zero
        optimizer.zero_grad()
        loss = criterion(output, target_var)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        epoch_loss.append(loss.item())
        time_taken = time.time() - start_time

        # compute the confusion matrix
        iouEvalTrain.addBatch(output.max(1)[1].data, target_var.data)

        print('[%d/%d] loss: %.3f time:%.2f' %
              (i, total_batches, loss.item(), time_taken))

    average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss)

    overall_acc, per_class_acc, per_class_iu, mIOU = iouEvalTrain.getMetric()

    return average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU
Example #17
def train(args, train_loader, model, criterion, optimizer, epoch):
    # switch to train mode
    model.train()

    iouEvalTrain = iouEval(args.classes)

    epoch_loss = []

    total_batches = len(train_loader)
    for i, (inp, inputA, inputB, inputC, target) in enumerate(train_loader):
        #continue
        start_time = time.time()
        input = torch.cat([inp, inputA, inputB, inputC], 1)  # concatenate along the channel dim (dim 0 is the batch)

        if args.onGPU == True:
            input = input.cuda()
            target = target.cuda()

        # If you are using PyTorch > 0.3, you don't need Variable;
        # you can use torch.enable_grad() instead. See the PyTorch documentation for details.
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        output = model(input_var)  #, output_down, dec_out
        # set the grad to zero
        optimizer.zero_grad()
        loss = criterion(output, target_var)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # If you are using PyTorch > 0.3, use loss.item() instead of loss.data[0]
        epoch_loss.append(loss.data[0])
        time_taken = time.time() - start_time

        # compute the confusion matrix
        iouEvalTrain.addBatch(output.max(1)[1].data, target_var.data)
        print('[%d/%d] loss: %.3f time:%.2f' %
              (i, total_batches, loss.data[0], time_taken))

    average_epoch_loss_train = np.mean(epoch_loss)
    overall_acc, per_class_acc, per_class_iu, mIOU = iouEvalTrain.getMetric()
    return average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU
Example #18
def val(args, val_loader, model, criterion):
    # switch to evaluation mode
    model.eval()

    iouEvalVal = iouEval(args.classes)

    epoch_loss = []

    total_batches = len(val_loader)
    for i, (inp, inputA, inputB, inputC, target) in enumerate(val_loader):

        start_time = time.time()
        input = torch.cat([inp, inputA, inputB, inputC], 1)  # concatenate along the channel dim (dim 0 is the batch)

        if args.onGPU == True:
            input = input.cuda()
            target = target.cuda()

        # If you are using PyTorch > 0.3, you don't need Variable;
        # you can use torch.no_grad() instead. See the PyTorch documentation for details.
        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)

        output = model(input_var)
        loss = criterion(output, target_var)

        # If you are using PyTorch > 0.3, use loss.item() instead of loss.data[0]
        epoch_loss.append(loss.data[0])

        time_taken = time.time() - start_time

        # compute the confusion matrix
        iouEvalVal.addBatch(output.max(1)[1].data, target_var.data)

        print('[%d/%d] loss: %.3f time: %.2f' %
              (i, total_batches, loss.data[0], time_taken))

    average_epoch_loss_val = np.mean(epoch_loss)

    overall_acc, per_class_acc, per_class_iu, mIOU = iouEvalVal.getMetric()

    return average_epoch_loss_val, overall_acc, per_class_acc, per_class_iu, mIOU
Example #19
def train(args, train_loader, model, criteria, optimizer, epoch):
    model.train()
    iouEvalTrain = iouEval(args.classes)
    epoch_loss = []
    total_batches = len(train_loader)
    # print(total_batches)

    for i, (input, target) in enumerate(train_loader):
        # print("input: ", input.size()[:], target.size()[:], input.size(0))
        start_time = time.time()
        if args.onGPU == True:
            input = input.cuda()
            target = target.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        output = model(input_var)
        # print('==================')
        # print(output.shape)
        # print(target_var.shape)
        # print('==================')
        optimizer.zero_grad()
        loss = criteria(output, target_var)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        epoch_loss.append(loss.item())  #loss.data[0]
        time_taken = time.time() - start_time

        iouEvalTrain.addBatch(output.max(1)[1].data, target_var.data)
        # print('[%d/%d] loss: %.3f time:%.2f' % (i, total_batches, loss.item(), time_taken))

    average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss)
    overall_acc, per_class_acc, per_class_iu, mIOU = iouEvalTrain.getMetric()

    return average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU
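These train/val pairs are normally driven by an outer epoch loop. A hypothetical sketch of such a driver, assuming args.max_epochs exists and using the signatures of the train above (Example #19) and the val of Example #5:

best_miou = 0.0
for epoch in range(args.max_epochs):  # args.max_epochs is assumed to exist
    tr_loss, _, _, _, tr_miou = train(args, train_loader, model, criteria, optimizer, epoch)
    va_loss, _, _, _, va_miou = val(args, val_loader, model, criteria)
    print('epoch %d: train loss %.3f mIOU %.4f | val loss %.3f mIOU %.4f' %
          (epoch, tr_loss, tr_miou, va_loss, va_miou))
    if va_miou > best_miou:  # keep the best checkpoint so far
        best_miou = va_miou
        torch.save(model.state_dict(), 'best_model.pth')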
 def generate_wsi_pred_gt_and_eval(self, file_key, times):
     """
         Abstract: generate WSIs of prediction and ground truth, and evaluate the performance
         Args:
             file_key: [str] directory name, e.g. H16-09557
             times: [int] 
     """
     seg_gt_json_l = glob.glob(
         os.path.join(self.seg_gt_json_dir, file_key, "*.json"))
     seg_pred_json_l = glob.glob(
         os.path.join(self.seg_pred_json_dir, file_key, "*.json"))
     ndpi_l = glob.glob(os.path.join(self.wsi_dir, file_key, "*ndpi"))
     assert len(ndpi_l) == 1
     ndpi_path_s = ndpi_l[0]
     margin_x, margin_y, slide_width, slide_height = self.read_slide_and_cal_margin(
         ndpi_path_s)
     iou_eval_val = iouEval(self.nclasses)
     # make numpy for prediction and ground truth of WSI
     whole_gt_np = np.zeros(
         (int(slide_height / MAGNIFICATION), int(
             slide_width / MAGNIFICATION), 3),
         dtype=int)
     whole_pred_np = np.zeros(
         (int(slide_height / MAGNIFICATION), int(
             slide_width / MAGNIFICATION), 3),
         dtype=int)
     # make ground truth and prediction of window, and confusion_matrix
     for x_ind in range(slide_width // self.window_size + 1):
         xmin = x_ind * self.window_size
         if x_ind == slide_width // self.window_size:
             xmax = slide_width
         else:
             xmax = (x_ind + 1) * self.window_size
         if xmax > slide_width:
             continue
         for y_ind in range(slide_height // self.window_size + 1):
             ymin = y_ind * self.window_size
             if y_ind == slide_height // self.window_size:
                 ymax = slide_height
             else:
                 ymax = (y_ind + 1) * self.window_size
             if ymax > slide_height:
                 continue
             # make ground truth
             gt_np = self.overlay(self.gt_list, times, margin_x, margin_y,
                                  seg_gt_json_l, xmin, ymin, xmax, ymax,
                                  "gt")
             # make prediction result
             pred_np = self.overlay(self.detected_glomus_list[file_key], 1,
                                    0, 0, seg_pred_json_l, xmin, ymin, xmax,
                                    ymax, "pred")
             # make confusion matrix of ground truth and prediction result
             iou_eval_val.addBatch(pred_np, gt_np)
             self.iouEvalVal.addBatch(pred_np, gt_np)
             # make ground truth and prediction result of WSI
             whole_gt_np = self.generate_whole_img([xmin, ymin, xmax, ymax],
                                                   whole_gt_np, gt_np)
             whole_pred_np = self.generate_whole_img(
                 [xmin, ymin, xmax, ymax], whole_pred_np, pred_np)
     # save images
     output_gt_file_name = os.path.join(self.output_dir,
                                        file_key + "_gt.jpg")
     output_pred_file_name = os.path.join(self.output_dir,
                                          file_key + "_pred.jpg")
     cv2.imwrite(output_gt_file_name, whole_gt_np)
     cv2.imwrite(output_pred_file_name, whole_pred_np)
     # calculate performance
     overall_acc, per_class_acc, per_class_iou, mIOU = iou_eval_val.getMetricRight(
     )
     return overall_acc, per_class_acc, per_class_iou, mIOU
def evaluateModel(args, model, up, rgb_image_list, label_image_list, device):
    # global mean and std values (BGR)
    mean = list(map(float, args.mean))
    std = list(map(float, args.std))
    width = args.inWidth
    height = args.inHeight
    print("num of image:{}".format(len(rgb_image_list)))
    iouEvalVal = iouEval(args.classes)
    save_summary_acc = os.path.join(args.savedir, "summary_accuracy.csv")
    save_summary_data = os.path.join(args.savedir, "summary_dataset.csv")
    save_summary_pixel = os.path.join(args.savedir, "summary_pixel.csv")
    dataset_d = defaultdict(lambda :defaultdict(int))
    with open(save_summary_acc, "w") as summary_acc, open(save_summary_data, "w") as summary_data, open(save_summary_pixel, "w") as summary_pixel:
        summary_acc.write("filename,glomerulus, crescent, sclerosis, mesangium, background iou,glomerulus iou,crescent iou,sclerosis iou, mesangium iou,mIoU\n")
        summary_data.write("patient_id, glomerulus, crescent, sclerosis, mesangium\n")
        summary_pixel.write("patient_id, filename, background, glomerulus, crescent, sclerosis, mesangium\n")
        for i, (imgName, labelName) in enumerate(zip(rgb_image_list, label_image_list)):
            print("imgName: {}".format(imgName))
            patient_id = os.path.basename(os.path.dirname(imgName))
            img = cv2.imread(imgName)
            # if args.overlay:
            img_orig = np.copy(img)

            img = img.astype(np.float32)
            for j in range(3):
                img[:, :, j] -= mean[j]
            for j in range(3):
                img[:, :, j] /= std[j]

            # resize the image to the network input size (width x height)
            img = cv2.resize(img, (width, height))

            img /= 255
            img = img.transpose((2, 0, 1))  # HWC -> CHW
            img_tensor = torch.from_numpy(img)
            img_tensor = torch.unsqueeze(img_tensor, 0)  # add a batch dimension
            img_variable = Variable(img_tensor, volatile=True)
            if args.gpu_id >= 0:
                img_variable = img_variable.to(device)
            img_out = model(img_variable)

            if args.modelType == 2:
                img_out = up(img_out)

            classMap_numpy = img_out[0].max(0)[1].byte().cpu().data.numpy()
            classMap_numpy = cv2.resize(classMap_numpy,(img_orig.shape[1], img_orig.shape[0]),interpolation=cv2.INTER_NEAREST)
            if i % 100 == 0:
                print(i)

            name = imgName.split('/')[-1]
            name_rsplit = name.rsplit(".", 1)
            output_dir = os.path.join(args.savedir, patient_id)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            if args.colored:
                classMap_numpy_color = np.zeros((img_orig.shape[0], img_orig.shape[1], img_orig.shape[2]), dtype=np.uint8)
                for idx in range(len(pallete)):
                    [r, g, b] = pallete[idx]
                    classMap_numpy_color[classMap_numpy == idx] = [b, g, r]
                name_c = name_rsplit[0] + "_c" + ".png"
                if args.overlay:
                    overlayed = cv2.addWeighted(img_orig, 0.4, classMap_numpy_color, 0.6, 0)
                    name_over = name_rsplit[0] + "_overlay" + ".jpg"
                    cv2.imwrite(args.savedir + os.sep + patient_id + os.sep + name_over, overlayed)
                    name_org = name_rsplit[0] + "_org" + ".png"
                    cv2.imwrite(args.savedir + os.sep + patient_id + os.sep + name_org, img_orig)
            background_px = np.count_nonzero(classMap_numpy==0)
            glomeruli_px = np.count_nonzero(classMap_numpy==1)
            crescent_px = np.count_nonzero(classMap_numpy==2)
            sclerosis_px = np.count_nonzero(classMap_numpy==3)
            mesangium_px = np.count_nonzero(classMap_numpy==4)
            summary_pixel.write("{},{},{},{},{},{},{}\n".format(patient_id, name.replace(args.img_extn, 'png'), background_px, glomeruli_px, crescent_px, sclerosis_px, mesangium_px))

            if args.cityFormat:
                classMap_numpy = relabel(classMap_numpy.astype(np.uint8))
            # save json file
            boundary_lines = bound2line(classMap_numpy, max_classes=4)
            output_d = {}
            output_d["shapes"] = []
            for idx, label in label_idx.items():
                if idx in boundary_lines and len(boundary_lines[idx]) > 0:
                    for i in range(len(boundary_lines[idx])):
                        b_obj = {
                            "line_color": None,
                            "points": boundary_lines[idx][i].tolist(),
                            "fill_color": None,
                            "label": label,
                        }
                        output_d["shapes"].append(b_obj)
            output_d["lineColor"] = [0, 0, 0, 255]
            output_d["imagePath"] = name
            output_d["flags"] = {}
            output_d["fillColor"] = [0, 0, 0, 255]
            # output_d["imageData"] = utils.img_arr_to_b64(classMap_numpy).decode('utf-8')
            output_d["imageData"] = utils.img_arr_to_b64(img_orig).decode('utf-8')
            output_json_file = os.path.join(output_dir, name.replace(args.img_extn, 'json'))
            with open(output_json_file,'w') as out_json:
                json.dump(output_d, out_json, indent=4)
            # save org img
            output_png_file = os.path.join(output_dir, name.replace(args.img_extn, 'png'))
            # evaluate and generate combined images including original, prediction, ground-truth
            # compute the confusion matrix
            print("labelName: {}".format(labelName))
            if labelName is not None:
                assert os.path.basename(imgName) == os.path.basename(labelName)
                img_label = PILImage.open(labelName)
                img_label = np.asarray(img_label)
                # check original image and label image size
                assert img_label.shape[0] == img_orig.shape[0]
                assert img_label.shape[1] == img_orig.shape[1]
                img_label_re = cv2.resize(img_label, (width, height), interpolation=cv2.INTER_NEAREST)
                unique_values = np.unique(img_label_re)
                img_label_tensor = torch.from_numpy(img_label_re)
                img_label_tensor = torch.unsqueeze(img_label_tensor, 0)  # add a batch dimension
                for i in unique_values.tolist():
                    dataset_d[patient_id][i] += 1
                eachiouEvalVal = iouEval(args.classes)
                _ = iouEvalVal.addBatch(img_out.max(1)[1].data, img_label_tensor)
                hist = eachiouEvalVal.addBatch(img_out.max(1)[1].data, img_label_tensor)
                overall_acc, per_class_acc, per_class_iou, _ = eachiouEvalVal.getMetricRight()
                # write summary
                hist_tp_fn_fp = hist.sum(1) + hist.sum(0) - np.diag(hist)
                per_class_iou_ex = np.diag(hist)[hist_tp_fn_fp > 0.]/hist_tp_fn_fp[hist_tp_fn_fp > 0.]
                per_class_iou_ex = np.diag(hist)[unique_values]/hist_tp_fn_fp[unique_values]
                mIoU_each = np.nanmean(per_class_iou_ex)
                glomeruli = 1 if np.count_nonzero(unique_values==1) else 0
                crescent = 1 if np.count_nonzero(unique_values==2) else 0
                sclerosis = 1 if np.count_nonzero(unique_values==3) else 0
                mesangium = 1 if np.count_nonzero(unique_values==4) else 0
                summary_acc.write("{}/{},{},{},{},{},{},{},{},{},{},{}\n".format(patient_id, name.replace(args.img_extn, 'png'),glomeruli, crescent, sclerosis, mesangium, per_class_iou[0],per_class_iou[1],per_class_iou[2], per_class_iou[3], per_class_iou[4], mIoU_each))
                # generate combined image including original, prediction, ground-truth
                org_height = img_orig.shape[0]
                org_width = img_orig.shape[1]
                classMap_gt_np = np.zeros((img_orig.shape[0], img_orig.shape[1], img_orig.shape[2]), dtype=np.uint8)
                for idx in range(len(pallete)):
                    [r, g, b] = pallete[idx]
                    classMap_gt_np[img_label == idx] = [b, g, r]
                overlayed_gt = cv2.addWeighted(img_orig, 0.4, classMap_gt_np, 0.6, 0)
                combined_np = np.zeros((org_height, org_width*3, 3), dtype=int)
                combined_np[0:org_height, 0:org_width,:] = img_orig
                combined_np[0:org_height, org_width:2*org_width,:] = overlayed_gt
                combined_np[0:org_height, 2*org_width:3*org_width,:] = overlayed
                output_3_dir = os.path.join(args.savedir, "combined_images", patient_id)
                if not os.path.exists(output_3_dir):
                    os.makedirs(output_3_dir)
                output_3_png_file = os.path.join(output_3_dir, name.replace(args.img_extn, 'png'))
                cv2.imwrite(output_3_png_file, combined_np)
        if label_image_list[0] is not None:
            for patient, values_d in dataset_d.items():
                summary_data.write(patient)
                for i in range(1, args.classes):
                    summary_data.write(",{}".format(values_d[i]))
                summary_data.write("\n")
            overall_acc, per_class_acc, per_class_iou, mIOU = iouEvalVal.getMetricRight()
            overall_accuracy_output = os.path.join(args.savedir, "overall_accuracy.txt")
            with open(overall_accuracy_output, "w") as overall_accuracy_output_file:
                overall_accuracy_output_file.write("overall_acc:{}, per_class_acc:{}, per_class_iou:{}, mIOU:{}".format(overall_acc, per_class_acc, per_class_iou, mIOU))
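pallete, used above and in the next example to colour the class maps, is defined elsewhere. For the five classes handled here it would be a list with one RGB triplet per class index; the values below are hypothetical placeholders:

pallete = [
    [0, 0, 0],      # 0: background
    [0, 255, 0],    # 1: glomerulus
    [255, 0, 0],    # 2: crescent
    [0, 0, 255],    # 3: sclerosis
    [255, 255, 0],  # 4: mesangium
]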
Example #22
def val(args, val_loader, model, criterion, epoch):
    '''
    :param args: general arguments
    :param val_loader: loader for the validation dataset
    :param model: model
    :param criterion: loss function
    :return: average epoch loss, overall pixel-wise accuracy, per class accuracy, per class iu, and mIOU
    '''
    #switch to evaluation mode
    model.eval()

    iouEvalVal = iouEval(args.classes)

    epoch_loss = []
    draw = 1  # set to 0 to also save colour-coded prediction maps below

    total_batches = len(val_loader)
    for i, (input, target) in enumerate(val_loader):
        start_time = time.time()

        if type(args.gpu_id) is int:
            device = "cuda:{}".format(args.gpu_id)
            input = input.to(device)
            target = target.to(device)

        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)

        # run the model
        output = model(input_var)

        # compute the loss
        loss = criterion(output, target_var)

        epoch_loss.append(loss.data)

        time_taken = time.time() - start_time

        # compute the confusion matrix
        iouEvalVal.addBatch(output.max(1)[1].data, target_var.data)

        print('[%d/%d] loss: %.3f time: %.2f' %
              (i, total_batches, loss.data, time_taken))
        if draw == 0:
            for ind in range(output.size()[0]):
                classMap_numpy = output[ind].max(
                    0)[1].byte().cpu().data.numpy()
                input_y = 704
                input_x = 664
                classMap_numpy = cv2.resize(classMap_numpy, (input_y, input_x),
                                            interpolation=cv2.INTER_NEAREST)
                print("classMap_numpy shape:{}".format(classMap_numpy.shape))
                outdir = os.path.join(args.savedir, str(epoch))
                if not os.path.exists(outdir):
                    os.makedirs(outdir)
                classMap_numpy_color = np.zeros((input_x, input_y, 3),
                                                dtype=np.uint8)
                print("the shape of classMap_numpy_color :{}".format(
                    classMap_numpy_color.shape))
                for idx in range(len(pallete)):
                    [r, g, b] = pallete[idx]
                    classMap_numpy_color[classMap_numpy == idx] = [b, g, r]
                cv2.imwrite(
                    os.path.join(outdir,
                                 str(i * output.size()[0] + ind) + '.png'),
                    classMap_numpy_color)

    average_epoch_loss_val = sum(epoch_loss) / len(epoch_loss)

    overall_acc, per_class_acc, per_class_iu, mIOU = iouEvalVal.getMetric()

    return average_epoch_loss_val, overall_acc, per_class_acc, per_class_iu, mIOU