def train_model(train_loader, model, criterion, optimizer, epoch, print_freq):
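    # NOTE: the loader argument is ignored; a fresh one is built from the train CSV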
    train_loader = load_dataset(find_csv(args.data, 'train'))
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()

    for i, (target, cont_input, cat_input,
            identification) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        target = target.cuda(non_blocking=True)
        target = target.squeeze()

        # compute output
        output = model(cont_input, cat_input)
        # loss = criterion(output, target)
        loss = criterion_smoothing(output, target)

        # measure accuracy and record loss
        prec1 = accuracy(output, target, topk=(1, ))
        losses.update(loss.item(), target.size(0))
        top1.update(prec1[0].cpu().data.numpy()[0], target.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        # clip gradients before the optimizer step
        gradClamp(model.parameters(), grad_clip)
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            message = ('Epoch: [{0}][{1}/{2}]\t' +
                       'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' +
                       'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' +
                       'Loss {loss.val:.4f} ({loss.avg:.4f})\t' +
                       'Prec@1 {top1.val:.3f} ({top1.avg:.3f})').format(
                           epoch,
                           i,
                           len(train_loader),
                           batch_time=batch_time,
                           data_time=data_time,
                           loss=losses,
                           top1=top1)
            log(message)
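These snippets lean on a handful of helpers defined elsewhere in the repository. A minimal sketch of what AverageMeter, accuracy, and gradClamp might look like, assuming the conventions of the standard PyTorch ImageNet example; the actual implementations in this codebase may differ:

# Hedged sketch: assumed helper implementations, not taken from this repo.
class AverageMeter(object):
    """Tracks the latest value, running sum, count, and average."""

    def __init__(self):
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Precision@k for each k; returns a list of one-element tensors."""
    maxk = max(topk)
    batch_size = target.size(0)

    _, topk_pred = output.topk(maxk, 1, True, True)
    topk_pred = topk_pred.t()
    correct = topk_pred.eq(target.view(1, -1).expand_as(topk_pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


def gradClamp(parameters, clip=1.0):
    """Clamp every gradient element into [-clip, clip] in place."""
    for p in parameters:
        if p.grad is not None:
            p.grad.data.clamp_(-clip, clip)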
Example #2
def train_model(train_loader, model, criterion, optimizer, epoch, print_freq):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    psnrs = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target, target_seg, filename) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)
        target_seg = target_seg.cuda(non_blocking=True)

        # compute output
        output_seg = model(input)

        # compute loss
        if args.unet:
            loss = criterionCros(output_seg, target_seg)
        else:
            loss = criterionMSE(output_seg, target_seg)

        # compute psnr
        psnr = compute_psnr(loss)
        psnrs.update(psnr)

        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Psnr {psnr.val:.4f} ({psnr.avg:.4f})'.format(epoch,
                                                                i,
                                                                len(train_loader),
                                                                batch_time=batch_time,
                                                                data_time=data_time,
                                                                loss=losses,
                                                                psnr=psnrs))
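compute_psnr is not shown in these examples. A minimal sketch, assuming the loss it receives is a mean-squared error over images scaled to [0, 1] (so the peak value is 1.0); note that when args.unet routes the cross-entropy loss into it, the result is not a meaningful signal-to-noise ratio:

# Hedged sketch: assumes an MSE loss over [0, 1]-scaled images.
import math


def compute_psnr(mse_loss, max_val=1.0):
    """PSNR in dB from an MSE loss tensor (or plain float)."""
    mse = mse_loss.item() if hasattr(mse_loss, 'item') else float(mse_loss)
    if mse == 0:
        return float('inf')
    return 10.0 * math.log10(max_val ** 2 / mse)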
Example #3
def train_model(train_loader, model, criterion, optimizer, epoch, print_freq):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target, input_path) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)

        # check augmentation
        if args.save_tensor_image:
            save_tensor_image(input, input_path, args.agumetation_check)

        # compute output
        output = model(input)
        loss = criterion(output, target)

        # measure accuracy and record loss
        prec1 = accuracy(output, target, topk=(1, ))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1[0].cpu().data.numpy()[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            message = ('Epoch: [{0}][{1}/{2}]\t' +
                       'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' +
                       'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' +
                       'Loss {loss.val:.4f} ({loss.avg:.4f})\t' +
                       'Prec@1 {top1.val:.3f} ({top1.avg:.3f})').format(
                           epoch,
                           i,
                           len(train_loader),
                           batch_time=batch_time,
                           data_time=data_time,
                           loss=losses,
                           top1=top1)
            log(message)
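save_tensor_image, used above to spot-check augmentation (and again in Example #9 to dump segmentation output), is defined elsewhere. A plausible sketch with torchvision, assuming it writes each image in the batch to an output directory under its original file name:

# Hedged sketch: assumed behavior of save_tensor_image, not from this repo.
import os

import torchvision


def save_tensor_image(batch, paths, out_dir):
    """Save each image tensor in `batch` into `out_dir`, keeping file names."""
    os.makedirs(out_dir, exist_ok=True)
    for img, path in zip(batch, paths):
        name = os.path.basename(path)
        torchvision.utils.save_image(img, os.path.join(out_dir, name))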
Example #4
def train_model(train_loader, model, criterion, optimizer, epoch, print_freq):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target, input2, target2) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)
        target2 = target2.cuda(non_blocking=True)  # for multi task

        # compute output
        output, output2 = model(input)  # for multi task
        loss = criterion(output, target)
        loss2 = criterion(output2, target2)  # for multi task

        loss = loss + loss2  # for multi task

        # measure accuracy and record loss
        prec1 = accuracy(output, target, topk=(1,))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1[0].cpu().data.numpy()[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(epoch,
                                                                  i,
                                                                  len(train_loader),
                                                                  batch_time=batch_time,
                                                                  data_time=data_time,
                                                                  loss=losses,
                                                                  top1=top1))
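Example #4 assumes a model whose forward pass returns one output per task. A minimal sketch of such a two-head architecture (the backbone and head sizes here are illustrative, not taken from the code above):

# Hedged sketch: a generic shared-backbone, two-head module.
import torch.nn as nn


class TwoHeadNet(nn.Module):
    def __init__(self, backbone, feat_dim, num_classes1, num_classes2):
        super(TwoHeadNet, self).__init__()
        self.backbone = backbone  # shared feature extractor
        self.head1 = nn.Linear(feat_dim, num_classes1)  # task 1 classifier
        self.head2 = nn.Linear(feat_dim, num_classes2)  # task 2 classifier

    def forward(self, x):
        feat = self.backbone(x)
        return self.head1(feat), self.head2(feat)

Summing the two losses as above weights both tasks equally; a weighted sum such as loss + lam * loss2 is the usual knob when one task should dominate.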
Example #5
def validate_model(val_loader, model, criterion, epoch, print_freq):
    batch_time = AverageMeter()
    top1 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        target_index_output = []
        target_index_target = []
        for i, (input, target) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)

            # compute output
            output, output_seg = model(input)

            if args.acc_classify:
                prec1 = accuracy(output, target, topk=(1, ))
                prec1 = prec1[0].cpu().data.numpy()[0]

                # for auroc
                output_cpu = output.squeeze().cpu().data.numpy()
                output_cpu = np.array([
                    softmax(out)[args.target_index] for out in output_cpu
                ])  # convert to probability
            else:
                output_max = F.max_pool2d(
                    output_seg,
                    (args.avg_pooling_height, args.avg_pooling_width))
                output_max_cpu = output_max.cpu().data.numpy()
                target_cpu = target.cpu().data.numpy()

                output_max_cpu = [
                    1.0 if o > 0.5 else 0.0 for o in output_max_cpu
                ]
                prec1 = np.average(
                    np.equal(output_max_cpu, target_cpu).astype(float)) * 100

                # for auroc
                output_cpu = output_max.squeeze().cpu().data.numpy()

            # --------------------------------------
            # for auroc get value from target index
            target_index_output.extend(output_cpu.astype(float))
            target_index_target.extend(
                np.equal(target.cpu().data.numpy(),
                         args.target_index).astype(int))
            # --------------------------------------

            # measure accuracy and record loss
            top1.update(prec1, input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time,
                          top1=top1))

    auc, roc = compute_auroc(target_index_output, target_index_target)
    save_auroc(auc, roc, os.path.join(args.result, str(epoch) + '.png'))

    print(' * Prec@1 {top1.avg:.3f} at Epoch {epoch:0}'.format(top1=top1,
                                                               epoch=epoch))
    print(' * auc@1 {auc:.3f}'.format(auc=auc))

    return top1.avg
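softmax and compute_auroc are also external. A sketch using NumPy and scikit-learn, assuming compute_auroc returns the scalar AUC plus the ROC curve points that save_auroc then plots:

# Hedged sketch: assumed helper implementations, not from this repo.
import numpy as np
from sklearn.metrics import auc as sk_auc
from sklearn.metrics import roc_curve


def softmax(x):
    """Numerically stable softmax over a 1-D score vector."""
    e = np.exp(x - np.max(x))
    return e / e.sum()


def compute_auroc(scores, labels):
    """AUC and ROC points for binary labels and per-sample scores."""
    fpr, tpr, _ = roc_curve(labels, scores)
    return sk_auc(fpr, tpr), (fpr, tpr)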
Example #6
def train_model(train_loader, model, criterion, optimizer, epoch, print_freq):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target, target_seg) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)
        target_seg = target_seg.cuda(non_blocking=True)

        # compute output
        output, output_seg = model(input)

        # compute loss
        loss1 = criterion(output, target)
        loss2 = criterionMSE(output_seg, target_seg)

        # compose loss
        if args.do_classify and args.do_seg:
            loss = loss1 + loss2
        elif args.do_classify:
            loss = loss1
        elif args.do_seg:
            loss = loss2
        else:
            raise ValueError('set at least one of args.do_classify / args.do_seg')

        if args.acc_classify:
            prec1 = accuracy(output, target, topk=(1, ))
            prec1 = prec1[0].cpu().data.numpy()[0]
        else:
            output_max = F.max_pool2d(
                output_seg, (args.avg_pooling_height, args.avg_pooling_width))
            output_max = output_max.cpu().data.numpy()
            target = target.cpu().data.numpy()

            output_max = [1.0 if o > 0.5 else 0.0 for o in output_max]
            prec1 = np.average(
                np.equal(output_max, target).astype(float)) * 100

        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        top1.update(prec1, input.size(0))

        # compute gradient and do SGD step
        if args.train_per_loss and args.do_classify and args.do_seg:
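            # both flags are true on this path, so the `if args.do_classify`
            # branch below always runs and `elif args.do_seg` is unreachable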
            if args.do_classify:
                optimizer.zero_grad()
                loss2.backward(retain_graph=True)
                optimizer.step()

                optimizer.zero_grad()
                loss1.backward()
                optimizer.step()
            elif args.do_seg:
                optimizer.zero_grad()
                loss1.backward(retain_graph=True)
                optimizer.step()

                optimizer.zero_grad()
                loss2.backward()
                optimizer.step()
        else:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1))
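A note on the per-loss branch above: the first backward() call passes retain_graph=True because both losses share the same forward graph, and without it the intermediate buffers needed by the second backward() would already have been freed.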
Example #7
def validate_model(val_loader, model, criterion, epoch, print_freq):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    cnt_cnt_label = [0] * args.num_classes
    cnt_exact_pred = [0] * args.num_classes

    # switch to evaluate mode
    model.eval()

    if args.evaluate:
        evaluate_csv_file = os.path.join(args.result, 'evaluate.csv')
        feval = open(evaluate_csv_file, 'wt')

    with torch.no_grad():
        end = time.time()
        target_index_output, target_index_target = list(), list()
        for i, (input, target, input_path) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # --------------------------------------
            # for auroc get value from target index
            output_cpu = output.squeeze().cpu().data.numpy()
            output_cpu = np.array([softmax(out)[args.target_index] for out in output_cpu])
            target_index_output.extend(output_cpu.astype(float))
            target_index_target.extend(np.equal(target.cpu().data.numpy(), args.target_index).astype(int))
            # --------------------------------------

            if args.evaluate:
                output_softmax = np.array([softmax(out) for out in output.cpu().numpy()])
                for file_path, pred_values in zip(input_path, output_softmax):
                    _, file = os.path.split(file_path)
                    name, _ = os.path.splitext(file)
                    line = ','.join([name] + [str(v) for v in pred_values])
                    feval.write(line + '\n')

            # measure accuracy and record loss
            prec1 = accuracy(output, target, topk=(1,))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0].cpu().data.numpy()[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # put together for acc per label
            pred_list = pred(output).cpu().numpy().squeeze()
            target_list = target.cpu().numpy().squeeze()
            for (p, t) in zip(pred_list, target_list):
                cnt_cnt_label[t] += 1
                if p == t:
                    cnt_exact_pred[t] += 1

            # sort each input into a per-class result directory, once per batch
            for pred_idx, pred_item in enumerate(pred_list):
                dst = os.path.join(args.classification_result,
                                   'kidney' if pred_item == 1 else 'no_kidney')

                if not os.path.exists(dst):
                    os.makedirs(dst)

                seg_img = input_path[pred_idx]
                shutil.copy(seg_img, dst)

            if i % print_freq == 0:
                log(('Test: [{0}/{1}]\t' +
                     'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' +
                     'Loss {loss.val:.4f} ({loss.avg:.4f})\t' +
                     'Prec@1 {top1.val:.3f} ({top1.avg:.3f})').format(i,
                                                                      len(val_loader),
                                                                      batch_time=batch_time,
                                                                      loss=losses,
                                                                      top1=top1))

        auc, roc = compute_auroc(target_index_output, target_index_target)
        # save_auroc(auc, roc, os.path.join(args.result, str(epoch) + '.png'))

        log(' * Prec@1 {top1.avg:.3f} at Epoch {epoch:0}'.format(top1=top1, epoch=epoch))
        log(' * auc@1 {auc:.3f}'.format(auc=auc))

        for (i, (n_label, n_exact)) in enumerate(zip(cnt_cnt_label, cnt_exact_pred)):
            acc_label = (n_exact / n_label * 100) if n_label > 0 else 0
            log('acc of label {:0d}: {:0.3f}%'.format(i, acc_label))

    if args.evaluate:
        feval.close()

    return auc
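pred, used above (and in Example #8) to turn logits into class indices, is defined elsewhere. A minimal sketch, assuming it returns the arg-max class per sample:

# Hedged sketch: assumed behavior of pred, not from this repo.
import torch


def pred(output):
    """Index of the highest-scoring class for each sample in the batch."""
    return torch.argmax(output, dim=1, keepdim=True)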
Example #8
def validate_model(val_loader, model, criterion, epoch, print_freq):
    if args.evaluate:
        print('eval check')
        val_loader = load_dataset(find_csv(args.data, ''))
    else:
        val_loader = load_dataset(find_csv(args.data, 'val'))

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    cnt_cnt_label = [0] * args.num_classes
    cnt_exact_pred = [0] * args.num_classes

    # switch to evaluate mode
    model.eval()

    if args.evaluate:
        evaluate_csv_file = os.path.join(args.result, 'evaluate.csv')
        feval = open(evaluate_csv_file, 'wt')

    with torch.no_grad():
        end = time.time()
        target_index_output, target_index_target = list(), list()

        for i, (target, cont_input, cat_input,
                identification) in enumerate(val_loader):

            target = target.cuda(non_blocking=True)
            target = target.squeeze()

            # compute output
            output = model(cont_input, cat_input)

            # loss = criterion(output, target)
            loss = criterion_smoothing(output, target)

            # --------------------------------------
            # for auroc get value from target index
            output_cpu = output.cpu().data.numpy()

            output_cpu = np.array(
                [softmax(out)[args.target_index] for out in output_cpu])
            target_index_output.extend(output_cpu.astype(float))
            target_index_target.extend(
                np.equal(target.cpu().data.numpy(),
                         args.target_index).astype(int))
            # --------------------------------------

            if args.evaluate:
                output_softmax = np.array(
                    [softmax(out) for out in output.cpu().numpy()])
                for ident, pred_values in zip(identification, output_softmax):
                    ident = str(np.squeeze(ident.data.numpy()))
                    idx_max = np.argmax(pred_values)
                    diag = 'CKD' if idx_max == 1 else 'AKI or NOR'

                    line = ','.join([ident, diag])
                    feval.write(line + '\n')

            # measure accuracy and record loss
            prec1 = accuracy(output, target, topk=(1, ))

            losses.update(loss.item(), target.size(0))
            top1.update(prec1[0].cpu().data.numpy()[0], target.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # put together for acc per label
            pred_list = pred(output).cpu().numpy()[0]
            target_list = target.cpu().numpy()

            for (p, t) in zip(pred_list, target_list):
                cnt_cnt_label[t] += 1
                if p == t:
                    cnt_exact_pred[t] += 1

            if i % print_freq == 0:
                log(('Test: [{0}/{1}]\t' +
                     'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' +
                     'Loss {loss.val:.4f} ({loss.avg:.4f})\t' +
                     'Prec@1 {top1.val:.3f} ({top1.avg:.3f})').format(
                         i,
                         len(val_loader),
                         batch_time=batch_time,
                         loss=losses,
                         top1=top1))

        auc, roc = compute_auroc(target_index_output, target_index_target)

        global best_acc
        global best_auc
        # if auc > best_auc:
        if top1.avg > best_acc:
            log(' * Prec@1 {top1.avg:.3f} at Epoch {epoch:0}'.format(
                top1=top1, epoch=epoch))
            log(' * auc@1 {auc:.3f}'.format(auc=auc))
            best_acc = top1.avg
            best_auc = auc

            acc_label_list = []
            for (i, (n_label,
                     n_exact)) in enumerate(zip(cnt_cnt_label,
                                                cnt_exact_pred)):
                acc_label = (n_exact / n_label * 100) if n_label > 0 else 0
                acc_label_list.append(acc_label)
                log('acc of label {:0d}: {:0.3f}%'.format(i, acc_label))

            save_values(epoch, losses.avg, top1.avg, auc, acc_label_list)
            print("=" * 50)
    return auc
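criterion_smoothing, used in Examples #1 and #8 in place of the plain criterion, is defined elsewhere. A common label-smoothing cross-entropy, sketched with an assumed smoothing factor eps (the value used by this codebase is not shown):

# Hedged sketch: a standard label-smoothing loss, not from this repo.
import torch.nn as nn
import torch.nn.functional as F


class LabelSmoothingLoss(nn.Module):
    def __init__(self, eps=0.1):
        super(LabelSmoothingLoss, self).__init__()
        self.eps = eps

    def forward(self, output, target):
        log_probs = F.log_softmax(output, dim=-1)
        # (1 - eps) on the true class, eps spread uniformly over all classes
        nll = -log_probs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
        smooth = -log_probs.mean(dim=-1)
        return ((1.0 - self.eps) * nll + self.eps * smooth).mean()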
Example #9
def validate_model(val_loader, model, criterion, epoch, print_freq):
    batch_time = AverageMeter()
    losses = AverageMeter()
    psnrs = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (input, target, target_seg, filename) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)
            target_seg = target_seg.cuda(non_blocking=True)

            # compute output
            output_seg = model(input)

            # compute loss: cross-entropy for the U-Net variant, MSE otherwise
            if args.unet:
                loss = criterionCros(output_seg, target_seg)
            else:
                loss = criterionMSE(output_seg, target_seg)

            # measure accuracy and record loss
            psnrs.update(compute_psnr(loss))
            losses.update(loss.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # save segmentation result
            if args.seg_result != '':
                name = [os.path.split(f)[1] for f in filename]
                save_tensor_image(output_seg, name, args.seg_result)

            if i % print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Psnr {psnr.val:.4f} ({psnr.avg:.4f})'.format(
                          i,
                          len(val_loader),
                          batch_time=batch_time,
                          loss=losses,
                          psnr=psnrs))

    print(' * Loss@1 {loss.avg:.4f} Psnr@1 {psnr.avg:.4f} at Epoch {epoch:0}'.
          format(loss=losses, psnr=psnrs, epoch=epoch))

    # save log graph
    # It's not a good location, but it's practical.

    # save_log_graph(log=os.path.join(args.result, 'log.txt'))

    # post-process the saved segmentation results

    if args.post_processing:
        for filename in os.listdir(args.seg_result):
            filename = os.path.join(args.seg_result, filename)
            with open(filename, 'rb') as f:
                with Image.open(f) as img:
                    # do post processing
                    img = np.array(img)
                    img = post_processing(img)
                    # resize back to 512x512
                    img = cv2.resize(img, dsize=(512, 512))
                    img = Image.fromarray(img)
                    # save post processed result
                    img.save(filename)

    return psnrs.avg
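post_processing is defined elsewhere. A plausible sketch that cleans a binary segmentation mask with OpenCV morphology (the kernel size is an assumption, not taken from this codebase):

# Hedged sketch: morphological cleanup for a binary mask.
import cv2
import numpy as np


def post_processing(img, kernel_size=5):
    """Morphological open then close: drop speckles, fill small holes."""
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
    return img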