def main():
    global args, logger
    args = parser.parse_args()
    set_prefix(args.prefix, __file__)
    model = model_builder()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # enable the cuDNN auto-tuner to speed up training
    cudnn.benchmark = True

    train_loader, val_loader = load_dataset()
    # class_names=['LESION', 'NORMAL']
    class_names = train_loader.dataset.class_names
    print(class_names)

    # decay the learning rate every args.step_size epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    since = time.time()
    print('-' * 10)
    for epoch in range(args.epochs):
        # adjust weights once the UNet is close to an identity mapping
        train(train_loader, model, optimizer, epoch)
        # step the scheduler after the training epoch (required since PyTorch 1.1)
        exp_lr_scheduler.step()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    with torch.no_grad():
        validate(model, train_loader, val_loader)
    # save_typical_result(model)
    torch.save(model.state_dict(), add_prefix(args.prefix, 'locator.pkl'))
    write(vars(args), add_prefix(args.prefix, 'paras.txt'))
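These snippets lean on small project helpers (set_prefix, add_prefix, write, mkdir) that are not shown on this page. A minimal sketch of plausible implementations, assuming they only manage an output directory and serialize dicts; the actual repository may differ:

import json
import os
import shutil


def add_prefix(prefix, path):
    # join the output directory with a file name
    return os.path.join(prefix, path)


def set_prefix(prefix, script_path):
    # create the output directory and archive the running script into it
    os.makedirs(prefix, exist_ok=True)
    shutil.copy(script_path, prefix)


def mkdir(path):
    os.makedirs(path, exist_ok=True)


def write(content, path):
    # dump a dict (e.g. vars(args) or a results table) to disk as JSON
    with open(path, 'w') as f:
        json.dump(content, f, indent=2)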
def main(prefix, epoch, data_dir):
    saved_path = '../%s/dice_loss%s/' % (prefix, epoch)
    criterion = DiceLoss(prefix, epoch, data_dir)
    results = dict()
    # Note the threshold range: if the threshold is too small, the dice score
    # is high but meaningless, because the entire binarized image tends to 1.
    for thresh in range(1, 256):
        avg_dice_loss = criterion(thresh)
        results[thresh] = avg_dice_loss
        print('avg dice loss=%.4f, thresh=%d' % (avg_dice_loss, thresh))
    write(results, add_prefix(saved_path, 'results.txt'))
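DiceLoss(prefix, epoch, data_dir) is project code, so only its call signature is visible here. A hedged sketch of what evaluating one threshold might involve (all names below are assumptions); it also illustrates why a very low threshold inflates the score: nearly every pixel binarizes to 1, so the overlap with large masks looks spuriously good.

import numpy as np


def dice_at_threshold(prob_map, mask, thresh):
    # binarize a predicted map with values in [0, 255] at `thresh`
    pred = (prob_map >= thresh).astype(np.float32)
    intersection = (pred * mask).sum()
    # dice coefficient: 2|A∩B| / (|A| + |B|), with eps against empty masks
    return 2.0 * intersection / (pred.sum() + mask.sum() + 1e-8)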
Example #3
def main():
    global args, min_loss, best_acc
    args = parser.parse_args()
    device_counts = torch.cuda.device_count()
    print('using %d GPU(s)' % device_counts)
    # save source script
    set_prefix(args.prefix, __file__)
    model = model_selector(args.model_type)
    print(model)
    if args.cuda:
        model = DataParallel(model).cuda()
    else:
        raise RuntimeError('CUDA is required but no GPU is available')

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # enable the cuDNN auto-tuner to speed up training
    cudnn.benchmark = True

    train_loader, val_loader = load_dataset()
    # class_names=['LESION', 'NORMAL']
    class_names = train_loader.dataset.class_names
    print(class_names)
    criterion = nn.BCELoss().cuda()

    # decay the learning rate every args.step_size epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                           step_size=args.step_size,
                                           gamma=args.gamma)
    since = time.time()
    print('-' * 10)
    for epoch in range(args.epochs):
        train(train_loader, model, optimizer, criterion, epoch)
        # step the scheduler after the training epoch (required since PyTorch 1.1)
        exp_lr_scheduler.step()
        cur_loss, cur_acc = validate(model, val_loader, criterion)
        is_best = cur_loss < min_loss
        # update the global running minimum; otherwise every epoch would be
        # compared against the initial value only
        min_loss = min(cur_loss, min_loss)
        if is_best:
            best_acc = cur_acc
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.model_type,
                'state_dict': model.state_dict(),
                'min_loss': min_loss,
                'acc': best_acc,
                'optimizer': optimizer.state_dict(),
            }, is_best)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    check_point = torch.load(add_prefix(args.prefix, args.best_model_path))
    print('min_loss=%.4f, best_acc=%.4f' %
          (check_point['min_loss'], check_point['acc']))
    write(vars(args), add_prefix(args.prefix, 'paras.txt'))
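save_checkpoint is not shown on this page. A minimal sketch in the style of the official PyTorch ImageNet example, assuming the same global args and the add_prefix helper used above:

import shutil

import torch


def save_checkpoint(state, is_best, filename='checkpoint.pkl'):
    # always persist the latest state; copy it aside when it is the best so far
    path = add_prefix(args.prefix, filename)
    torch.save(state, path)
    if is_best:
        shutil.copyfile(path, add_prefix(args.prefix, args.best_model_path))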
Example #4
def main():
    global args, best_acc
    args = parser.parse_args()
    # save source script
    set_prefix(args.prefix, __file__)
    model = models.densenet121(pretrained=False, num_classes=2)
    if args.cuda:
        model = DataParallel(model).cuda()
    else:
        warnings.warn('no GPU available, running on CPU')

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # enable the cuDNN auto-tuner to speed up training
    cudnn.benchmark = True

    train_loader, val_loader = load_dataset()
    # class_names=['LESION', 'NORMAL']
    class_names = train_loader.dataset.classes
    print(class_names)
    if args.is_focal_loss:
        print('using focal loss')
        criterion = FocalLoss()
    else:
        criterion = nn.CrossEntropyLoss()
    if args.cuda:
        # only move the criterion to the GPU when one is actually available
        criterion = criterion.cuda()

    # decay the learning rate every args.step_size epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                           step_size=args.step_size,
                                           gamma=args.gamma)
    since = time.time()
    print('-' * 10)
    for epoch in range(args.epochs):
        train(train_loader, model, optimizer, criterion, epoch)
        # step the scheduler after the training epoch (required since PyTorch 1.1)
        exp_lr_scheduler.step()
        cur_accuracy = validate(model, val_loader, criterion)
        is_best = cur_accuracy > best_acc
        best_acc = max(cur_accuracy, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': 'densenet121',  # matches the model built above
                'state_dict': model.state_dict(),
                'best_accuracy': best_acc,
                'optimizer': optimizer.state_dict(),
            }, is_best)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    # compute validation metrics such as the confusion matrix
    compute_validate_meter(model, add_prefix(args.prefix,
                                             args.best_model_path), val_loader)
    # save running parameter setting to json
    write(vars(args), add_prefix(args.prefix, 'paras.txt'))
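FocalLoss above is also project code. A minimal multi-class sketch after Lin et al., "Focal Loss for Dense Object Detection" (2017), assuming integer class targets as with nn.CrossEntropyLoss; the repository's version may differ (e.g. in gamma or class weighting):

import torch
import torch.nn as nn
import torch.nn.functional as F


class FocalLoss(nn.Module):
    def __init__(self, gamma=2.0):
        super(FocalLoss, self).__init__()
        self.gamma = gamma

    def forward(self, logits, targets):
        # per-sample cross entropy, kept unreduced
        ce = F.cross_entropy(logits, targets, reduction='none')
        pt = torch.exp(-ce)  # probability assigned to the true class
        # down-weight easy examples by (1 - pt)^gamma
        return ((1.0 - pt) ** self.gamma * ce).mean()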
def plot_roc_curve(our_scores, our_true, cam_scores, cam_true, grad_scores, grad_true, saved_path):
    """
    references: https://github.com/JakenHerman/Plot_ROC_Curve/blob/master/roc_test.py
                https://blog.csdn.net/site1997/article/details/79180384
    """
    mkdir(saved_path)
    cam_fpr, cam_tpr, _ = roc_curve(cam_true, cam_scores)
    grad_fpr, grad_tpr, _ = roc_curve(grad_true, grad_scores)
    our_fpr, our_tpr, _ = roc_curve(our_true, our_scores)

    cam_roc_auc = auc(cam_fpr, cam_tpr)
    grad_roc_auc = auc(grad_fpr, grad_tpr)
    our_roc_auc = auc(our_fpr, our_tpr)
    auc_scores = dict(cam_roc_auc=cam_roc_auc, grad_roc_auc=grad_roc_auc, our_roc_auc=our_roc_auc)
    print(auc_scores)
    write(auc_scores, '%s/auc_scores.txt' % saved_path)

    fig_width_pt = 246.0  # Get this from LaTeX using \showthe\columnwidth
    inches_per_pt = 1.0 / 72.27  # Convert pt to inch
    golden_mean = (pylab.sqrt(5) - 1.0) / 2.0  # Aesthetic ratio
    fig_width = fig_width_pt * inches_per_pt  # width in inches
    fig_height = fig_width * golden_mean  # height in inches
    fig_size = [fig_width, fig_height]
    params = {'backend': 'ps',
              'axes.labelsize': 8,
              'font.size': 8,  # 'text.fontsize' is not a valid rcParam in current matplotlib
              'legend.fontsize': 8,
              'xtick.labelsize': 8,
              'ytick.labelsize': 8,
              'text.usetex': True,
              'figure.figsize': fig_size}
    pylab.rcParams.update(params)
    # plot the ROC curves and save them as a vector graphic (EPS)
    pylab.figure(1)
    pylab.clf()
    pylab.axes([0.125, 0.2, 0.95 - 0.125, 0.95 - 0.2])
    pylab.plot(cam_fpr, cam_tpr, 'b', label='CAM (AUC=$%.3f$)' % cam_roc_auc)
    pylab.plot(grad_fpr, grad_tpr, 'r', label='Grad-CAM (AUC=$%.3f$)' % grad_roc_auc)
    pylab.plot(our_fpr, our_tpr, 'g', label='Ours (AUC=$%.3f$)' % our_roc_auc)
    pylab.xlabel('false positive rate')
    pylab.ylabel('true positive rate')
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.05])
    pylab.legend()
    pylab.savefig(os.path.join(saved_path, 'roc_curve.eps'))
    print('ROC curve plotted successfully.')
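A hedged usage example with synthetic scores (real callers would pass per-image lesion scores from the three localization methods); note that text.usetex above requires a working LaTeX install:

import numpy as np

rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=200)
# fake scores that loosely track the labels, one array per method
ours = np.clip(0.7 * y_true + 0.5 * rng.rand(200), 0, 1)
cam = np.clip(0.5 * y_true + 0.6 * rng.rand(200), 0, 1)
grad = np.clip(0.6 * y_true + 0.55 * rng.rand(200), 0, 1)
plot_roc_curve(ours, y_true, cam, y_true, grad, y_true, './roc_demo')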
def main():
    global args, logger
    args = parser.parse_args()
    # logger = Logger(add_prefix(args.prefix, 'logs'))
    set_prefix(args.prefix, __file__)
    model = UNet(3, depth=5, in_channels=3)
    print(model)
    print('loaded UNet with depth=5')
    if args.cuda:
        model = DataParallel(model).cuda()
    else:
        raise RuntimeError('CUDA is required but no GPU is available')
    # per-element L1 loss; `reduce=False` is deprecated in favor of reduction='none'
    criterion = nn.L1Loss(reduction='none').cuda()
    print('using per-element L1 loss')
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # enable the cuDNN auto-tuner to speed up training
    cudnn.benchmark = True

    data_loader = get_dataloader()
    # class_names=['LESION', 'NORMAL']
    # class_names = data_loader.dataset.class_names
    # print(class_names)

    since = time.time()
    print('-' * 10)
    for epoch in range(1, args.epochs + 1):
        train(data_loader, model, optimizer, criterion, epoch)
        if epoch % 40 == 0:
            validate(model, epoch, data_loader)

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    validate(model, args.epochs, data_loader)
    # save model parameter
    torch.save(model.state_dict(),
               add_prefix(args.prefix, 'identical_mapping.pkl'))
    # save running parameter setting to json
    write(vars(args), add_prefix(args.prefix, 'paras.txt'))
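train() is not shown here; because the criterion is unreduced, it yields one L1 value per pixel and the training loop has to reduce that map itself. A minimal sketch of one step of the identity-mapping objective, with everything beyond model/criterion/optimizer assumed:

def train_step(model, criterion, optimizer, images):
    # identity-mapping objective: the UNet reconstructs its own input
    outputs = model(images)
    loss_map = criterion(outputs, images)  # per-pixel L1, same shape as images
    loss = loss_map.mean()                 # reduce manually, e.g. a plain mean
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()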
Example #7
def save_results(results, saved_path):
    # create the output directory if needed, then dump the results dict
    os.makedirs(saved_path, exist_ok=True)
    write(results, os.path.join(saved_path, 'results.txt'))
Example #8
def save_hyperparameters(self, args):
    write(vars(args), add_prefix(self.prefix, 'para.txt'))
    print('saved hyperparameters successfully.')