Example #1
def apply_to_slide(args):
    # Read the trained model (note: model_file is computed but never used;
    # the network is actually loaded from args.network)
    model_file = os.path.abspath('./exp01/checkpoint.200th.tar')
    model = load_model(args.network)
    model.cuda()
    model.eval()

    # Read the slide
    osl = openslide.OpenSlide(args.slide)

    # Transform
    tran_norm = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    # Break the image into patches, for each patch, run the model on it
    (w, h) = osl.level_dimensions[0]

    # Size of the input window and number of windows
    window_size = int(args.window)
    nw_x, nw_y = math.ceil(w / window_size), math.ceil(h / window_size)

    # Output window size and output dimensions
    output_window_size = int(window_size / 256)
    (ow, oh) = nw_x * output_window_size, nw_y * output_window_size
    output = np.zeros((ow, oh, 20))

    # Set up a threaded worker to read openslide patches
    worker = threading.Thread(target=osl_worker,
                              args=(osl, (0, nw_x), (0, nw_y), window_size, 0))
    worker.start()

    while True:

        # Read a chunk of data
        q_data = osl_read_chunk_from_queue()

        # Check for sentinel value
        if q_data is None:
            break

        # Get the values
        ((i_x, i_y), (c_x, c_y, wd), _, window) = q_data

        # Normalize the window and run the model on it
        W = tran_norm(window).cuda()
        R = run_model_on_window(model, W).cpu().numpy().transpose((1, 0, 2))

        co_x, co_y = output_window_size * i_x, output_window_size * i_y
        output[co_x:co_x + output_window_size,
               co_y:co_y + output_window_size, :] = R
        print('Finished (%d,%d) of (%d,%d)' % (i_x, i_y, nw_x, nw_y))

    # Clip the output
    output = output[0:math.ceil(w / 256),
                    0:math.ceil(h / 256), :].transpose(1, 0, 2)

    # Set the spacing based on openslide
    # Get the image spacing from the header, in mm units
    (sx, sy) = (0.0, 0.0)
    if 'openslide.mpp-x' in osl.properties:
        sx = float(osl.properties['openslide.mpp-x']) * 256 / 1000.0
        sy = float(osl.properties['openslide.mpp-y']) * 256 / 1000.0
    elif 'openslide.comment' in osl.properties:
        for z in osl.properties['openslide.comment'].split('\n'):
            r = parse.parse('Resolution = {} um', z)
            if r is not None:
                sx = float(r[0]) * 256 / 1000.0
                sy = float(r[0]) * 256 / 1000.0

    # If there is no spacing, throw exception
    if sx == 0.0 or sy == 0.0:
        raise Exception('No spacing information in image')

    # Report spacing information
    print("Spacing of the mri-like image: %gx%gmm\n" % (sx, sy))

    # Write the result as a NIFTI file
    nii = sitk.GetImageFromArray(output, isVector=True)
    nii.SetSpacing((sx, sy))
    sitk.WriteImage(nii, args.output)
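
The helpers osl_worker, osl_read_chunk_from_queue and run_model_on_window are not shown in this example. Below is a minimal sketch of what they might look like, assuming a module-level queue.Queue shared between the reader thread and the main loop; the queue name, the RGB conversion, and the output-shape handling in run_model_on_window are assumptions, not the example's actual code.

import queue

import numpy as np
import torch

# Hypothetical shared queue between the producer thread and the main loop
osl_queue = queue.Queue(maxsize=8)


def osl_worker(osl, range_x, range_y, window_size, level):
    """Producer: read square patches from the slide and enqueue them."""
    for i_x in range(*range_x):
        for i_y in range(*range_y):
            # Corner of the patch in level-0 pixel coordinates
            c_x, c_y = i_x * window_size, i_y * window_size
            region = osl.read_region((c_x, c_y), level,
                                     (window_size, window_size))
            window = np.asarray(region.convert('RGB'))
            osl_queue.put(((i_x, i_y), (c_x, c_y, window_size),
                           level, window))
    osl_queue.put(None)  # sentinel: tells the consumer to stop


def osl_read_chunk_from_queue():
    """Consumer side: return the next patch, or None once the worker is done."""
    return osl_queue.get()


def run_model_on_window(model, W):
    """Run the network on one normalized window. Assumes the model emits a
    dense (C, H, W) map, which is returned channel-last as (H, W, C)."""
    with torch.no_grad():
        return model(W.unsqueeze(0)).squeeze(0).permute(1, 2, 0)
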
Example #2
def apply_to_slide(args):

    # Read the trained model
    model = load_model(args.network)
    model.cuda()
    model.eval()

    # Read the slide. Note that PIL's Image.size is (width, height), so
    # 'h' here actually holds the width and 'w' the height; the crop and
    # padding code below is consistent with this naming.
    I = Image.open(args.slide)
    h, w = I.size

    # Split the transforms
    tran_tt = transforms.ToTensor()
    tran_model = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    # Get the patch size and downsample factor
    patch_size, ds = int(args.patch), int(args.downsample)

    # Image as tensor
    I_crop = I.crop((0, 0, ds * int(h / ds), ds * int(w / ds)))
    IT = tran_tt(I_crop)

    # Image unfolded into patches
    IT_unfold = IT.unfold(1, patch_size, ds).unfold(2, patch_size,
                                                    ds).permute(1, 2, 0, 3, 4)

    # Output dimensions (before padding)
    (ow, oh) = IT_unfold.shape[0], IT_unfold.shape[1]

    # Flat array of inputs
    batches = IT_unfold.reshape(-1, 3, patch_size, patch_size)

    # Break into digestable batches
    bs = int(args.batch_size)
    for k in range(0, batches.shape[0], bs):
        k_end = min(k + bs, batches.shape[0])
        batch = torch.zeros((k_end - k, 3, 224, 224))
        for j in range(k, k_end):
            batch[j - k, :, :, :] = tran_model(batches[j, :, :, :])
        with torch.no_grad():
            res_batch = model(batch.cuda()).detach().cpu()
            if k == 0:
                result = res_batch
            else:
                result = torch.cat((result, res_batch), 0)
        print('Batch %d of %d' %
              (k // bs + 1, (batches.shape[0] + bs - 1) // bs))

    # Reformat into a 20xWxH image
    Z = result.permute(1, 0).reshape(-1, ow, oh)

    # Pad the result to desired size
    owp, ohp = int(w * 1.0 / ds + 0.5), int(h * 1.0 / ds + 0.5)
    pw0, ph0 = int((owp - ow) / 2), int((ohp - oh) / 2)
    pw1, ph1 = owp - ow - pw0, ohp - oh - ph0
    Z = torch.nn.functional.pad(Z, (ph0, ph1, pw0, pw1, 0, 0), 'constant', 0)

    # Write the result as a NIFTI file
    nii = sitk.GetImageFromArray(Z.permute(1, 2, 0).numpy(), isVector=True)
    nii.SetSpacing((ds, ds))
    sitk.WriteImage(nii, args.output)

    # Write the optional thumb
    if args.thumb is not None:
        rgb = np.asarray(I.resize((ohp, owp), Image.LANCZOS))
        nii = sitk.GetImageFromArray(rgb, True)
        nii.SetSpacing((ds, ds))
        sitk.WriteImage(nii, args.thumb)
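
The unfold calls are the core trick in this example: they turn the image tensor into a grid of patches without explicit copy loops. A small self-contained check of the same pattern on a toy tensor (the shapes are the point; nothing here depends on the example's data):

import torch

# Toy "image": 3 channels, 10 x 12 pixels
IT = torch.arange(3 * 10 * 12, dtype=torch.float32).reshape(3, 10, 12)
patch_size, ds = 4, 2

# unfold(dim, size, step) slides a window of length `size` with stride `step`
# along `dim`; applying it to dims 1 and 2 yields (C, nH, nW, p, p), and the
# permute moves the grid dimensions in front: (nH, nW, C, p, p)
patches = IT.unfold(1, patch_size, ds).unfold(2, patch_size,
                                              ds).permute(1, 2, 0, 3, 4)
print(patches.shape)  # torch.Size([4, 5, 3, 4, 4])

# Flattening the grid gives one row per patch, ready for batching
flat = patches.reshape(-1, 3, patch_size, patch_size)
print(flat.shape)  # torch.Size([20, 3, 4, 4])
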
Example #3
def main():
    global args
    args = parser.parse_args()
    root, dictionary_pickle, metadata_path = build_paths()
    # fix random seeds
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    best_prec1 = 0

    # load model
    model = load_model(args.model)
    model.cuda()
    cudnn.benchmark = True

    # freeze the features layers
    for param in model.features.parameters():
        param.requires_grad = False

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    train_dataset = COIN(root, dictionary_pickle, metadata_path, train=True)

    val_dataset = COIN(root, dictionary_pickle, metadata_path, train=False)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size // 2,
                                             shuffle=False,
                                             num_workers=args.workers)

    num_classes = len(train_dataset.class_dict)
    model.top_layer = nn.Linear(4096, num_classes)
    model.top_layer.weight.data.normal_(0, 0.01)
    model.top_layer.bias.data.zero_()
    model.top_layer.cuda()

    # logistic regression
    #reglog = RegLog(args.conv, num_classes).cuda()
    optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad,
                                       model.parameters()),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=10**args.weight_decay)

    # create logs
    exp_log = os.path.join(args.exp, 'log')
    if not os.path.isdir(exp_log):
        os.makedirs(exp_log)

    loss_log = Logger(os.path.join(exp_log, 'loss_log'))
    prec1_log = Logger(os.path.join(exp_log, 'prec1'))
    prec5_log = Logger(os.path.join(exp_log, 'prec5'))
    print(model)
    for epoch in range(args.epochs):
        end = time.time()

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1, prec5, loss = validate(val_loader, model, criterion)

        loss_log.log(loss)
        prec1_log.log(prec1)
        prec5_log.log(prec5)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        if is_best:
            filename = str(end) + '-model_best.pth.tar'
        else:
            filename = str(end) + '-checkpoint.pth.tar'
        torch.save(
            {
                'epoch': epoch + 1,
                'arch': 'alexnet',
                'state_dict': model.state_dict(),
                'prec5': prec5,
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, os.path.join(args.exp, filename))
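
The Logger class used for loss_log, prec1_log and prec5_log is defined elsewhere. A minimal sketch consistent with how it is used here (one scalar appended per log() call, persisted to the given path); the pickle-based storage is an assumption:

import pickle


class Logger:
    """Append scalar values and persist the full history after each update."""

    def __init__(self, path):
        self.path = path
        self.data = []

    def log(self, value):
        self.data.append(value)
        with open(self.path, 'wb') as f:
            pickle.dump(self.data, f)
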
Example #4
def main():
    global args
    args = parser.parse_args()
    root, dictionary_pickle, metadata_path = build_paths()

    # fix random seeds
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    best_prec1 = 0

    # load model
    model = load_model(args.model, args.modal)
    model.cuda()
    cudnn.benchmark = True

    # freeze the feature layers; note that requires_grad must be set on the
    # parameters (via Module.requires_grad_), not as a plain module attribute
    if args.modal == 'video_only':
        for end_point in model.end_points:
            print("Frozen: ", end_point)
            model._modules[end_point].requires_grad_(False)
    elif args.modal == 'joint':
        model.i3d.requires_grad_(False)
        model.roberta.requires_grad_(False)
    else:
        model.features.requires_grad_(False)
        model.classifier.requires_grad_(False)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    train_dataset = COIN(root,
                         dictionary_pickle,
                         metadata_path,
                         method=args.modal,
                         clip_len=args.cliplen,
                         train=True,
                         do_crop=True)

    val_dataset = COIN(root,
                       dictionary_pickle,
                       metadata_path,
                       method=args.modal,
                       clip_len=args.cliplen,
                       train=False,
                       do_crop=True)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size // 2,
                                             shuffle=False,
                                             num_workers=args.workers)

    num_classes = len(train_dataset.class_dict)

    # attach the new classification head before building the optimizer, so
    # that its trainable parameters are picked up by the filter below
    if args.modal == 'video_only':
        model.replace_logits(num_classes)
        model.logits.conv3d.weight = nn.init.kaiming_normal_(
            model.logits.conv3d.weight, mode='fan_out')
        if model.logits.conv3d.bias is not None:
            model.logits.conv3d.bias.data.zero_()
    elif args.modal == 'text_only':
        model.top_layer = nn.Linear(512, num_classes)
        model.top_layer.weight.data.normal_(0, 0.01)
        model.top_layer.bias.data.zero_()
    else:
        model.top_layer = nn.Linear(1024, num_classes)
        model.initialize_weights()
    model.cuda()

    # logistic regression
    # reglog = RegLog(args.conv, num_classes).cuda()
    optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad,
                                       model.parameters()),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=10**args.weight_decay)

    # optionally resume model and optimizer state (video_only path)
    if args.modal == 'video_only' and args.resume:
        model, optimizer = _load_model(args.resume, model, args, optimizer)

    # create logs
    exp_log = os.path.join(args.exp, 'log')
    if not os.path.isdir(exp_log):
        os.makedirs(exp_log)

    loss_log = Logger(os.path.join(exp_log, 'loss_log'))
    prec1_log = Logger(os.path.join(exp_log, 'prec1'))
    prec5_log = Logger(os.path.join(exp_log, 'prec5'))
    for epoch in range(args.epochs):
        end = time.time()

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1, prec5, loss = validate(val_loader, model, criterion)
        print("Validation %d: " % epoch, prec1, prec5, loss)
        loss_log.log(loss)
        prec1_log.log(prec1)
        prec5_log.log(prec5)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        if is_best:
            filename = 'model_best.pth.tar'
        else:
            filename = 'checkpoint.pth.tar'
        torch.save(
            {
                'epoch': epoch + 1,
                'arch': 'alexnet',
                'state_dict': model.state_dict(),
                'prec5': prec5,
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, os.path.join(args.exp, filename))
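
_load_model, used on the resume path of the video_only branch, is also external to this example. A plausible sketch, assuming the checkpoints have the same keys as the ones saved above; the exact behaviour of the real helper is a guess:

import torch


def _load_model(resume_path, model, args, optimizer):
    """Hypothetical resume helper: restore model and optimizer state."""
    checkpoint = torch.load(resume_path)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    print("Resumed from '%s' (epoch %d)" % (resume_path,
                                            checkpoint['epoch']))
    return model, optimizer
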