Example #1
0
def main():
    """Train the superpixel autoencoder (AE) on boxed superpixel crops.

    Each step draws one batch from the disc-superpixel graph dataset,
    cuts fixed-size boxes around every superpixel, optimises the AE
    reconstruction loss via ``train`` and logs it to TensorBoard.
    The trained weights are saved to ``ae_sp_feat_model.pth``.
    """
    model = AE(device)
    model.cuda(device)
    feature_size = [34, 34]  # spatial size of each boxed superpixel crop
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    rootPath = '/g/kreshuk/hilt/projects/fewShotLearning/mutexWtsd/models'
    modelFileCircle = os.path.join(rootPath, 'UnetEdgePreds.pth')
    # Pretrained affinity predictor, used only to generate training data.
    affinities_predictor_circle = UNet(n_channels=1, n_classes=len(offsets), bilinear=True, device=device)
    affinities_predictor_circle.load_state_dict(torch.load(modelFileCircle), strict=True)
    affinities_predictor_circle.cuda(device)
    dloader = DataLoader(DiscSpGraphDset(affinities_predictor_circle, separating_channel, offsets), batch_size=1,
                         shuffle=True, pin_memory=True)

    writer = SummaryWriter(logdir='./logs')

    for e in tqdm(range(10000)):
        # NOTE: a fresh iterator per step (shuffle=True) draws a random first
        # batch each time, i.e. sampling is effectively with replacement.
        edges, edge_feat, diff_to_gt, gt_edge_weights, node_feat, seg, raw, affinities, _, angles = next(iter(dloader))
        seg = seg.squeeze()
        raw = raw.squeeze()
        # Shift labels to start at 1 on a copy instead of the original
        # fragile in-place `seg += 1` ... `seg -= 1` round trip.
        shifted_seg = seg + 1
        sp_boxes = get_boxed_sp(np.unique(shifted_seg), shifted_seg, raw, size=feature_size)

        loss = train(sp_boxes, model, optimizer, feature_size)

        writer.add_scalar("loss/sp_ae", loss, e)

    writer.close()  # flush pending TensorBoard events
    torch.save(model.state_dict(), 'ae_sp_feat_model.pth')
Example #2
0
def load_model(filename, device, channels=3, model_architecture=None):
    '''
    Load a model and its training history from a checkpoint file.

    INPUT:
        filename - filename of the checkpoint containing the saved model
        device - target device string; any value other than 'cpu' loads
                 the checkpoint with map_location='cpu'
        channels - number of image channels (only used by Unet_2D)
        model_architecture - fallback architecture name used when the
                             checkpoint does not store 'model_arch'
    OUTPUT:
        (model, model_arch, train_losses, test_losses,
         train_metrics, test_metrics)
    RAISES:
        ValueError - if the resolved architecture name is unknown
                     (previously this surfaced as an opaque NameError).
    '''
    # NOTE(review): this branch looks inverted (non-CPU devices remap to CPU,
    # while 'cpu' loads without remapping) -- kept as-is, worth confirming.
    if device == 'cpu':
        checkpoint = torch.load(filename)
    else:
        checkpoint = torch.load(filename, map_location='cpu')

    try:
        model_arch = checkpoint['model_arch']
        train_losses = checkpoint['train_losses']
        test_losses = checkpoint['test_losses']
        train_metrics = checkpoint['train_metrics']
        test_metrics = checkpoint['test_metrics']
    except KeyError:
        # Older checkpoints without training history: fall back to the
        # caller-supplied architecture and empty histories.
        model_arch = model_architecture
        train_losses = []
        test_losses = []
        train_metrics = []
        test_metrics = []

    # initialize model -- lazily-evaluated factories so that only the
    # selected architecture is actually constructed.
    model_factories = {
        'UNet': lambda: UNet(num_classes=1,
                             depth=6,
                             start_filts=8,
                             merge_mode='concat'),
        'UNet11': lambda: UNet11(pretrained=True),
        'UNet16': lambda: UNet16(num_classes=1, pretrained=True),
        'AlbuNet': lambda: AlbuNet(num_classes=1, pretrained=True),
        'NestedUNet': lambda: NestedUNet(),
        'Unet_2D': lambda: Unet_2D(n_channels=channels, n_classes=1),
        'Res34Unetv4': lambda: Res34Unetv4(),
        'Res34Unetv3': lambda: Res34Unetv3(),
        'Res34Unetv5': lambda: Res34Unetv5(),
    }
    if model_arch not in model_factories:
        raise ValueError('Unknown model architecture: {}'.format(model_arch))
    model = model_factories[model_arch]()

    model.load_state_dict(checkpoint['state_dict'])

    return model, model_arch, train_losses, test_losses, train_metrics, test_metrics
Example #3
0
    # test_model()
    # NOTE(review): `file` shadows the builtin and appears unused in this
    # visible fragment -- TODO confirm against the rest of the function.
    file = 'mask/masks.h5'
    rootPath = '/g/kreshuk/hilt/projects/fewShotLearning/mutexWtsd/models'

    # Alternative setup on the simple 4x4 toy dataset, kept for reference.
    # modelFileSimple = os.path.join(rootPath, 'UnetEdgePredsSimple.pth')
    # dloader = DataLoader(simpleSeg_4_4_Dset(), batch_size=1, shuffle=True, pin_memory=True)
    # affinities_predictor_simple = smallUNet(n_channels=1, n_classes=len(offsets), bilinear=True, device=device)
    # affinities_predictor_simple.load_state_dict(torch.load(modelFileSimple), strict=True)
    # affinities_predictor_simple.cuda()

    # Paths to pretrained edge/affinity predictor checkpoints.
    modelFileCircle = os.path.join(rootPath, 'UnetEdgePreds.pth')
    modelFileCircleG1 = os.path.join(rootPath, 'UnetEdgePredsG1.pth')
    # trainAffPredCircles(modelFileCircle, device, separating_channel, offsets, strides,)
    # NOTE(review): dead assignment -- presumably left as a breakpoint anchor.
    a = 1
    # Load the frozen affinity predictor that drives the disc dataset below.
    affinities_predictor_circle = UNet(n_channels=1,
                                       n_classes=len(offsets),
                                       bilinear=True,
                                       device=device)
    affinities_predictor_circle.load_state_dict(torch.load(modelFileCircle),
                                                strict=True)
    affinities_predictor_circle.cuda()
    dloader_disc = DataLoader(CustomDiscDset(affinities_predictor_circle,
                                             separating_channel),
                              batch_size=1,
                              shuffle=True,
                              pin_memory=True)
    # Second loader on the simple 20x20 dataset; not passed to q_learning
    # below -- presumably used for evaluation elsewhere. TODO confirm.
    dloader_simple_img = DataLoader(SimpleSeg_20_20_Dset(),
                                    batch_size=1,
                                    shuffle=True,
                                    pin_memory=True)

    # Run Q-learning in training mode on the disc data.
    q_learning(dloader_disc, rootPath, learn=True)
Example #4
0
def build_model(device,
                img_size,
                channels,
                test_split,
                batch_size,
                workers,
                model_arch,
                epochs,
                learning_rate,
                swa,
                enable_scheduler,
                loss='BCEDiceLoss',
                all_data=False,
                tta=False):
    """Build, train, evaluate and persist a segmentation model end to end.

    Creates the data loaders, instantiates the requested architecture and
    loss, trains with Adam, picks a prediction threshold on the test set,
    writes a submission CSV and saves a checkpoint.

    Args:
        device: torch device; if None, cuda:0 is used when available.
        img_size: square image side length fed to the loaders.
        channels: number of input image channels.
        test_split: train/test split fraction for the loaders.
        batch_size, workers: DataLoader configuration.
        model_arch: architecture name (see factory table below).
        epochs, learning_rate: training schedule for Adam.
        swa, enable_scheduler: forwarded to ``train``.
        loss: loss name (see criterion table below).
        all_data: train on the full dataset (forwarded to the loaders).
        tta: enable test-time augmentation for the submission.

    Raises:
        ValueError: on an unknown ``model_arch`` or ``loss`` name
            (previously this surfaced as an opaque NameError).
    """
    # create data loaders
    trainloader, testloader, validloader = build_dataloaders(
        image_size=(img_size, img_size),
        channels=channels,
        test_split=test_split,
        batch_size=batch_size,
        num_workers=workers,
        all_data=all_data,
        data_filepath='../siim-train-test/')

    # setup the device (identity check, not `== None`)
    if device is None:
        device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")

    # initialize model -- lazily-evaluated factories so only the selected
    # architecture is constructed.
    model_factories = {
        'UNet': lambda: UNet(num_classes=1,
                             depth=6,
                             start_filts=8,
                             merge_mode='concat'),
        'UNet11': lambda: UNet11(pretrained=True),
        'UNet16': lambda: UNet16(num_classes=1, pretrained=True),
        'AlbuNet': lambda: AlbuNet(num_classes=1, pretrained=True),
        'NestedUNet': lambda: NestedUNet(),
        'Unet_2D': lambda: Unet_2D(n_channels=channels, n_classes=1),
        'Res34Unetv4': lambda: Res34Unetv4(),
        'Res34Unetv3': lambda: Res34Unetv3(),
        'Res34Unetv5': lambda: Res34Unetv5(),
        'BrainUNet': lambda: brain_unet(pretrained=True),
        'R2U_Net': lambda: R2U_Net(),
        'AttU_Net': lambda: AttU_Net(),
        'R2AttU_Net': lambda: R2AttU_Net(),
    }
    if model_arch not in model_factories:
        raise ValueError('Unknown model_arch: {}'.format(model_arch))
    model = model_factories[model_arch]()

    # setup criterion, optimizer and metrics
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    criterion_factories = {
        'BCEDiceLoss': lambda: BCEDiceLoss(),
        'LovaszSoftmaxLoss': lambda: LovaszSoftmaxLoss(),
        'JaccardLoss': lambda: JaccardLoss(device=device),
        'mIoULoss': lambda: mIoULoss(n_classes=1),
        'WeightedBCEDiceLoss': lambda: WeightedBCEDiceLoss(),
    }
    if loss not in criterion_factories:
        raise ValueError('Unknown loss: {}'.format(loss))
    criterion = criterion_factories[loss]()

    metric = iou_score

    # train model
    model, train_losses, test_losses, train_metrics, test_metrics = train(
        model,
        device,
        trainloader,
        testloader,
        optimizer,
        criterion,
        metric,
        epochs,
        learning_rate,
        swa=swa,
        enable_scheduler=enable_scheduler,
        model_arch=model_arch)

    # create submission
    filename = 'submission_' + model_arch + '_lr' + str(
        learning_rate) + '_' + str(epochs) + '.csv'
    print('Generating submission to ' + filename + '\n')
    # Pick the threshold that maximises IoU on the held-out test loader.
    thresholds, ious, index_max, threshold_max = determine_threshold(
        model,
        device,
        testloader,
        image_size=(img_size, img_size),
        channels=channels)
    make_submission(filename,
                    device,
                    model,
                    validloader,
                    image_size=(img_size, img_size),
                    channels=channels,
                    threshold=threshold_max,
                    original_size=1024,
                    tta=tta)

    # save the model
    save_model(model,
               model_arch,
               learning_rate,
               epochs,
               train_losses,
               test_losses,
               train_metrics,
               test_metrics,
               filepath='models_checkpoints')
Example #5
0
def trainAffPredCircles(saveToFile,
                        device,
                        separating_channel,
                        offsets,
                        strides,
                        numEpochs=8):
    """Train a UNet to regress affinities on the synthetic disc dataset.

    Runs ``numEpochs`` passes over a 5-sample CustomDiscDset with an MSE
    loss, then saves the state dict to ``saveToFile``. After each epoch the
    last batch's predictions are run through (partial) mutex-watershed
    segmentation for visual inspection (the plotting itself is disabled).

    Args:
        saveToFile: path the trained state dict is written to.
        device: torch device the input batches are moved to.
        separating_channel: channel index splitting attractive from
            repulsive affinity channels.
        offsets: affinity offset vectors; all must share one dimension.
        strides: stride pattern forwarded to get_valid_edges.
        numEpochs: number of training epochs (default 8).
    """
    dloader = DataLoader(CustomDiscDset(length=5),
                         batch_size=1,
                         shuffle=True,
                         pin_memory=True)
    print('----START TRAINING----' * 4)

    model = UNet(n_channels=1, n_classes=len(offsets), bilinear=True)
    for param in model.parameters():
        param.requires_grad = True

    criterion = nn.MSELoss()

    # Renamed from `optim` to avoid shadowing the torch.optim module alias.
    optimizer = torch.optim.Adam(model.parameters())

    model.cuda()
    since = time.time()

    for epoch in range(numEpochs):
        print('Epoch {}/{}'.format(epoch, numEpochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        # Iterate over data.
        for step, (inputs, _, affinities) in enumerate(dloader):
            inputs = inputs.to(device)
            affinities = affinities.to(device)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward
            # track history if only in train
            with torch.set_grad_enabled(True):
                outputs = model(inputs)
                loss = criterion(outputs, affinities)
                loss.backward()
                optimizer.step()

        # NOTE(review): everything below inspects only the LAST batch of the
        # epoch (`outputs`/`affinities` survive the loop) and feeds disabled
        # plotting code; kept as-is for debugging parity.
        weights = outputs.squeeze().detach().cpu().numpy()
        # weights[separating_channel:] /= 2
        affs = affinities.squeeze().detach().cpu().numpy()
        # Map repulsive channels x -> 1 - x for the mutex watershed.
        weights[separating_channel:] *= -1
        weights[separating_channel:] += +1
        affs[separating_channel:] *= -1
        affs[separating_channel:] += +1

        # Damp attractive predictions before segmentation.
        weights[:separating_channel] /= 1.5

        ndim = len(offsets[0])
        assert all(len(off) == ndim for off in offsets)
        image_shape = weights.shape[1:]
        valid_edges = get_valid_edges(weights.shape, offsets,
                                      separating_channel, strides, False)
        node_labeling1, cut_edges, used_mtxs, neighbors_features = compute_partial_mws_prim_segmentation(
            weights.ravel(), valid_edges.ravel(), offsets, separating_channel,
            image_shape)
        node_labeling_gt = compute_mws_segmentation(affs,
                                                    offsets,
                                                    separating_channel,
                                                    algorithm='kruskal')
        labels = compute_mws_segmentation(weights,
                                          offsets,
                                          separating_channel,
                                          algorithm='kruskal')
        # labels, neighbors, cutting_edges, mutexes = compute_mws_segmentation_cstm(weights, offsets, separating_channel)
        edges = np.zeros(affs.shape).ravel()
        # lbl = 1
        # for cut_edges, rep_edges in zip(cutting_edges, mutexes):
        #     for edge in cutting_edges:
        #         edges[edge] = lbl
        #     for edge in rep_edges:
        #         edges[edge] = lbl
        #     lbl += 1
        # edges = edges.reshape(affs.shape)
        import matplotlib.pyplot as plt
        from matplotlib import cm
        labels = labels.reshape(image_shape)
        labels1 = node_labeling1.reshape(image_shape)
        node_labeling_gt = node_labeling_gt.reshape(image_shape)

        # show_edge1 = cm.prism(edges[0] / edges[0].max())
        # show_edge2 = cm.prism(edges[1] / edges[1].max())
        # show_edge3 = cm.prism(edges[2] / edges[2].max())
        # show_edge4 = cm.prism(edges[3] / edges[3].max())

        # Colourised segmentations / raw input tiled into one debug image.
        show_seg1 = cm.prism(labels1 / labels1.max())
        show_seg = cm.prism(labels / labels.max())
        show_seg2 = cm.prism(node_labeling_gt / node_labeling_gt.max())
        show_raw = cm.gray(inputs.squeeze().detach().cpu().numpy())
        # img1 = np.concatenate([np.concatenate([show_edge1, show_edge2], axis=1),
        #                       np.concatenate([show_edge3, show_edge4], axis=1)], axis=0)
        img2 = np.concatenate([
            np.concatenate([show_seg, show_seg1], axis=1),
            np.concatenate([show_raw, show_seg2], axis=1)
        ],
                              axis=0)
        # plt.imshow(img1); plt.show()
        # plt.imshow(img2); plt.show()

    torch.save(model.state_dict(), saveToFile)
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))

    return