Example #1
def train():
    model = ModelFlow_stride(2, 3, start_channel).cuda()
    loss_similarity = mse_loss
    loss_inverse = mse_loss
    loss_antifold = antifoldloss
    loss_smooth = smoothloss
    transform = SpatialTransform().cuda()
    for param in transform.parameters():
        param.requires_grad = False
        param.volatile = True  # legacy pre-0.4 flag; modern PyTorch uses torch.no_grad() instead
    names = glob.glob(datapath + '/*.gz')
    grid = generate_grid(imgshape)
    grid = Variable(torch.from_numpy(np.reshape(grid, (1,) + grid.shape))).cuda().float()

    print(grid.type())
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model_dir = '../Model'

    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    lossall = np.zeros((5,iteration))
    training_generator = Data.DataLoader(Dataset(names,iteration,True), batch_size=1,
                        shuffle=False, num_workers=2)
    step=0
    for X, Y in training_generator:

        X = X.cuda().float()
        Y = Y.cuda().float()
        F_xy = model(X,Y)
        F_yx = model(Y,X)
    
        X_Y = transform(X,F_xy.permute(0,2,3,4,1)*range_flow,grid)
        Y_X = transform(Y,F_yx.permute(0,2,3,4,1)*range_flow,grid)
        # Note that the generation of the inverse flow depends on the definition of
        # the transform; the strategies differ slightly for backward and forward warping.
        F_xy_ = transform(-F_yx,F_xy.permute(0,2,3,4,1)*range_flow,grid)
        F_yx_ = transform(-F_xy,F_yx.permute(0,2,3,4,1)*range_flow,grid)
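        # With backward warping, the estimated inverse of F_xy is the negated reverse
        # flow resampled at the voxels the forward flow points to, i.e.
        # F_xy_(x) ~ -F_yx(x + F_xy(x)); loss2 below penalizes the gap between
        # F_xy and this estimate (and symmetrically for F_yx).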
        loss1 = loss_similarity(Y,X_Y) + loss_similarity(X,Y_X)
        loss2 = loss_inverse(F_xy*range_flow,F_xy_*range_flow) + loss_inverse(F_yx*range_flow,F_yx_*range_flow)
        
        
        loss3 =  loss_antifold(F_xy*range_flow) + loss_antifold(F_yx*range_flow)
        loss4 =  loss_smooth(F_xy*range_flow) + loss_smooth(F_yx*range_flow)
        loss = loss1 + inverse * loss2 + antifold * loss3 + smooth * loss4
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients
        lossall[:,step] = np.array([loss.item(),loss1.item(),loss2.item(),loss3.item(),loss4.item()])
        sys.stdout.write("\r" + 'step "{0}" -> training loss "{1:.4f}" - sim "{2:.4f}" - inv "{3:.4f}" \
            - ant "{4:.4f}" -smo "{5:.4f}" '.format(step, loss.item(),loss1.item(),loss2.item(),loss3.item(),loss4.item()))
        sys.stdout.flush()
        if step % n_checkpoint == 0:
            modelname = model_dir + '/' + str(step) + '.pth'
            torch.save(model.state_dict(), modelname)
        step+=1
    np.save(model_dir+'/loss.npy',lossall)
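
train() reads its hyperparameters from module-level globals, and the networks, losses, and data pipeline (ModelFlow_stride, SpatialTransform, mse_loss, antifoldloss, smoothloss, generate_grid, Dataset) come from the enclosing repository. A minimal sketch of how those globals might be set up before calling the function; every value below is an illustrative assumption, not a setting from the original code.

import glob
import os
import sys

import numpy as np
import torch
import torch.utils.data as Data
from torch.autograd import Variable

datapath = '../Data'        # directory holding the *.gz volumes (assumed)
imgshape = (144, 192, 160)  # spatial shape of the input volumes (assumed)
start_channel = 8           # base channel count of ModelFlow_stride (assumed)
lr = 1e-4                   # Adam learning rate (assumed)
iteration = 40000           # number of training steps (assumed)
n_checkpoint = 1000         # checkpoint interval in steps (assumed)
range_flow = 100            # scales network output to voxel displacements (assumed)
inverse = 0.1               # weight of the inverse-consistency loss (assumed)
antifold = 1000             # weight of the anti-folding loss (assumed)
smooth = 3.0                # weight of the smoothness loss (assumed)

if __name__ == '__main__':
    train()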
Example #2
def train():
    torch.cuda.empty_cache()
    device = torch.device("cuda:6")
    model = ModelFlow_stride(2, 3, start_channel).cuda(device)
    loss_similarity = mse_loss
    loss_inverse = mse_loss
    loss_antifold = antifoldloss
    loss_smooth = smoothloss
    transform = SpatialTransform().cuda(device)
    for param in transform.parameters():
        param.requires_grad = False
        param.volatile = True
    names = glob.glob(datapath + '/*.gz')
    grid = generate_grid(imgshape)
    grid = Variable(torch.from_numpy(np.reshape(
        grid, (1, ) + grid.shape))).cuda(device).float()

    print(grid.type())
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model_dir = '../Model'

    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    lossall = np.zeros((5, iteration))
    training_generator = Data.DataLoader(Dataset(names, iteration, True),
                                         batch_size=1,
                                         shuffle=False,
                                         num_workers=0)
    step = 0
    for X, Y in training_generator:
        X = X.cuda(device).float()
        Y = Y.cuda(device).float()
        X, Y = train_padding(X, Y)  # pad the volumes to a network-friendly shape
        # train_padding may return CPU tensors, so move the padded volumes back to the GPU
        X = X.cuda(device).float()
        Y = Y.cuda(device).float()

        F_xy = model(X, Y)
        F_yx = model(Y, X)
        X_Y = transform(X, F_xy.permute(0, 2, 3, 4, 1) * range_flow, grid)
        Y_X = transform(Y, F_yx.permute(0, 2, 3, 4, 1) * range_flow, grid)
        # build the inverse-consistency estimates by warping each negated reverse
        # flow with the corresponding forward flow (cf. Example #1)
        F_xy_ = transform(-F_yx,
                          F_xy.permute(0, 2, 3, 4, 1) * range_flow, grid)
        F_yx_ = transform(-F_xy,
                          F_yx.permute(0, 2, 3, 4, 1) * range_flow, grid)
        loss1 = loss_similarity(Y, X_Y) + loss_similarity(X, Y_X)
        loss2 = loss_inverse(F_xy * range_flow,
                             F_xy_ * range_flow) + loss_inverse(
                                 F_yx * range_flow, F_yx_ * range_flow)
        loss3 = loss_antifold(F_xy * range_flow) + loss_antifold(
            F_yx * range_flow)
        loss4 = loss_smooth(F_xy * range_flow) + loss_smooth(F_yx * range_flow)
        loss = loss1 + inverse * loss2 + antifold * loss3 + smooth * loss4
        optimizer.zero_grad()  # clear gradients for this training step
        loss.backward()  # backpropagation, compute gradients
        optimizer.step()  # apply gradients
        lossall[:, step] = np.array(
            [loss.item(), loss1.item(), loss2.item(), loss3.item(), loss4.item()])
        sys.stdout.write(
            "\r" +
            'step "{0}" -> training loss "{1:.4f}" - sim "{2:.4f}" - inv "{3:.4f}" - ant "{4:.4f}" - smo "{5:.4f}" '
            .format(step, loss.item(), loss1.item(), loss2.item(),
                    loss3.item(), loss4.item()))
        sys.stdout.flush()
        if step % n_checkpoint == 0:
            modelname = model_dir + '/' + str(step) + '.pth'
            torch.save(model.state_dict(), modelname)
        step += 1
        del X, Y
        torch.cuda.empty_cache()  # release cached GPU memory once the batch is freed
    np.save(model_dir + '/loss.npy', lossall)
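
train_padding is called above but not shown in the listing. A plausible minimal sketch, assuming its job is to zero-pad both (N, C, D, H, W) volumes so every spatial dimension becomes a multiple of the network's total downsampling stride; the name matches the call, but the body and the multiple of 16 are hypothetical.

import torch.nn.functional as F

def train_padding(X, Y, multiple=16):
    # hypothetical sketch: symmetric zero-padding of both volumes so each
    # spatial dimension is divisible by `multiple`
    pad = []
    for size in reversed(X.shape[2:]):   # F.pad pads the last dimension first
        extra = (-size) % multiple
        pad.extend([extra // 2, extra - extra // 2])
    return F.pad(X, pad), F.pad(Y, pad)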
Example #3
def train():
    model = ModelFlow_stride(2, 3, start_channel).cuda()
    loss_similarity = NCC().loss
    loss_cycle = l1_loss
    loss_smooth = smoothloss
    transform = SpatialTransform().cuda()
    for param in transform.parameters():
        param.requires_grad = False
        param.volatile = True
    names = glob.glob(datapath + '/*.gz')
    grid = generate_grid(imgshape)
    grid = Variable(torch.from_numpy(np.reshape(grid, (1, ) +
                                                grid.shape))).cuda().float()

    print(grid.type())
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model_dir = '../Model'

    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)
    lossall = np.zeros((5, iteration))
    training_generator = Data.DataLoader(Dataset(names, iteration, False),
                                         batch_size=1,
                                         shuffle=False,
                                         num_workers=2)
    step = 0
    for X, Y in training_generator:

        X = X.cuda().float()
        Y = Y.cuda().float()
        F_xy = model(X, Y)
        F_yx = model(Y, X)

        X_Y = transform(X, F_xy.permute(0, 2, 3, 4, 1) * range_flow, grid)
        Y_X = transform(Y, F_yx.permute(0, 2, 3, 4, 1) * range_flow, grid)

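        # cycle pass: register the warped images back towards their sources and
        # warp them a second time, so X_Y_X and Y_X_Y should reproduce X and Y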
        F_xy_ = model(Y_X, X_Y)
        F_yx_ = model(X_Y, Y_X)

        Y_X_Y = transform(Y_X, F_xy_.permute(0, 2, 3, 4, 1) * range_flow, grid)
        X_Y_X = transform(X_Y, F_yx_.permute(0, 2, 3, 4, 1) * range_flow, grid)

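        # identity pass: registering an image to itself should produce near-zero flow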
        F_xx = model(X, X)
        F_yy = model(Y, Y)
        X_X = transform(X, F_xx.permute(0, 2, 3, 4, 1) * range_flow, grid)
        Y_Y = transform(Y, F_yy.permute(0, 2, 3, 4, 1) * range_flow, grid)

        L_smooth = loss_smooth(F_xy * range_flow) + loss_smooth(
            F_yx * range_flow)
        L_regist = loss_similarity(Y, X_Y) + loss_similarity(X, Y_X) + \
                      lambda_ * L_smooth
        L_cycle = loss_cycle(X, X_Y_X) + loss_cycle(Y, Y_X_Y)
        L_identity = loss_similarity(X, X_X) + loss_similarity(Y, Y_Y)
        loss = L_regist + alpha * L_cycle + beta * L_identity

        optimizer.zero_grad()  # clear gradients for this training step
        loss.backward()  # backpropagation, compute gradients
        optimizer.step()  # apply gradients
        lossall[:, step] = np.array([
            loss.item(), L_regist.item(), L_cycle.item(),
            L_identity.item(), L_smooth.item()
        ])
        sys.stdout.write(
            "\r" +
            'step "{0}" -> training loss "{1:.4f}" - reg "{2:.4f}" - cyc "{3:.4f}" - ind "{4:.4f}" - smo "{5:.4f}" '
            .format(step, loss.item(), L_regist.item(), L_cycle.item(),
                    L_identity.item(), L_smooth.item()))
        sys.stdout.flush()
        if step % n_checkpoint == 0:
            modelname = model_dir + '/' + str(step) + '.pth'
            torch.save(model.state_dict(), modelname)
        step += 1
    np.save(model_dir + '/loss.npy', lossall)
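
The helper losses are imported from the enclosing repository. A minimal sketch of plausible definitions for l1_loss and smoothloss, assuming flow fields in (N, 3, D, H, W) layout; these are illustrative, not the repository's exact implementations.

import torch

def l1_loss(x, y):
    # mean absolute error between two volumes (sketch)
    return torch.mean(torch.abs(x - y))

def smoothloss(flow):
    # first-order smoothness: mean squared forward difference of the flow
    # along each spatial axis (sketch)
    dz = flow[:, :, 1:, :, :] - flow[:, :, :-1, :, :]
    dy = flow[:, :, :, 1:, :] - flow[:, :, :, :-1, :]
    dx = flow[:, :, :, :, 1:] - flow[:, :, :, :, :-1]
    return (dz.pow(2).mean() + dy.pow(2).mean() + dx.pow(2).mean()) / 3.0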
Example #4
def train():
    model = SYMNet(2, 3, start_channel).cuda()
    loss_similarity = NCC()
    loss_smooth = smoothloss
    loss_magnitude = magnitude_loss
    loss_Jdet = neg_Jdet_loss

    transform = SpatialTransform().cuda()
    diff_transform = DiffeomorphicTransform(time_step=7).cuda()
    com_transform = CompositionTransform().cuda()

    for param in transform.parameters():
        param.requires_grad = False
        param.volatile = True
    names = sorted(glob.glob(datapath + '/*.nii'))[0:255]
    grid = generate_grid(imgshape)
    grid = torch.from_numpy(np.reshape(grid,
                                       (1, ) + grid.shape)).cuda().float()

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    # optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    model_dir = '../Model'

    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)

    lossall = np.zeros((6, iteration))

    training_generator = Data.DataLoader(Dataset_epoch(names, norm=False),
                                         batch_size=1,
                                         shuffle=True,
                                         num_workers=2)
    step = 0

    while step <= iteration:
        for X, Y in training_generator:

            X = X.cuda().float()
            Y = Y.cuda().float()
            F_xy, F_yx = model(X, Y)

            F_X_Y_half = diff_transform(F_xy, grid, range_flow)
            F_Y_X_half = diff_transform(F_yx, grid, range_flow)

            F_X_Y_half_inv = diff_transform(-F_xy, grid, range_flow)
            F_Y_X_half_inv = diff_transform(-F_yx, grid, range_flow)
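
            # symmetric formulation: each image is warped only half-way, and the
            # full flows below are built by composing one half-flow with the
            # inverse of the other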

            X_Y_half = transform(
                X,
                F_X_Y_half.permute(0, 2, 3, 4, 1) * range_flow, grid)
            Y_X_half = transform(
                Y,
                F_Y_X_half.permute(0, 2, 3, 4, 1) * range_flow, grid)

            F_X_Y = com_transform(F_X_Y_half, F_Y_X_half_inv, grid, range_flow)
            F_Y_X = com_transform(F_Y_X_half, F_X_Y_half_inv, grid, range_flow)

            X_Y = transform(X, F_X_Y.permute(0, 2, 3, 4, 1) * range_flow, grid)
            Y_X = transform(Y, F_Y_X.permute(0, 2, 3, 4, 1) * range_flow, grid)

            loss1 = loss_similarity(X_Y_half, Y_X_half)
            loss2 = loss_similarity(Y, X_Y) + loss_similarity(X, Y_X)
            loss3 = loss_magnitude(F_X_Y_half * range_flow,
                                   F_Y_X_half * range_flow)
            loss4 = loss_Jdet(
                F_X_Y.permute(0, 2, 3, 4, 1) * range_flow, grid) + loss_Jdet(
                    F_Y_X.permute(0, 2, 3, 4, 1) * range_flow, grid)
            loss5 = loss_smooth(F_xy * range_flow) + loss_smooth(
                F_yx * range_flow)

            loss = loss1 + loss2 + magnitude * loss3 + local_ori * loss4 + smooth * loss5
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            lossall[:, step] = np.array([
                loss.item(),
                loss1.item(),
                loss2.item(),
                loss3.item(),
                loss4.item(),
                loss5.item()
            ])
            sys.stdout.write(
                "\r" +
                'step "{0}" -> training loss "{1:.4f}" - sim_mid "{2:.4f}" - sim_full "{3:.4f}" - mag "{4:.4f}" - Jdet "{5:.10f}" - smo "{6:.4f}" '
                .format(step, loss.item(), loss1.item(), loss2.item(),
                        loss3.item(), loss4.item(), loss5.item()))
            sys.stdout.flush()

            if step % n_checkpoint == 0:
                modelname = model_dir + '/SYMNet_' + str(step) + '.pth'
                torch.save(model.state_dict(), modelname)
                np.save(model_dir + '/loss_SYMNet_' + str(step) + '.npy',
                        lossall)
            step += 1

            if step > iteration:
                break
        print("one epoch pass")
    np.save(model_dir + '/loss_SYMNet.npy', lossall)
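
Example #4 turns the predicted velocity fields into (approximately) diffeomorphic deformations with DiffeomorphicTransform(time_step=7). A minimal sketch of the standard scaling-and-squaring scheme behind such a module, assuming backward warping with grid_sample and an identity grid in normalized [-1, 1] coordinates; unit and layout conventions differ between repositories, so treat this as illustrative rather than the repository's implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class DiffeomorphicTransformSketch(nn.Module):
    # scaling and squaring: divide the velocity by 2**time_step, then
    # self-compose the resulting small displacement time_step times
    def __init__(self, time_step=7):
        super().__init__()
        self.time_step = time_step

    def forward(self, velocity, grid, range_flow):
        # velocity: (N, 3, D, H, W); grid: (1, D, H, W, 3) identity grid in
        # normalized coordinates (assumed); range_flow scales the network
        # output to displacements in grid units (assumed)
        flow = velocity * range_flow / (2.0 ** self.time_step)
        for _ in range(self.time_step):
            loc = grid + flow.permute(0, 2, 3, 4, 1)   # where each voxel maps
            flow = flow + F.grid_sample(flow, loc, mode='bilinear',
                                        align_corners=True)
        return flow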