Example No. 1
        K = 1
    else:
        optimizer = optim.Adam(resnet.parameters(), lr=lr, betas=(0.5, 0.999))
        K = 4

    epoch = 0
    niter = 5
    loss_iters = np.zeros(niter)
    while epoch < 2:
        epoch += 1
        # for idx, (rdf_inputs, rdfs, masks, weights, wGs, D) in enumerate(trainLoader):  # for real rdf
        for idx, (qsms, rdf_inputs, rdfs, wG, weights, masks_csf,
                  D) in enumerate(trainLoader):  # for simulated rdf

            if epoch == 1:
                # Run the pretrained networks once in evaluation mode to get
                # initial QSM estimates from the input RDF.
                unet3d.eval()
                resnet.eval()
                unet3d_p.eval()
                unet3d_.eval()
                unet3d_pdi.eval()
                unet3d.to(device)
                unet3d_p.to(device)
                unet3d_.to(device)
                unet3d_pdi.to(device)
                rdf_inputs = rdf_inputs.to(device, dtype=torch.float)
                with torch.no_grad():
                    qsm_inputs = unet3d(rdf_inputs).cpu().detach()
                    QSMnet_p = unet3d_p(rdf_inputs).cpu().detach()
                    QSMnet_ = unet3d_(rdf_inputs).cpu().detach()
                    # unet3d_pdi expects a shifted, scaled input; the scaling
                    # is undone on its output below.
                    PDI = unet3d_pdi((rdf_inputs + 0.15) * 3).cpu().detach()
                # QSMnet = np.squeeze(np.asarray(qsm_inputs))
                QSMnet = np.squeeze(np.asarray(QSMnet_))
                QSMnet_p = np.squeeze(np.asarray(QSMnet_p))
                PDI = np.squeeze(np.asarray(PDI[0, 0, ...])) / 3

            else:
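
The conversion pattern np.squeeze(np.asarray(x.cpu().detach())) recurs throughout these fragments. A small helper could capture it (to_numpy_volume is a hypothetical name, not part of the original script):

import numpy as np

def to_numpy_volume(t):
    # Detach the tensor from the autograd graph, move it to the CPU and drop
    # singleton dimensions, mirroring np.squeeze(np.asarray(x.cpu().detach())).
    return np.squeeze(t.detach().cpu().numpy())
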
Example No. 2
    epoch = 0
    loss_iters = np.zeros(niter)
    while epoch < niter:
        epoch += 1

        # training phase
        for idx, (rdfs, masks, weights, wGs) in enumerate(trainLoader):

            rdfs = (rdfs.to(device, dtype=torch.float) + trans) * scale
            masks = masks.to(device, dtype=torch.float)
            weights = weights.to(device, dtype=torch.float)
            wGs = wGs.to(device, dtype=torch.float)

            if epoch == 1:
                unet3d.eval()

                # Single forward pass; channel 0 holds the predicted QSM
                # (mean), channel 1 its estimated standard deviation.
                with torch.no_grad():
                    outputs = unet3d(rdfs)
                means = outputs[:, 0, ...]
                stds = outputs[:, 1, ...]
                QSM = np.squeeze(np.asarray(means.cpu().detach()))
                STD = np.squeeze(np.asarray(stds.cpu().detach()))

                print('Saving initial results')
                adict = {}
                adict['QSM'] = QSM
                sio.savemat(rootDir + '/QSM_0.mat', adict)

                adict = {}
                adict['STD'] = STD
                sio.savemat(rootDir + '/STD_0.mat', adict)
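
If the saved initial results need checking, the .mat files written above can be read back with scipy.io.loadmat. A usage sketch, reusing the rootDir from the fragment:

import scipy.io as sio

qsm0 = sio.loadmat(rootDir + '/QSM_0.mat')['QSM']
std0 = sio.loadmat(rootDir + '/STD_0.mat')['STD']
print(qsm0.shape, std0.shape)  # spatial dimensions of the initial QSM/STD maps
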
Example No. 3
    # network
    model = Unet(input_channels=1,
                 output_channels=2,
                 num_filters=[2**i for i in range(5, 10)],
                 bilateral=1,
                 use_deconv=1,
                 use_deconv2=1,
                 renorm=0,
                 flag_r_train=0,
                 flag_UTFI=1)
    model.to(device)
    model.load_state_dict(
        torch.load(
            rootDir +
            '/weights/weight_pdf={0}_tv={1}.pt'.format(lambda_pdf, lambda_tv)))
    model.eval()

    chi_bs, chi_ls = [], []
    with torch.no_grad():
        for idx, (ifreqs, masks, data_weights, wGs) in enumerate(testLoader):
            ifreqs = ifreqs.to(device, dtype=torch.float)
            masks = masks.to(device, dtype=torch.float)
            outputs = model(ifreqs)
            # Channel 0 is kept outside the mask, channel 1 inside it.
            chi_b = outputs[:, 0:1, ...] * (1 - masks)
            chi_l = outputs[:, 1:2, ...] * masks
            chi_bs.append(chi_b[0, ...].cpu().detach())
            chi_ls.append(chi_l[0, ...].cpu().detach())

        chi_bs = np.concatenate(chi_bs, axis=0)
        chi_ls = np.concatenate(chi_ls, axis=0)
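
The fragment stops after concatenation. A plausible continuation (hypothetical file names, following the scipy.io.savemat pattern of Example No. 2) would store the two maps:

import scipy.io as sio

# chi_bs: channel 0 masked outside the brain; chi_ls: channel 1 masked inside it.
sio.savemat(rootDir + '/chi_b.mat', {'chi_b': chi_bs})
sio.savemat(rootDir + '/chi_l.mat', {'chi_l': chi_ls})
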
Example No. 4
            loss1 = loss_QSMnet(outputs1, qsms, masks, D)
            loss2 = loss_QSMnet(outputs2, qsms, masks, D)
            loss = loss1 + loss2

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Accumulate plain Python numbers so the autograd graphs of the
            # per-batch losses are not kept alive across iterations.
            loss1_sum += loss1.item()
            loss2_sum += loss2.item()
            gen_iterations += 1

        scheduler.step(epoch)

        # validation phase
        resnet.eval()
        unet3d.eval()
        loss_total = 0
        idx = 0
        with torch.no_grad():  # avoid building the autograd graph (memory blow-up) during validation
            for idx, (rdfs, masks, qsms) in enumerate(valLoader):
                idx += 1  # after the loop, idx equals the number of validation batches
                rdfs = (rdfs.to(device, dtype=torch.float) + trans) * scale
                qsms = (qsms.to(device, dtype=torch.float) + trans) * scale
                masks = masks.to(device, dtype=torch.float)

                outputs1 = unet3d(rdfs)
                outputs2 = resnet(torch.cat((rdfs, outputs1), 1))
                loss1 = loss_QSMnet(outputs1, qsms, masks, D)
                loss2 = loss_QSMnet(outputs2, qsms, masks, D)
                loss = loss1 + loss2
                loss_total += loss
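
The validation loop ends before the loss is reported. Assuming idx counts the validation batches as noted above, a plausible continuation is:

        # Hypothetical continuation: average and report the validation loss.
        avg_val_loss = (loss_total / idx).item()
        print('epoch {}: validation loss = {:.6f}'.format(epoch, avg_val_loss))
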
Example No. 5
                           'images',
                           None,
                           False,
                           0,
                           transform=transform_test)

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=8)

    print('model initialize')
    net = Unet(1, 1, 16).cuda()
    net = nn.DataParallel(net)
    print('model load')
    net.load_state_dict(torch.load('./ckpt/unet.pth'))
    net.eval()

    with open('output/result.csv', 'w') as f:
        f.write('id,rle_mask\n')
        with torch.no_grad():  # inference only; no gradients needed
            for batch_image, batch_name in tqdm(dataloader):
                outputs = net(batch_image)
                outputs = F.softmax(outputs, dim=1)[:, 1, :, :]
                outputs = outputs > 0.50
                # pdb.set_trace()
                for k, v in zip(batch_name, outputs):
                    # Move the predicted mask to the CPU before converting to numpy.
                    run = rle_encode(np.array(v.cpu()))
                    f.write('{},{}\n'.format(k[:-4], run))
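
rle_encode is called above but not defined in the fragment. A minimal sketch of a run-length encoder in the common Kaggle submission format (1-based, column-major pixel indices; the original helper may differ) is:

import numpy as np

def rle_encode(mask):
    # mask: 2-D boolean (or 0/1) array for a single image.
    pixels = mask.flatten(order='F').astype(np.uint8)
    padded = np.concatenate([[0], pixels, [0]])
    runs = np.where(padded[1:] != padded[:-1])[0] + 1  # 1-based run boundaries
    runs[1::2] -= runs[0::2]                           # convert end positions to lengths
    return ' '.join(str(x) for x in runs)
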