Code Example #1
        # else:
        #     if self.axis == 2:
        #         mri = mri[36,:,:]
        #     elif self.axis == 1:
        #         mri = mri[:,47,:]
        #     elif self.axis == 0:
        #         mri = mri[:,:,36]
        #
        # if self.transform is not None:
        #     mri = self.transform(mri)

        return dict(image=mri, target=target, sid=sid)


if __name__ == '__main__':
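    # Data sanity check: build one training fold of the ADNI dataset and stream sample sMRI volumes to Visdom with Image3D.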
    labels = ('AD', 'CN')
    sids, targets, train_idx, test_idx, ratio = fold_split(
        5, 0, labels, 'data/subject_ids.pkl', 'data/diagnosis.pkl')
    trainset = ADNIDataset(labels, 'data/', sids[train_idx],
                           targets[train_idx])
    from summary import Image3D
    from visdom import Visdom

    vis = Visdom(port=10001, env='datacheck')
    mri_print = Image3D(vis, 'mri')
    for sample in iter(trainset):
        # __getitem__ returns a dict, so unpack the image and target explicitly
        smri, target = sample['image'], sample['target']
        print(smri.max(), smri.min())
        print(smri[:, :, 2:-2].shape)
        mri_print(str(target), smri[:, :, 2:-2], 10)
        exit()
Code Example #2
                                             title='AP_loss',
                                             ytickmin=0,
                                             ytinkmax=10)),
                    info_loss=Scalar(vis,
                                     'info_loss',
                                     opts=dict(showlegend=True,
                                               title='info_loss',
                                               ytickmin=0,
                                               ytinkmax=10)),
                    acc=Scalar(vis,
                               'Accuracy',
                               opts=dict(showlegend=True,
                                         title='Accuracy',
                                         ytickmin=0,
                                         ytinkmax=2.0)),
                    inputs=Image3D(vis, 'inputs'),
                    outputs=Image3D(vis, 'outputs'))

    # create train set,  x = image, y=target
    x, y, train_idx, test_idx, ratio = fold_split(FG)
    #transform=Compose([ToWoldCoordinateSystem(), Normalize((0.5, 0.9)), ToTensor()])
    transform = Compose([ToWoldCoordinateSystem(), ToTensor()])

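    # Wrap the train and test folds in ADNIDataset with the shared transform, then batch the training fold.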
    trainset = ADNIDataset(FG, x[train_idx], y[train_idx], transform=transform)
    testset = ADNIDataset(FG, x[test_idx], y[test_idx], transform=transform)

    trainloader = DataLoader(trainset,
                             batch_size=FG.batch_size,
                             shuffle=True,
                             pin_memory=True,
                             num_workers=4)
Code Example #3
    for i in range(sample_num):
        sample_c[i, 2] = temp_c[i % len(temp_c)]

    idx = 0
    for i in range(len(temp_c)**2):
        for j in range(len(temp_c)):
            sample_c[i * len(temp_c) + j, 1] = temp_c[idx % len(temp_c)]
        idx += 1

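    # Move the codes to the GPU, generate volumes with G, and log each clamped sample to Visdom.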
    sample_c = sample_c.cuda(device, non_blocking=True)
    gi_c1 = G(sample_z, sample_c, FG.axis)
    for i in range(sample_num):
        name = 'target_' + str(i) + ':' + str(
            sample_c[i, 0].item()) + ':' + str(
                sample_c[i, 1].item()) + ':' + str(sample_c[i, 2].item())
        save_printer1 = Image3D(vis, name)
        save_printer1(name, torch.clamp(gi_c1[i, :, :, :], min=0, max=1))
    exit()
    ############################################################################

    # create latent codes
    discrete_code = FG.d_code
    continuous_code = FG.c_code
    z_dim = FG.z
    sample_num = 400
    sample_c = torch.zeros((sample_num, continuous_code))
    sample_c = sample_c.cuda(device, non_blocking=True)

    sample_z = torch.zeros((400, z_dim))
    for k in range(400):
        sample_z[k] = torch.rand(1, z_dim)
Code Example #4
    model.eval()
    G.eval()
    torch.set_grad_enabled(False)
    ############################ original : test #############################
    test_scores = ScoreReport()
    test_scores.clear()
    print('original image result')
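    # Evaluate the classifier on the original (unmodified) test volumes and accumulate softmax scores.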
    for sample in testloader:
        mri = sample['image']
        target = sample['target']
        mri = mri.cuda(device, non_blocking=True)
        target = target.to(device)

        name = 'ori-target'
        save_printer1 = Image3D(vis, name)
        save_printer1(name, torch.clamp(mri[0, :, :, :], min=0, max=1))

        output = model(mri)

        score = torch.nn.functional.softmax(output, 1)
        test_scores.update_true(target)
        test_scores.update_score(score)

    print('accuracy:', test_scores.accuracy)

    ############################ cont test ##################################
    # create latent codes
    discrete_code = FG.d_code
    continuous_code = FG.c_code
    z_dim = FG.z
Code Example #5
                                         title='D_x',
                                         ytickmin=0,
                                         ytinkmax=2.0)),
                    ofake=Scalar(vis,
                                 'Fake output',
                                 opts=dict(showlegend=True,
                                           title='Fake output',
                                           ytickmin=0,
                                           ytinkmax=2.0)),
                    oreal=Scalar(vis,
                                 'Real output',
                                 opts=dict(showlegend=True,
                                           title='Real output',
                                           ytickmin=0,
                                           ytinkmax=2.0)),
                    inputs=Image3D(vis, 'inputs'),
                    fake=Image3D(vis, 'fake'),
                    valid=Image3D(vis, 'valid'),
                    outputs=Image3D(vis, 'outputs'),
                    outputs2=Image3D(vis, 'outputs2'))

    # x, y = Trainset(FG)      # x = image, y=target
    x, y, train_idx, test_idx, ratio = fold_split(FG)
    # transform=Compose([ToFloatTensor(), Normalize(0.5,0.5)])
    # trainset = ADNIDataset2D(FG, x[train_idx], y[train_idx], transform=transform)
    # testset = ADNIDataset2D(FG, x[test_idx], y[test_idx], transform=transform)

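    # Preprocessing: convert to world coordinates, to tensors, and pad the volumes before batching.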
    transform = Compose([
        ToWoldCoordinateSystem(),
        ToTensor(),
        Pad(1, 0, 1, 0, 1, 0),
Code Example #6
            showlegend=True, title='lr', ytickmin=0, ytinkmax=2.0)),
        D_loss = Scalar(vis, 'D_loss', opts=dict(
            showlegend=True, title='D loss', ytickmin=0, ytinkmax=2.0)),
        G_loss = Scalar(vis, 'G_loss', opts=dict(
            showlegend=True, title='G loss', ytickmin=0, ytinkmax=10)),
        AC_loss = Scalar(vis, 'AC_loss', opts=dict(
            showlegend=True, title='AC loss', ytickmin=0, ytinkmax=10)),
        info_loss = Scalar(vis, 'info_loss', opts=dict(
            showlegend=True, title='info_loss', ytickmin=0, ytinkmax=10)),
        DG_z1 = Scalar(vis, 'DG_z1', opts=dict(
            showlegend=True, title='DG_z1', ytickmin=0, ytinkmax=2.0)),
        DG_z2 = Scalar(vis, 'DG_z2', opts=dict(
            showlegend=True, title='DG_z2', ytickmin=0, ytinkmax=2.0)),
        D_x = Scalar(vis, 'D_x', opts=dict(
            showlegend=True, title='D_x', ytickmin=0, ytinkmax=2.0)),
        inputs0 = Image3D(vis, 'inputs0'),
        inputs1 = Image3D(vis, 'inputs1'),
        fake0 = Image3D(vis, 'fake0'),
        fake1 = Image3D(vis, 'fake1'),
        outputs0 = Image3D(vis, 'outputs0'),
        outputs1 = Image3D(vis, 'outputs1'))

    # dataset setting
    x, y = Trainset(FG)
    # x, y, train_idx, test_idx, ratio = fold_split(FG)
    # transform = Compose([ToFloatTensor(), Normalize(0.5,0.5)])
    # trainset = ADNIDataset2D(FG, x, y, transform=transform)
    transform=Compose([ToWoldCoordinateSystem(), ToTensor(), Pad(1,0,1,0,1,0), Normalize(0.5,0.5)])
    trainset = ADNIDataset(FG, x, y, transform=transform)
    trainloader = DataLoader(trainset, batch_size=FG.batch_size,
                             shuffle=True, pin_memory=True)
Code Example #7
                                         title='acc',
                                         ytickmin=0,
                                         ytinkmax=3)),
                    accuracy=Scalar(vis,
                                    'accuracy',
                                    opts=dict(showlegend=True,
                                              title='accuracy',
                                              ytickmin=0,
                                              ytinkmax=3)),
                    predict=Scalar(vis,
                                   'predict',
                                   opts=dict(showlegend=True,
                                             title='predict',
                                             ytickmin=0,
                                             ytinkmax=3)),
                    output0=Image3D(vis, 'output0'),
                    output1=Image3D(vis, 'output1'),
                    output2=Image3D(vis, 'output2'),
                    output3=Image3D(vis, 'output3'),
                    output4=Image3D(vis, 'output4'),
                    output5=Image3D(vis, 'output5'),
                    output6=Image3D(vis, 'output6'),
                    output7=Image3D(vis, 'output7'),
                    output8=Image3D(vis, 'output8'),
                    output9=Image3D(vis, 'output9'))

    # transforms to apply to the data
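    # ToTensor scales pixels to [0, 1]; (0.1307, 0.3081) are the standard MNIST mean/std for single-channel normalization.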
    trans = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
Code Example #8
File: main_dcgan.py, Project: christy4526/mri_biGAN
                                           title='DG_z1',
                                           ytickmin=0,
                                           ytinkmax=2.0)),
                    DG_z2=Scalar(vis,
                                 'DG_z2',
                                 opts=dict(showlegend=True,
                                           title='DG_z2',
                                           ytickmin=0,
                                           ytinkmax=2.0)),
                    D_x=Scalar(vis,
                               'D_x',
                               opts=dict(showlegend=True,
                                         title='D_x',
                                         ytickmin=0,
                                         ytinkmax=2.0)),
                    inputs=Image3D(vis, 'inputs'),
                    fake=Image3D(vis, 'fake'),
                    valid=Image3D(vis, 'valid'),
                    outputs=Image3D(vis, 'outputs'),
                    outputs2=Image3D(vis, 'outputs2'))

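    # Load the 2D ADNI training data, normalize it, and batch it for DCGAN training.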
    x, y = Trainset(FG)  # x = image, y=target
    transform = Compose([ToFloatTensor(), Normalize(0.5, 0.5)])
    trainset = ADNIDataset2D(FG, x, y, transform=transform)
    trainloader = DataLoader(trainset,
                             batch_size=FG.batch_size,
                             shuffle=True,
                             pin_memory=True)

    D = dcDiscriminator2D(FG).to('cuda:{}'.format(
        FG.devices[0]))  # discriminator net D(x, z)
Code Example #9
        y_pred = get_ensambled_y_preds(outputs)

        # confidence = ...
        confidence = torch.zeros(1).cuda(device, non_blocking=True)
        temp = torch.zeros(5, 2).cuda(device, non_blocking=True)
        for j in range(len(nets)):
            temp[j] += outputs[j][0]
        if y_pred.item() == 0:
            confidence = temp[:, 0].max()
        else:
            confidence = 1 - temp[:, 1].max()
        #print(confidence.item())
        print(y_pred.item())

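        # Accumulate per-sample results for the text file written with np.savetxt below.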
        c = c[0]
        #print((confidence.item(),)+c[0])
        result.append((confidence.item(), ) + cc)

        title = 'c' + str(count) + '>' + str(c.squeeze().tolist())
        im = im * 0.5 + 0.5
        ip = Image3D(vis, title)
        # print(im.shape)
        ip(title, im[:, :, :])
        # vis.image(im.squeeze(), win='si', opts=dict(title=title))

        # time.sleep(0.3)
        # input()
        vis.save([vis.env])

    np.savetxt('Result-256-1.txt', result, delimiter=',')
Code Example #10
    timer = SimpleTimer()

    FG.save_dir = str(FG.vis_env)
    if not os.path.exists(FG.save_dir):
        os.makedirs(FG.save_dir)

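    # Visdom printers: scalar plots for the learning rate and GAN losses, plus 3D image windows.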
    printers = dict(
        lr = Scalar(vis, 'lr', opts=dict(
            showlegend=True, title='lr', ytickmin=0, ytinkmax=1.0)),
        D_loss = Scalar(vis, 'D_loss', opts=dict(
            showlegend=True, title='D loss', ytickmin=0, ytinkmax=2.0)),
        G_loss = Scalar(vis, 'G_loss', opts=dict(
            showlegend=True, title='G loss', ytickmin=0, ytinkmax=10)),
        info_loss = Scalar(vis, 'info_loss', opts=dict(
            showlegend=True, title='info loss', ytickmin=0, ytinkmax=10)),
        input = Image3D(vis, 'input'),
        output = Image3D(vis, 'output'),
        cont_output = Image3D(vis, 'cont_output'),
        disc_output = Image3D(vis, 'disc_output'))

    # create train set
    x, y = Trainset(FG)      # x = image, y=target
    if FG.gm == 'true':
        transform = Compose([ToWoldCoordinateSystem(), ToTensor()])
    else:
        transform = Compose([ToWoldCoordinateSystem(), Normalize(0.2, 0.9), ToTensor()])
    trainset = ADNIDataset(FG, x, y, transform=transform)
    trainloader = DataLoader(trainset, batch_size=FG.batch_size,
                             shuffle=True, pin_memory=True,
                             num_workers=4)
Code Example #11
File: test.py, Project: christy4526/mri_biGAN
        for t2 in range(len(temp_c)):
            C[:, 1] = temp_c[t2]
            for t3 in range(5):
                for t in range(5):
                    C[t3*5+t, 2] = temp_c[t3]
            for s in range(sample_num):
                C[s, 3] = temp_c[s%5]

            vis = Visdom(port=FG.vis_port, env='Test-c4-'+str(FG.axis))
            gi_c = G(Z, C, FG.axis)

            for s in range(sample_num):
                name = 'target:'+str(C[s, 0].item())+':'+str(C[s, 1].item())
                # name = 'target:'+str(C[s, 0].item())+':'+str(C[s, 1].item())+\
                #     ':'+str(C[s, 2].item())+':'+str(C[s, 3].item())
                save_printer = Image3D(vis, name)
                save_printer(name, ((gi_c*0.5)+0.5)[s,:,:,:])

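            # Reshape the generated batch into single-channel slices and score it with the classifier ensemble.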
            gi = gi_c.squeeze() #.cuda(device, non_blocking=True)
            gi = gi_transforms(gi)
            gi = gi.view(gi.size(0)*gi.size(1), 1,*gi.shape[2:])

            x, targets = process_batch((gi, targets), device)
            outputs = list(map(lambda net: get_confidence(net, x), nets))
            y_pred = get_ensambled_y_preds(outputs)

            out_max = torch.zeros(sample_num, 1).cuda(device, non_blocking=True)
            for i in range(sample_num):
                temp=torch.zeros(5,2).cuda(device, non_blocking=True)
                for j in range(len(nets)):
                    temp[j] += outputs[j][i]
Code Example #12
    def __init__(self, FG, SUPERVISED=True):
        # parameters
        self.num_epoch = FG.num_epoch
        self.batch_size = FG.batch_size
        self.save_dir = FG.save_dir
        self.result_dir = FG.result_dir
        self.dataset = 'MRI'
        self.log_dir = FG.log_dir
        self.model_name = 'infoGAN'
        self.input_size = FG.input_size
        self.z_dim = FG.z
        self.SUPERVISED = SUPERVISED        # if it is true, label info is directly used for code
        self.len_discrete_code = 10         # categorical distribution (i.e. label)
        self.len_continuous_code = 2        # gaussian distribution (e.g. rotation, thickness)
        self.sample_num = self.len_discrete_code ** 2
        
        # torch setting
        self.device = torch.device('cuda:{}'.format(FG.devices[0]))
        torch.cuda.set_device(FG.devices[0])
        timer = SimpleTimer()

        # load dataset
        x, y = Trainset(FG)      # x = image, y=target
        trainset = ADNIDataset(FG, x, y, cropping=NineCrop((40,40,40),(32,32,32)),
                               transform=Compose([Lambda(lambda patches: torch.stack([ToTensor()(patch) for patch in patches]))]))     
        self.trainloader = DataLoader(trainset, batch_size=self.batch_size,
                                 shuffle=True, pin_memory=True,
                                 num_workers=4)
        #self.data_loader = dataloader(self.dataset, self.input_size, self.batch_size)
        #data = self.trainloader
        for _, data in enumerate(self.trainloader):
            data = data['image']
            break

        # networks init
        self.G = generator(input_dim=self.z_dim, output_dim=data.shape[1],
                           input_size=self.input_size, len_discrete_code=self.len_discrete_code,
                           len_continuous_code=self.len_continuous_code).to('cuda:{}'.format(FG.devices[0]))
        self.D = discriminator(input_dim=data.shape[1], output_dim=1, input_size=self.input_size,
                               len_discrete_code=self.len_discrete_code, len_continuous_code=self.len_continuous_code).to('cuda:{}'.format(FG.devices[0]))
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=FG.lrG, betas=(FG.beta1, FG.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=FG.lrD, betas=(FG.beta1, FG.beta2))
        self.info_optimizer = optim.Adam(itertools.chain(self.G.parameters(), self.D.parameters()), lr=FG.lrD, betas=(FG.beta1, FG.beta2))

        if len(FG.devices) != 1:
            self.G = torch.nn.DataParallel(self.G, FG.devices)
            self.D = torch.nn.DataParallel(self.D, FG.devices)
        self.BCE_loss = nn.BCELoss().to('cuda:{}'.format(FG.devices[0]))
        self.CE_loss = nn.CrossEntropyLoss().to('cuda:{}'.format(FG.devices[0]))
        self.MSE_loss = nn.MSELoss().to('cuda:{}'.format(FG.devices[0]))

        print('---------- Networks architecture -------------')
        ori_utils.print_network(self.G)
        ori_utils.print_network(self.D)
        print('-----------------------------------------------')

        # fixed noise & condition
        self.sample_z = torch.zeros((self.sample_num, self.z_dim))
        for i in range(self.len_discrete_code):
            self.sample_z[i * self.len_discrete_code] = torch.rand(1, self.z_dim)
            for j in range(1, self.len_discrete_code):
                self.sample_z[i * self.len_discrete_code + j] = self.sample_z[i * self.len_discrete_code]

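        # Build one-hot discrete codes: each block of len_discrete_code samples shares one class label.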
        temp = torch.zeros((self.len_discrete_code, 1))
        for i in range(self.len_discrete_code):
            temp[i, 0] = i

        temp_y = torch.zeros((self.sample_num, 1))
        for i in range(self.len_discrete_code):
            temp_y[i * self.len_discrete_code: (i + 1) * self.len_discrete_code] = temp

        self.sample_y = torch.zeros((self.sample_num, self.len_discrete_code)).scatter_(1, temp_y.type(torch.LongTensor), 1)
        self.sample_c = torch.zeros((self.sample_num, self.len_continuous_code))

        # manipulating two continuous code
        #self.sample_z2 = torch.rand((1, self.z_dim)).expand(self.sample_num, self.z_dim)
        self.sample_z2 = torch.zeros((self.sample_num, self.z_dim))
        z2 = torch.rand(1, self.z_dim)
        for i in range(self.sample_num):
            self.sample_z2[i] = z2
        
        self.sample_y2 = torch.zeros(self.sample_num, self.len_discrete_code)
        self.sample_y2[:, 0] = 1

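        # Sweep the two continuous codes over a 10x10 grid in [-1, 1] to visualize their effect.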
        temp_c = torch.linspace(-1, 1, 10)
        self.sample_c2 = torch.zeros((self.sample_num, 2))
        for i in range(self.len_discrete_code):
            for j in range(self.len_discrete_code):
                self.sample_c2[i*self.len_discrete_code+j, 0] = temp_c[i]
                self.sample_c2[i*self.len_discrete_code+j, 1] = temp_c[j]

        self.sample_z = self.sample_z.cuda(self.device, non_blocking=True)
        self.sample_y = self.sample_y.cuda(self.device, non_blocking=True) 
        self.sample_c = self.sample_c.cuda(self.device, non_blocking=True)
        self.sample_z2 = self.sample_z2.cuda(self.device, non_blocking=True)
        self.sample_y2 = self.sample_y2.cuda(self.device, non_blocking=True)
        self.sample_c2 = self.sample_c2.cuda(self.device, non_blocking=True)


        vis = Visdom(port=10002, env=str(FG.vis_env))

        self.printers = dict(
            D_loss = Scalar(vis, 'D_loss', opts=dict(
                showlegend=True, title='D loss', ytickmin=0, ytinkmax=2.0)),
            G_loss = Scalar(vis, 'G_loss', opts=dict(
                showlegend=True, title='G loss', ytickmin=0, ytinkmax=10)),
            info_loss = Scalar(vis, 'info_loss', opts=dict(
                showlegend=True, title='info loss', ytickmin=0, ytinkmax=10)),
            input = Image3D(vis, 'input'),
            input_fi = Image3D(vis, 'input_fi'),
            output = Image3D(vis, 'output'),
            output2 = Image3D(vis, 'output2'))

        self.timer = SimpleTimer()
Code Example #13
                                         title='acc',
                                         ytickmin=0,
                                         ytinkmax=3)),
                    accuracy=Scalar(vis,
                                    'accuracy',
                                    opts=dict(showlegend=True,
                                              title='accuracy',
                                              ytickmin=0,
                                              ytinkmax=3)),
                    predict=Scalar(vis,
                                   'predict',
                                   opts=dict(showlegend=True,
                                             title='predict',
                                             ytickmin=0,
                                             ytinkmax=3)),
                    train_input=Image3D(vis, 'train_input'),
                    test_input=Image3D(vis, 'test_input'),
                    output0=Image3D(vis, 'output0'),
                    output1=Image3D(vis, 'output1'),
                    output2=Image3D(vis, 'output2'))

    train_scores = ScoreReport()
    test_scores = ScoreReport()
    max_acc = 0
    min_loss = 0

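    # Training loop: per-epoch score tracking with the LR scheduler stepped at the start of each epoch.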
    for epoch in range(FG.num_epoch):
        model.train()
        torch.set_grad_enabled(True)
        train_scores.clear()
        scheduler.step()