Example #1
    vis = Visdom(port=FG.vis_port, env=str(FG.vis_env))
    vis.text(argument_report(FG, end='<br>'), win='config')

    # torch setting
    device = torch.device('cuda:{}'.format(FG.devices[0]))
    torch.cuda.set_device(FG.devices[0])
    timer = SimpleTimer()

    #save dir setting
    save_dir = str(FG.vis_env)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    #visdom printer setting
    printers = dict(
        lr = Scalar(vis, 'lr', opts=dict(
            showlegend=True, title='lr', ytickmin=0, ytickmax=2.0)),
        D_loss = Scalar(vis, 'D_loss', opts=dict(
            showlegend=True, title='D loss', ytickmin=0, ytickmax=2.0)),
        G_loss = Scalar(vis, 'G_loss', opts=dict(
            showlegend=True, title='G loss', ytickmin=0, ytickmax=10)),
        AC_loss = Scalar(vis, 'AC_loss', opts=dict(
            showlegend=True, title='AC loss', ytickmin=0, ytickmax=10)),
        info_loss = Scalar(vis, 'info_loss', opts=dict(
            showlegend=True, title='info_loss', ytickmin=0, ytickmax=10)),
        DG_z1 = Scalar(vis, 'DG_z1', opts=dict(
            showlegend=True, title='DG_z1', ytickmin=0, ytickmax=2.0)),
        DG_z2 = Scalar(vis, 'DG_z2', opts=dict(
            showlegend=True, title='DG_z2', ytickmin=0, ytickmax=2.0)),
        D_x = Scalar(vis, 'D_x', opts=dict(
            showlegend=True, title='D_x', ytickmin=0, ytickmax=2.0)),
        inputs0 = Image3D(vis, 'inputs0'),
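
These excerpts only construct the printers dictionary of Visdom loggers; none of them show the loggers being called. The commented-out call in Example #6 (plots['c1']('c1', data['c1'], data['n'])) suggests each Scalar printer is invoked with a window name, a y-value, and an x-coordinate. A minimal sketch under that assumption follows; the function name log_step and its arguments are hypothetical:

def log_step(printers, epoch, step, steps_per_epoch, d_loss, g_loss, lr):
    # assumed Scalar call signature: printer(window_name, y_value, x_value)
    x = epoch + step / steps_per_epoch   # fractional epoch as the x-axis
    printers['D_loss']('D_loss', d_loss, x)
    printers['G_loss']('G_loss', g_loss, x)
    printers['lr']('lr', lr, x)
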
Example #2
    FG = train_args()
    vis = Visdom(port=FG.vis_port, env=str(FG.vis_env))
    vis.text(argument_report(FG, end='<br>'), win='config')

    # torch setting
    device = torch.device('cuda:{}'.format(FG.devices[0]))
    torch.cuda.set_device(FG.devices[0])
    timer = SimpleTimer()

    FG.save_dir = str(FG.vis_env)
    if not os.path.exists(FG.save_dir):
        os.makedirs(FG.save_dir)

    printers = dict(lr=Scalar(vis,
                              'lr',
                              opts=dict(showlegend=True,
                                        title='lr',
                                        ytickmin=0,
                                        ytickmax=2.0)),
                    D_loss=Scalar(vis,
                                  'D_loss',
                                  opts=dict(showlegend=True,
                                            title='D loss',
                                            ytickmin=0,
                                            ytickmax=2.0)),
                    G_loss=Scalar(vis,
                                  'G_loss',
                                  opts=dict(showlegend=True,
                                            title='G loss',
                                            ytickmin=0,
                                            ytickmax=10)),
                    AP_loss=Scalar(vis,
Example #3
    vis = Visdom(port=FG.vis_port, env=str(FG.vis_env))
    vis.text(argument_report(FG, end='<br>'), win='config')

    save_dir = str(FG.vis_env)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # torch setting
    device = torch.device('cuda:{}'.format(FG.devices[0]))
    torch.cuda.set_device(FG.devices[0])
    timer = SimpleTimer()

    printers = dict(lr=Scalar(vis,
                              'lr',
                              opts=dict(showlegend=True,
                                        title='lr',
                                        ytickmin=0,
                                        ytickmax=2.0)),
                    D_loss=Scalar(vis,
                                  'D_loss',
                                  opts=dict(showlegend=True,
                                            title='D loss',
                                            ytickmin=0,
                                            ytickmax=2.0)),
                    G_loss=Scalar(vis,
                                  'G_loss',
                                  opts=dict(showlegend=True,
                                            title='G loss',
                                            ytickmin=0,
                                            ytickmax=10)),
                    AC_loss=Scalar(vis,
Example #4
    SUPERVISED = 'True'  # note: stored as the string 'True', not a bool
    len_discrete_code = 10
    len_continuous_code = 2
    #sample_num = len_discrete_code ** 2
    sample_num = 100

    DATA_PATH = 'data'
    MODEL_STORE_PATH = 'mnist_classify_'

    FG.save_dir = str(FG.vis_env)
    if not os.path.exists(FG.save_dir):
        os.makedirs(FG.save_dir)

    printers = dict(lr=Scalar(vis,
                              'learning_rate',
                              opts=dict(showlegend=True,
                                        title='learning rate',
                                        ytickmin=0,
                                        ytickmax=1)),
                    loss=Scalar(vis,
                                'loss',
                                opts=dict(showlegend=True,
                                          title='loss',
                                          ytickmin=0,
                                          ytickmax=3)),
                    loss_G=Scalar(vis,
                                  'loss_G',
                                  opts=dict(showlegend=True,
                                            title='loss_G',
                                            ytickmin=0,
                                            ytickmax=3)),
                    acc=Scalar(vis,
Example #5
if __name__ == '__main__':
    FG = train_args()
    vis = Visdom(port=FG.vis_port, env=str(FG.vis_env))
    vis.text(argument_report(FG, end='<br>'), win='config')

    # torch setting
    device = torch.device('cuda:{}'.format(FG.devices[0]))
    torch.cuda.set_device(FG.devices[0])
    timer = SimpleTimer()

    FG.save_dir = str(FG.vis_env)
    if not os.path.exists(FG.save_dir):
        os.makedirs(FG.save_dir)

    printers = dict(
        lr = Scalar(vis, 'lr', opts=dict(
            showlegend=True, title='lr', ytickmin=0, ytickmax=1.0)),
        D_loss = Scalar(vis, 'D_loss', opts=dict(
            showlegend=True, title='D loss', ytickmin=0, ytickmax=2.0)),
        G_loss = Scalar(vis, 'G_loss', opts=dict(
            showlegend=True, title='G loss', ytickmin=0, ytickmax=10)),
        info_loss = Scalar(vis, 'info_loss', opts=dict(
            showlegend=True, title='info loss', ytickmin=0, ytickmax=10)),
        input = Image3D(vis, 'input'),
        output = Image3D(vis, 'output'),
        cont_output = Image3D(vis, 'cont_output'),
        disc_output = Image3D(vis, 'disc_output'))

    # create train set
    x, y = Trainset(FG)      # x = image, y=target
    if FG.gm == 'true':
        transform=Compose([ToWoldCoordinateSystem(), ToTensor()])
Example #6
import torch
import torch.nn as nn
from torchvision.transforms import Compose, Lambda
from torch import optim
from visdom import Visdom
# project packages
from summary import Scalar, Image3D
import itertools
import numpy as np

import matplotlib as mpl
import matplotlib.pylab as plt

if __name__ == '__main__':
    vis = Visdom(port=10002, env='result-plot')

    plot = Scalar(vis, 'c1', opts=dict(
        showlegend=True, title='c1', ytickmin=0, ytickmax=1.0))

    plots = dict(
        c1 = Scalar(vis, 'c1', opts=dict(
            showlegend=True, title='c1')),
        c2 = Scalar(vis, 'c2', opts=dict(
            showlegend=True, title='c2')),
        c3 = Scalar(vis, 'c3', opts=dict(
            showlegend=True, title='c3')))
    #data = np.genfromtxt('sort-result0.txt')
    data = np.genfromtxt('sort-result0.txt', names=('n','c1', 'c2','c3','AD','NC'))
    print(data['n'],data['c1'],data['c2'],data['c3'],data['AD'],data['NC'])
    #exit()
    #plots['c1']('c1', data['c1'], data['n'], )
    for i in range(12500):
        #print('AD', data[i][1], data[i][4])
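
The loop above is cut off here. Judging from the commented-out call plots['c1']('c1', data['c1'], data['n']), a plausible per-row body feeds each coefficient column into its Scalar plot; the exact rows plotted and the Scalar call signature (name, y_value, x_value) are assumptions:

# Hypothetical continuation of the loop above.
for i in range(len(data)):
    n = float(data['n'][i])
    plots['c1']('c1', float(data['c1'][i]), n)
    plots['c2']('c2', float(data['c2'][i]), n)
    plots['c3']('c3', float(data['c3'][i]), n)
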
Example #7
    def __init__(self, FG, SUPERVISED=True):
        # parameters
        self.num_epoch = FG.num_epoch
        self.batch_size = FG.batch_size
        self.save_dir = FG.save_dir
        self.result_dir = FG.result_dir
        self.dataset = 'MRI'
        self.log_dir = FG.log_dir
        self.model_name = 'infoGAN'
        self.input_size = FG.input_size
        self.z_dim = FG.z
        self.SUPERVISED = SUPERVISED        # if it is true, label info is directly used for code
        self.len_discrete_code = 10         # categorical distribution (i.e. label)
        self.len_continuous_code = 2        # gaussian distribution (e.g. rotation, thickness)
        self.sample_num = self.len_discrete_code ** 2
        
        # torch setting
        self.device = torch.device('cuda:{}'.format(FG.devices[0]))
        torch.cuda.set_device(FG.devices[0])
        timer = SimpleTimer()

        # load dataset
        x, y = Trainset(FG)      # x = image, y=target
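        # NineCrop presumably extracts nine 32^3 sub-volumes from each 40^3
        # volume; the Lambda transform stacks the per-patch tensors into one
        # tensor per sample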
        trainset = ADNIDataset(FG, x, y, cropping=NineCrop((40,40,40),(32,32,32)),
                               transform=Compose([Lambda(lambda patches: torch.stack([ToTensor()(patch) for patch in patches]))]))     
        self.trainloader = DataLoader(trainset, batch_size=self.batch_size,
                                 shuffle=True, pin_memory=True,
                                 num_workers=4)
        #self.data_loader = dataloader(self.dataset, self.input_size, self.batch_size)
        #data = self.trainloader
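        # grab a single batch so data.shape[1] below can size the channel
        # dimensions of the generator and discriminator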
        for _, data in enumerate(self.trainloader):
            data = data['image']
            break

        # networks init
        self.G = generator(input_dim=self.z_dim, output_dim=data.shape[1],
                           input_size=self.input_size, len_discrete_code=self.len_discrete_code,
                           len_continuous_code=self.len_continuous_code).to('cuda:{}'.format(FG.devices[0]))
        self.D = discriminator(input_dim=data.shape[1], output_dim=1, input_size=self.input_size,
                               len_discrete_code=self.len_discrete_code, len_continuous_code=self.len_continuous_code).to('cuda:{}'.format(FG.devices[0]))
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=FG.lrG, betas=(FG.beta1, FG.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=FG.lrD, betas=(FG.beta1, FG.beta2))
        self.info_optimizer = optim.Adam(itertools.chain(self.G.parameters(), self.D.parameters()), lr=FG.lrD, betas=(FG.beta1, FG.beta2))

        if len(FG.devices) != 1:
            self.G = torch.nn.DataParallel(self.G, FG.devices)
            self.D = torch.nn.DataParallel(self.D, FG.devices)
        self.BCE_loss = nn.BCELoss().to('cuda:{}'.format(FG.devices[0]))
        self.CE_loss = nn.CrossEntropyLoss().to('cuda:{}'.format(FG.devices[0]))
        self.MSE_loss = nn.MSELoss().to('cuda:{}'.format(FG.devices[0]))

        print('---------- Networks architecture -------------')
        ori_utils.print_network(self.G)
        ori_utils.print_network(self.D)
        print('-----------------------------------------------')

        # fixed noise & condition
        self.sample_z = torch.zeros((self.sample_num, self.z_dim))
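        # fill sample_z so that each block of len_discrete_code consecutive
        # rows shares a single random noise vector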
        for i in range(self.len_discrete_code):
            self.sample_z[i * self.len_discrete_code] = torch.rand(1, self.z_dim)
            for j in range(1, self.len_discrete_code):
                self.sample_z[i * self.len_discrete_code + j] = self.sample_z[i * self.len_discrete_code]

        temp = torch.zeros((self.len_discrete_code, 1))
        for i in range(self.len_discrete_code):
            temp[i, 0] = i

        temp_y = torch.zeros((self.sample_num, 1))
        for i in range(self.len_discrete_code):
            temp_y[i * self.len_discrete_code: (i + 1) * self.len_discrete_code] = temp

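        # temp_y cycles the class indices 0..9 down the rows; scatter_ below
        # turns it into a one-hot matrix of shape (sample_num, len_discrete_code),
        # so each fixed-z block is paired with every discrete code once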
        self.sample_y = torch.zeros((self.sample_num, self.len_discrete_code)).scatter_(1, temp_y.type(torch.LongTensor), 1)
        self.sample_c = torch.zeros((self.sample_num, self.len_continuous_code))

        # manipulating two continuous code
        #self.sample_z2 = torch.rand((1, self.z_dim)).expand(self.sample_num, self.z_dim)
        self.sample_z2 = torch.zeros((self.sample_num, self.z_dim))
        z2 = torch.rand(1, self.z_dim)
        for i in range(self.sample_num):
            self.sample_z2[i] = z2
        
        self.sample_y2 = torch.zeros(self.sample_num, self.len_discrete_code)
        self.sample_y2[:, 0] = 1

        temp_c = torch.linspace(-1, 1, 10)
        self.sample_c2 = torch.zeros((self.sample_num, 2))
        for i in range(self.len_discrete_code):
            for j in range(self.len_discrete_code):
                self.sample_c2[i*self.len_discrete_code+j, 0] = temp_c[i]
                self.sample_c2[i*self.len_discrete_code+j, 1] = temp_c[j]
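        # sample_c2 sweeps the two continuous codes over a 10x10 grid in [-1, 1]
        # while sample_z2 / sample_y2 keep the noise and the discrete code fixed,
        # isolating the effect of the continuous codes in the generated grid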

        self.sample_z = self.sample_z.cuda(self.device, non_blocking=True)
        self.sample_y = self.sample_y.cuda(self.device, non_blocking=True) 
        self.sample_c = self.sample_c.cuda(self.device, non_blocking=True)
        self.sample_z2 = self.sample_z2.cuda(self.device, non_blocking=True)
        self.sample_y2 = self.sample_y2.cuda(self.device, non_blocking=True)
        self.sample_c2 = self.sample_c2.cuda(self.device, non_blocking=True)


        vis = Visdom(port=10002, env=str(FG.vis_env))

        self.printers = dict(
            D_loss = Scalar(vis, 'D_loss', opts=dict(
                showlegend=True, title='D loss', ytickmin=0, ytickmax=2.0)),
            G_loss = Scalar(vis, 'G_loss', opts=dict(
                showlegend=True, title='G loss', ytickmin=0, ytickmax=10)),
            info_loss = Scalar(vis, 'info_loss', opts=dict(
                showlegend=True, title='info loss', ytickmin=0, ytickmax=10)),
            input = Image3D(vis, 'input'),
            input_fi = Image3D(vis, 'input_fi'),
            output = Image3D(vis, 'output'),
            output2 = Image3D(vis, 'output2'))

        self.timer = SimpleTimer()
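
Example #7 stops after __init__: the networks, the three optimizers, and the BCE/CE/MSE losses are built, but the training loop is not shown. The sketch below illustrates how one infoGAN-style update could combine them. The forward signatures assumed here (G taking (z, continuous_code, discrete_code), D returning a real/fake score, discrete-code logits, and a continuous-code prediction) and the method name train_step are assumptions, not the project's actual code.

    def train_step(self, x, y_disc, y_cont, z):
        # x: real images, y_disc: one-hot discrete codes, y_cont: continuous
        # codes, z: noise -- all assumed to be on self.device already
        batch = x.size(0)
        real_label = torch.ones(batch, 1, device=self.device)
        fake_label = torch.zeros(batch, 1, device=self.device)

        # discriminator update: real vs. generated samples
        self.D_optimizer.zero_grad()
        d_real, _, _ = self.D(x)
        fake = self.G(z, y_cont, y_disc)
        d_fake, _, _ = self.D(fake.detach())
        D_loss = self.BCE_loss(d_real, real_label) + self.BCE_loss(d_fake, fake_label)
        D_loss.backward()
        self.D_optimizer.step()

        # generator update: try to fool the discriminator
        self.G_optimizer.zero_grad()
        fake = self.G(z, y_cont, y_disc)
        d_fake, _, _ = self.D(fake)
        G_loss = self.BCE_loss(d_fake, real_label)
        G_loss.backward()
        self.G_optimizer.step()

        # mutual-information update: recover the codes from generated samples
        self.info_optimizer.zero_grad()
        fake = self.G(z, y_cont, y_disc)
        _, disc_logits, cont_pred = self.D(fake)
        info_loss = self.CE_loss(disc_logits, y_disc.argmax(dim=1)) \
                    + self.MSE_loss(cont_pred, y_cont)
        info_loss.backward()
        self.info_optimizer.step()

        return D_loss.item(), G_loss.item(), info_loss.item()

The returned scalars could then be passed to self.printers['D_loss'], self.printers['G_loss'], and self.printers['info_loss'] for Visdom logging, following the call pattern noted after Example #1.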