Example #1
import numpy as np
import torch
import torch.optim as optim
import xarray as xr
from sklearn import metrics
from torch.utils.data import DataLoader

# CVAE, PrectNormalizer, spcamDataset and SubsetSampler are
# project-specific classes assumed to be importable here.


def predict(params):

    inverter = PrectNormalizer(xr.open_dataset(params['norm_fn']),
                               params['output_vars'],
                               params['input_transform'][0],
                               params['input_transform'][1],
                               params['var_cut_off'], params['model_type'])

    model = CVAE(params)
    optimizer = optim.Adam(model.parameters(), lr=0.00001, weight_decay=0.001)

    ### Load model
    checkpoint = torch.load('./runs/VAE_model_DNN_classifier_exp_VAE_Exp04.pt')
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    loss = checkpoint['loss']

    model.cuda()
    model.eval()

    valid_dataset = spcamDataset(params, phase="validation")
    valid_loader = DataLoader(valid_dataset,
                              sampler=SubsetSampler(valid_dataset.indices))

    result_predicted, result_actual = [], []
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):

            target = target.squeeze(0).type(torch.float32).to(params['device'])

            # Sample latent vectors from the standard-normal prior and
            # concatenate them with the conditioning inputs for the decoder.
            z = torch.randn(data.shape[1], 16)
            z = torch.cat((z, data.squeeze(0)), dim=1).cuda()

            predPrecit = model.decoder(z)

            print("Batch MSE {}".format(
                metrics.mean_squared_error(
                    predPrecit.detach().cpu().numpy(),
                    target.squeeze(0).detach().cpu().numpy())))

            #val_loss = compute_loss(target, sampled_precit, mean, log_var) #.type(torch.FloatTensor).to(params['device']))
            #assert val_loss.requires_grad == False

            result_predicted.extend(predPrecit.cpu().detach().numpy())
            result_actual.extend(target.squeeze(0).cpu().detach().numpy())

    mse = metrics.mean_squared_error(np.array(result_actual),
                                     np.array(result_predicted))

    print("MSE {}".format(mse))
Example #2
    cvae = CVAE(nz=opts.nz, imSize=64, fSize=opts.fSize)
    dis = DISCRIMINATOR(imSize=64, fSize=opts.fSize)

    if cvae.useCUDA:
        print('using CUDA')
        cvae.cuda()
        dis.cuda()
    else:
        print('\n *** NOT USING CUDA ***\n')

    print(cvae)
    print(dis)

    ####### Define optimizer #######
    optimizerCVAE = optim.RMSprop(
        cvae.parameters(),
        lr=opts.lr)  # specify the params that are being updated
    optimizerDIS = optim.RMSprop(dis.parameters(), lr=opts.lr, alpha=opts.mom)

    ####### Create a new folder to save results and model info #######
    exDir = make_new_folder(opts.outDir)
    print('Outputs will be saved to:', exDir)
    save_input_args(exDir, opts)  # save training opts

    losses = {
        'total': [],
        'kl': [],
        'bce': [],
        'dis': [],
        'gen': [],
        'test_bce': [],
    }
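
make_new_folder and save_input_args are project utilities that do not appear in the snippet. A minimal sketch of what make_new_folder is assumed to do (create a fresh, numbered run directory under the output folder) could look like this; the naming scheme is an assumption.

import os

def make_new_folder(out_dir):
    # Hypothetical sketch: create a new numbered experiment folder.
    os.makedirs(out_dir, exist_ok=True)
    run_idx = len(os.listdir(out_dir))  # assumption: one entry per run
    ex_dir = os.path.join(out_dir, 'Ex_{}'.format(run_idx))
    os.makedirs(ex_dir)
    return ex_dir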
Example #3
import torch
import torch.optim as optim
import tqdm
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# CVAE and utils (y_to_onehot) come from the surrounding project.

# Train split, mirroring the test split below.
train_dataset = datasets.MNIST(
    './data',
    train=True,
    download=True,
    transform=transforms.Compose([transforms.ToTensor()])
)

test_dataset = datasets.MNIST(
    './data',
    train=False,
    download=True,
    transform=transforms.Compose([transforms.ToTensor()])
)


train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size)

model = CVAE(input_size, hidden_size, latent_size, num_of_classes).to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)

for epoch in tqdm.tqdm(range(n_epochs)):
  
  model.train()
  train_loss = 0
  
  for x, y in train_dataloader:
    
    x = x.view(-1, input_size).to(device)
    y = utils.y_to_onehot(y, batch_size, num_of_classes).to(device)
    
    optimizer.zero_grad()
    x_mu, x_logvar, z, z_mu, z_logvar = model(x, y)
    loss = model.loss_calc(x, x_mu, z_mu, z_logvar)
    loss.backward()
    optimizer.step()
    train_loss += loss.item()
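
utils.y_to_onehot is a project helper that is not shown; a minimal sketch of what it is assumed to do (scatter integer class labels into one-hot rows) could be:

import torch

def y_to_onehot(y, batch_size, num_of_classes):
    # Hypothetical sketch: convert integer labels to one-hot vectors.
    onehot = torch.zeros(batch_size, num_of_classes)
    onehot.scatter_(1, y.view(-1, 1), 1.0)
    return onehot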
Example #4
import torch
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import transforms
from tqdm import tqdm

# CVAE and Summary come from the surrounding project.

use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')

transform = transforms.ToTensor()

dataset = torchvision.datasets.MNIST(root='../data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)

data_shape = dataset[0][0].shape[1:]
data_size = dataset[0][0].numel()

net = CVAE(1, data_shape, data_size, 20, 400, 10)

net = net.to(device)

optimizer = optim.Adam(net.parameters(), lr=opt.learning_rate)
summary = Summary()
iteration = 0

def loss_fn(inputs, outputs, mean, logvar):
    # Summed reconstruction error (reduction='sum' replaces the deprecated
    # size_average=False) plus the analytic KL divergence.
    MSE = F.mse_loss(outputs.view(-1, data_size), inputs.view(-1, data_size), reduction='sum')
    KLD = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())
    return MSE + KLD

def train():
    global iteration
    net.train()
    progress = tqdm(enumerate(trainloader), total=len(trainloader), ascii=True)
    for batch_idx, (inputs, targets) in progress:
        inputs, targets = inputs.to(device), targets.to(device)
        outputs, mean, logvar = net(inputs, targets)
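        # The snippet ends here; given the loss_fn and optimizer defined
        # above, the rest of the loop step would presumably look like this
        # (a sketch of the usual pattern, not the original code):
        loss = loss_fn(inputs, outputs, mean, logvar)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        iteration += 1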