Example #1
def extractFlow(self, path, tube, h, w, bb):
        flow_frame = torch.zeros((64, 2, 240, 320))
        path = path + "/flows/"
        os.makedirs(path, exist_ok=True)  # create the per-video flow output folder

        print(tube.shape)
        for index in range(1, 64):
            data = skimage.transform.resize(
                tube[0, :, index - 1, :, :].cpu().numpy(), (3, 240, 320))
            x1 = torch.from_numpy(data).cuda()
            data = skimage.transform.resize(
                tube[0, :, index, :, :].cpu().numpy(), (3, 240, 320))
            x2 = torch.from_numpy(data).cuda()
            print(x2.type())
            u1, u2, _ = of(x2.unsqueeze(0), x1.unsqueeze(0), need_result=True)

            # Crop each flow channel to the frame's bounding box and resize it to
            # 224x224; note this cropped version is currently unused -- the
            # assignments below store the full-resolution flow instead.
            data = u1.detach()[0, 0, bb[index][1]:bb[index][3],
                               bb[index][0]:bb[index][2]]
            data = skimage.transform.resize(data.cpu().numpy(),
                                            (1, 1, 224, 224))
            flow_frame[index, 0, :, :] = u1.detach()  # torch.from_numpy(data)

            data = u2.detach()[0, 0, bb[index][1]:bb[index][3],
                               bb[index][0]:bb[index][2]]
            data = skimage.transform.resize(data.cpu().numpy(),
                                            (1, 1, 224, 224))
            flow_frame[index, 1, :, :] = u2.detach()  # torch.from_numpy(data)

            im = visualizeFlow(flow_frame[index, :, :, :])
            print(im.shape)
            imsave(path + str(index).zfill(7) + ".tiff", im)

        np.save(path + "flows.npy", flow_frame.numpy())

        return flow_frame
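extractFlow calls a visualizeFlow helper that is not shown in this listing. Below is a minimal sketch of one common implementation, assuming the usual HSV encoding of optical flow (direction as hue, magnitude as value); the signature is inferred from the call site above, not taken from the original source.

def visualizeFlow(flow):
    # flow: (2, H, W) CPU tensor holding the horizontal and vertical components
    import numpy as np
    from skimage.color import hsv2rgb

    u, v = flow[0].numpy(), flow[1].numpy()
    mag = np.sqrt(u ** 2 + v ** 2)
    ang = np.arctan2(v, u)                        # direction in [-pi, pi]
    hsv = np.zeros(u.shape + (3,), dtype=np.float32)
    hsv[..., 0] = (ang + np.pi) / (2 * np.pi)     # hue from flow direction
    hsv[..., 1] = 1.0                             # full saturation
    hsv[..., 2] = mag / (mag.max() + 1e-8)        # value from normalized magnitude
    return (hsv2rgb(hsv) * 255).astype(np.uint8)  # (H, W, 3) uint8 image for imsave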
Example #2
def attack(model, dataset, n_samples, method):

    model.eval()
    adversarial_attacks = []

    for data, target in tqdm(dataset):
        data = data.unsqueeze(0)
        target = torch.tensor(target).unsqueeze(0)

        if torch.cuda.is_available():
            data, target = data.cuda(), target.cuda()
            device = 'cuda'
        else:
            data, target = data.cpu(), target.cpu()
            device = 'cpu'

        samples_attacks = []

        for idx in range(n_samples):
            random.seed(idx)  # fix the seed so each sampled attack is reproducible
            perturbed_image = run_attack(net=model,
                                         image=data,
                                         label=target,
                                         method=method,
                                         device=device,
                                         hyperparams=None).squeeze()
            perturbed_image = torch.clamp(perturbed_image, 0., 1.)
            samples_attacks.append(perturbed_image)

        # average the sampled attacks into a single adversarial example
        adversarial_attacks.append(torch.stack(samples_attacks).mean(0))

    return torch.stack(adversarial_attacks)
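A usage sketch for attack(); model, test_set, and the "fgsm" method name are placeholders for whatever run_attack actually supports in the surrounding project:

# adv = attack(model, test_set, n_samples=5, method="fgsm")
# adv.shape -> (len(test_set), C, H, W): one averaged adversarial image per input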
Example #3
    def get_seq_from_z(self, z):
        parameters = pickle.load(open("Outputs/SH3.db", 'rb'))
        q_n = parameters['q_n']
        index = parameters['index']
        v_traj_onehot = parameters['onehot']

        N = np.size(v_traj_onehot, axis=0)
        q = np.size(v_traj_onehot, axis=1)
        n = np.size(q_n)
        z_gen = [z]

        data = torch.FloatTensor(z_gen).to(device)
        data = model.decode(data)
        v_gen = data.cpu().detach().numpy()
        sample_list = []

        for i in range(len(z_gen)):  # number of sampling points
            v_samp_nothot = toolkit.sample_seq(0, q, n, q_n, i, v_gen)
            sample_list.append(v_samp_nothot)
        # convert once, after all samples are collected
        sequences = toolkit.convert_alphabet(np.array(sample_list), index, q_n)

        # splice the fixed residues back into the generated sequence
        sequence = sequences[0][:16] + "DD" + sequences[0][16:]
        sequence = sequence[:44] + "A" + sequence[44:]
        return sequence
Example #4
def evaluate(args, model, val_loader):
    model.eval()
    correct = 0
    output_list = []
    labels_list = []
    with torch.no_grad():
        begin = time.time()
        for data, target in val_loader:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            else:
                data, target = data.cpu(), target.cpu()
            output = model(data)
            output = torch.nn.functional.softmax(output, dim=1)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
            output_list.append(output)
            labels_list.append(target)
        end = time.time()
        print("inference throughput: ",
              len(val_loader.dataset) / (end - begin), " images/s")
    output = torch.cat(output_list)
    target = torch.cat(labels_list)
    print('\nTest Accuracy: {:.2f}%\n'.format(100. * correct /
                                              len(val_loader.dataset)))
    target_labels = target.cpu().data.numpy()
    np.save('../experiments/bayesian_torch/probs_cifar_det.npy', output.data.cpu().numpy())
    np.save('../experiments/bayesian_torch/cifar_test_labels.npy', target_labels)
Example #5
def visualize(data, title):
    input_tensor = data.cpu()
    in_grid = convert_image_np(torchvision.utils.make_grid(input_tensor))
    # Plot the image grid
    plt.imshow(in_grid)
    plt.title(title)
    plt.show()
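visualize (and the visualize_stn examples below) depend on a convert_image_np helper that is not included here. A plausible sketch, assuming ImageNet-style normalization was applied to the inputs (the mean/std values are an assumption, not from the original source):

def convert_image_np(inp):
    """Convert a (C, H, W) grid tensor to a displayable (H, W, C) numpy image."""
    import numpy as np
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])  # assumed normalization statistics
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean                  # undo normalization
    return np.clip(inp, 0, 1)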
Example #6
def test(epoch):
    with experiment.test():
        model.eval()
        test_loss = 0
        n = 0
        for batch_idx, (data, ohe) in enumerate(test_loader):
            data = data.cuda()
            ohe = ohe.cuda()

            recon_batch, mu, logvar = model(data)
            loss = loss_function(recon_batch, ohe, mu, logvar)
            test_loss += loss.item()
            n += 1
            experiment.log_metric('loss', loss.item())

            num_right = 0
            _, preds = torch.max(recon_batch, dim=2)
            for i in range(recon_batch.shape[0]):
                num_right += int(torch.equal(preds[i, ...], data[i, ...]))
            experiment.log_metric(
                'accuracy',
                float(num_right) / float(recon_batch.shape[0]))

            if batch_idx % log_interval == 0:
                preds = preds.cpu().numpy()
                targets_copy = data.cpu().numpy()
                for i in range(4):
                    sample = preds[i, ...]
                    target = targets_copy[i, ...]
                    print("ORIG: {}\nNEW : {}".format(
                        "".join([charset[chars] for chars in target]),
                        "".join([charset[chars] for chars in sample])))

        print('test', test_loss / len(test_loader))
        return float(test_loss) / float(n)
Example #7
    @staticmethod
    def get_log_frequency(root="data/"):
        WikiTextDataset._load_datasets(root=root)
        data = WikiTextDataset.TEXT.numericalize(
            [WikiTextDataset.DATASET_SETS["train"][0].text]).squeeze(dim=0)
        word_bincount = np.bincount(data.cpu().numpy())
        word_bincount += 1  # Smoothing
        print("Maximum word count: %i" % (word_bincount.max()))
        print("Minimum word count: %i" % (word_bincount.min()))
        print(word_bincount[-10:])
        word_probs = word_bincount * 1.0 / word_bincount.sum(keepdims=True)
        # reuse the word_bincount name for the log2 probabilities
        word_bincount = np.log2(word_bincount.astype(np.float32)) - np.log2(
            word_bincount.sum(keepdims=True).astype(np.float32))
        printed_zero = False
        for key in WikiTextDataset.DATASET_VOCAB.stoi.keys():
            val = WikiTextDataset.DATASET_VOCAB.stoi[key]
            if (val == 0 and not printed_zero) or \
               (val == 1) or \
               (val != 0 and word_probs[val] > 0.001) or \
               (val > 10000 and val < 10010) or \
               (val > 15000 and val < 15010) or \
               (val > 19900):
                printed_zero = printed_zero or (val == 0)
                print("%s (%s): %4.2f%%, log %4.2f" %
                      (key, str(val), word_probs[val] * 100.0,
                       word_bincount[val]))

        print("-" * 20)
        print("Unigram Bpc: %4.2f" % (word_probs * (-word_bincount)).sum())
        return word_bincount
Example #8
def to_cpu(data):
    if isinstance(data, dict):
        for k, v in data.items():
            data[k] = to_cpu(v)
        return data
    if isinstance(data, torch.Tensor):
        return data.cpu()
    return data
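A quick usage sketch: to_cpu recurses through nested dicts and moves every tensor it finds to the CPU, leaving non-tensor values untouched.

# batch = {"image": torch.randn(4, 3, 32, 32).cuda(),
#          "meta": {"id": 7, "mask": torch.ones(4).cuda()}}
# batch = to_cpu(batch)   # both tensors now on CPU; "id" passes through unchanged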
Example #9
def MyPlotFuc(data):
    import matplotlib.pyplot as plt
    import numpy as np
    data = data.cpu()
    data = data.numpy()
    data = np.where(data > 0)  # (row indices, col indices) of nonzero entries
    plt.xlim(0, 128)
    plt.ylim(0, 128)
    plt.scatter(data[0], data[1], s=5)
    plt.show()
Example #10
def animate(i):
    view = block_rot.get_view()
    # Set skip index to anything > 15 to make sure nothing is skipped
    x_mu = model.generate(x_real, v_real, view, 44, 99)
    data = x_mu.squeeze(0)
    data = data.repeat(3, 1, 1)    # grayscale -> 3 channels
    data = data.permute(1, 2, 0)   # CHW -> HWC for imshow
    data = data.cpu().detach()
    im.set_data(data)
    return im
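animate() is shaped like a matplotlib FuncAnimation callback. A hedged wiring sketch, assuming fig and im were created beforehand with plt.subplots and ax.imshow as is typical:

# from matplotlib import animation
# anim = animation.FuncAnimation(fig, animate, frames=60, interval=50)
# anim.save("rotation.gif", writer="pillow")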
Example #11
def de_normalization(data):
    data = data.cpu()

    mm = MinMaxScaler()
    train = TrainData('./data/airport', label_dir='./data/label')

    label_data = np.array(train.label_data).reshape(-1, 1)

    # fit the scaler on the training labels so transform() uses the same scale
    mm.fit_transform(label_data)

    return mm.transform(data), train.label_data[910]
Example #12
def generate(model, seed, q, q_n, n, d, device, n_gen, n_samp, thresh):
    np.random.seed(seed)
    z_gen = np.random.normal(0., 1., (n_gen, d))  # sample latents from a standard normal
    data = torch.FloatTensor(z_gen).to(device)
    data = model.decode(data)  # use the decoder to generate new sequences
    v_gen = data.cpu().detach().numpy()
    sample_list = []
    for i in range(n_gen):
        for k in range(n_samp):
            v_samp_nothot = sample_seq(seed+k, q, n, q_n, i, v_gen)
            sample_list.append(v_samp_nothot)
    return sample_list
Example #13
def save_nii(data, save_folder, name, mask=None):
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    nifti_affine = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [0, 0, 0, 1]],
                            dtype=float)  # np.float was removed from recent NumPy

    data = data.cpu().detach().squeeze().numpy()
    data = np.fliplr(data)
    data = np.pad(data, ((2, 2), (6, 7), (6, 7)), mode='constant')
    if mask is not None:
        data = data * mask
    nifti = nib.Nifti1Image(data, affine=nifti_affine)
    nib.save(nifti, os.path.join(save_folder, name + '.nii'))
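A usage sketch for save_nii; pred and brain_mask are hypothetical names for a network output tensor and an optional numpy mask matching the padded volume shape:

# save_nii(pred, "outputs", "subject01", mask=brain_mask)
# -> writes outputs/subject01.nii with the flipped, padded, masked volume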
Example #14
def visualize_stn():
    with torch.no_grad():
        # Get a batch of training data
        data = next(iter(test_loader))[0].to(device)

        input_tensor = data.cpu()
        in_grid = convert_image_np(torchvision.utils.make_grid(input_tensor, nrow=4))

        # Plot the grid of input images (imshow needs the converted numpy grid,
        # not the raw tensor)
        f, axarr = plt.subplots()
        axarr.imshow(in_grid)
        axarr.set_title('Dataset Images')
Example #15
def test(frac_anom, beta_val):
    model.eval()
    test_loss_total = 0
    test_loss_anom = 0
    num_anom = 0
    with torch.no_grad():
        for i, (data, data_lab) in enumerate(test_loader):
            data = data.to(device)

            recon_batch, mu, logvar = model(data)
            anom_lab = data_lab == 10
            num_anom += np.sum(anom_lab.numpy())  # count number of anomalies
            anom_lab = (anom_lab[:, None].float()).to(device)

            test_loss_anom += MSE_loss(recon_batch * anom_lab,
                                       data * anom_lab).item()
            test_loss_total += MSE_loss(recon_batch, data).item()

            if i == 0:
                n = min(data.size(0), 100)
                samp = [4, 14, 50, 60, 25, 29, 32, 65]  # fixed examples to visualize
                comparison = torch.cat([
                    data.view(len(recon_batch), 1, 28, 28)[samp],
                    recon_batch.view(len(recon_batch), 1, 28, 28)[samp]
                ])
                save_image(
                    comparison.cpu(),
                    '/big_disk/akrami/git_repos_new/fair_VAE/results/neg/fashion_mnist_recon_shallow_'
                    + str(beta_val) + '_' + str(frac_anom) + '.png',
                    nrow=n)

        np.savez(
            '/big_disk/akrami/git_repos_new/fair_VAE/results/neg/fashion_mnist_'
            + str(beta_val) + '_' + str(frac_anom) + '.npz',
            recon=recon_batch.cpu(),
            data=data.cpu(),
            anom_lab=anom_lab.cpu())

    test_loss_normals = (test_loss_total - test_loss_anom) / (
        len(test_loader.dataset) - num_anom)
    test_loss_anom /= num_anom
    test_loss_total /= len(test_loader.dataset)

    print('====> Test set loss: {:.4f}'.format(test_loss_total))

    return test_loss_total, test_loss_anom, test_loss_normals
Example #16
def evaluate(model, val_loader, corrupt=None, level=None):
    pred_probs_mc = []
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(val_loader):
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            else:
                data, target = data.cpu(), target.cpu()
            model.eval()
            for mc_run in range(args.num_monte_carlo):
                output, _ = model.forward(data)
                pred_probs = torch.nn.functional.softmax(output, dim=1)
                pred_probs_mc.append(pred_probs.cpu().data.numpy())

        if corrupt == 'ood':
            np.save(args.log_dir + '/preds/svi_avu_ood_probs.npy',
                    pred_probs_mc)
            print('saved predictive probabilities')
            return None

        target_labels = target.cpu().data.numpy()
        pred_mean = np.mean(pred_probs_mc, axis=0)
        #print(pred_mean)
        Y_pred = np.argmax(pred_mean, axis=1)
        test_acc = (Y_pred == target_labels).mean()
        #brier = np.mean(calib.brier_scores(target_labels, probs=pred_mean))
        #ece = calib.expected_calibration_error_multiclass(pred_mean, target_labels)
        print('Test accuracy:', test_acc * 100)
        #print('Brier score: ', brier)
        #print('ECE: ', ece)
        # handle the specific 'test' case before the generic corrupt-is-set case,
        # otherwise the 'test' branch can never run
        if corrupt == 'test':
            np.save(args.log_dir + '/preds/svi_avu_test_probs.npy',
                    pred_probs_mc)
            np.save(args.log_dir + '/preds/svi_avu_test_labels.npy',
                    target_labels)
            print('saved predictive probabilities')
        elif corrupt is not None:
            np.save(
                args.log_dir +
                '/preds/svi_avu_corrupt-static-{}-{}_probs.npy'.format(
                    corrupt, level), pred_probs_mc)
            np.save(
                args.log_dir +
                '/preds/svi_avu_corrupt-static-{}-{}_labels.npy'.format(
                    corrupt, level), target_labels)
            print('saved predictive probabilities')
    return test_acc
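The commented-out Brier/ECE lines above can be reproduced with a short histogram-binning calibration check. A sketch, assuming pred_mean has shape [N, num_classes] and target_labels holds integer class ids (15 bins is a common but arbitrary choice):

import numpy as np

def expected_calibration_error(probs, labels, n_bins=15):
    conf = probs.max(axis=1)            # confidence of the top prediction
    pred = probs.argmax(axis=1)
    acc = (pred == labels).astype(float)
    bins = np.linspace(0.0, 1.0, n_bins + 1)
    ece = 0.0
    for lo, hi in zip(bins[:-1], bins[1:]):
        mask = (conf > lo) & (conf <= hi)
        if mask.any():
            # weight each bin's |accuracy - confidence| gap by its occupancy
            ece += mask.mean() * abs(acc[mask].mean() - conf[mask].mean())
    return ece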
Example #17
def train(epoch):
    global batches_loss_array
    global epochs_loss_array
    global least_error
    model.train()
    train_loss = 0
    for batch_idx, data in enumerate(train_loader):
        #print('next batch')
        data = data.to(device)
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(data)

        if torch.isnan(recon_batch).any().item():
            print("got a nan array")
            print(recon_batch)
            print(mu)
            print(logvar)
            # dump the offending batch for post-mortem debugging
            np.save('fail_recon_x.npy', recon_batch.detach().cpu().numpy())
            np.save('fail_x.npy', data.cpu().numpy())
            raise Exception('nan value tensor')

        loss = loss_function(recon_batch, data, mu, logvar)
        train_loss += loss.item()
        loss.backward()

        #model_params = filter(lambda p: p.requires_grad, model.parameters())
        #grad_vector = np.concatenate([p.grad.cpu().numpy().flatten() for p in model_params])

        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.2f} '.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            batches_loss_array = np.append(batches_loss_array, loss.item())

    avg_train_loss = train_loss / len(train_loader.dataset)

    print('====> Epoch: {} Average Training loss: {:.2f}'.format(
        epoch, avg_train_loss))
    epochs_loss_array = np.append(epochs_loss_array, avg_train_loss)

    if least_error == -1 or least_error > avg_train_loss:
        PATH = os.path.join(output_dir, 'vae_model.pt')

        torch.save(model.state_dict(), PATH)

        least_error = avg_train_loss
Example #18
def test(frac_anom, beta_val):
    model.eval()
    test_loss_total = 0
    test_loss_anom = 0
    num_anom = 0
    with torch.no_grad():
        for i, (data, data_lab) in enumerate(test_loader):
            data = data.to(device)

            recon_batch, mu = model(data)
            #recon_batch = torch.tensor(recon_batch > 0.5).float()
            anom_lab = data_lab == 10
            num_anom += np.sum(anom_lab.numpy())  # count number of anomalies
            anom_lab = (anom_lab[:, None].float()).to(device)

            test_loss_anom += MSE_loss(recon_batch * anom_lab,
                                       data * anom_lab).item()
            test_loss_total += MSE_loss(recon_batch, data).item()

            if i == 0:
                n = min(data.size(0), 100)
                samp = [96, 97, 99, 90, 14, 35, 53, 57]
                comparison = torch.cat([
                    data.view(len(recon_batch), 1, 28, 28)[samp],
                    recon_batch.view(len(recon_batch), 1, 28, 28)[samp]
                ])

                save_image(comparison.cpu(),
                           'results/letters_mnist_recon_' + str(beta_val) +
                           '_' + str(frac_anom) + '.png',
                           nrow=n)

        np.savez('results/letters_mnist_' + str(beta_val) + '_' +
                 str(frac_anom) + '.npz',
                 recon=recon_batch.cpu(),
                 data=data.cpu(),
                 anom_lab=anom_lab.cpu())

    test_loss_normals = (test_loss_total - test_loss_anom) / (
        len(test_loader.dataset) - num_anom)
    test_loss_anom /= num_anom
    test_loss_total /= len(test_loader.dataset)

    print('====> Test set loss: {:.4f}'.format(test_loss_total))

    return test_loss_total, test_loss_anom, test_loss_normals
Example #19
def plot_beat(data, recon_x=None):
    # first, move everything to CPU
    if data.is_cuda:
        data = data.cpu()
    if recon_x is not None and recon_x.is_cuda:
        recon_x = recon_x.cpu()

    # plot a bunch of data
    ncol = data.shape[0] // 2
    nrow = 2
    fig, axarr = plt.subplots(nrow, ncol, figsize=(4*ncol, 2*nrow))
    for i, ax in enumerate(axarr.flatten()):
        ax.plot(data[i, 0, :].data.numpy(), "-o", label="beat")
        if recon_x is not None:
            ax.plot(recon_x[i, 0, :].data.numpy(), label="recon")
        ax.legend()
    return fig, axarr
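A usage sketch for plot_beat; model(batch)[0] stands in for however the surrounding code obtains reconstructions:

# fig, axarr = plot_beat(batch, recon_x=model(batch)[0])
# fig.savefig("beats.png")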
Example #20
def evaluate(args, model, val_loader, n_samples):

    output_list = []
    labels_list = []

    model.eval()
    with torch.no_grad():
        begin = time.time()
        for data, target in val_loader:

            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            else:
                data, target = data.cpu(), target.cpu()
            output_mc = []
            for mc_run in range(n_samples):
                output, _ = model.forward(data)
                output_mc.append(output)

            output_ = torch.stack(output_mc)
            output_list.append(output_)
            labels_list.append(target)

        end = time.time()

        print("inference throughput: ",
              len(val_loader.dataset) / (end - begin), " images/s")

        output = torch.stack(output_list)
        output = output.permute(1, 0, 2, 3)
        output = output.contiguous().view(n_samples, len(val_loader.dataset),
                                          -1)
        output = torch.nn.functional.softmax(output, dim=2)
        labels = torch.cat(labels_list)
        pred_mean = output.mean(dim=0)
        Y_pred = torch.argmax(pred_mean, axis=1)
        print('Test accuracy:',
              (Y_pred.data.cpu().numpy() == labels.data.cpu().numpy()).mean() *
              100)
        np.save('../experiments/bayesian_torch/probs_cifar_mc.npy',
                output.data.cpu().numpy())
        np.save('../experiments/bayesian_torch/cifar_test_labels_mc.npy',
                labels.data.cpu().numpy())
Example #21
def test(epoch):
    model.eval()
    test_loss = 0
    total_diff = 0
    with torch.no_grad():
        for i, (data, _) in enumerate(test_loader):
            data = data.to(device)
            recon_batch, enc = model(data)
            pred = (recon_batch.cpu().detach().numpy() > .5).astype(int)
            total_diff += float(np.sum(data.cpu().detach().numpy() != pred))
            test_loss += loss_function(recon_batch, data).item()

    test_loss /= len(test_loader.dataset)
    total_diff /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))
    print('====> Test set diff: {:.4f}'.format(total_diff))
Example #22
def evaluate(model, val_loader, args):
    pred_probs_mc = []
    test_loss = 0
    correct = 0
    with torch.no_grad():
        output_list = []
        label_list = []
        begin = time.time()
        for batch_idx, (data, target) in enumerate(val_loader):
            #print('Batch idx {}, data shape {}, target shape {}'.format(batch_idx, data.shape, target.shape))
            if torch.cuda.is_available():
                data, target = data.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)
            else:
                # Tensor.cpu() accepts no non_blocking argument
                data, target = data.cpu(), target.cpu()
            output_mc_np = []
            for mc_run in range(args.num_monte_carlo):
                model.eval()
                output, _ = model.forward(data)
                pred_probs = torch.nn.functional.softmax(output, dim=1)
                output_mc_np.append(pred_probs.cpu().data.numpy())

            output_mc = torch.from_numpy(
                np.mean(np.asarray(output_mc_np), axis=0))
            output_list.append(output_mc)
            label_list.append(target)
        end = time.time()
        print('inference throughput: ', len_valset / (end - begin),
              ' images/s')
        # no need to move these to the GPU: they are read back as numpy below,
        # and .cuda() would break CPU-only runs
        labels = torch.cat(label_list)
        probs = torch.cat(output_list)

        target_labels = labels.data.cpu().numpy()
        pred_mean = probs.data.cpu().numpy()
        Y_pred = np.argmax(pred_mean, axis=1)
        test_acc = (Y_pred == target_labels).mean()
        print('Test accuracy:', test_acc * 100)

        np.save(args.log_dir + '/bayesian_imagenet_probs.npy', pred_mean)
        np.save(args.log_dir + '/bayesian_imagenet_labels.npy', target_labels)

    return test_acc
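The MC-averaged probabilities computed above also support a standard uncertainty score. A sketch of the predictive entropy, assuming mean_probs has shape [N, num_classes] as pred_mean does:

import numpy as np

def predictive_entropy(mean_probs, eps=1e-12):
    # entropy of the MC-averaged predictive distribution, one value per example
    return -np.sum(mean_probs * np.log(mean_probs + eps), axis=1)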
Example #23
def visualize_gradients(viz, data, caption='', zoom=4):
  batchSize = data.size(0)
  rows = int(math.sqrt(batchSize))
  toPIL = transforms.ToPILImage()
  # normalize it
  data = data.cpu()
  dmin = data.min()
  dmax = data.max()
  width = dmax - dmin
  if (width > 0.0):
    data = data.add(-dmin).div(width)

  data_imgs = utils.make_grid(data, nrow=rows)
  pimg = toPIL(data_imgs)
  pimg = pimg.resize((pimg.height * zoom, pimg.width * zoom), Image.NEAREST)
  imgarray = np.array(pimg)
  new_image = torch.from_numpy(imgarray)
  assert (new_image.dim() == 3)  # expect an HxWxC image tensor
Example #24
def test(model, dataloader, dataset_size, criterion, device):
    running_corrects = 0
    running_loss = 0
    pred = []
    true = []
    pred_wrong = []
    true_wrong = []
    image = []
    paths = []
    prob = []

    for batch_idx, (data, target, path) in enumerate(dataloader):
        data, target = data.to(device), target.to(device)
        data = data.float()    # stay on the chosen device instead of forcing CUDA
        target = target.long()
        model.eval()
        output = model(data)
        loss = criterion(output, target)
        output = nn.Softmax(dim=1)(output)
        _, preds = torch.max(output, 1)
        running_corrects += torch.sum(preds == target.data)
        running_loss += loss.item() * data.size(0)
        preds = preds.cpu().numpy()
        target = target.cpu().numpy()
        probs = output.detach().cpu().numpy()[:, 1]  # positive-class probability (binary setup)
        preds = np.reshape(preds, (len(preds), 1))
        target = np.reshape(target, (len(preds), 1))
        data = data.cpu().numpy()

        for i in range(len(preds)):
            pred.append(preds[i])
            true.append(target[i])
            prob.append(probs[i])
            paths.append(path[i])
            if (preds[i] != target[i]):
                pred_wrong.append(preds[i])
                true_wrong.append(target[i])
                image.append(data[i])

    epoch_acc = running_corrects.double() / dataset_size
    epoch_loss = running_loss / dataset_size
    print(epoch_acc, epoch_loss)
    return true, pred, prob, paths, image, true_wrong, pred_wrong
Example #25
def evaluate(model, val_loader, args):
    pred_probs_mc = []
    test_loss = 0
    correct = 0
    with torch.no_grad():
        output_list = []
        labels_list = []
        model.eval()
        begin = time.time()
        for batch_idx, (data, target) in enumerate(val_loader):
            #print('Batch idx {}, data shape {}, target shape {}'.format(batch_idx, data.shape, target.shape))
            if torch.cuda.is_available():
                data, target = data.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)
            else:
                # Tensor.cpu() accepts no non_blocking argument
                data, target = data.cpu(), target.cpu()
            # duplicate the batch so all MC samples run in a single forward pass
            data = torch.cat([data for _ in range(args.num_monte_carlo)], 0)
            output_mc = []
            output, _ = model.forward(data)
            output_mc.append(output)
            output_ = torch.stack(output_mc)
            output_ = output_.reshape(args.num_monte_carlo, -1, num_classes)
            output_list.append(output_)
            labels_list.append(target)

        end = time.time()
        print("inference throughput: ", len_valset / (end - begin),
              " images/s")

        output = torch.stack(output_list)
        output = output.permute(1, 0, 2, 3)
        output = output.contiguous().view(args.num_monte_carlo, len_valset, -1)
        output = torch.nn.functional.softmax(output, dim=2)
        labels = torch.cat(labels_list)
        pred_mean = output.mean(dim=0)
        Y_pred = torch.argmax(pred_mean, axis=1)

        print('Test accuracy:',
              (Y_pred.data.cpu().numpy() == labels.data.cpu().numpy()).mean() *
              100)
Example #26
def visualize_stn():
    with torch.no_grad():
        # Get a batch of training data
        data = next(iter(test_loader))[0].to(device)

        input_tensor = data.cpu()
        transformed_input_tensor = model.stn(data).cpu()

        in_grid = convert_image_np(torchvision.utils.make_grid(input_tensor))

        out_grid = convert_image_np(
            torchvision.utils.make_grid(transformed_input_tensor))

        # Plot the results side-by-side
        f, axarr = plt.subplots(1, 2)
        axarr[0].imshow(in_grid)
        axarr[0].set_title('Dataset Images')

        axarr[1].imshow(out_grid)
        axarr[1].set_title('Transformed Images')
Example #27
    def get_score_from_z(self, z):
        z = [z]
        parameters = pickle.load(open("Outputs/SH3.db", 'rb'))
        q_n = parameters['q_n']
        index = parameters['index']
        v_traj_onehot = parameters['onehot']

        N = np.size(v_traj_onehot, axis=0)
        q = np.size(v_traj_onehot, axis=1)
        n = np.size(q_n)

        data = torch.FloatTensor(z).to(device)
        data = model.decode(data)
        v_gen = data.cpu().detach().numpy()

        v_traj_onehot = v_gen
        pred_ref, _, _ = model(torch.FloatTensor(v_traj_onehot))
        p_weight = pred_ref.cpu().detach().numpy()
        log_p_list = np.array(toolkit.make_logP(v_traj_onehot, p_weight, q_n))
        return log_p_list
Example #28
  def _plot_image(self, data, gt_boxes, num_boxes):
      import matplotlib.pyplot as plt
      X = data.cpu().numpy().copy()
      X += cfg.PIXEL_MEANS  # undo the mean subtraction applied at load time
      X = X.astype(np.uint8)
      X = X.squeeze(0)
      boxes = gt_boxes[:num_boxes,:].cpu().numpy().copy()

      fig, ax = plt.subplots(figsize=(8,8))
      ax.imshow(X[:,:,::-1], aspect='equal')
      for i in range(boxes.shape[0]):
          bbox = boxes[i, :4]
          ax.add_patch(
                  plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2]-bbox[0],
                                 bbox[3]-bbox[1], fill=False, linewidth=2.0)
                  )
      plt.tight_layout()
      plt.show()
Example #29
def evaluate(args, model, val_loader):
    pred_probs_mc = []
    test_loss = 0
    correct = 0
    output_list = []
    labels_list = []
    model.eval()
    with torch.no_grad():
        begin = time.time()
        for data, target in val_loader:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            else:
                data, target = data.cpu(), target.cpu()

            # duplicate the batch so all MC samples run in a single forward pass
            data = torch.cat([data for _ in range(args.num_monte_carlo)], 0)
            output_mc = []
            output, _ = model.forward(data)
            output_mc.append(output)
            output_ = torch.stack(output_mc)
            output_ = output_.reshape(args.num_monte_carlo, -1, num_classes)
            output_list.append(output_)
            labels_list.append(target)
        end = time.time()
        print("inference throughput: ", len_testset / (end - begin),
              " images/s")

        output = torch.stack(output_list)
        output = output.permute(1, 0, 2, 3)
        output = output.contiguous().view(args.num_monte_carlo, len_testset,
                                          -1)
        output = torch.nn.functional.softmax(output, dim=2)
        labels = torch.cat(labels_list)
        pred_mean = output.mean(dim=0)
        Y_pred = torch.argmax(pred_mean, axis=1)
        print('Test accuracy:',
              (Y_pred.data.cpu().numpy() == labels.data.cpu().numpy()).mean() *
              100)
        np.save('./probs_cifar_mc_flipout.npy', output.data.cpu().numpy())
        np.save('./cifar_test_labels_mc_flipout.npy',
                labels.data.cpu().numpy())
Example #30
def evaluate(model, val_loader):
    model.eval()
    correct = 0
    with torch.no_grad():
        for data, target in val_loader:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            else:
                data, target = data.cpu(), target.cpu()
            output = model(data)
            output = torch.nn.functional.softmax(output, dim=1)
            pred = output.argmax(
                dim=1,
                keepdim=True)  # index of the highest-probability class
            correct += pred.eq(target.view_as(pred)).sum().item()

    print('\nTest set: Accuracy: {:.2f}%\n'.format(100. * correct /
                                                   len(val_loader.dataset)))
    target_labels = target.cpu().data.numpy()
    np.save('./probs_cifar_det.npy', output.cpu().data.numpy())
    np.save('./cifar_test_labels.npy', target_labels)