Code example #1
0
def t_sne(size_datapoints):
    """Visualize the model's latent space with t-SNE and save the plot.

    Encodes `size_datapoints` MNIST images with the (module-level) VAE
    `model`, projects the latent vectors to 2-D with t-SNE, and draws one
    color per digit class. The figure is written to
    `t_sne_save_directory + 't_sne_visualization.png'` and shown.

    Relies on module-level globals: `model`, `load`, `t_sne_save_directory`.
    """
    model.to('cpu')
    model.eval()
    with torch.no_grad():
        dataloader = load.load_mnist(size_datapoints)
        data = iter(dataloader)
        # Bug fix: `iterator.next()` is Python 2 syntax and raises
        # AttributeError on Python 3 — use the builtin next().
        images, labels = next(data)
        mean, logvar = model.encoder(images)
        # Latent-space representation of the batch.
        z = model.reparameterize(mean, logvar)

        # Project the latent vectors down to 2 (n_components) dimensions.
        tsne = TSNE(n_components=2, random_state=0)
        z_2d = tsne.fit_transform(z)

        target_ids = range(0, 10)
        # Labels must leave the computation graph before .numpy().
        y = labels.detach().numpy()

        plt.figure(figsize=(6, 5))
        colors = 'r', 'g', 'b', 'c', 'm', 'gold', 'k', 'gray', 'orange', 'chocolate'
        # One scatter call per digit class, each in its own color.
        for i, c in zip(target_ids, colors):
            ind = np.where(y == i)  # indices of samples labelled i
            # plt.scatter(x-coordinates, y-coordinates, color)
            plt.scatter(z_2d[ind, 0], z_2d[ind, 1], c=c)
        plt.savefig(t_sne_save_directory + 't_sne_visualization.png')

        plt.show()
        plt.close()
Code example #2
0
# Move the model to the target device before creating the optimizer so the
# optimizer tracks the device-resident parameters.
model = model.to(device)

# CrossEntropyLoss combines log-softmax and negative log-likelihood.
criterium = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=1e-4)

# Map

# Project-specific Dataset wrapper; 20 * 1000 is presumably a size cap in
# tokens or sentences — TODO confirm against the Dataset constructor.
data = Dataset(src_data, tgt_data, 20 * 1000, batch_size_sents=1000)

# Establish the batch ordering before iterating.
data.create_order()

for epoch in range(4001):
    # Definition of inputs as variables for the net.
    # requires_grad is set False because we do not need to compute the
    # derivative of the inputs.
    # NOTE(review): `data.next()` here is a method on the project Dataset
    # class (which also exposes create_order), not a Python 2 iterator call
    # — verify the class actually defines next().
    samples = data.next()
    batch = samples[0]
    #batch.cuda()

    # Set gradient to 0.
    optimizer.zero_grad()
    # Feed forward.
    src = batch.get('source')
    # Transpose to the (seq_len, batch) layout the model expects —
    # TODO confirm model input convention.
    src = src.transpose(0, 1)
    #print("Batch: " + str(src.size()))
    src_mask = batch.get("src_mask")
    src_length = batch.get("src_length")

    targets = batch.get('target_input')

    pred = model(src, src_mask, src_length)
Code example #3
0
File: main.py  Project: wxyhv/TRTorch
def main():
    """Train VGG16 on CIFAR-10 with TensorBoard logging and checkpointing.

    Relies on module-level globals: `args` (hyper-parameters and paths),
    `state` (mutable training state, e.g. current learning rate),
    `classes` (class-name list) and `writer` (TensorBoard SummaryWriter),
    plus helpers `adjust_lr`, `train`, `test` and `save_checkpoint`.
    """
    global state
    global classes
    global writer
    if not os.path.isdir(args.ckpt_dir):
        os.makedirs(args.ckpt_dir)

    # Standard CIFAR-10 augmentation and per-channel normalization stats.
    training_dataset = datasets.CIFAR10(root='./data',
                                        train=True,
                                        download=True,
                                        transform=transforms.Compose([
                                            transforms.RandomCrop(32,
                                                                  padding=4),
                                            transforms.RandomHorizontalFlip(),
                                            transforms.ToTensor(),
                                            transforms.Normalize(
                                                (0.4914, 0.4822, 0.4465),
                                                (0.2023, 0.1994, 0.2010)),
                                        ]))
    training_dataloader = torch.utils.data.DataLoader(
        training_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=2)

    # Test split: no augmentation, only tensor conversion + normalization.
    testing_dataset = datasets.CIFAR10(root='./data',
                                       train=False,
                                       download=True,
                                       transform=transforms.Compose([
                                           transforms.ToTensor(),
                                           transforms.Normalize(
                                               (0.4914, 0.4822, 0.4465),
                                               (0.2023, 0.1994, 0.2010)),
                                       ]))

    testing_dataloader = torch.utils.data.DataLoader(
        testing_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=2)

    num_classes = len(classes)

    model = vgg16(num_classes=num_classes, init_weights=False)
    model = model.cuda()

    # Log the model graph using one sample batch.
    # Bug fix: `iterator.next()` is Python 2 syntax and raises
    # AttributeError on Python 3 — use the builtin next().
    data = iter(training_dataloader)
    images, _ = next(data)

    writer.add_graph(model, images.cuda())
    writer.close()

    crit = nn.CrossEntropyLoss()
    opt = optim.SGD(model.parameters(),
                    lr=args.lr,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay)

    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)

    # Optionally resume training from a saved checkpoint.
    if args.start_from != 0:
        ckpt_file = args.ckpt_dir + '/ckpt_epoch' + str(
            args.start_from) + '.pth'
        print('Loading from checkpoint {}'.format(ckpt_file))
        # Raise a real error instead of `assert`, which is stripped when
        # Python runs with -O.
        if not os.path.isfile(ckpt_file):
            raise FileNotFoundError(ckpt_file)
        ckpt = torch.load(ckpt_file)
        model.load_state_dict(ckpt["model_state_dict"])
        opt.load_state_dict(ckpt["opt_state_dict"])
        state = ckpt["state"]

    for epoch in range(args.start_from, args.epochs):
        adjust_lr(opt, epoch)
        writer.add_scalar('Learning Rate', state["lr"], epoch)
        # NOTE(review): close() per epoch flushes events and SummaryWriter
        # transparently reopens on the next add_* call, but flush() would
        # express the intent better — confirm before changing.
        writer.close()
        print('Epoch: [%5d / %5d] LR: %f' %
              (epoch + 1, args.epochs, state['lr']))

        train(model, training_dataloader, crit, opt, epoch)
        test_loss, test_acc = test(model, testing_dataloader, crit, epoch)

        print("Test Loss: {:.5f} Test Acc: {:.2f}%".format(
            test_loss, 100 * test_acc))

        # Checkpoint every 10 epochs (epochs 10, 20, ... in 1-based terms).
        if epoch % 10 == 9:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'model_state_dict': model.state_dict(),
                    'acc': test_acc,
                    'opt_state_dict': opt.state_dict(),
                    'state': state
                },
                ckpt_dir=args.ckpt_dir)
Code example #4
0
File: vanilla.py  Project: hesampakdaman/ppgn-disc
        # WGAN-style schedule: train the discriminator (critic) much more
        # often early on and periodically, otherwise a few steps per G step.
        if (G_step < 25 or G_step % 500 == 0):
            Diters = 100
        else:
            Diters = 5
        j = 0
        ###########################
        # Train discriminator
        ###########################
        # `i` indexes into the dataset; loop outer scope owns `i`/`data`.
        while j < Diters and i < len(dataset):
            # Clear gradients on all three nets before the D update.
            netD.zero_grad()
            netE.zero_grad()
            netG.zero_grad()

            # real data
            j += 1
            # NOTE(review): `iterator.next()` is Python 2 syntax; on
            # Python 3 this raises AttributeError — should be next(data).
            _data, _ = data.next()
            _data = _data.view(-1, 28 * 28)  # flatten MNIST images
            i += 1
            real_data = torch.Tensor(_data)
            if use_cuda:
                real_data = real_data.cuda(gpu)
            real_data_v = Variable(real_data)

            # h input for G: activations of encoder layer ARGS.Glayer,
            # detached from the graph via .data.
            h = netE(real_data_v.view(-1, 1, 28, 28))[ARGS.Glayer].data
            if use_cuda:
                h = h.cuda(gpu)

            # train with real: critic score averaged over the batch.
            D_real = netD(real_data_v)
            D_real = D_real.mean()
Code example #5
0
        else:
            self.data, self.targets = img_test, target_test

    def __getitem__(self, index):
        """Return the (image, label) pair at *index*.

        The stored sample is reshaped to 34x34x10 — presumably
        height x width x time-bins for N-MNIST events; confirm against the
        data loader. Optional `transform` / `target_transform` are applied
        when set.
        """
        target = int(self.targets[index])
        img = self.data[index].reshape(34, 34, 10)

        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self):
        """Return the number of samples held by this dataset split."""
        n_samples = len(self.data)
        return n_samples


if __name__ == '__main__':
    # Smoke test: load one batch of N-MNIST and print its tensor shape.
    # (Removed the original unused `kwargs` dict — it was never passed to
    # the DataLoader.)
    batch_size = 32
    dataloader = torch.utils.data.DataLoader(NMNIST(
        '../data', transform=transforms.Compose([transforms.ToTensor()])),
                                             batch_size=batch_size,
                                             shuffle=True)

    data = iter(dataloader)
    # Bug fix: `iterator.next()` is Python 2 syntax and raises
    # AttributeError on Python 3 — use the builtin next().
    print(next(data)[0].shape)
Code example #6
0
File: real.py  Project: hesampakdaman/ppgn-disc
# ==================Definition Start======================


def generate_image(x):
    """Save the batch *x* as a 10-column grid of real MNIST samples.

    NOTE(review): `input` here is a project module (it also provides
    get_dataset below) that shadows the builtin `input`.
    """
    input.save(x.data, 'mnist', ARGS.samplepth, 'reals', 10)


# ==================Definition End======================

dataset = input.get_dataset('mnist', BATCH_SIZE)

# Buffer for a 10x10 grid: up to 10 samples for each of the 10 digits.
# Renamed from `sorted`, which shadowed the builtin (the commented-out
# generate_image call below is updated to match).
sorted_samples = torch.zeros(100, 1, 28, 28)

data = iter(dataset)
# Bug fix: `iterator.next()` is Python 2 syntax and raises AttributeError
# on Python 3 — use the builtin next().
_data, _label = next(data)

sample = 0  # write index into sorted_samples
for i in range(10):  # one pass per digit class
    class_sample = 0  # samples collected so far for digit i
    for j in range(len(_label)):  # scan labels for matches
        if (class_sample == 10):  # enough samples of this digit
            break
        elif (_label[j] == i):
            sorted_samples[sample, :] = _data[j]
            sample += 1
            # Bug fix: the original incremented the undefined name
            # `digit_sample` (NameError on the first match), so
            # `class_sample` never advanced. Also dropped the original
            # no-op `j += 1` — the for loop rebinds j each iteration.
            class_sample += 1

# generate_image(Variable(sorted_samples))