Example #1
    def run(self, step_limit):
        self.train()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            dataset = svhn.SVHN()
            train_data, train_label = dataset.get_trainset()
            test_data, test_label = dataset.get_testset()
            train_data = train_data.reshape(-1, 3072)  # flatten 32x32x3 images
            test_data = test_data.reshape(-1, 3072)

            # Randomly sample 1,000 test indices (not used further below).
            test_indices = np.arange(len(test_data))
            np.random.shuffle(test_indices)
            test_indices = test_indices[:1000]

            name = self.info()
            path = "svhn/" + str(step_limit) + name

            saver = NNutils.save(path, sess)
            writer, writer_test, merged = NNutils.graph(path, sess)

            step = sess.run(self.global_step)
            while step < step_limit:
                print("step :", step)
                for start, end in zip(
                        range(0, len(train_data), self.batch_size),
                        range(self.batch_size, len(train_data),
                              self.batch_size)):
                    # Keep probabilities of 1.0 effectively disable dropout here.
                    summary, _, loss, step = sess.run(
                        [merged, self.training, self.cost, self.global_step],
                        feed_dict={self.x: train_data[start:end],
                                   self.y: train_label[start:end],
                                   self.dropout_conv: 1.0,
                                   self.dropout_normal: 1.0})

                    if step % 50 == 0:
                        writer.add_summary(summary, step)
                        print(step, datetime.now(), loss)

                # Evaluate on the full test set after each pass over the data.
                summary, loss, accuracy = sess.run(
                    [merged, self.cost, self.accuracy],
                    feed_dict={self.x: test_data,
                               self.y: test_label,
                               self.dropout_conv: 1.0,
                               self.dropout_normal: 1.0})

                writer_test.add_summary(summary, step)
                print("test results : ", accuracy, loss)
                saver.save(sess, path + "/" + name + ".ckpt", step)
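
A minimal driver for this loop might look like the sketch below; the wrapper class name Network and its constructor arguments are assumptions, since the class definition is not shown in this excerpt.

# Hypothetical usage; Network is an assumed wrapper class exposing run().
model = Network(batch_size=128)
model.run(step_limit=10000)  # train until global_step reaches 10000, checkpointing each epoch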
Example #2
def select_dataset(name):
    x_size, y_size, train_data, train_label, test_data, test_label = 0, 0, [], [], [], []  # initialize defaults
    if name == 'cifar':
        dataset = cifar.CIFAR()
        train_data, train_label, test_data, test_label = dataset.getdata()

        train_data = train_data.reshape(-1, 3072)
        test_data = test_data.reshape(-1, 3072)
        x_size = 3072
        y_size = 10

    elif name == 'svhn':
        dataset = svhn.SVHN()
        train_data, train_label = dataset.get_trainset()
        test_data, test_label = dataset.get_testset()

        train_data = train_data.reshape(-1, 3072)
        test_data = test_data.reshape(-1, 3072)
        x_size = 3072
        y_size = 10

    elif name == 'mnist':
        dataset = mnist.read_data_sets(flags.MNIST_DIR, one_hot=True)
        train_data, train_label, test_data, test_label = dataset.train.images, dataset.train.labels, \
                                                         dataset.test.images, dataset.test.labels
        x_size = 784
        y_size = 10

    elif name == 'news':
        trainset = fetch_20newsgroups(data_home=flags.NEWS_DIR, subset='train')
        testset = fetch_20newsgroups(data_home=flags.NEWS_DIR, subset='test')

        vectorizer = TfidfVectorizer(analyzer='word', max_features=3072)

        vectorizer.fit(trainset.data)
        train_data = vectorizer.transform(trainset.data)
        train_data = csr_matrix.todense(train_data)
        train_label = trainset.target
        train_label = NNutils.onehot(train_label, 20, list=True)
        # print(train_label.shape)

        test_data = vectorizer.transform(testset.data)
        test_data = csr_matrix.todense(test_data)
        test_label = testset.target
        test_label = NNutils.onehot(test_label, 20, list=True)

        x_size = 3072
        y_size = 20

    return Dataset(name, x_size, y_size, train_data, train_label, test_data,
                   test_label)
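
For reference, selecting a dataset and inspecting its shape metadata might look like the following; the attribute names are assumptions based on the Dataset constructor arguments above.

# Hypothetical usage; assumes Dataset exposes its constructor fields as attributes.
data = select_dataset('mnist')
print(data.name, data.x_size, data.y_size)  # expected: mnist 784 10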
Example #3
def test(nnName, dataName, CUDA_DEVICE, epsilon, temperature):
    
    net1 = torch.load("../models/{}.pth".format(nnName))
    optimizer1 = optim.SGD(net1.parameters(), lr=0, momentum=0)  # dummy optimizer: lr=0 means it never updates weights
    net1.cuda(CUDA_DEVICE)
    
    if dataName != "Uniform" and dataName != "Gaussian":
        if dataName == "SVHN":
            testsetout = svhn.SVHN("../data/SVHN", split='test', transform=transform, download=True)
        elif dataName in ["HFlip","VFlip"]:
            testsetout = torchvision.datasets.CIFAR10('../data', train=False, download=True, 
                                                       transform=Flip[dataName])
        elif dataName == "CelebA":
            testsetout = torchvision.datasets.ImageFolder(
                "../data/{}".format(dataName), 
                transform=transforms.Compose([transforms.CenterCrop(178), Resize(32), transform]))
        else:
            testsetout = torchvision.datasets.ImageFolder("../data/{}".format(dataName), 
                                                          transform=transform)
        testloaderOut = torch.utils.data.DataLoader(testsetout, batch_size=1,
                                         shuffle=False, num_workers=2)

    if nnName == "densenet10" or nnName == "wideresnet10":
        testset = torchvision.datasets.CIFAR10(root='../data', train=False, download=True, transform=transform)
        testloaderIn = torch.utils.data.DataLoader(testset, batch_size=1,
                                                   shuffle=False, num_workers=2)
    if nnName == "densenet100" or nnName == "wideresnet100":
        testset = torchvision.datasets.CIFAR100(root='../data', train=False, download=True, transform=transform)
        testloaderIn = torch.utils.data.DataLoader(testset, batch_size=1,
                                                   shuffle=False, num_workers=2)
    
    if dataName == "Gaussian":
        d.testGaussian(net1, criterion, CUDA_DEVICE, testloaderIn, testloaderIn, nnName, dataName, epsilon, temperature)
        m.metric(nnName, dataName)

    elif dataName == "Uniform":
        d.testUni(net1, criterion, CUDA_DEVICE, testloaderIn, testloaderIn, nnName, dataName, epsilon, temperature)
        m.metric(nnName, dataName)
    else:
        d.testData(net1, criterion, CUDA_DEVICE, testloaderIn, testloaderOut, nnName, dataName, epsilon, temperature)
        m.metric(nnName, dataName)
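
An invocation of this test harness might look like the line below; the epsilon and temperature values are illustrative placeholders, not values prescribed by this code.

# Hypothetical call; the model and dataset names follow the branches above,
# and epsilon/temperature are illustrative values only.
test("densenet10", "SVHN", CUDA_DEVICE=0, epsilon=0.0014, temperature=1000)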
Example #4
def collate(batch):
    # Unzip the batch into per-field tuples, then tensorize each field.
    images, input_labels, output_labels = zip(*batch)
    images = torch.stack(images, 0)
    input_labels = torch.LongTensor(input_labels)
    output_labels = torch.LongTensor(output_labels)
    return (images, input_labels, output_labels)


composed = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Scale(100),  # Scale was renamed to Resize in newer torchvision
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
train_dataset = svhn.SVHN('./svhn', split='train', transform=composed)
train_dataloader = DataLoader(train_dataset,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=0,
                              collate_fn=collate)
n_iters = len(train_dataloader)

val_dataset = svhn.SVHN('./svhn', split='val', transform=composed)
val_dataloader = DataLoader(val_dataset,
                            batch_size=1,
                            num_workers=0,
                            collate_fn=collate)

test_dataset = svhn.SVHN('./svhn', split='test', transform=composed)
test_dataloader = DataLoader(test_dataset,
                             batch_size=1,   # assumed: the snippet is truncated here, so these mirror the val loader
                             num_workers=0,
                             collate_fn=collate)
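
With the loaders in place, a quick sanity check of the custom collate output might look like this sketch; the exact shapes depend on BATCH_SIZE and the Scale(100) transform above.

# Sketch: pull one batch and inspect what collate() produced.
images, input_labels, output_labels = next(iter(train_dataloader))
print(images.shape)        # (BATCH_SIZE, 3, H, W) after ToTensor/Normalize
print(input_labels.shape)  # (BATCH_SIZE,)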