Code example #1
def random_projection_mnist(opt):

	import numpy as np
	import torch
	import torch.nn as nn
	import matplotlib.pyplot as plt
	import seaborn as sns
	from torch.autograd import Variable
	import data

	mnist_data = data.MNIST(opt)  # renamed so the imported `data` module is not shadowed
	sigmoid = nn.Sigmoid()

	# one batch per digit class (the plotting below assumes 100 samples each)
	batches = [mnist_data.next_batch_from_class(i) for i in range(10)]

	from sklearn.manifold import TSNE
	tsne_model = TSNE(n_components=2, random_state=0)
	np.set_printoptions(suppress=True)

	# baseline: t-SNE embedding of the raw pixel vectors
	mnist = torch.cat(batches, 0)
	output = tsne_model.fit_transform(mnist.numpy())

	print(mnist.shape)

	# for i in range(10):
	#     plt.plot(output[i*100:(i+1)*100, 0], output[i*100:(i+1)*100, 1], '+')
	#
	# plt.pause(10000)

	# random binary projection: multiply by a random Gaussian matrix,
	# squash with a sigmoid, and round to {0, 1} codes of length 1024
	M = torch.FloatTensor(784, 1024).normal_(0, 200)

	embedding = [torch.round(sigmoid(Variable(torch.mm(batches[i], M)))) for i in range(10)]

	# t-SNE embedding of the binary codes
	mnist = torch.cat(embedding, 0)
	print(mnist.shape)
	output = tsne_model.fit_transform(mnist.data.numpy())

	# scatter the t-SNE points, one colour per digit class
	for i in range(10):
		plt.plot(output[i*100:(i+1)*100, 0], output[i*100:(i+1)*100, 1], '+')

	plt.pause(10000)

	# inspect a few codes from two different classes
	print(embedding[0][:10])
	print(embedding[4][:10])

	# pairwise Hamming statistics:
	#   channels 0/1: within-class distances for classes i and k (baselines)
	#   channel 2:    cross-class distance between i and k
	hamming = np.zeros((10, 10, 3))
	for i in range(10):
		for k in range(10):
			hamming[i, k, 0] = hamming_distance(embedding[i], embedding[i])
			hamming[i, k, 1] = hamming_distance(embedding[k], embedding[k])
			hamming[i, k, 2] = hamming_distance(embedding[i], embedding[k])
			print(i, k, hamming[i, k])

	print(hamming[:,:,2])

	sns.heatmap(hamming[:,:,2])
	plt.pause(100)
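
The snippet calls a project helper, hamming_distance, that is not shown. Given how it is used above (the distance of a class to itself serves as a within-class baseline, so it cannot simply be zero), it plausibly averages the Hamming distance over all row pairs of two batches of binary codes. A minimal sketch under that assumption:

def hamming_distance(a, b):
    # hypothetical reimplementation: average Hamming distance over all
    # pairs of rows (one code from a, one from b); for a == b this is
    # the mean within-set distance
    diff = (a.unsqueeze(1) != b.unsqueeze(0)).float()  # [len(a), len(b), bits]
    return diff.sum(-1).mean().item()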
Code example #2
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.autograd import Variable

# `data_`, `mlp`, and `opt` are assumed to come from the surrounding project


def train_as_binary():
    # one binary (one-vs-rest) classifier per class; with num_classes = 1
    # only the classifier for class 0 is trained
    num_classes = 1
    dataloader = [data_.MNIST(opt, class_=i) for i in range(num_classes)]
    classifier = [
        mlp.Classifier(opt.input_size, opt.feature_size)
        for i in range(num_classes)
    ]
    criterion = [nn.BCELoss() for i in range(num_classes)]
    optimiser = [
        optim.Adam(classifier[i].parameters(), lr=opt.lr)
        for i in range(num_classes)
    ]
    logs = [[] for i in range(num_classes)]

    for i in range(num_classes):
        for epoch in range(opt.epochs):
            for idx in range(len(dataloader[i])):

                classifier[i].zero_grad()

                dataset, targets = dataloader[i].next()
                output = classifier[i](Variable(dataset))
                loss = criterion[i](output, Variable(targets))

                loss.backward()
                optimiser[i].step()

            test_dataset, test_targets = dataloader[i].next_test()
            test_output = classifier[i](Variable(test_dataset))
            loss = criterion[i](test_output, Variable(test_targets))

            pred = torch.round(test_output.data)

            # true-positive rate: correct predictions among positive targets
            positive_correct = pred.eq(test_targets) * test_targets.byte()
            positive_correct = float(
                positive_correct.sum()) / (test_targets.sum())

            # true-negative rate: correct predictions among negative targets
            negative_correct = pred.eq(test_targets) * (1 -
                                                        test_targets).byte()
            negative_correct = float(
                negative_correct.sum()) / ((1 - test_targets).sum())
            # correct = float(pred.eq(test_targets).cpu().sum()) / test_output.size(0)

            logs[i].append(positive_correct)

            print("{0} - {1} - {2}".format(epoch, logs[i][-1],
                                           negative_correct))

            for k in range(len(logs)):
                plt.plot(logs[k])
            plt.pause(0.01)
            plt.clf()

    for k in range(len(logs)):
        plt.plot(logs[k])
    plt.pause(1000)
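
mlp.Classifier is project code that is not shown here. Since its output feeds nn.BCELoss directly, it must end in a sigmoid; a minimal stand-in under that assumption:

import torch.nn as nn

class Classifier(nn.Module):
    # hypothetical stand-in for mlp.Classifier: a single hidden layer,
    # sigmoid output so nn.BCELoss receives probabilities in [0, 1]
    def __init__(self, input_size, feature_size):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_size, feature_size),
            nn.ReLU(),
            nn.Linear(feature_size, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return self.net(x)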
Code example #3
def trainGANs(n=100, datadir='data/gan/'):
    # train n independent GANs, each writing results to its own subdirectory
    for i in range(n):
        os.makedirs(datadir + str(i), exist_ok=True)
        loader = data.MNIST(batch=128)
        model = SimpleGAN(28 * 28, zdim=64, hd=64, hg=64, lr=2e-4).cuda()
        print('Network: ' + str(i) + ', Params: ' + str(utils.count_parameters(model)))
        #model = DCGAN(zdim=16, h=4, lr=2e-4).cuda()
        trainer = MNISTTrainer(model, loader, datadir + str(i) + '/')
        trainer.train(epochs=100)
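
utils.count_parameters is a project utility; the conventional implementation is a one-liner, sketched here as an assumption:

def count_parameters(model):
    # assumed behaviour: total number of trainable parameters
    return sum(p.numel() for p in model.parameters() if p.requires_grad)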
Code example #4
import numpy as np
import os
import matplotlib.pyplot as plt
import SNN
import data

SAVE_PATH = os.getcwd() + '/weight_mnist'
mnist = data.MNIST(
    path=["MNIST/t10k-images.idx3-ubyte", "MNIST/t10k-labels.idx1-ubyte"])

# pretrained weights for the two spiking layers
w1 = np.load(SAVE_PATH + '1.npy')
w2 = np.load(SAVE_PATH + '2.npy')

Ts = 1e-3      # simulation timestep (s)
scale = 2
view_max = 2   # length of the simulated window (s)

l1 = SNN.SNNDiscrete(w1, Ts, scale)
l2 = SNN.SNNDiscrete(w2, Ts, scale)

correct = 0

for i in range(mnist.datasize):
    xs, ys = mnist.next_batch(1, shuffle=True)
    # temporal coding: brighter pixels spike earlier (intensity 1 -> t = 0)
    xs = (1 - xs[0, :]) / Ts

    # one-hot spike raster: 784 input neurons x (view_max / Ts) timesteps
    input_mat = np.zeros([784, int(1 / Ts * view_max)])
    input_mat[range(784), xs.astype(int)] = 1

    l1out = l1.forward(input_mat)
    l2out = l2.forward(l1out)
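
    # hypothetical continuation (the snippet is cut off above): decode by
    # first-spike time -- the output neuron that fires earliest wins
    spike_times = np.argmax(l2out, axis=1).astype(float)  # index of first spike per neuron
    spike_times[l2out.sum(axis=1) == 0] = np.inf          # silent neurons never win
    if np.argmin(spike_times) == np.argmax(ys):           # assumes ys is one-hot
        correct += 1

print('accuracy:', correct / mnist.datasize)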
Code example #5
File: main.py Project: tachitachi/BackpropTest
def Linear(inputs, outputs, bias=True):
    # Glorot-style uniform init in [-a, a] with a = sqrt(2 / (fan_in + fan_out));
    # the head of this function is reconstructed from the surviving fragment,
    # so the signature is an assumption
    w = Tensor((np.random.random(
        (inputs, outputs)) * 2 - 1) * np.sqrt(2 / (inputs + outputs)))
    if bias:
        b = Tensor(np.zeros((outputs, )))
        return w, b
    return w


def accuracy(y, output):
    correct = np.sum(np.argmax(y.value, -1) == np.argmax(output.value, -1))
    total = y.shape[0]
    return correct / total, correct, total


if __name__ == '__main__':

    train = data.MNIST('train', one_hot=True)
    test = data.MNIST('test', one_hot=True)
    train_loader = data.DataLoader(train,
                                   batch_size=32,
                                   shuffle=True,
                                   repeat=False)
    test_loader = data.DataLoader(test,
                                  batch_size=32,
                                  shuffle=False,
                                  repeat=False)

    w1, b1 = Linear(28 * 28, 256)
    w2, b2 = Linear(256, 10)

    opt = bp.SGD([w1, b1, w2, b2], lr=1e-3)
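
The scale factor np.sqrt(2 / (inputs + outputs)) in Linear is a Glorot-style fan average. A quick standalone check of the resulting weight spread, in plain NumPy and independent of the project's Tensor class:

import numpy as np

# uniform in [-a, a] with a = sqrt(2 / (fan_in + fan_out))
w = (np.random.random((784, 256)) * 2 - 1) * np.sqrt(2 / (784 + 256))
print(w.std())  # ~ a / sqrt(3) ~ 0.025 for fan_in=784, fan_out=256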
Code example #6
        ## create a data loader for training
        ds = Subset(MNISTDataset(x_train, y_train, classes, class_to_idx, transform=tforms.Compose(tforms_train)),
                    data.get_random_index(len(y_train), len(y_train), args.seed))
        self.train = DataLoader(ds, batch_size=args.batch_size, shuffle=True, num_workers=args.n_workers)

        ## create a data loader for validation (no shuffling needed for evaluation)
        ds = Subset(MNISTDataset(x_val, y_val, classes, class_to_idx, transform=tforms.Compose(tforms_val)),
                    data.get_random_index(len(y_val), len(y_val), args.seed))
        self.val = DataLoader(ds, batch_size=args.batch_size, shuffle=False, num_workers=args.n_workers)

        ## create a data loader for test (no shuffling needed for evaluation)
        ds = Subset(MNISTDataset(x_test, y_test, classes, class_to_idx, transform=tforms.Compose(tforms_test)),
                    data.get_random_index(len(y_test), len(y_test), args.seed))
        self.test = DataLoader(ds, batch_size=args.batch_size, shuffle=False, num_workers=args.n_workers)
        
        ## print data statistics
        print(f'#train = {len(self.train.dataset)}, #val = {len(self.val.dataset)}, #test = {len(self.test.dataset)}')

        
if __name__ == '__main__':
    dsld = data.MNIST(types.SimpleNamespace(src='MNIST', batch_size=100, seed=0, n_workers=10))

    
## MNIST
#train = 60000, #val = 5000, #test = 5000
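
data.get_random_index is project-specific. From the call sites above (get_random_index(n, m, seed) feeding a Subset), it plausibly returns m seeded random indices out of range(n); a sketch under that assumption:

import numpy as np

def get_random_index(n, m, seed):
    # hypothetical reimplementation: m indices drawn without replacement
    # from range(n), reproducible via the seed
    return np.random.RandomState(seed).permutation(n)[:m].tolist()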



Code example #7
File: mnist_GAN.py Project: Gerryflap/master_thesis
#                     help="When given, loads models from LOAD_PATH folder")
# parser.add_argument("--save_path", action="store", type=str, default=None,
#                     help="When given, saves models to LOAD_PATH folder after all epochs (or every epoch)")
# parser.add_argument("--save_every_epoch", action="store_true", default=False,
#                     help="When a save path is given, store the model after every epoch instead of only the last")
# parser.add_argument("--img_path", action="store", type=str, default=None,
#                     help="When given, saves samples to the given directory")

args = parser.parse_args()

output_path = util.output.init_experiment_output_dir("mnist", "gan", args)

dataset = data.MNIST("data/downloads/mnist",
                     train=True,
                     download=True,
                     transform=transforms.Compose([
                         transforms.ToTensor(),
                         transforms.Lambda(lambda img: img * 2 - 1)
                     ]))
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         num_workers=12)

G = Generator28(args.l_size, args.h_size, args.use_mish)
D = Discriminator28(args.h_size,
                    use_bn=args.use_batchnorm_in_D,
                    use_mish=args.use_mish)

if args.cuda:
    G = G.cuda()
    D = D.cuda()  # assumed continuation; the snippet is cut off after G
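
The Lambda transform rescales ToTensor's [0, 1] output to [-1, 1], the usual input range for a GAN with a tanh generator. torchvision can express the same mapping without a Lambda:

from torchvision import transforms

# (x - 0.5) / 0.5 maps [0, 1] to [-1, 1], same as img * 2 - 1
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])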