import time
from math import pi

import numpy as np
import torch


def trainingStep(net, NTRAIN, min_difficulty=1.0, max_difficulty=1.0,
                 min_sparseness=0, max_sparseness=0, min_imbalance=0,
                 max_imbalance=0, feature_variation=True, class_variation=True,
                 BS=200):
	FEATURES = net.FEATURES
	CLASSES = net.CLASSES

	# Accumulate BS synthetic problems, then train on them as one batch.
	batch_mem = []
	batch_test = []
	batch_label = []
	class_count = []
	
	for i in range(BS):
		# Optionally vary the raw feature count per problem.
		if feature_variation:
			feat = np.random.randint(int(2.5 * FEATURES)) + FEATURES // 2
		else:
			feat = FEATURES

		# Optionally vary the class count per problem.
		if class_variation:
			classes = np.random.randint(CLASSES - 2) + 2
		else:
			classes = CLASSES
			
		# Difficulty, sparseness, and imbalance are drawn uniformly from their ranges.
		xd, yd = problemGenerator(N=NTRAIN + 100, FEATURES=feat, CLASSES=classes,
								  sigma=np.random.rand() * (max_difficulty - min_difficulty) + min_difficulty,
								  sparseness=np.random.rand() * (max_sparseness - min_sparseness) + min_sparseness,
								  imbalance=np.random.rand() * (max_imbalance - min_imbalance) + min_imbalance)
		
		# Pad one-hot labels out to the network's fixed class dimension.
		if classes < CLASSES:
			yd = np.pad(yd, ((0, 0), (0, CLASSES - classes)), 'constant', constant_values=0)
		xd = normalizeAndProject(xd, NTRAIN, FEATURES)

		# The support set carries features plus labels; the query set carries features only.
		trainset = np.hstack([xd[0:NTRAIN], yd[0:NTRAIN]])
		testset = xd[NTRAIN:]
		labelset = yd[NTRAIN:]

		batch_mem.append(trainset)
		batch_test.append(testset)
		batch_label.append(labelset)
		class_count.append(classes)

	# Stack into tensors: memory is (BS, 1, FEATURES+CLASSES, NTRAIN),
	# queries are (BS, 1, FEATURES, 100), labels are (BS, CLASSES, 100).
	batch_mem = tovar(np.array(batch_mem).transpose(0, 2, 1).reshape(BS, 1, FEATURES + CLASSES, NTRAIN))
	batch_test = tovar(np.array(batch_test).transpose(0, 2, 1).reshape(BS, 1, FEATURES, 100))
	batch_label = tovar(np.array(batch_label).transpose(0, 2, 1))
	class_count = torch.cuda.FloatTensor(np.array(class_count))
	
	net.zero_grad()
	p = net.forward(batch_mem, batch_test, class_count)
	# p holds per-class log-probabilities; this is the mean negative log-likelihood.
	loss = -torch.sum(p * batch_label, 1).mean()
	loss.backward()
	net.adam.step()
	err = loss.item()

	return err
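
# Usage sketch (not from the original source): assuming a ClassifierGenerator
# like the ones loaded later in this file, which exposes the .FEATURES,
# .CLASSES, and .adam attributes that trainingStep requires. The step count
# and difficulty range here are illustrative.
net = ClassifierGenerator(2, 4, 384).cuda()
for step in range(20000):
    err = trainingStep(net, NTRAIN=100, min_difficulty=0.25, max_difficulty=4.0)
    if step % 100 == 0:
        print(step, err)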
Example No. 2
def timeMethodsOnProblem(methods,
                         classes,
                         features,
                         sigma,
                         N=100,
                         samples=20,
                         NTEST=400):
    alltrain_x = []
    alltrain_y = []

    alltest_x = []
    alltest_y = []

    for i in range(samples):
        data_x, data_y = problemGenerator(N + NTEST, classes, features, sigma)
        data_y = np.argmax(data_y, axis=1)

        # Make sure the first `classes` rows contain at least one example of
        # every class, so the training split covers all classes: swap the
        # first later occurrence of class j into position j.
        for j in range(classes):
            k = np.where(data_y[classes:] == j)[0][0] + classes
            data_x[[j, k]] = data_x[[k, j]]
            data_y[[j, k]] = data_y[[k, j]]

        # Standardize features using statistics of the full sample.
        mu = np.mean(data_x, axis=0, keepdims=True)
        std = np.std(data_x, axis=0, keepdims=True) + 1e-16

        train_x = (data_x[0:N] - mu) / std
        test_x = (data_x[N:] - mu) / std

        train_y = data_y[0:N]
        test_y = data_y[N:]

        alltrain_x.append(train_x)
        alltrain_y.append(train_y)
        alltest_x.append(test_x)
        alltest_y.append(test_y)

    # Only wall-clock time is reported; the classifier outputs are discarded.
    t0 = time.time()
    results = [
        evalClassifier(m, alltrain_x, alltrain_y, alltest_x, alltest_y)
        for m in methods
    ]
    t1 = time.time()

    return t1 - t0
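
# Usage sketch (not from the original source): evalClassifier and the method
# objects are assumed to follow whatever interface the original repository
# uses; the scikit-learn classifiers below are illustrative stand-ins.
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC

elapsed = timeMethodsOnProblem([KNeighborsClassifier(), SVC()],
                               classes=4, features=2, sigma=1.0)
print("Evaluated all methods in %.2f s" % elapsed)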
Example No. 3
def trainingStep(net,
                 NTRAIN,
                 min_difficulty=1.0,
                 max_difficulty=1.0,
                 min_sparseness=0,
                 max_sparseness=0,
                 min_imbalance=0,
                 max_imbalance=0,
                 feature_variation=True,
                 class_variation=True,
                 BS=20):
    FEATURES = net.FEATURES
    CLASSES = net.CLASSES

    net.zero_grad()
    # Gradients accumulate across all BS problems; one optimizer step at the end.
    err = []

    for i in range(BS):
        # Optionally vary the raw feature count per problem.
        if feature_variation:
            feat = np.random.randint(int(2.5 * FEATURES)) + FEATURES // 2
        else:
            feat = FEATURES

        # Optionally vary the class count per problem.
        if class_variation:
            classes = np.random.randint(CLASSES - 2) + 2
        else:
            classes = CLASSES

        xd, yd = problemGenerator(
            N=NTRAIN + 100,
            FEATURES=feat,
            CLASSES=classes,
            sigma=np.random.rand() * (max_difficulty - min_difficulty) +
            min_difficulty,
            sparseness=np.random.rand() * (max_sparseness - min_sparseness) +
            min_sparseness,
            imbalance=np.random.rand() * (max_imbalance - min_imbalance) +
            min_imbalance)

        # Pad one-hot labels out to the network's fixed class dimension.
        if classes < CLASSES:
            yd = np.pad(yd, ((0, 0), (0, CLASSES - classes)),
                        'constant',
                        constant_values=0)
        xd = normalizeAndProject(xd, NTRAIN, FEATURES)

        # Convert one-hot labels to class indices.
        yd = np.argmax(yd, axis=1)

        trainset_x = tovar(xd[0:NTRAIN])
        trainset_y = toivar(yd[0:NTRAIN])
        testset = tovar(xd[NTRAIN:])
        labelset = toivar(yd[NTRAIN:])

        # Each generated problem holds out 100 test points (N = NTRAIN + 100).
        idx = torch.arange(100).cuda().long()

        p = net.fullpass(trainset_x, trainset_y, testset, classes)
        # p holds per-class log-probabilities; take the mean NLL of the true labels.
        loss = -torch.mean(p[idx, labelset[idx]])
        loss.backward()
        err.append(loss.item())

    net.adam.step()

    return np.mean(err)
# Load three pretrained classifier-generator checkpoints; the filenames
# suggest one trained at N=100, one at difficulty 4, and one general model.
net2_4_100_1 = ClassifierGenerator(2, 4, 384).cuda()
net2_4_100_1.load_state_dict(
    torch.load("models/classifier-generator-2-4-N100.pth"))

net2_4_100_4 = ClassifierGenerator(2, 4, 384).cuda()
net2_4_100_4.load_state_dict(
    torch.load("models/classifier-generator-2-4-diff4.pth"))

net2_4_gen = ClassifierGenerator(2, 4, 384).cuda()
net2_4_gen.load_state_dict(
    torch.load("models/classifier-generator-2-4-general.pth"))

# Fix the seeds so the benchmark problems below are reproducible.
np.random.seed(12345)
torch.manual_seed(12345)

# Two reference problems: lower difficulty (sigma=0.25) and higher (sigma=1).
xd1, yd1 = problemGenerator(100, CLASSES=4, FEATURES=2, sigma=0.25)
xd2, yd2 = problemGenerator(100, CLASSES=4, FEATURES=2, sigma=1)
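
# Inference sketch (not from the original source): run one generated problem
# through a loaded network via the fullpass interface used in trainingStep
# above. The 50/50 support/query split, and reusing normalizeAndProject with
# NTRAIN=50, are illustrative assumptions.
xs = normalizeAndProject(xd1, 50, 2)
ls = np.argmax(yd1, axis=1)
p = net2_4_gen.fullpass(tovar(xs[:50]), toivar(ls[:50]), tovar(xs[50:]), 4)
pred = p.argmax(dim=1).cpu().numpy()
print("query accuracy:", np.mean(pred == ls[50:]))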


def rollGenerator(N, CLASSES):
    # One-hot encode a uniformly random class label for each point.
    yl = np.random.randint(CLASSES, size=(N, ))
    y = np.zeros((N, CLASSES))
    y[np.arange(N), yl] = 1

    u = np.random.rand(N)
    v = np.random.randn(N, 2)

    # Spiral ("roll") geometry: the radius grows with u, and the angle offsets
    # each class by an equal sector while winding three sectors as u varies.
    r = 0.5 + 2.5 * u
    theta = (2 * pi / CLASSES) * yl + 3 * (2 * pi / CLASSES) * u

    x = np.array([r * np.cos(theta), r * np.sin(theta)]).transpose(1, 0)