import numpy as np
import torch
import matplotlib.pyplot as plt

# NOTE: problemGenerator and normalizeAndProject are repo-specific utilities
# assumed to be importable alongside these examples; they are not defined in
# this listing.


def trainingStep(net,
                 NTRAIN,
                 min_difficulty=1.0,
                 max_difficulty=1.0,
                 min_sparseness=0,
                 max_sparseness=0,
                 min_imbalance=0,
                 max_imbalance=0,
                 feature_variation=True,
                 class_variation=True,
                 BS=200):
    FEATURES = net.FEATURES
    CLASSES = net.CLASSES

    net.zero_grad()
    batch_mem = []
    batch_test = []
    batch_label = []
    class_count = []

    for i in range(BS):
        # Randomly vary the number of input features per generated problem
        if feature_variation:
            feat = np.random.randint(int(2.5 * FEATURES)) + FEATURES // 2
        else:
            feat = FEATURES

        # Randomly vary the number of classes (always at least 2)
        if class_variation:
            classes = np.random.randint(CLASSES - 2) + 2
        else:
            classes = CLASSES

        xd, yd = problemGenerator(
            N=NTRAIN + 100,
            FEATURES=feat,
            CLASSES=classes,
            sigma=np.random.rand() * (max_difficulty - min_difficulty) + min_difficulty,
            sparseness=np.random.rand() * (max_sparseness - min_sparseness) + min_sparseness,
            imbalance=np.random.rand() * (max_imbalance - min_imbalance) + min_imbalance)

        # Pad one-hot labels out to the network's fixed class dimension
        if classes < CLASSES:
            yd = np.pad(yd, ((0, 0), (0, CLASSES - classes)),
                        'constant', constant_values=0)
        xd = normalizeAndProject(xd, NTRAIN, FEATURES)

        # First NTRAIN rows form the memory set (features + labels);
        # the remaining 100 rows are the query set
        trainset = np.hstack([xd[0:NTRAIN], yd[0:NTRAIN]])
        testset = xd[NTRAIN:]
        labelset = yd[NTRAIN:]

        batch_mem.append(trainset)
        batch_test.append(testset)
        batch_label.append(labelset)
        class_count.append(classes)

    batch_mem = tovar(
        np.array(batch_mem).transpose(0, 2, 1).reshape(BS, 1, FEATURES + CLASSES, NTRAIN))
    batch_test = tovar(
        np.array(batch_test).transpose(0, 2, 1).reshape(BS, 1, FEATURES, 100))
    batch_label = tovar(np.array(batch_label).transpose(0, 2, 1))
    class_count = torch.cuda.FloatTensor(np.array(class_count))

    p = net.forward(batch_mem, batch_test, class_count)
    # NLL against one-hot labels; p is assumed to hold log-probabilities
    loss = -torch.sum(p * batch_label, 1).mean()
    loss.backward()
    net.adam.step()
    err = loss.item()

    return err
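
# Hypothetical sketch: tovar/toivar are called throughout this listing but not
# defined in it. Assuming the repo's convention of CUDA float/long tensors,
# minimal versions could look like this; the real helpers may differ.
def tovar(x):
    # float32 tensor on the GPU
    return torch.as_tensor(np.asarray(x), dtype=torch.float32).cuda()


def toivar(x):
    # int64 label tensor on the GPU, usable for indexing
    return torch.as_tensor(np.asarray(x), dtype=torch.long).cuda()
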
def trainingStep(net, NTRAIN, NTEST, data_x, data_y, BS=200):
    FEATURES = net.FEATURES
    CLASSES = net.CLASSES

    net.zero_grad()
    err = []

    for i in range(BS):
        j = np.random.randint(len(data_x))
        feat = data_x[j].shape[1]
        classes = np.unique(data_y[j]).shape[0]

        xd = data_x[j].copy()

        # Data augmentation: permute feature order and shuffle class identities
        f_idx = np.random.permutation(feat)
        xd = xd[:, f_idx]

        c_idx = np.random.permutation(classes)
        yd = np.zeros((data_y[j].shape[0], classes))
        # One-hot encode labels after remapping each class through c_idx
        yd[np.arange(data_y[j].shape[0]), c_idx[data_y[j]]] = 1

        idx = np.random.permutation(xd.shape[0])
        xd = xd[idx]
        yd = yd[idx]

        if classes < CLASSES:
            yd = np.pad(yd, ((0, 0), (0, CLASSES - classes)),
                        'constant',
                        constant_values=0)
        xd = normalizeAndProject(xd, NTRAIN, FEATURES)
        yd = np.argmax(yd, axis=1)

        trainset_x = tovar(xd[0:NTRAIN])
        trainset_y = toivar(yd[0:NTRAIN])
        testset = tovar(xd[NTRAIN:NTRAIN + NTEST])
        labelset = toivar(yd[NTRAIN:NTRAIN + NTEST])

        idx = torch.arange(NTEST).cuda().long()

        p = net.fullpass(trainset_x, trainset_y, testset, classes)
        # NLL of the true class for each of the NTEST query points
        loss = -torch.mean(p[idx, labelset[idx]])
        loss.backward()
        err.append(loss.cpu().detach().item())

    # Single optimizer step on gradients accumulated over the BS sub-problems
    net.adam.step()

    return np.mean(err)
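
# Usage sketch (hypothetical values): data_x is a list of (n, d) float arrays,
# data_y a list of matching integer label vectors; net must expose FEATURES,
# CLASSES, an `adam` optimizer, and fullpass(mem_x, mem_y, query_x, classes).
#
#   data_x = [np.random.randn(300, 10) for _ in range(5)]
#   data_y = [np.random.randint(0, 3, size=300) for _ in range(5)]
#   for step in range(1000):
#       err = trainingStep(net, NTRAIN=100, NTEST=100,
#                          data_x=data_x, data_y=data_y, BS=20)
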
def plotDecisionBoundary(x, y, net):
    # Standardize features, then render the net's 4-class predictions over a
    # dense 2-D grid as an RGB blend of per-class colors
    x = (x - np.mean(x, axis=0, keepdims=True)) / (
        1e-16 + np.std(x, axis=0, keepdims=True))

    x = x.reshape((1, 1, x.shape[0], x.shape[1])).transpose(0, 1, 3, 2)
    y = y.reshape((1, 1, y.shape[0], y.shape[1])).transpose(0, 1, 3, 2)

    trainset = np.concatenate([x, y], axis=2)

    # Dense evaluation grid over [-3, 3]^2
    xx, yy = np.meshgrid(np.arange(-3.0, 3.05, 0.05),
                         np.arange(-3.0, 3.05, 0.05))
    XR = xx.shape[0]

    xx = xx.reshape((1, 1, 1, XR * XR))
    yy = yy.reshape((1, 1, 1, XR * XR))

    testset = np.concatenate([xx, yy], axis=2)

    # Class count of 4 is hard-coded; net.forward returns log-probabilities
    p = np.exp(
        net.forward(tovar(trainset), tovar(testset),
                    torch.cuda.FloatTensor(np.array([4]))).cpu().data.numpy())

    p = p.reshape((4, XR, XR)).transpose(1, 2, 0)
    xx = xx.reshape((XR, XR))
    yy = yy.reshape((XR, XR))

    colors = np.array([[0.7, 0.2, 0.2], [0.2, 0.7, 0.2], [0.2, 0.2, 0.7],
                       [0.7, 0.2, 0.7]])

    im = np.zeros((XR, XR, 3))
    for j in range(4):
        im += p[:, :, j].reshape((XR, XR, 1)) * np.array(colors[j]).reshape(
            (1, 1, 3))

    yl = np.argmax(y, axis=2)[0, 0]

    plt.imshow(im, extent=[-3, 3, 3, -3])
    for j in range(4):
        plt.scatter(x[0, 0, 0, yl == j],
                    x[0, 0, 1, yl == j],
                    c=colors[j],
                    edgecolors='k',
                    lw=1,
                    s=10)

    plt.xticks([])
    plt.yticks([])
    plt.xlim(-3, 3)
    plt.ylim(-3, 3)
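
# Usage sketch (hypothetical data): x is (N, 2) raw features, y is (N, 4)
# one-hot labels; the class count of 4 is hard-coded inside the function.
#
#   x = np.random.randn(200, 2)
#   y = np.eye(4)[np.random.randint(0, 4, size=200)]
#   plotDecisionBoundary(x, y, net)
#   plt.show()
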
def trainingStep(net,
                 NTRAIN,
                 min_difficulty=1.0,
                 max_difficulty=1.0,
                 min_sparseness=0,
                 max_sparseness=0,
                 min_imbalance=0,
                 max_imbalance=0,
                 feature_variation=True,
                 class_variation=True,
                 BS=20):
    FEATURES = net.FEATURES
    CLASSES = net.CLASSES

    net.zero_grad()
    err = []

    for i in range(BS):
        if feature_variation:
            feat = np.random.randint(int(2.5 * FEATURES)) + FEATURES // 2
        else:
            feat = FEATURES

        if class_variation:
            classes = np.random.randint(CLASSES - 2) + 2
        else:
            classes = CLASSES

        xd, yd = problemGenerator(
            N=NTRAIN + 100,
            FEATURES=feat,
            CLASSES=classes,
            sigma=np.random.rand() * (max_difficulty - min_difficulty) +
            min_difficulty,
            sparseness=np.random.rand() * (max_sparseness - min_sparseness) +
            min_sparseness,
            imbalance=np.random.rand() * (max_imbalance - min_imbalance) +
            min_imbalance)

        if classes < CLASSES:
            yd = np.pad(yd, ((0, 0), (0, CLASSES - classes)),
                        'constant',
                        constant_values=0)
        xd = normalizeAndProject(xd, NTRAIN, FEATURES)

        yd = np.argmax(yd, axis=1)

        trainset_x = tovar(xd[0:NTRAIN])
        trainset_y = toivar(yd[0:NTRAIN])
        testset = tovar(xd[NTRAIN:])
        labelset = toivar(yd[NTRAIN:])

        # problemGenerator produced NTRAIN + 100 points; the last 100 are queries
        idx = torch.arange(100).cuda().long()

        p = net.fullpass(trainset_x, trainset_y, testset, classes)
        loss = -torch.mean(p[idx, labelset[idx]])
        loss.backward()
        err.append(loss.cpu().detach().item())

    net.adam.step()

    return np.mean(err)
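
# Usage sketch (illustrative ranges, not from the source): anneal problem
# difficulty upward over training with the generator-based step above.
#
#   for step in range(10000):
#       frac = step / 10000.0
#       err = trainingStep(net, NTRAIN=100,
#                          min_difficulty=1.0,
#                          max_difficulty=1.0 + 2.0 * frac,
#                          max_sparseness=0.5 * frac,
#                          max_imbalance=0.5 * frac)
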
def trainingStep(net, NTRAIN, NTEST, data_x, data_y, BS=200):
    FEATURES = net.FEATURES
    CLASSES = net.CLASSES

    net.zero_grad()
    batch_mem = []
    batch_test = []
    batch_label = []
    class_count = []

    for i in range(BS):
        j = np.random.randint(len(data_x))
        feat = data_x[j].shape[1]
        classes = np.unique(data_y[j]).shape[0]

        xd = data_x[j].copy()

        # Data augmentation: permute feature order and shuffle class identities
        f_idx = np.random.permutation(feat)
        xd = xd[:, f_idx]

        c_idx = np.random.permutation(classes)
        yd = np.zeros((data_y[j].shape[0], classes))
        # One-hot encode labels after remapping each class through c_idx
        yd[np.arange(data_y[j].shape[0]), c_idx[data_y[j]]] = 1

        idx = np.random.permutation(xd.shape[0])
        xd = xd[idx]
        yd = yd[idx]

        if classes < CLASSES:
            yd = np.pad(yd, ((0, 0), (0, CLASSES - classes)),
                        'constant',
                        constant_values=0)
        xd = normalizeAndProject(xd, NTRAIN, FEATURES)

        trainset = np.hstack([xd[0:NTRAIN], yd[0:NTRAIN]])
        testset = xd[NTRAIN:NTRAIN + NTEST]
        labelset = yd[NTRAIN:NTRAIN + NTEST]

        batch_mem.append(trainset)
        batch_test.append(testset)
        batch_label.append(labelset)
        class_count.append(classes)

    batch_mem = tovar(
        np.array(batch_mem).transpose(0, 2,
                                      1).reshape(BS, 1, FEATURES + CLASSES,
                                                 NTRAIN))
    batch_test = tovar(
        np.array(batch_test).transpose(0, 2,
                                       1).reshape(BS, 1, FEATURES, NTEST))
    batch_label = tovar(np.array(batch_label).transpose(0, 2, 1))
    class_count = torch.cuda.FloatTensor(np.array(class_count))

    p = net.forward(batch_mem, batch_test, class_count)
    loss = -torch.sum(p * batch_label, 1).mean()
    loss.backward()
    net.adam.step()
    err = loss.item()

    return err
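
# The batched loss above is negative log-likelihood written against one-hot
# labels: with log-probabilities p, -sum(p * onehot, dim=1).mean() matches
# F.nll_loss(p, labels). A small self-contained check (CPU; shapes are
# illustrative, not from the source):
import torch.nn.functional as F

logp = F.log_softmax(torch.randn(8, 4), dim=1)
labels = torch.randint(0, 4, (8,))
onehot = F.one_hot(labels, num_classes=4).float()
assert torch.allclose(-(logp * onehot).sum(1).mean(), F.nll_loss(logp, labels))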