Example #1
 def __init__(self):
     super(SimplerCNN, self).__init__()
     self.dropout2d_input = nn.Dropout2d(rate=0.3)
     self.conv1 = nn.Conv2d(in_channels=3,
                            out_channels=15,
                            kernel_size=3,
                            stride=3,
                            padding=2)
     self.relu1 = nn.LeakyRelu()
     self.conv2 = nn.Conv2d(in_channels=15,
                            out_channels=30,
                            kernel_size=3,
                            stride=3,
                            padding=3)
     self.relu2 = nn.LeakyRelu()
     self.dropout2d_conv1 = nn.Dropout2d(rate=0.5)
     self.conv3 = nn.Conv2d(in_channels=30, out_channels=40, kernel_size=4)
     self.relu3 = nn.LeakyRelu()
     self.flatten = nn.Flatten()
     self.dropout2d_conv2 = nn.Dropout2d(rate=0.2)
     self.linear = nn.Linear(in_dimension=360, out_dimension=180)
     self.relu4 = nn.LeakyRelu()
     self.bn1 = nn.BatchNorm()
     self.dropout3 = nn.Dropout(rate=0.3)
     self.linear2 = nn.Linear(in_dimension=180, out_dimension=10)
     self.bn2 = nn.BatchNorm()
     self.softmax = nn.Softmax()
     self.set_forward()
Example #2
 def __init__(self):
     super(NN, self).__init__()
     self.linear1 = nn.Linear(in_dimension=3072, out_dimension=256)
     self.relu1 = nn.LeakyRelu()
     self.dropout1 = nn.Dropout(rate=0.3)
     self.linear2 = nn.Linear(in_dimension=256, out_dimension=10)
     self.softmax = nn.Softmax()
     self.set_forward()
Example #3
 def __init__(self):
     super(LinearNet, self).__init__([
         lrp_module.Reshape(28, 28, 1),
         lrp_module.Linear(28 * 28, 1296),
         lrp_module.ReLU(),
         lrp_module.Linear(1296, 1296),
         lrp_module.ReLU(),
         lrp_module.Linear(1296, 1296),
         lrp_module.ReLU(),
         lrp_module.Linear(1296, 10)
     ])
     self.outputLayers = [0, 3, 5, 7, 8]
Example #4
    def __init__(self, in_features, hidden_features, num_layers, num_classes):
        super(MLPClassifier, self).__init__()

        net = []
        net.append(modules.Linear(in_features, hidden_features))
        net.append(modules.BatchRegularization(hidden_features))
        net.append(modules.CPReLU(hidden_features))
        for i in range(num_layers - 1):
            net.append(
                modules.LinearResidueBlock(hidden_features,
                                           residue_ratio=1 / (i + 2),
                                           batch_reg=True))
        net.append(modules.Linear(hidden_features, num_classes))
        self.net = nn.Sequential(*net)
Example #5
def testNN2(runs=1,
            width=3,
            data="iris.txt",
            iters=20000,
            std=True,
            trainPct=0.666):
    if data == 'gauss':
        X, y = multimodalData(numModes=4, numPerMode=30)
        # X, y = sklearn.datasets.make_classification()
        XY = np.asarray(np.hstack([X, y.reshape((X.shape[0], 1))]))
    else:
        XY = data_io.read(data)
    nclass = len(set(XY[:, -1]))  # number of classes

    # y has nclass classes (0, ..., nclass-1)
    def unary(yi):
        return [(1 if i == yi else 0) for i in range(nclass)]

    # build a network
    u = width
    nn = modules.Sequential([
        modules.Linear(XY.shape[1] - 1, u),
        modules.Tanh(),
        modules.Linear(u, u),
        modules.Tanh(),
        modules.Linear(u, nclass),
        modules.SoftMax()
    ])
    results = {False: [], True: []}
    for run in range(runs):
        Xtrain, ytrain, Xtest, ytest = splitByClass(XY, trainPct)
        # Map into n softmax outputs
        Ytrain = np.array([unary(yi) for yi in ytrain])
        for rms in (False, True):
            # train the network.
            nn.clean()
            nn.train2(np.asarray(Xtrain),
                      np.asarray(Ytrain),
                      batchsize=1,
                      iters=iters,
                      lrate_decay_step=1000,
                      rms=rms,
                      momentum=(0.9 if rms else None))
            errors = predictionErrors(nn, Xtest, ytest)
            accuracy = 1.0 - (float(errors) / Xtest.shape[0])
            print('RMS', rms, 'Prediction accuracy', accuracy)
            results[rms].append(accuracy)
    print('Results', results)
    print('Average accuracy', 'rms=False', sum(results[False]) / runs,
          'rms=True', sum(results[True]) / runs)
Example #6
    def test_updateWeights(self):
        in_features = 3
        out_features = 2

        x = FloatTensor([[1, 2, 3], [3, 2, -1]])
        A = FloatTensor([[1, 2, 3], [4, 5, 6]])
        b = FloatTensor([[1, 2]])

        expected_output = FloatTensor([[15, 34], [5, 18]])

        m = M.Linear(in_features, out_features)
        m.weights = A
        m.bias = b
        m.forward(x)

        grad = FloatTensor(([1, 4], [-1, -1]))
        output = m.backward(grad)

        expected_gradient = grad.transpose(0, 1).mm(x)

        eta = 0.1
        m.updateWeights(eta)

        new_value = A - eta * expected_gradient
        if not areEqual(m.weights, new_value):
            return 1

        new_value = b - eta * FloatTensor(np.sum(grad.numpy(), axis=0))
        if not areEqual(m.bias, new_value):
            return 1

        return 0
Example #7
 def __init__(self):
     super(ConvNet, self).__init__([
         lrp_module.Conv2d(1, 6, 5),
         lrp_module.ReLU(),
         lrp_module.MaxPool2d(2, 2),
         lrp_module.Conv2d(6, 16, 5),
         lrp_module.ReLU(),
         lrp_module.MaxPool2d(2, 2),
         lrp_module.Reshape(4, 4, 16),
         lrp_module.Linear(4 * 4 * 16, 120),
         lrp_module.ReLU(),
         lrp_module.Linear(120, 100),
         lrp_module.ReLU(),
         lrp_module.Linear(100, 10)
     ])
     self.outputLayers = [0, 2, 3, 5, 6, 9, 11, 12]
Example #8
        def conv_layer(x):
            """
            The derivative check in the gradient checker relates to the input of the function;
            hence, the input should be z, since the backward step computes @loss / @z.
            """

            conv1 = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=2)
            relu1 = nn.Relu()
            conv2 = nn.Conv2d(in_channels=2, out_channels=4, kernel_size=2)
            relu2 = nn.Relu()
            flatten = nn.Flatten()
            linear = nn.Linear(4, 2)
            softmax = nn.Softmax()

            # forward pass
            a = relu1(conv1(x))
            a = relu2(conv2(a))
            a_flatten = flatten(a)
            dist = softmax(linear(a_flatten))

            # backward
            labels = np.zeros(dist.shape)
            labels[:, 1] = 1
            loss = -np.log(np.sum(dist * labels, axis=1))

            softmax_grad = softmax.backward(labels)
            linear_grad = linear.backward(softmax_grad)
            flatten_grad = flatten.backward(linear_grad)
            relu2_grad = relu2.backward(flatten_grad)
            conv2_grad = conv2.backward(relu2_grad)
            relu1_grad = relu1.backward(conv2_grad)
            conv1_grad = conv1.backward(relu1_grad)

            return loss, conv1_grad
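
The docstring above spells out the contract the gradient checker relies on: each wrapped function takes the quantity being perturbed as its argument and returns (loss, analytic_grad). A minimal central-difference check along those lines might look like the sketch below; the helper name numerical_gradient_check, the tolerances, and the assumption that analytic_grad is the gradient of the summed per-sample loss with respect to the function's argument are illustrative, not taken from the original test suite.

import numpy as np

def numerical_gradient_check(f, x0, eps=1e-5, tol=1e-4):
    # f maps an array to (loss, analytic_grad); loss may be per-sample,
    # so the analytic gradient is compared against d(sum(loss))/dx0.
    _, analytic = f(x0)
    numeric = np.zeros_like(x0, dtype=float)
    for idx in np.ndindex(x0.shape):
        orig = x0[idx]
        x0[idx] = orig + eps
        loss_plus, _ = f(x0)
        x0[idx] = orig - eps
        loss_minus, _ = f(x0)
        x0[idx] = orig  # restore the perturbed entry
        numeric[idx] = (np.sum(loss_plus) - np.sum(loss_minus)) / (2 * eps)
    return np.max(np.abs(analytic - numeric)) < tol

# illustrative usage: numerical_gradient_check(conv_layer, x.astype(float))
# note: f must be deterministic across calls (fixed layer weights) for the
# finite-difference comparison to be meaningful.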
Example #9
        def conv(b):
            """
            The derivative check in the gradient checker relates to the input of the function;
            hence, the input should be z, since the backward step computes @loss / @z.
            """

            # simulate end of classification
            conv = nn.Conv2d(in_channels=1, out_channels=3, kernel_size=2)
            relu = nn.Relu()
            flatten = nn.Flatten()
            linear = nn.Linear(in_dimension=12, out_dimension=4)
            softmax = nn.Softmax()

            conv.set_biases(b.reshape(3, 1))

            # forward
            a = flatten(relu(conv(x)))
            dist = softmax(linear(a))

            # backward
            labels = np.zeros(dist.shape)
            labels[:, 1] = 1
            loss = -np.log(np.sum(dist * labels, axis=1))

            softmax_grad = softmax.backward(labels)
            linear_grad = linear.backward(softmax_grad)
            flatten_grad = flatten.backward(linear_grad)
            relu_grad = relu.backward(flatten_grad)
            conv_grad = conv.backward(relu_grad)

            b_grad = conv.b_grad

            return loss, b_grad
Example #10
    def test_resetGradient(self):
        in_features = 3
        out_features = 2

        x = FloatTensor([[1, 2, 3], [3, 2, -1]])
        A = FloatTensor([[1, 2, 3], [4, 5, 6]])
        b = FloatTensor([[1, 2]])

        expected_output = FloatTensor([[15, 34], [5, 18]])

        m = M.Linear(in_features, out_features)
        m.weights = A
        m.bias = b
        m.forward(x)

        grad = FloatTensor(([1, 4], [-1, -1]))
        output = m.backward(grad)

        m.resetGradient()

        if torch.max(torch.abs(m.weights_grad)) != 0 or \
           torch.max(torch.abs(m.bias_grad)) != 0:
            return 1

        return 0
Example #11
    def __init__(self,
                 num_phn=hp.num_phn,
                 encoder_dim=hp.encoder_dim,
                 encoder_n_layer=hp.encoder_n_layer,
                 kernel_size=hp.kernel_size,
                 stride=hp.stride,
                 padding=hp.padding,
                 decoder_dim=hp.decoder_dim,
                 decoder_n_layer=hp.decoder_n_layer,
                 dropout=hp.dropout):
        super(DurIAN, self).__init__()

        self.n_step = hp.n_frames_per_step
        self.embedding = nn.Embedding(num_phn, encoder_dim)

        self.conv_banks = nn.ModuleList([
            modules.BatchNormConv1d(encoder_dim,
                                    encoder_dim,
                                    kernel_size,
                                    stride,
                                    padding,
                                    activation=nn.ReLU(),
                                    w_init_gain="relu") for _ in range(3)
        ])
        self.encoder = nn.GRU(encoder_dim,
                              encoder_dim // 2,
                              encoder_n_layer,
                              batch_first=True,
                              bidirectional=True)

        self.length_regulator = modules.LengthRegulator()

        self.prenet = modules.Prenet(hp.num_mels, hp.prenet_dim, hp.prenet_dim)

        self.decoder = nn.GRU(decoder_dim,
                              decoder_dim,
                              decoder_n_layer,
                              batch_first=True,
                              bidirectional=False)

        self.mel_linear = modules.Linear(decoder_dim,
                                         hp.num_mels * self.n_step)
        self.postnet = modules.CBHG(hp.num_mels,
                                    K=8,
                                    projections=[256, hp.num_mels])
        self.last_linear = modules.Linear(hp.num_mels * 2, hp.num_mels)
Example #12
def dense(layer):
    m = layer.input_shape[-1]
    n = layer.output_shape[-1]
    module = modules.Linear(m, n)
    W, B = layer.get_weights()
    module.W = W
    module.B = B
    activation_module = get_activation_lrpmodule(layer.activation)
    return module, activation_module
Example #13
    def test_backwardBeforeForward(self):
        in_features = 1
        out_features = 10

        m = M.Linear(in_features, out_features)
        try:
            m.backward(FloatTensor([[1]]))
        except ValueError:
            return 0

        return 1
Example #14
    def test_forwardWrongInputDimension(self):
        in_features = 3
        out_features = 10

        m = M.Linear(in_features, out_features)
        try:
            m.forward(FloatTensor([[1, 2]]))
        except ValueError:
            return 0

        return 1
Example #15
    def test_backwardWrongInputDimension(self):
        in_features = 1
        out_features = 10

        m = M.Linear(in_features, out_features)
        m.forward(FloatTensor([[1]]))
        try:
            m.backward(FloatTensor([[1, 2], [3, 4]]),
                       FloatTensor([[1, 2, 3], [3, 4, 1]]))
        except ValueError:
            return 0

        return 1
Example #16
    def test_linear_module_1(self):
        x = np.array([[1, 2, 33, 15]])
        w = np.array([[0., 1., 0., 0.], [0., 2., 2., 0.]])
        b = np.zeros(2)
        expected_res = np.array([[2., 70.]])
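        # expected_res = x @ w.T + b with b = 0: [0+2+0+0, 0+4+66+0] = [2, 70]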

        linear_layer = nn.Linear(4, 2)
        linear_layer.set_weights(w)
        linear_layer.set_biases(b)

        z = linear_layer(x)

        np.testing.assert_allclose(z, expected_res, atol=0.0001)
Example #17
 def __init__(self,
              w_in,
              h_in,
              num_features,
              blocks,
              adain_features,
              adain_blocks,
              num_classes=-1,
              in_features=4):
     super(ClassifierOrDiscriminator,
           self).__init__(w_in,
                          h_in,
                          num_features,
                          blocks,
                          adain_features,
                          adain_blocks,
                          batch_reg=True,
                          in_features=in_features)
     self.num_classes = num_classes
     if num_classes > 0:
         self.cla = modules.Linear(self.out_features, num_classes)
     else:
         self.dis = modules.Linear(self.out_features, 1)
Example #18
    def test_linear_module_softmax_1(self):
        x = np.array([[1, 2, 0, -1], [1, 2, -1, -2]])
        w = np.array([[0., 1., 0., 0.], [0., 2., 2., 0.]])
        b = np.zeros(2)
        expected_res = np.array([[.119202, .88079], [.5, .5]])
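        # z = x @ w.T = [[2, 4], [2, 2]]; row-wise softmax gives [[.1192, .8808], [.5, .5]]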

        linear = nn.Linear(4, 2)
        softmax = nn.Softmax()
        linear.set_weights(w)
        linear.set_biases(b)

        z = linear(x)
        a = softmax(z)

        np.testing.assert_allclose(a, expected_res, atol=0.0001)
Example #19
    def test_linear_module_relu_2(self):
        x = np.array([[1, 2, 0, -1], [1, 2, -1, -2]])
        w = np.array([[0., 1., 0., 0.], [0., 2., 2., 0.]])
        b = np.array([-5., -1.])
        expected_res = np.array([[0., 3.], [0., 1.]])
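        # z = x @ w.T + b = [[-3, 3], [-3, 1]]; ReLU clamps negatives, giving [[0, 3], [0, 1]]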

        linear_layer = nn.Linear(4, 2)
        linear_layer.set_weights(w)
        linear_layer.set_biases(b)

        relu_layer = nn.Relu()
        z = linear_layer(x)
        a = relu_layer(z)

        np.testing.assert_allclose(a, expected_res, atol=0.0001)
Example #20
    def test_forwardCorrectOutput(self):
        in_features = 3
        out_features = 2

        x = FloatTensor([[1, 2, 3], [3, 2, -1]])
        A = FloatTensor([[1, 2, 3], [4, 5, 6]])
        b = FloatTensor([[1, 2]])

        expected_output = FloatTensor([[15, 34], [5, 18]])
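        # expected_output = x @ A.T + b: [[14+1, 32+2], [4+1, 16+2]] = [[15, 34], [5, 18]]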

        m = M.Linear(in_features, out_features)
        m.weights = A
        m.bias = b
        output = m.forward(x)
        if not areEqual(expected_output, output):
            return 1

        return 0
Example #21
        def flatten(x):

            flatten_ = nn.Flatten()
            linear = nn.Linear(in_dimension=48, out_dimension=4)
            softmax = nn.Softmax()

            # forward
            flatten_x = flatten_(x)
            dist = softmax(linear(flatten_x))

            # backward
            labels = np.zeros(dist.shape)
            labels[:, 1] = 1
            loss = -np.log(np.sum(dist * labels, axis=1))

            softmax_grad = softmax.backward(labels)
            linear_grad = linear.backward(softmax_grad)
            flatten_grad = flatten_.backward(linear_grad)
            return loss, flatten_grad
Example #22
 def __init__(self):
     super(SimpleCNN, self).__init__()
     self.conv1 = nn.Conv2d(in_channels=3,
                            out_channels=6,
                            kernel_size=3,
                            stride=3,
                            padding=2)
     self.tanh1 = nn.Tanh()
     self.conv2 = nn.Conv2d(in_channels=6,
                            out_channels=10,
                            kernel_size=3,
                            stride=3,
                            padding=3)
     self.tanh2 = nn.Tanh()
     self.dropout2d = nn.Dropout2d(rate=0.5)
     self.flatten = nn.Flatten()
     self.linear = nn.Linear(in_dimension=360, out_dimension=10)
     self.softmax = nn.Softmax()
     self.set_forward()
Example #23
    def test_constructor(self):
        in_features = 4
        out_features = 10

        m = M.Linear(in_features, out_features)

        if m.weights.size() != m.weights_grad.size():
            return 1

        if m.bias.size() != m.bias_grad.size():
            return 1

        if m.weights.size() != (out_features, in_features):
            return 1

        if m.bias.size() != (1, out_features):
            return 1

        return 0
Example #24
        def linear_layer(z):
            """
            The derivative check in the gradient checker relates to the input of the function;
            hence, the input should be z, since the backward step computes @loss / @z.
            """

            # simulate end of classification
            relu_layer = nn.Relu()
            linear = nn.Linear(in_dimension=2, out_dimension=5)
            softmax = nn.Softmax()

            a_L_mins_1 = relu_layer(z)
            z_L = linear(a_L_mins_1)
            a_L = softmax(z_L)

            labels = np.zeros(a_L.shape)
            labels[:, 1] = 1
            loss = -np.log(np.sum(a_L * labels, axis=1))

            softmax_grad = softmax.backward(labels)
            layer_L_grad = linear.backward(softmax_grad)
            relu_grad = relu_layer.backward(layer_L_grad)

            return loss, relu_grad
Example #25
        def linear_layer(b):
            """
            The derivative check in the gradient checker relates to the input of the function;
            hence, the input should be z, since the backward step computes @loss / @z.
            """

            # simulate end of classification
            linear = nn.Linear(in_dimension=3, out_dimension=2)
            linear.set_biases(b)
            softmax = nn.Softmax()

            # forward
            dist = softmax(linear(z))

            # backward
            labels = np.zeros(dist.shape)
            labels[:, 1] = 1
            loss = -np.log(np.sum(dist * labels, axis=1))

            softmax_grad = softmax.backward(labels)
            linear_grad = linear.backward(softmax_grad)
            b_grad = linear.b_grad

            return loss, b_grad
Example #26
import tools.data_loader
import tools.model_io
import modules
import shutil
import settings

# user init
batchsize = 10
numbIters = 10000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network
nn = modules.Sequential([
    modules.Linear(32**2, 2),
    modules.NegAbs(),
    modules.BinStep(),
    modules.Linear(2, 2),
    modules.SoftMax()
])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)

# determine training name of neural net
Example #27
import tools.data_loader
import tools.model_io
import modules
import shutil
import settings

# user init
batchsize = 10
numbIters = 10000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network
nn = modules.Sequential([
    modules.Linear(settings.nrOfPixels, 4),
    modules.Tanh(),
    modules.Linear(4, 4),
    modules.Tanh(),
    modules.Linear(4, 2),
    modules.SoftMax()
])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)
Example #28
def testNN(iters=10000, width=3, learning_rates=[0.005], std=True):
    # Test cases
    # X, y = xor()
    # X, y = xor_more()
    X, y = multimodalData(numModes=4)
    # X, y = sklearn.datasets.make_moons(noise=0.25)
    # X, y = sklearn.datasets.make_classification()
    # y has 2 classes (0, 1)

    # Map into 2 softmax outputs
    nclass = len(set(listify(y)))

    def unary(yi):
        return [(1 if i == yi else 0) for i in range(nclass)]

    Y = np.array([unary(yi) for yi in y])
    # build a network
    nd = X.shape[1]  # number of features
    u = width
    nn = modules.Sequential([
        modules.Linear(nd, u),
        modules.Tanh(),
        modules.Linear(u, u),
        modules.Tanh(),
        modules.Linear(u, nclass),
        modules.SoftMax()
    ])

    # train the network.
    errors = []
    for lrate in learning_rates:
        nn.clean()
        # Default does not do learning rate decay or early stopping.
        #print ('X,Y,',X,Y)

        nn.train2(np.asarray(X),
                  np.asarray(Y),
                  batchsize=1,
                  iters=iters,
                  lrate=lrate,
                  lrate_decay_step=100000)
        errors.append((lrate, predictionErrors(nn, X, y)))
    print('Errors for learning rates', errors)

    # Plot the last result
    if nd > 2: return  # only plot two-feature cases
    eps = .1
    xmin = np.min(X[:, 0]) - eps
    xmax = np.max(X[:, 0]) + eps
    ymin = np.min(X[:, 1]) - eps
    ymax = np.max(X[:, 1]) + eps
    ax = tidyPlot(xmin, xmax, ymin, ymax, xlabel='x', ylabel='y')

    def fizz(x1, x2):
        y = nn.forward(np.array([x1, x2]))
        return y[0, 1]  # class 1

    res = 30  # resolution of plot
    ima = np.array([[fizz(xi, yi) for xi in np.linspace(xmin, xmax, res)] \
                                for yi in np.linspace(ymin, ymax, res)])
    im = ax.imshow(np.flipud(ima),
                   interpolation='none',
                   extent=[xmin, xmax, ymin, ymax],
                   cmap='viridis' if std else 'jet')
    plt.colorbar(im)
    if std:
        colors = [('r' if l == 0 else 'g') for l in y]
        ax.scatter(X[:, 0],
                   X[:, 1],
                   c=colors,
                   marker='o',
                   s=80,
                   edgecolors='none')
    else:
        pinds = np.where(y == 0)
        ninds = np.where(y == 1)
        plt.plot(X[pinds[0], 0], X[pinds[0], 1], 'ob')
        plt.plot(X[ninds[0], 0], X[ninds[0], 1], 'or')
Example #29
import tools.data_loader
import tools.model_io
import modules
import shutil
import settings

# user init
batchsize = 10
numbIters = 10000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network
nn = modules.Sequential([
    modules.Linear(32**2, 1),
    modules.NegAbs(),
    modules.Tanh(),
    modules.Linear(1, 2),
    modules.SoftMax()
])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)

# determine training name of neural net
Example #30
import tools.data_loader
import tools.model_io
import modules
import shutil
import settings

# user init
batchsize = 10
numbIters = 10000

# load data
X, Y = tools.data_loader.load_data()

# setup neural network
nn = modules.Sequential([
    modules.Linear(settings.nrOfPixels, 16),
    modules.Tanh(),
    modules.Linear(16, 16),
    modules.Tanh(),
    modules.Linear(16, 3),
    modules.SoftMax()
])

# train neural network
nn.train(X['train'],
         Y['train'],
         Xval=X['valid'],
         Yval=Y['valid'],
         batchsize=batchsize,
         iters=numbIters)