Example 1
    def test_forward_backward(self):
        l = SigmoidLayer()
        y = l.forward(np.array([5, 5]))
        self.assertEqual(y.shape, (2, ))
        # sigmoid(5) = 1 / (1 + e^-5) ≈ 0.993307
        assert_almost_equal(y, np.array([0.993307, 0.993307]), decimal=5)
        d = l.backward(np.array([2, 3]))
        self.assertEqual(d.shape, (2, ))
        # sigmoid'(5) ≈ 0.006648, scaled by the upstream gradients 2 and 3
        assert_almost_equal(d, np.array([0.0132961, 0.0199442]), decimal=5)
        return
Example 2
    def __init__(self):
        super().__init__()
        # First layer: a fully connected layer with shape = 784 x 20
        self.l1 = DenseLayer(28 * 28, 20, w_std=0.01)
        # Activation of the first layer: Sigmoid
        self.sig1 = SigmoidLayer()

        # Second layer: a fully connected layer with shape = 20 x 1
        self.l2 = DenseLayer(20, 1, w_std=0.01)
        # Activation of the second layer: Sigmoid
        self.sig2 = SigmoidLayer()
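    # A minimal sketch of how a forward pass would presumably chain these
    # layers (assuming each layer exposes a forward() method; the original
    # snippet only shows __init__):
    def forward(self, x):
        h = self.sig1.forward(self.l1.forward(x))
        return self.sig2.forward(self.l2.forward(h))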
Example 3
    def __init__(self,
                 input_dim=2,
                 n_coupling=4,
                 hidden_dim=10,
                 prior='uniform'):
        super(RealNVP2D, self).__init__()
        self.n_coupling = n_coupling
        self.input_dim = input_dim
        layers = []
        for i in range(n_coupling):
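            # Alternate which of the two dimensions the binary mask selects,
            # so successive coupling layers act on different halves of the input.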
            mask = th.zeros(2)
            mask[i % 2] = 1.
            # layers.append(CouplingLayer(mask, input_dim=input_dim))
            layers.extend([
                CouplingLayer(mask, input_dim=input_dim,
                              hidden_dim=hidden_dim),
                ActNorm(input_dim)
            ])
        # layers.pop()
        if prior == 'gauss':

            # self.prior_logprob = lambda x: distributions.multivariate_normal.MultivariateNormal(
            #     th.zeros(x.shape[1]), th.eye(x.shape[1])).log_prob(x)
            self.prior = distributions.multivariate_normal.MultivariateNormal(
                th.zeros(input_dim), th.eye(input_dim))
        else:
            # assume uniform prior
            layers.append(SigmoidLayer())
            self.prior = distributions.uniform.Uniform(th.zeros(input_dim),
                                                       th.ones(input_dim))
        self.fw_net = nn.Sequential(*layers)
        self.bw_net = nn.Sequential(*layers[::-1])
Example 4
    def __init__(self, num_neurons: List[int]):
        self.layers = []
        # Each consecutive pair of sizes gets a LinearLayer followed by a
        # SigmoidLayer activation.
        for index, neuron in enumerate(num_neurons[:-1]):
            print(neuron, num_neurons[index + 1])
            layer = LinearLayer((neuron, num_neurons[index + 1]))
            self.layers.append(layer)
            layer = SigmoidLayer()
            self.layers.append(layer)
        self.loss_layer = None
Example 5
    def test_SigmoidLayer(self):
        l1 = SigmoidLayer()
        n = Sequential([l1])
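        # sigmoid(0) = 0.5 and sigmoid'(0) = 0.5 * (1 - 0.5) = 0.25,
        # which are the values asserted below.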
        y = n.forward(np.array([0]))
        self.assertEqual(y.shape, (1, ))
        assert_array_equal(y, np.array([0.5]))

        d = n.backward(np.array([1]))
        self.assertEqual(d.shape, (1, ))
        assert_array_equal(d, np.array([0.25]))
Example 6
    def test_numeric_gradient(self):
        l = SigmoidLayer()
        x = np.random.rand(2)
        gradient = l.numeric_gradient(x)
        l.forward(x)
        delta = l.backward([1, 1])
        # With an upstream gradient of ones, backward() should match the
        # diagonal of the numeric Jacobian (sigmoid acts element-wise).
        assert_almost_equal(np.diag(gradient), delta)
Example 7
        def view_rec_test(x_curr, prev_s_tensor, prev_in_gate_tensor):
            count = 0
            params = get_trainable_params()
            for p in params:
                count += 1
            print('view rec test : num of params %d' % count)

            rect8_ = InputLayer(view_features_shape, x_curr)
            prev_s_ = InputLayer(s_shape, prev_s_tensor)

            t_x_s_update_ = FCConv3DLayer(
                prev_s_,
                rect8_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                params=t_x_s_update.params,
                isTrainable=True)

            t_x_s_reset_ = FCConv3DLayer(
                prev_s_,
                rect8_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                params=t_x_s_reset.params,
                isTrainable=True)

            update_gate_ = SigmoidLayer(t_x_s_update_)
            comp_update_gate_ = ComplementLayer(update_gate_)
            reset_gate_ = SigmoidLayer(t_x_s_reset_)

            rs_ = EltwiseMultiplyLayer(reset_gate_, prev_s_)
            t_x_rs_ = FCConv3DLayer(
                rs_,
                rect8_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                params=t_x_rs.params,
                isTrainable=True)

            tanh_t_x_rs_ = TanhLayer(t_x_rs_)

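            # GRU-style state update: gate the previous state with update_gate_
            # and the candidate tanh activation with its complement (ComplementLayer).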
            gru_out_ = AddLayer(
                EltwiseMultiplyLayer(update_gate_, prev_s_),
                EltwiseMultiplyLayer(comp_update_gate_, tanh_t_x_rs_))

            return gru_out_.output, update_gate_.output
Example 8
    def build_net(self, n_in, n_out):
        layers = [n_in]
        layers.extend(self.hidden_layers)
        layers.append(n_out)
        self.weights_info = [(layers[i], layers[i + 1])
                             for i in range(len(layers) - 1)]

        self.layers = list()
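        # Each consecutive (n_in, n_out) pair becomes one fully connected sigmoid layer.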
        for w_info in self.weights_info:
            n_in = w_info[0]
            n_out = w_info[1]
            self.layers.append(
                SigmoidLayer(n_in=n_in, n_out=n_out, random_state=self.rnd))
Example 9
    def activationLayerFromType(self, activation_type, index=None):
        """Return activation layer from ActivationType.

        Returns an instance of
        - ReLULayer
        - SigmoidLayer
        """
        if activation_type == ActivationType.ReLU:
            return ReLULayer(index=index)
        elif activation_type == ActivationType.Sigmoid:
            return SigmoidLayer(index=index)
        else:
            print('[WARNING] activation_type {} is not recognized.'.format(
                activation_type))
Example 10
def main():
    mnist_path = os.path.join(os.getcwd(), "MNIST")
    (train_images, train_labels), (test_images,
                                   test_labels) = load_data(mnist_path)

    layers = [
        LinearLayer(32, 28**2, xavier),
        SigmoidLayer(),
        LinearLayer(32, 32, xavier),
        SigmoidLayer(),
        LinearLayer(10, 32, xavier),
        SigmoidLayer()
    ]
    net = NeuralNet(layers)

    np.seterr(over='ignore')
    train(net,
          train_images,
          train_labels,
          flatten_mnist_input,
          mnist_label_as_one_hot,
          epoch_count=1000,
          batch_size=1)

    confusion_matrix = DataFrame(np.zeros((10, 10)),
                                 index=range(10),
                                 columns=range(10))
    evaluator = test(net,
                     test_images,
                     test_labels,
                     confusion_matrix,
                     flatten_mnist_input,
                     highest_output_neuron,
                     mnist_label_as_one_hot,
                     title="POST-TRAIN")
    evaluator.plot()
Example 11
if __name__ == '__main__':
    from mnist import MNIST

    # Load MNIST dataset
    mndata = MNIST('./mnist')
    train_img, train_label = mndata.load_training()
    train_img = np.array(train_img, dtype=float)/255.0
    train_label = np.array(train_label, dtype=float)

    # Input vector (Layer 0)
    n_output_0 = len(train_img[0])

    # Middle layer (Layer 1)
    n_output_1 = 200
    layer1 = SigmoidLayer(n_output_1, n_output_0)

    # Output layer (Layer 2)
    n_output_2 = 10
    layer2 = SigmoidLayer(n_output_2, n_output_1)

    # FP, BP and learning
    epsilon = 0.15
    n_training_data = 1000
    se_history = []
    y1_history = []
    y2_history = []
    W1_history = []
    W2_history = []
    cpr_history = []
    for loop in range(100):
Example 12
# ## 1.1 MLP with Euclidean Loss and Sigmoid Activation Function
# Build and train an MLP containing one hidden layer with 128 units, using the Sigmoid activation function and the Euclidean loss function.
#
# ### TODO
# Before executing the following code, you should complete **layers/fc_layer.py** and **layers/sigmoid_layer.py**.

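# A minimal sketch of the behaviour layers/sigmoid_layer.py is expected to
# provide (an illustration only, not the project's actual implementation):
# forward applies the element-wise sigmoid, backward multiplies the upstream
# gradient by sigmoid(x) * (1 - sigmoid(x)).
#
#   class SigmoidLayer:
#       def forward(self, x):
#           self.y = 1.0 / (1.0 + np.exp(-x))
#           return self.y
#
#       def backward(self, grad):
#           return grad * self.y * (1.0 - self.y)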
# In[7]:

from layers import FCLayer, SigmoidLayer

sigmoidMLP = Network()
# Build MLP with FCLayer and SigmoidLayer
# 128 is the number of hidden units; you can change it yourself
sigmoidMLP.add(FCLayer(784, 128))
sigmoidMLP.add(SigmoidLayer())
sigmoidMLP.add(FCLayer(128, 10))

# In[15]:

sigmoidMLP, sigmoid_loss, sigmoid_acc = train(sigmoidMLP, criterion, sgd,
                                              data_train, max_epoch,
                                              batch_size, disp_freq)

# In[16]:

test(sigmoidMLP, criterion, data_test, batch_size, disp_freq)

# ## 1.2 MLP with Euclidean Loss and ReLU Activation Function
# Build and train an MLP containing one hidden layer with 128 units, using the ReLU activation function and the Euclidean loss function.
#
Example 13
    def __init__(self, numpy_rng = numpy.random.RandomState(2**30), theano_rng=None, n_ins=601,
                 n_outs=259, l1_reg = None, l2_reg = None, 
                 hidden_layers_sizes= [256, 256, 256, 256, 256], 
                 hidden_activation='tanh', output_activation='sigmoid'):
        
        print "DNN Initialisation"
        #logger = logging.getLogger("DNN initialization")

        self.sigmoid_layers = []
        self.params = []
        self.delta_params   = []
        self.n_layers = len(hidden_layers_sizes)
        
        self.n_ins = n_ins
        self.n_outs = n_outs
        #self.speaker_ID = []
        
        self.output_activation = output_activation

        self.l1_reg = l1_reg
        self.l2_reg = l2_reg       
        #vctk_class = Code_01.VCTK_feat_collection()
        
        assert self.n_layers > 0
        
        if not theano_rng:
            theano_rng = RandomStreams(numpy.random.randint(2 ** 30))

        # allocate symbolic variables for the data
        self.x = T.matrix('x') 
        self.y = T.matrix('y') 
        
        
        for i in xrange(self.n_layers):
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]

            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].output

            sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        activation=T.tanh)  # T.nnet.sigmoid
           
           
            self.sigmoid_layers.append(sigmoid_layer)
            self.params.extend(sigmoid_layer.params) 
            self.delta_params.extend(sigmoid_layer.delta_params)
         
     
        # add final layer
        if self.output_activation == 'linear':
            self.final_layer = LinearLayer(rng = numpy_rng,
                                           input=self.sigmoid_layers[-1].output,
                                           n_in=hidden_layers_sizes[-1],
                                           n_out=n_outs)
            
        elif self.output_activation == 'sigmoid':
            self.final_layer = SigmoidLayer(
                 rng = numpy_rng,
                 input=self.sigmoid_layers[-1].output,
                 n_in=hidden_layers_sizes[-1],
                 n_out=n_outs, activation=T.nnet.sigmoid)
        else:
            print ("This output activation function: %s is not supported right now!" %(self.output_activation))
            sys.exit(1)

        self.params.extend(self.final_layer.params)
        self.delta_params.extend(self.final_layer.delta_params)
    
        ### MSE
        self.finetune_cost = T.mean(T.sum( (self.final_layer.output-self.y)*(self.final_layer.output-self.y), axis=1 ))
        
        self.errors = T.mean(T.sum( (self.final_layer.output-self.y)*(self.final_layer.output-self.y), axis=1 ))
        
        ### L1-norm
        if self.l1_reg is not None:
            for i in xrange(self.n_layers):
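                # Assuming each HiddenLayer appends its params as (W, b), the
                # weight matrices sit at the even indices of self.params.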
                W = self.params[i * 2]
                self.finetune_cost += self.l1_reg * (abs(W).sum())

        ### L2-norm
        if self.l2_reg is not None:
            for i in xrange(self.n_layers):
                W = self.params[i * 2]
                self.finetune_cost += self.l2_reg * T.sqr(W).sum()  
Example 14
import numpy as np

from layers import SigmoidLayer, LogitLayer
from nn import Network

train_images = np.load('train_images.npy')
train_labels = np.load('train_labels.npy')
test_images = np.load('test_images.npy')
test_labels = np.load('test_labels.npy')

net = Network([SigmoidLayer(28 * 28, 200), LogitLayer(200, 10)])

net.train(train_images, train_labels, test_images, test_labels, 1000, 0.1, 100)
Example 15
    def build(self):
        l1 = DenseLayer(1, 4)
        sig1 = SigmoidLayer()
        l2 = DenseLayer(4, 1)
        self._layers = [l1, sig1, l2]
Example 16
    def __init__(self,
                 numpy_rng=numpy.random.RandomState(2**30),
                 theano_rng=None,
                 n_ins=601,
                 n_outs=259,
                 l1_reg=None,
                 l2_reg=None,
                 hidden_layers_sizes=[512, 512, 512, 512, 512, 512, 512],
                 n_speakers_accent=2,
                 hidden_activation='tanh',
                 output_activation='linear'):

        print "DNN MULTI-SPEAKER INITIALISATION"

        self.sigmoid_layers = []
        self.params = []
        self.delta_params = []
        self.n_layers = len(hidden_layers_sizes)

        self.n_ins = n_ins
        self.n_outs = n_outs

        self.output_activation = output_activation

        self.l1_reg = l1_reg
        self.l2_reg = l2_reg

        self.final_layer_accent = []
        self.error_cost = []

        #finetune_cost = []
        #self.finetune_costs_accent = []

        self.errors_accent = []

        assert self.n_layers > 0

        if not theano_rng:
            theano_rng = RandomStreams(numpy.random.randint(2**30))

        # allocate symbolic variables for the data
        self.x = T.matrix('x')
        self.y = T.matrix('y')

        for i in xrange(self.n_layers):
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]

            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].output

            sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        activation=T.tanh)

            self.sigmoid_layers.append(sigmoid_layer)
            self.params.extend(sigmoid_layer.params)
            self.delta_params.extend(sigmoid_layer.delta_params)

        ####Final Layer for speaker

        if self.output_activation == 'linear':
            self.final_layer_accent = LinearLayer(
                rng=numpy_rng,
                input=self.sigmoid_layers[-1].output,
                n_in=hidden_layers_sizes[-1],
                n_out=n_outs)

        elif self.output_activation == 'sigmoid':
            self.final_layer_accent = SigmoidLayer(
                rng=numpy_rng,
                input=self.sigmoid_layers[-1].output,
                n_in=hidden_layers_sizes[-1],
                n_out=n_outs,
                activation=T.nnet.sigmoid)
        else:
            print(
                "This output activation function: %s is not supported right now!"
                % (self.output_activation))
            sys.exit(1)

        self.params.extend(self.final_layer_accent.params)
        self.delta_params.extend(self.final_layer_accent.delta_params)

        ##MSE FOR EACH SPEAKER
        self.error_cost = T.mean(
            T.sum((self.final_layer_accent.output - self.y) *
                  (self.final_layer_accent.output - self.y),
                  axis=1))

        ###L1-norm
        if self.l1_reg is not None:
            for i in xrange(self.n_layers):
                W = self.params[i * 2]
                self.error_cost += self.l1_reg * (abs(W).sum())

        ###L2-norm
        if self.l2_reg is not None:
            for i in xrange(self.n_layers):
                W = self.params[i * 2]
                self.error_cost += self.l2_reg * T.sqr(W).sum()
Example 17
if __name__ == '__main__':
    from mnist import MNIST

    # Load MNIST dataset
    mndata = MNIST('./mnist')
    train_img, train_label = mndata.load_training()
    train_img = np.array(train_img, dtype=float) / 255.0
    train_label = np.array(train_label, dtype=float)

    # Input vector (Layer 0)
    n_output_0 = len(train_img[0])

    # Middle layer (Layer 1)
    n_output_1 = 200
    layer1 = SigmoidLayer(n_output_1, n_output_0)

    # Output layer (Layer 2)
    n_output_2 = 10
    layer2 = SigmoidLayer(n_output_2, n_output_1)

    # FP, BP and learning
    epsilon = 0.15
    n_training_data = 1000
    se_history = []
    y1_history = []
    y2_history = []
    W1_history = []
    W2_history = []
    cpr_history = []
    for loop in range(100):
Example 18
        def recurrence(x_curr, prev_s_tensor, prev_in_gate_tensor):
            # Scan function cannot use compiled function.
            input_ = InputLayer(input_shape, x_curr)
            conv1a_ = ConvLayer(input_, (n_convfilter[0], 7, 7),
                                params=conv1a.params)
            rect1a_ = LeakyReLU(conv1a_)
            conv1b_ = ConvLayer(rect1a_, (n_convfilter[0], 3, 3),
                                params=conv1b.params)
            rect1_ = LeakyReLU(conv1b_)
            pool1_ = PoolLayer(rect1_)

            conv2a_ = ConvLayer(pool1_, (n_convfilter[1], 3, 3),
                                params=conv2a.params)
            rect2a_ = LeakyReLU(conv2a_)
            conv2b_ = ConvLayer(rect2a_, (n_convfilter[1], 3, 3),
                                params=conv2b.params)
            rect2_ = LeakyReLU(conv2b_)
            conv2c_ = ConvLayer(pool1_, (n_convfilter[1], 1, 1),
                                params=conv2c.params)
            res2_ = AddLayer(conv2c_, rect2_)
            pool2_ = PoolLayer(res2_)

            conv3a_ = ConvLayer(pool2_, (n_convfilter[2], 3, 3),
                                params=conv3a.params)
            rect3a_ = LeakyReLU(conv3a_)
            conv3b_ = ConvLayer(rect3a_, (n_convfilter[2], 3, 3),
                                params=conv3b.params)
            rect3_ = LeakyReLU(conv3b_)
            conv3c_ = ConvLayer(pool2_, (n_convfilter[2], 1, 1),
                                params=conv3c.params)
            res3_ = AddLayer(conv3c_, rect3_)
            pool3_ = PoolLayer(res3_)

            conv4a_ = ConvLayer(pool3_, (n_convfilter[3], 3, 3),
                                params=conv4a.params)
            rect4a_ = LeakyReLU(conv4a_)
            conv4b_ = ConvLayer(rect4a_, (n_convfilter[3], 3, 3),
                                params=conv4b.params)
            rect4_ = LeakyReLU(conv4b_)
            pool4_ = PoolLayer(rect4_)

            conv5a_ = ConvLayer(pool4_, (n_convfilter[4], 3, 3),
                                params=conv5a.params)
            rect5a_ = LeakyReLU(conv5a_)
            conv5b_ = ConvLayer(rect5a_, (n_convfilter[4], 3, 3),
                                params=conv5b.params)
            rect5_ = LeakyReLU(conv5b_)
            conv5c_ = ConvLayer(pool4_, (n_convfilter[4], 1, 1),
                                params=conv5c.params)
            res5_ = AddLayer(conv5c_, rect5_)
            pool5_ = PoolLayer(res5_)

            conv6a_ = ConvLayer(pool5_, (n_convfilter[5], 3, 3),
                                params=conv6a.params)
            rect6a_ = LeakyReLU(conv6a_)
            conv6b_ = ConvLayer(rect6a_, (n_convfilter[5], 3, 3),
                                params=conv6b.params)
            rect6_ = LeakyReLU(conv6b_)
            res6_ = AddLayer(pool5_, rect6_)
            pool6_ = PoolLayer(res6_)

            flat6_ = FlattenLayer(pool6_)
            fc7_ = TensorProductLayer(flat6_,
                                      n_fc_filters[0],
                                      params=fc7.params)
            rect7_ = LeakyReLU(fc7_)

            prev_s_ = InputLayer(s_shape_1d, prev_s_tensor)
            #print(self.prev_s_._output_shape)

            t_x_s_update_ = FCConv1DLayer(prev_s_,
                                          rect7_,
                                          n_fc_filters[0],
                                          params=self.t_x_s_update.params,
                                          isTrainable=True)

            t_x_s_reset_ = FCConv1DLayer(prev_s_,
                                         rect7_,
                                         n_fc_filters[0],
                                         params=self.t_x_s_reset.params,
                                         isTrainable=True)

            update_gate_ = SigmoidLayer(t_x_s_update_)
            comp_update_gate_ = ComplementLayer(update_gate_)
            reset_gate_ = SigmoidLayer(t_x_s_reset_)

            rs_ = EltwiseMultiplyLayer(reset_gate_, prev_s_)
            t_x_rs_ = FCConv1DLayer(rs_,
                                    rect7_,
                                    n_fc_filters[0],
                                    params=self.t_x_rs.params,
                                    isTrainable=True)

            tanh_t_x_rs_ = TanhLayer(t_x_rs_)

            gru_out_ = AddLayer(
                EltwiseMultiplyLayer(update_gate_, prev_s_),
                EltwiseMultiplyLayer(comp_update_gate_, tanh_t_x_rs_))

            return gru_out_.output, update_gate_.output
Example 19
    def __init__(self, x, input_shape):
        n_convfilter = [16, 32, 64, 64, 64, 64]
        n_fc_filters = [1024]
        n_deconvfilter = [64, 64, 64, 16, 8, 2]

        self.x = x
        # To define weights, define the network structure first
        x_ = InputLayer(input_shape)
        conv1a = ConvLayer(x_, (n_convfilter[0], 7, 7))
        conv1b = ConvLayer(conv1a, (n_convfilter[0], 3, 3))
        pool1 = PoolLayer(conv1b)

        print(
            'Conv1a = ConvLayer(x, (%s, 7, 7) => input_shape %s,  output_shape %s)'
            % (n_convfilter[0], conv1a._input_shape, conv1a._output_shape))
        print(
            'Conv1b = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[0], conv1b._input_shape, conv1b._output_shape))
        print('pool1 => input_shape %s,  output_shape %s)' %
              (pool1._input_shape, pool1._output_shape))

        conv2a = ConvLayer(pool1, (n_convfilter[1], 3, 3))
        conv2b = ConvLayer(conv2a, (n_convfilter[1], 3, 3))
        conv2c = ConvLayer(pool1, (n_convfilter[1], 1, 1))
        pool2 = PoolLayer(conv2c)

        print(
            'Conv2a = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[1], conv2a._input_shape, conv2a._output_shape))
        print(
            'Conv2b = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[1], conv2b._input_shape, conv2b._output_shape))
        conv3a = ConvLayer(pool2, (n_convfilter[2], 3, 3))
        conv3b = ConvLayer(conv3a, (n_convfilter[2], 3, 3))
        conv3c = ConvLayer(pool2, (n_convfilter[2], 1, 1))
        pool3 = PoolLayer(conv3b)

        print(
            'Conv3a = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[2], conv3a._input_shape, conv3a._output_shape))
        print(
            'Conv3b = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[2], conv3b._input_shape, conv3b._output_shape))
        print(
            'Conv3c = ConvLayer(x, (%s, 1, 1) => input_shape %s,  output_shape %s)'
            % (n_convfilter[2], conv3c._input_shape, conv3c._output_shape))
        print('pool3 => input_shape %s,  output_shape %s)' %
              (pool3._input_shape, pool3._output_shape))

        conv4a = ConvLayer(pool3, (n_convfilter[3], 3, 3))
        conv4b = ConvLayer(conv4a, (n_convfilter[3], 3, 3))
        pool4 = PoolLayer(conv4b)

        conv5a = ConvLayer(pool4, (n_convfilter[4], 3, 3))
        conv5b = ConvLayer(conv5a, (n_convfilter[4], 3, 3))
        conv5c = ConvLayer(pool4, (n_convfilter[4], 1, 1))
        pool5 = PoolLayer(conv5b)

        conv6a = ConvLayer(pool5, (n_convfilter[5], 3, 3))
        conv6b = ConvLayer(conv6a, (n_convfilter[5], 3, 3))
        pool6 = PoolLayer(conv6b)

        print(
            'Conv6a = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[5], conv6a._input_shape, conv6a._output_shape))
        print(
            'Conv6b = ConvLayer(x, (%s, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_convfilter[5], conv6b._input_shape, conv6b._output_shape))
        print('pool6 => input_shape %s,  output_shape %s)' %
              (pool6._input_shape, pool6._output_shape))

        flat6 = FlattenLayer(pool6)
        print('flat6 => input_shape %s,  output_shape %s)' %
              (flat6._input_shape, flat6._output_shape))

        fc7 = TensorProductLayer(flat6, n_fc_filters[0])
        print('fc7 => input_shape %s,  output_shape %s)' %
              (fc7._input_shape, fc7._output_shape))

        # Set the size to be 64x4x4x4
        #s_shape_1d = (cfg.batch, n_deconvfilter[0])
        s_shape_1d = (cfg.batch, n_fc_filters[0])
        self.prev_s = InputLayer(s_shape_1d)
        #view_features_shape = (cfg.batch, n_fc_filters[0], cfg.CONST.N_VIEWS)

        self.t_x_s_update = FCConv1DLayer(self.prev_s,
                                          fc7,
                                          n_fc_filters[0],
                                          isTrainable=True)

        self.t_x_s_reset = FCConv1DLayer(self.prev_s,
                                         fc7,
                                         n_fc_filters[0],
                                         isTrainable=True)

        self.reset_gate = SigmoidLayer(self.t_x_s_reset)

        self.rs = EltwiseMultiplyLayer(self.reset_gate, self.prev_s)
        self.t_x_rs = FCConv1DLayer(self.rs,
                                    fc7,
                                    n_fc_filters[0],
                                    isTrainable=True)

        def recurrence(x_curr, prev_s_tensor, prev_in_gate_tensor):
            # Scan function cannot use compiled function.
            input_ = InputLayer(input_shape, x_curr)
            conv1a_ = ConvLayer(input_, (n_convfilter[0], 7, 7),
                                params=conv1a.params)
            rect1a_ = LeakyReLU(conv1a_)
            conv1b_ = ConvLayer(rect1a_, (n_convfilter[0], 3, 3),
                                params=conv1b.params)
            rect1_ = LeakyReLU(conv1b_)
            pool1_ = PoolLayer(rect1_)

            conv2a_ = ConvLayer(pool1_, (n_convfilter[1], 3, 3),
                                params=conv2a.params)
            rect2a_ = LeakyReLU(conv2a_)
            conv2b_ = ConvLayer(rect2a_, (n_convfilter[1], 3, 3),
                                params=conv2b.params)
            rect2_ = LeakyReLU(conv2b_)
            conv2c_ = ConvLayer(pool1_, (n_convfilter[1], 1, 1),
                                params=conv2c.params)
            res2_ = AddLayer(conv2c_, rect2_)
            pool2_ = PoolLayer(res2_)

            conv3a_ = ConvLayer(pool2_, (n_convfilter[2], 3, 3),
                                params=conv3a.params)
            rect3a_ = LeakyReLU(conv3a_)
            conv3b_ = ConvLayer(rect3a_, (n_convfilter[2], 3, 3),
                                params=conv3b.params)
            rect3_ = LeakyReLU(conv3b_)
            conv3c_ = ConvLayer(pool2_, (n_convfilter[2], 1, 1),
                                params=conv3c.params)
            res3_ = AddLayer(conv3c_, rect3_)
            pool3_ = PoolLayer(res3_)

            conv4a_ = ConvLayer(pool3_, (n_convfilter[3], 3, 3),
                                params=conv4a.params)
            rect4a_ = LeakyReLU(conv4a_)
            conv4b_ = ConvLayer(rect4a_, (n_convfilter[3], 3, 3),
                                params=conv4b.params)
            rect4_ = LeakyReLU(conv4b_)
            pool4_ = PoolLayer(rect4_)

            conv5a_ = ConvLayer(pool4_, (n_convfilter[4], 3, 3),
                                params=conv5a.params)
            rect5a_ = LeakyReLU(conv5a_)
            conv5b_ = ConvLayer(rect5a_, (n_convfilter[4], 3, 3),
                                params=conv5b.params)
            rect5_ = LeakyReLU(conv5b_)
            conv5c_ = ConvLayer(pool4_, (n_convfilter[4], 1, 1),
                                params=conv5c.params)
            res5_ = AddLayer(conv5c_, rect5_)
            pool5_ = PoolLayer(res5_)

            conv6a_ = ConvLayer(pool5_, (n_convfilter[5], 3, 3),
                                params=conv6a.params)
            rect6a_ = LeakyReLU(conv6a_)
            conv6b_ = ConvLayer(rect6a_, (n_convfilter[5], 3, 3),
                                params=conv6b.params)
            rect6_ = LeakyReLU(conv6b_)
            res6_ = AddLayer(pool5_, rect6_)
            pool6_ = PoolLayer(res6_)

            flat6_ = FlattenLayer(pool6_)
            fc7_ = TensorProductLayer(flat6_,
                                      n_fc_filters[0],
                                      params=fc7.params)
            rect7_ = LeakyReLU(fc7_)

            prev_s_ = InputLayer(s_shape_1d, prev_s_tensor)
            #print(self.prev_s_._output_shape)

            t_x_s_update_ = FCConv1DLayer(prev_s_,
                                          rect7_,
                                          n_fc_filters[0],
                                          params=self.t_x_s_update.params,
                                          isTrainable=True)

            t_x_s_reset_ = FCConv1DLayer(prev_s_,
                                         rect7_,
                                         n_fc_filters[0],
                                         params=self.t_x_s_reset.params,
                                         isTrainable=True)

            update_gate_ = SigmoidLayer(t_x_s_update_)
            comp_update_gate_ = ComplementLayer(update_gate_)
            reset_gate_ = SigmoidLayer(t_x_s_reset_)

            rs_ = EltwiseMultiplyLayer(reset_gate_, prev_s_)
            t_x_rs_ = FCConv1DLayer(rs_,
                                    rect7_,
                                    n_fc_filters[0],
                                    params=self.t_x_rs.params,
                                    isTrainable=True)

            tanh_t_x_rs_ = TanhLayer(t_x_rs_)

            gru_out_ = AddLayer(
                EltwiseMultiplyLayer(update_gate_, prev_s_),
                EltwiseMultiplyLayer(comp_update_gate_, tanh_t_x_rs_))

            return gru_out_.output, update_gate_.output

        time_features, _ = theano.scan(
            recurrence,
            sequences=[
                self.x
            ],  # along with images, feed in the index of the current frame
            outputs_info=[
                tensor.zeros_like(np.zeros(s_shape_1d),
                                  dtype=theano.config.floatX),
                tensor.zeros_like(np.zeros(s_shape_1d),
                                  dtype=theano.config.floatX)
            ])
        time_all = time_features[0]
        time_last = time_all[-1]

        self.features = time_last
Example 20
    def network_definition(self):

        # (multi_views, time, self.batch_size, 3, self.img_h, self.img_w),
        self.x = tensor6()
        self.is_x_tensor4 = False

        img_w = self.img_w
        img_h = self.img_h
        n_gru_vox = 4
        # n_vox = self.n_vox

        n_convfilter = [16, 32, 64, 64, 64, 64]
        n_fc_filters = [1024]
        n_deconvfilter = [64, 64, 64, 16, 8, 2]

        # Set the size to be 64x4x4x4
        s_shape = (self.batch_size, n_gru_vox, n_deconvfilter[0], n_gru_vox,
                   n_gru_vox)
        # Dummy 3D grid hidden representations
        prev_s = InputLayer(s_shape)

        input_shape = (self.batch_size, 3, img_w, img_h)

        s_shape_1d = (
            cfg.batch,
            n_fc_filters[0],
        )

        lstm1d_all = []

        def get_viewfeats(x_curr):
            lstm1d_all.append(LSTM1D(x_curr, input_shape))
            params_temp = get_trainable_params()
            self.params_lst.append(len(params_temp))
            '''
            count = 0
            for p in params:
                count += 1
            self.param_count
            print('num of params %d' %count)
            '''
            return lstm1d_all[-1].feat()

        view_features_shape = (self.batch_size, n_fc_filters[0])

        view_features, _ = theano.scan(get_viewfeats, sequences=[self.x])
        self.view_features = view_features

        fc7 = InputLayer(view_features_shape)
        t_x_s_update = FCConv3DLayer(
            prev_s,
            fc7, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
            isTrainable=True)
        t_x_s_reset = FCConv3DLayer(
            prev_s,
            fc7, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
            isTrainable=True)
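        # These shared transforms feed the GRU update and reset gates built
        # inside view_rec_test below (via params=t_x_s_update.params / t_x_s_reset.params).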

        # rll = time_features[0]    # time_features / time_all are not defined in this scope
        # time_last = time_all[-1]

        reset_gate = SigmoidLayer(t_x_s_reset)

        rs = EltwiseMultiplyLayer(reset_gate, prev_s)
        t_x_rs = FCConv3DLayer(rs,
                               fc7,
                               (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                               isTrainable=True)

        def view_rec_test(x_curr, prev_s_tensor, prev_in_gate_tensor):
            count = 0
            params = get_trainable_params()
            for p in params:
                count += 1
            print('view rec test : num of params %d' % count)

            rect8_ = InputLayer(view_features_shape, x_curr)
            prev_s_ = InputLayer(s_shape, prev_s_tensor)

            t_x_s_update_ = FCConv3DLayer(
                prev_s_,
                rect8_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                params=t_x_s_update.params,
                isTrainable=True)

            t_x_s_reset_ = FCConv3DLayer(
                prev_s_,
                rect8_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                params=t_x_s_reset.params,
                isTrainable=True)

            update_gate_ = SigmoidLayer(t_x_s_update_)
            comp_update_gate_ = ComplementLayer(update_gate_)
            reset_gate_ = SigmoidLayer(t_x_s_reset_)

            rs_ = EltwiseMultiplyLayer(reset_gate_, prev_s_)
            t_x_rs_ = FCConv3DLayer(
                rs_,
                rect8_, (n_deconvfilter[0], n_deconvfilter[0], 3, 3, 3),
                params=t_x_rs.params,
                isTrainable=True)

            tanh_t_x_rs_ = TanhLayer(t_x_rs_)

            gru_out_ = AddLayer(
                EltwiseMultiplyLayer(update_gate_, prev_s_),
                EltwiseMultiplyLayer(comp_update_gate_, tanh_t_x_rs_))

            return gru_out_.output, update_gate_.output

        s_update, _ = theano.scan(
            view_rec_test,
            sequences=[
                view_features
            ],  # along with images, feed in the index of the current frame
            outputs_info=[
                tensor.zeros_like(np.zeros(s_shape),
                                  dtype=theano.config.floatX),
                tensor.zeros_like(np.zeros(s_shape),
                                  dtype=theano.config.floatX)
            ])

        update_all = s_update[-1]
        s_all = s_update[0]
        s_last = s_all[-1]

        #s_last = np.random.rand(self.batch_size, n_gru_vox, n_deconvfilter[0], n_gru_vox, n_gru_vox)
        self.gru_s = InputLayer(s_shape, s_last)

        unpool7 = Unpool3DLayer(self.gru_s)
        self.conv7a = Conv3DLayer(unpool7, (n_deconvfilter[1], 3, 3, 3))
        self.rect7a = LeakyReLU(self.conv7a)
        self.conv7b = Conv3DLayer(self.rect7a, (n_deconvfilter[1], 3, 3, 3))
        self.rect7 = LeakyReLU(self.conv7b)
        self.res7 = AddLayer(unpool7, self.rect7)

        print('unpool7 => input_shape %s,  output_shape %s)' %
              (unpool7._input_shape, unpool7._output_shape))

        unpool8 = Unpool3DLayer(self.res7)
        conv8a = Conv3DLayer(unpool8, (n_deconvfilter[2], 3, 3, 3))
        rect8a = LeakyReLU(conv8a)
        self.conv8b = Conv3DLayer(rect8a, (n_deconvfilter[2], 3, 3, 3))
        self.rect8 = LeakyReLU(self.conv8b)
        self.res8 = AddLayer(unpool8, self.rect8)

        print('unpool8 => input_shape %s,  output_shape %s)' %
              (unpool8._input_shape, unpool8._output_shape))

        unpool12 = Unpool3DLayer(self.res8)
        conv12a = Conv3DLayer(unpool12, (n_deconvfilter[2], 3, 3, 3))
        rect12a = LeakyReLU(conv12a)
        self.conv12b = Conv3DLayer(rect12a, (n_deconvfilter[2], 3, 3, 3))
        self.rect12 = LeakyReLU(self.conv12b)
        self.res12 = AddLayer(unpool12, self.rect12)

        print('unpool12 => input_shape %s,  output_shape %s)' %
              (unpool12._input_shape, unpool12._output_shape))

        unpool9 = Unpool3DLayer(self.res12)
        self.conv9a = Conv3DLayer(unpool9, (n_deconvfilter[3], 3, 3, 3))
        self.rect9a = LeakyReLU(self.conv9a)
        self.conv9b = Conv3DLayer(self.rect9a, (n_deconvfilter[3], 3, 3, 3))
        self.rect9 = LeakyReLU(self.conv9b)
        self.conv9c = Conv3DLayer(unpool9, (n_deconvfilter[3], 1, 1, 1))
        self.res9 = AddLayer(self.conv9c, self.rect9)

        print('unpool9 => input_shape %s,  output_shape %s)' %
              (unpool9._input_shape, unpool9._output_shape))

        unpool10 = Unpool3DLayer(self.res9)
        self.conv10a = Conv3DLayer(unpool10, (n_deconvfilter[4], 3, 3, 3))
        self.rect10a = LeakyReLU(self.conv10a)
        self.conv10b = Conv3DLayer(self.rect10a, (n_deconvfilter[4], 3, 3, 3))
        self.rect10 = LeakyReLU(self.conv10b)
        self.conv10c = Conv3DLayer(self.rect10a, (n_deconvfilter[4], 3, 3, 3))
        self.res10 = AddLayer(self.conv10c, self.rect10)

        print('unpool10 => input_shape %s,  output_shape %s)' %
              (unpool10._input_shape, unpool10._output_shape))

        self.conv11 = Conv3DLayer(self.res10, (n_deconvfilter[5], 3, 3, 3))
        #self.conv11 = TanhLayer(conv11)
        print(
            'Conv11 = Conv3DLayer(x, (%s, 3, 3, 3) => input_shape %s,  output_shape %s)'
            % (n_deconvfilter[5], self.conv11._input_shape,
               self.conv11._output_shape))

        #self.conv11 = np.random.rand(cfg.batch, 128, 2, 128, 128)
        softmax_loss = SoftmaxWithLoss3D(self.conv11.output)
        self.softloss = softmax_loss
        self.loss = softmax_loss.loss(self.y)
        self.error = softmax_loss.error(self.y)
        self.params = get_trainable_params()
        self.output = softmax_loss.prediction()
        #update_all = [1,2,3]
        self.activations = [update_all]
Example 21
path_r = 'C:/Users/wojtek/Desktop/projekt1-oddanie/regression/data.multimodal.train.500.csv'
X, y = reader.read_data(path_r)
output = len(np.unique(y))
# circles/ XOR CLASS
# network = MultilayerPerceptron(100, 2, 0.05, 0.009, ProblemType.CLASSIFICATION, ErrorType.CLASSIC, True)

# network.add_layer(ReluLayer(32, 2))
# network.add_layer(TanhLayer(32,32))
# network.add_layer(TanhLayer(output, 32))

# REG
network = MultilayerPerceptron(30, 2, 0.05, 0.009, ProblemType.REGRESSION,
                               ErrorType.CLASSIC, True)

network.add_layer(TanhLayer(64, 1))
network.add_layer(SigmoidLayer(64, 64))
network.add_layer(TanhLayer(64, 64))
network.add_layer(SigmoidLayer(1, 64))

vs.plot_network(network)

network.fit(X, y)
pred = network.pred_for_show(X)  # network.predict(X[0])
accuracy = np.sum(y == pred) / len(y) * 100
# print(y)
# print(pred)
print(accuracy)
# print(np.mean(y-pred))
plt.plot(X, y, 'bo', X, pred, 'ro')
plt.show()
Example 22
if mode:
    print("Creating network...")
    network = MultilayerPerceptron(30, 2, 0.05, 0.009,
                                   ProblemType.CLASSIFICATION, ErrorType.MSE,
                                   True)

    network = MultilayerPerceptron(16, 2, 0.09, 0.009,
                                   ProblemType.CLASSIFICATION, ErrorType.MSE,
                                   True)

    input_size = len(images[0])

    print("Adding layers...")

    network.add_layer(TanhLayer(32, input_size))
    network.add_layer(SigmoidLayer(32, 32))
    network.add_layer(SigmoidLayer(10, 32))
    print("Learning...")
    network.fit(test_images, test_labels)
    ser.save_to_file("a_30.p", network)
else:
    print("Reading network from file...")
    network = ser.read_from_file("a.p")

print("Classification...")
pred = network.pred_for_show(images)
pred_values = [v[0] for v in pred]
counter = 0

for i in range(len(pred_values)):
    if labels[i] == pred_values[i]: