Code example #1
    def __init__(self,
                 input_dim,
                 output_dim,
                 optimizer,
                 activation=(utils.sigmoid, utils.sigmoid_prime)):
        print("Instatiating Vanilla Recurrent")

        NeuralLayer.__init__(self, input_dim, output_dim)

        self.Wxz = np.random.randn(output_dim[0], input_dim[0]) * 0.01
        self.Wyz = np.random.randn(output_dim[0], output_dim[0]) * 0.01
        self.bz = np.zeros((output_dim[0], 1))

        self.act_fxn = activation[0]
        self.act_prime = activation[1]

        self.optimizer = optimizer

        # Stored values for single samples: This way, can preserve state for next single sample
        # A convenience for testing (generating sentences from a single word)
        self.y = np.zeros((output_dim[0], 1))

        # Stored values for batch back propagation through time and batch updates
        self.seq_x = None
        self.seq_z = None
        self.seq_y = None
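
A minimal shape check for the recurrent parameters set up above, assuming one-element dimension lists; the sizes below are made up for illustration:

import numpy as np

input_dim, output_dim = [8000], [100]                       # hypothetical sizes
Wxz = np.random.randn(output_dim[0], input_dim[0]) * 0.01   # input-to-hidden weights
Wyz = np.random.randn(output_dim[0], output_dim[0]) * 0.01  # hidden-to-hidden (recurrent) weights
bz = np.zeros((output_dim[0], 1))                           # hidden bias
print(Wxz.shape, Wyz.shape, bz.shape)                       # (100, 8000) (100, 100) (100, 1)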
Code example #2
    def __init__(self, design, minimum, maximum):
        self.neural_net = []
        l = len(design)
        self.amount_of_layers = l
        for i in range(l):
            if i != (l - 1):
                # Intermediate layers are sized from consecutive design entries
                # (design[i] + 1 presumably leaves room for a bias term)
                layer = NeuralLayer(design[i + 1], design[i] + 1, minimum, maximum)
            else:
                # Last layer: a single unit, created with fixed bounds (1, 1)
                layer = NeuralLayer(1, design[i] + 1, 1, 1)
            self.neural_net.append(layer)
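
To make the sizing in the loop above concrete, here is a self-contained sketch of the arguments each NeuralLayer receives for a hypothetical design list; per the NeuralLayerTest example further down, the first two arguments give the matrix shape. The layer construction itself is omitted:

design = [2, 3, 1]   # hypothetical: 2 inputs, 3 hidden units, 1 output
l = len(design)
for i in range(l):
    if i != (l - 1):
        rows, cols = design[i + 1], design[i] + 1
    else:
        rows, cols = 1, design[i] + 1
    print(i, (rows, cols))   # 0 (3, 3), 1 (1, 4), 2 (1, 2)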
Code example #3
    def __init__(self, input_dim, pool_dim):
        print("instantiating Max Pool")
        self.pool_dim = pool_dim
        # Integer division so the pooled output dimensions stay whole numbers
        output_dim = [
            input_dim[0] // pool_dim[0], input_dim[1] // pool_dim[1],
            input_dim[2] // pool_dim[2]
        ]

        NeuralLayer.__init__(self, input_dim, output_dim)

        # Stored values for back propagation and updates
        self.batch_max_binary_filter = None
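
A quick check of the pooled output shape computed above, using hypothetical (depth, height, width) dimensions:

input_dim = [3, 28, 28]   # hypothetical: 3 channels, 28 x 28
pool_dim = [1, 2, 2]      # pool 2 x 2 spatial windows, keep the depth
output_dim = [input_dim[i] // pool_dim[i] for i in range(3)]
print(output_dim)         # [3, 14, 14]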
Code example #4
class NeuralLayerTest(unittest.TestCase):
    def setUp(self):
        self.layer = NeuralLayer(3, 2, -1, 1)

    def test_matrix_create(self):  # "test" prefix so unittest's default discovery runs it
        matrix = self.layer.return_matrix()
        print(matrix)
        shape = matrix.shape
        self.assertEqual(shape, (3, 2))
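
One conventional way to run this suite (assuming the test module also imports unittest and the NeuralLayer class):

if __name__ == "__main__":
    unittest.main()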
Code example #5
    def __init__(self,
                 input_dim,
                 output_dim,
                 optimizer,
                 activation=(utils.sigmoid, utils.sigmoid_prime)):
        print("instantiating Vanilla Feed Forward")

        NeuralLayer.__init__(self, input_dim, output_dim)

        self.input_dim, self.output_dim = input_dim, output_dim
        self.Wxz = np.random.randn(output_dim[0], input_dim[0]) / np.sqrt(
            input_dim[0])
        self.bz = np.zeros(output_dim)
        self.act_fxn = activation[0]
        self.act_prime = activation[1]
        self.optimizer = optimizer

        # Stored values for batch back propagation and batch updates
        self.batch_x = None
        self.batch_z = None
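
The feed-forward weights above are drawn from a Gaussian scaled by 1 / sqrt(input_dim[0]), so the spread of each unit's pre-activation stays roughly constant as the fan-in grows; a small sketch with hypothetical sizes:

import numpy as np

input_dim, output_dim = [784], [128]   # hypothetical sizes
Wxz = np.random.randn(output_dim[0], input_dim[0]) / np.sqrt(input_dim[0])
print(Wxz.shape)                       # (128, 784)
print(round(float(Wxz.std()), 3))      # about 0.036, i.e. 1 / sqrt(784)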
Code example #6
    def __init__(self, input_dim, num_patterns, filter_dim, optimizer, activation=(utils.softplus, utils.softplus_prime)):
        print("instantiating Convolutional")

        self.num_patterns = num_patterns
        self.x_depth, self.x_height, self.x_width = input_dim[0], input_dim[1], input_dim[2]
        self.w_depth, self.w_height, self.w_width = filter_dim[0], filter_dim[1], filter_dim[2]
        self.z_depth, self.z_height, self.z_width \
            = self.num_patterns, self.x_height-self.w_height+1, self.x_width-self.w_width+1

        NeuralLayer.__init__(self, input_dim, [self.z_depth, self.z_height, self.z_width])

        self.Wxz = np.random.randn(self.num_patterns, self.w_depth, self.w_height, self.w_width)
        self.bz = np.zeros(self.num_patterns)
        self.act_fxn = activation[0]
        self.act_prime = activation[1]
        self.optimizer = optimizer

        # Stored values for back propagation and batch updates
        self.batch_x = None
        self.batch_z = None
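
The z_height / z_width arithmetic above is the "valid" convolution rule (no padding, stride 1): each spatial dimension shrinks by the filter size minus one. A worked example with hypothetical dimensions:

num_patterns = 8
input_dim = [1, 28, 28]    # hypothetical: 1 channel, 28 x 28 image
filter_dim = [1, 5, 5]     # 5 x 5 filters spanning the full depth
z_height = input_dim[1] - filter_dim[1] + 1   # 24
z_width = input_dim[2] - filter_dim[2] + 1    # 24
print([num_patterns, z_height, z_width])      # [8, 24, 24]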
Code example #7
    def __init__(self, input_dim, output_dim):
        NeuralLayer.__init__(self, input_dim, output_dim)
        print("instantiating SoftMax")