Code example #1
File: linear_test.py, Project: swapnil-bit/NeuralNet
 def testThat_LinearTransformation_givesCorrectWeightsGradient_with1DFromLayerAnd3DToLayer_optimalConnection(self):
     # Example: from_layer.shape = [2,], to_layer.shape = [3,2,2] => weights.shape = [3,2]; axis_length = 1
     output_layer_delta = [np.array(np.arange(12)).reshape([3, 2, 2])]
     activated_input = [np.array([1, 2])]
     expected_gradient_for_weights = np.array([[2, 8], [14, 20], [26, 32]])
     transformation6 = Linear("optimal", list(activated_input[0].shape), list(output_layer_delta[0].shape))
     actual_gradient_for_weights = transformation6.get_gradient_for_weights(output_layer_delta, activated_input)
     self.assertArrayEqual(expected_gradient_for_weights, actual_gradient_for_weights)
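The expected gradient above can be reproduced with a single NumPy contraction. The sketch below is an independent check of the expected numbers, assuming the "optimal" [2] -> [3, 2, 2] gradient contracts the delta's trailing axis against the 1-D input; it is not the project's implementation.

import numpy as np

delta = np.arange(12).reshape(3, 2, 2)        # same data as output_layer_delta[0]
activated_input = np.array([1, 2])
# Contract the trailing axis of the delta with the input vector.
gradient = np.tensordot(delta, activated_input, axes=([2], [0]))
print(gradient)  # [[ 2  8] [14 20] [26 32]] -- matches expected_gradient_for_weights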
Code example #2
File: linear_test.py, Project: swapnil-bit/NeuralNet
 def testThat_LinearTransformation_givesCorrectWeightsGradient_with1DFullyConnectedLayers(self):
     # Example: from_layer.shape = [2,], to_layer.shape = [3,] => weights.shape = [3,2]; axis_length = 1
     output_layer_delta = [np.array([1, 2, 3])]
     activated_input = [np.array([1, 2])]
     old_method_gradients = np.dot(output_layer_delta[0].reshape([1, 3]).transpose(),
                                   activated_input[0].reshape([1, 2]))
     transformation6 = Linear("optimal", list(activated_input[0].shape), list(output_layer_delta[0].shape))
     actual_gradient_for_weights = transformation6.get_gradient_for_weights(output_layer_delta, activated_input)
     self.assertArrayEqual(old_method_gradients, actual_gradient_for_weights)
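For 1-D fully connected layers the "old method" dot product above is simply the outer product of the output delta and the activated input, which makes the expected value easy to check by hand:

import numpy as np

delta = np.array([1, 2, 3])
activated_input = np.array([1, 2])
# gradient[i, j] = delta[i] * input[j]
print(np.outer(delta, activated_input))  # [[1 2] [2 4] [3 6]]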
Code example #3
File: linear_test.py, Project: swapnil-bit/NeuralNet
 def testThat_transformFunction_givesCorrectTransformation_withFullyConnected2DLayers(self):
     # Example: input_shape = [2,2], output_shape = [3,2] => weights_shape = [3,2,2,2]; FP_axes = 2
     input_array = [np.array([[1, 0], [1, 1]])]
     weight_array = np.array([[[[2, 1], [2, 2]], [[1, 1], [1, 2]]],
                              [[[2, 0], [2, 3]], [[1, 0], [1, 0]]],
                              [[[2, 0], [2, 3]], [[1, 1], [2, 0]]]])
     expected_output = [np.array([[6, 4], [7, 2], [7, 3]])]
     transformation2 = Linear(connection_type="fully", input_layer_shape=[2, 2], output_layer_shape=[3, 2])
     actual_output = transformation2.transform(input_array, weight_array)
     self.assertArrayEqual(expected_output[0], np.around(actual_output[0], 3))
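One way to reproduce the expected output with plain NumPy, assuming the fully connected transform contracts the last FP_axes = 2 axes of the weights with the 2-D input (an assumption read off the expected numbers, not the project's code):

import numpy as np

x = np.array([[1, 0], [1, 1]])
w = np.array([[[[2, 1], [2, 2]], [[1, 1], [1, 2]]],
              [[[2, 0], [2, 3]], [[1, 0], [1, 0]]],
              [[[2, 0], [2, 3]], [[1, 1], [2, 0]]]])
# Sum over both input axes: output[i, j] = sum_{k, l} w[i, j, k, l] * x[k, l]
print(np.tensordot(w, x, axes=2))  # [[6 4] [7 2] [7 3]] -- matches expected_output[0]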
Code example #4
File: linear_test.py, Project: swapnil-bit/NeuralNet
 def testThat_transformFunction_givesCorrectTransformation_withOptimallyConnected1DInputAnd2DOutputLayers(self):
     # Example: input_shape = [3,], output_shape = [3,2] => weights_shape = [3,2,3]; FP_axes = 1
     input_array = [np.array([1, 0, 1])]
     weight_array = np.array([[[2, 1, 0], [2, 2, 3]],
                              [[1, 1, 0], [1, 2, 0]],
                              [[0, 1, 2], [1, 1, 1]]])
     expected_output = [np.array([[2, 5], [1, 1], [2, 2]])]
     transformation2 = Linear(connection_type="optimal", input_layer_shape=[3], output_layer_shape=[3, 2])
     actual_output = transformation2.transform(input_array, weight_array)
     self.assertArrayEqual(expected_output[0], actual_output[0])
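The same check with FP_axes = 1, i.e. a single contracted axis between weights and input (again a re-derivation of the expected value, not the library call itself):

import numpy as np

x = np.array([1, 0, 1])
w = np.array([[[2, 1, 0], [2, 2, 3]],
              [[1, 1, 0], [1, 2, 0]],
              [[0, 1, 2], [1, 1, 1]]])
# output[i, j] = sum_k w[i, j, k] * x[k]
print(np.tensordot(w, x, axes=1))  # [[2 5] [1 1] [2 2]]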
Code example #5
File: linear_test.py, Project: swapnil-bit/NeuralNet
    def testThat_backPropagateDelta_givesCorrectDelta_withOptimallyConnected1DLayers(self):
        # Example: from_layer.shape = [3,], to_layer.shape = [2,] => weights.shape = [2,3]; axis_length = 1
        transformed_input = [np.array([1, 0, 1])]
        activation = Sigmoid()
        weights = np.array([[2, 1, 0], [2, 2, 3]])
        output_layer_delta = [np.array([0.1, 0.2])]
        expected_input_layer_delta = [np.array([0.12, 0.12, 0.12])]

        transformation4 = Linear("optimal", list(transformed_input[0].shape), list(output_layer_delta[0].shape))
        actual_input_layer_delta = transformation4.back_propagate_delta(output_layer_delta, weights, activation,
                                                                        transformed_input)
        self.assertArrayEqual(expected_input_layer_delta[0], np.around(actual_input_layer_delta[0], 2))
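The expected delta can be re-derived by pushing the output delta back through the weights and scaling by the sigmoid derivative at the transformed input. This is a sketch of that arithmetic under the usual sigmoid'(z) = s(z)(1 - s(z)) definition, not the project's back_propagate_delta code:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

w = np.array([[2, 1, 0], [2, 2, 3]])
out_delta = np.array([0.1, 0.2])
z = np.array([1, 0, 1])                      # transformed_input[0]
back = np.tensordot(out_delta, w, axes=1)    # [0.6, 0.5, 0.6]
derivative = sigmoid(z) * (1 - sigmoid(z))   # [0.197, 0.25, 0.197]
print(np.around(back * derivative, 2))       # [0.12 0.12 0.12]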
Code example #6
File: linear_test.py, Project: swapnil-bit/NeuralNet
    def testThat_backPropagateDelta_givesCorrectDeltaDimensions_forVariousInputAndOutputShapes(self):
        input_shapes, output_shapes, weights_shapes, _, _, _, _, _ = self.get_test_data_for_various_input_and_output_shapes()
        actual_delta_shapes = list()
        for i in range(len(input_shapes)):
            transformation4 = Linear("optimal", input_shapes[i], output_shapes[i])
            output_layer_delta = [np.zeros(output_shapes[i])]
            weights = np.zeros(weights_shapes[i])
            activation = Sigmoid()
            transformed_input = [np.zeros(input_shapes[i])]
            input_layer_delta = transformation4.back_propagate_delta(output_layer_delta, weights, activation,
                                                                     transformed_input)
            actual_delta_shapes.append(list(input_layer_delta[0].shape))

        self.assertEqual(input_shapes, actual_delta_shapes)
Code example #7
File: linear_test.py, Project: swapnil-bit/NeuralNet
 def testThat_backPropagateDelta_givesCorrectDelta_withFullyConnected2DLayers(self):
     # Example: from_layer.shape = [2,2], to_layer.shape = [3,2] => weights.shape = [3,2,2,2]; axis_length = 2
     transformed_input = [np.array([[1, 0], [1, 1]])]
     activation = Sigmoid()
     weights = np.array([[[[2, 1], [2, 2]], [[1, 1], [1, 2]]],
                         [[[2, 0], [2, 3]], [[1, 0], [1, 0]]],
                         [[[2, 0], [2, 3]], [[1, 1], [2, 0]]]])
     output_layer_delta = [np.array([[0.1, 0.2], [0.3, 0.1], [0.2, 0.3]])]
     # tensordot = [[1.8, 0.6], [2.1, 2.1]]
     # activation_derivative = [[0.197, 0.25], [0.197, 0.197]]
     expected_input_layer_delta = [np.array([[0.354, 0.150], [0.413, 0.413]])]
     transformation4 = Linear("fully", list(transformed_input[0].shape), list(output_layer_delta[0].shape))
     actual_input_layer_delta = transformation4.back_propagate_delta(output_layer_delta, weights, activation,
                                                                     transformed_input)
     self.assertArrayEqual(expected_input_layer_delta[0], np.around(actual_input_layer_delta[0], 3))
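The intermediate values in the comments above can be reproduced the same way; this sketch assumes the backward pass contracts the output delta against the first two weight axes before applying the sigmoid derivative:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

w = np.array([[[[2, 1], [2, 2]], [[1, 1], [1, 2]]],
              [[[2, 0], [2, 3]], [[1, 0], [1, 0]]],
              [[[2, 0], [2, 3]], [[1, 1], [2, 0]]]])
out_delta = np.array([[0.1, 0.2], [0.3, 0.1], [0.2, 0.3]])
z = np.array([[1, 0], [1, 1]])
back = np.tensordot(out_delta, w, axes=2)    # [[1.8, 0.6], [2.1, 2.1]]
derivative = sigmoid(z) * (1 - sigmoid(z))   # [[0.197, 0.25], [0.197, 0.197]]
print(np.around(back * derivative, 3))       # [[0.354 0.15 ] [0.413 0.413]]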
Code example #8
File: linear_test.py, Project: swapnil-bit/NeuralNet
    def testThat_backPropagateDelta_givesCorrectDelta_withOptimallyConnected2DInputAnd3DOutputLayers(self):
        # Example: from_layer.shape = [3,2], to_layer.shape = [2,3,2] => weights.shape = [2,]; axis_length = 0
        transformed_input = [np.array([[1, 0], [0, 1], [1, 1]])]
        activation = Sigmoid()
        weights = np.array([2, 3])
        output_layer_delta = [np.array([[[0.1, 0.1], [0.2, 0.2], [0.2, 0.1]],
                                        [[0.2, 0.1], [0.3, 0.1], [0.1, 0.3]]])]
        # tensordot = [[0.8, 0.5], [1.3, 0.7], [0.7, 1.1]]
        # activation_derivative = [[0.197, 0.25], [0.25, 0.197], [0.197, 0.197]]
        expected_input_layer_delta = [np.array([[0.157, 0.125], [0.325, 0.138], [0.138, 0.216]])]

        transformation4 = Linear("optimal", list(transformed_input[0].shape), list(output_layer_delta[0].shape))
        actual_input_layer_delta = transformation4.back_propagate_delta(output_layer_delta, weights, activation,
                                                                        transformed_input)
        self.assertArrayEqual(expected_input_layer_delta[0], np.around(actual_input_layer_delta[0], 3))
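With the collapsed [2,] weights the backward contraction runs over the leading axis of the output delta only; the expected matrix falls out of the same two-step arithmetic (my sketch, reading the contraction off the commented tensordot values):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

w = np.array([2, 3])
out_delta = np.array([[[0.1, 0.1], [0.2, 0.2], [0.2, 0.1]],
                      [[0.2, 0.1], [0.3, 0.1], [0.1, 0.3]]])
z = np.array([[1, 0], [0, 1], [1, 1]])
back = np.tensordot(w, out_delta, axes=1)    # [[0.8, 0.5], [1.3, 0.7], [0.7, 1.1]]
derivative = sigmoid(z) * (1 - sigmoid(z))
print(np.around(back * derivative, 3))       # [[0.157 0.125] [0.325 0.138] [0.138 0.216]]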
Code example #9
File: linear_test.py, Project: swapnil-bit/NeuralNet
 def testThat_getForwardPropagationParameters_givesCorrectDetails_with2DInputAnd3DOutputLayerButNoCommonality(self):
     # Here even optimal connection works like fully connected
     transformation1 = Linear(connection_type="optimal", input_layer_shape=[10, 5], output_layer_shape=[2, 3, 4])
     expected_weights_shape = [2, 3, 4, 10, 5]
     expected_forward_propagation_axes = 2
     self.assertEqual(expected_forward_propagation_axes, transformation1.forward_propagation_axes)
     self.assertEqual(expected_weights_shape, transformation1.weights_shape)
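Since [10, 5] and [2, 3, 4] share no trailing block, the weights span the full Cartesian product of both shapes and two axes are contracted in the forward pass. A quick shape check under that reading (not project code):

import numpy as np

w = np.zeros([2, 3, 4, 10, 5])
x = np.zeros([10, 5])
# Contracting the input's two axes leaves exactly the output shape.
print(np.tensordot(w, x, axes=2).shape)  # (2, 3, 4)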
Code example #10
File: linear_test.py, Project: swapnil-bit/NeuralNet
 def testThat_getForwardPropagationParameters_givesCorrectDetails_withOptimallyConnected1DInputAnd2DOutputLayers(
         self):
     transformation1 = Linear(connection_type="optimal", input_layer_shape=[10, ], output_layer_shape=[5, 10])
     expected_weights_shape = [5]
     expected_forward_propagation_axes = 0
     self.assertEqual(expected_forward_propagation_axes, transformation1.forward_propagation_axes)
     self.assertEqual(expected_weights_shape, transformation1.weights_shape)
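Here the output shape [5, 10] ends with the input shape [10], so only the leading [5] block needs weights and the forward pass becomes an outer product (a tensordot over zero axes). A minimal shape check, assuming that is what the expected [5] weights shape and forward propagation axes of 0 mean:

import numpy as np

w = np.zeros([5])
x = np.zeros([10])
# axes=0 forms the outer product, restoring the [5, 10] output shape.
print(np.tensordot(w, x, axes=0).shape)  # (5, 10)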
Code example #11
 def __init__(self,
              input_layer: Layer,
              output_layer: Layer,
              connection_type: str = "fully",
              transformation: Transformation = None,
              initial_weights: np.array = np.array([]),
              initial_weights_distribution: str = "zeros"):
     self.id = (input_layer.id, output_layer.id)
     self.input_layer = input_layer
     self.output_layer = output_layer
     self.connection_type = connection_type
     self.transformation = transformation
     if self.transformation is None:
         self.transformation = Linear(self.connection_type,
                                      self.input_layer.shape,
                                      self.output_layer.shape)
     self.weights = self.initialize_weights(initial_weights,
                                            initial_weights_distribution)
Code example #12
File: linear_test.py, Project: swapnil-bit/NeuralNet
 def testThat_getForwardPropagationParameters_givesCorrectDetails_forCollapsibleInputAndOutputShapes(self):
     input_shapes, output_shapes, weights_shapes, forward_propagation_axes, _, _, _, _ = self.get_test_data_for_collapsible_input_and_output_shapes()
     actual_weight_shapes = list()
     actual_forward_propagation_axes = list()
     for i in range(len(input_shapes)):
         transformation1 = Linear("optimal", input_shapes[i], output_shapes[i])
         actual_weight_shapes.append(transformation1.weights_shape)
         actual_forward_propagation_axes.append(transformation1.forward_propagation_axes)
     self.assertEqual(forward_propagation_axes, actual_forward_propagation_axes)
     self.assertEqual(weights_shapes, actual_weight_shapes)
Code example #13
File: linear_test.py, Project: swapnil-bit/NeuralNet
 def testThat_getBackPropagationParameters_givesCorrectDetails_forCollapsibleInputAndOutputShapes(self):
     input_shapes, output_shapes, _, _, transposed_weights_axes, back_propagation_axes, _, _ = self.get_test_data_for_collapsible_input_and_output_shapes()
     actual_weight_transposition_axes = list()
     actual_back_propagation_axes = list()
     for i in range(len(input_shapes)):
         transformation3 = Linear("optimal", input_shapes[i], output_shapes[i])
         actual_weight_transposition_axes.append(transformation3.transposed_weights_axes)
         actual_back_propagation_axes.append(transformation3.back_propagation_axes)
     self.assertEqual(transposed_weights_axes, actual_weight_transposition_axes)
     self.assertEqual(back_propagation_axes, actual_back_propagation_axes)
Code example #14
class Connection:
    def __init__(self,
                 input_layer: Layer,
                 output_layer: Layer,
                 connection_type: str = "fully",
                 transformation: Transformation = None,
                 initial_weights: np.array = np.array([]),
                 initial_weights_distribution: str = "zeros"):
        self.id = (input_layer.id, output_layer.id)
        self.input_layer = input_layer
        self.output_layer = output_layer
        self.connection_type = connection_type
        self.transformation = transformation
        if self.transformation is None:
            self.transformation = Linear(self.connection_type,
                                         self.input_layer.shape,
                                         self.output_layer.shape)
        self.weights = self.initialize_weights(initial_weights,
                                               initial_weights_distribution)

    def initialize_weights(self, initial_weights,
                           initial_weights_distribution) -> np.array:
        if list(initial_weights.shape) == self.transformation.weights_shape:
            return initial_weights
        if initial_weights_distribution == "normal":
            return np.random.normal(0, 1, self.transformation.weights_shape)
        return np.zeros(self.transformation.weights_shape)

    def transform_input(self, input_array: np.array) -> [np.array]:
        return self.transformation.transform(input_array, self.weights)

    def get_input_layer_delta(self) -> [np.array]:
        return self.transformation.back_propagate_delta(
            self.output_layer.delta, self.weights, self.input_layer.activation,
            self.input_layer.input_array)

    def get_gradient_for_weights(self) -> np.array:
        return self.transformation.get_gradient_for_weights(
            self.output_layer.delta, self.input_layer.output_array)

    def update_weights(self):
        pass
Code example #15
File: linear_test.py, Project: swapnil-bit/NeuralNet
    def testThat_getGradientParameters_givesDetails_forCollapsibleInputAndOutputShapes(self):
        input_shapes, output_shapes, _, _, _, _, transposed_input_axes, gradient_axes = self.get_test_data_for_collapsible_input_and_output_shapes()
        actual_transposed_input_axes = list()
        actual_gradient_axes = list()
        for i in range(len(input_shapes)):
            transformation5 = Linear("optimal", input_shapes[i], output_shapes[i])
            actual_transposed_input_axes.append(transformation5.transposed_input_axes)
            actual_gradient_axes.append(transformation5.weight_gradient_axes)

        self.assertEqual(transposed_input_axes, actual_transposed_input_axes)
        self.assertEqual(gradient_axes, actual_gradient_axes)
Code example #16
 def create_list_of_connections_having_fully_connection_type(
         self,
         layers_list: [Layer],
         layer_connections: [tuple],
         weight_distribution: str = "zeros") -> [Connection]:
     connections_list = list()
     for connection in layer_connections:
         input_layer = self.get_layer_from_layer_id(connection[0],
                                                    layers_list)
         output_layer = self.get_layer_from_layer_id(
             connection[1], layers_list)
         transformation = Linear("fully", input_layer.shape,
                                 output_layer.shape)
         new_connection = Connection(input_layer,
                                     output_layer, "fully", transformation,
                                     np.array([]), weight_distribution)
         connections_list.append(new_connection)
     return connections_list
Code example #17
File: linear_test.py, Project: swapnil-bit/NeuralNet
 def testThat_getForwardPropagationParameters_givesCorrectDetails_withFullyConnected1DLayers(self):
     transformation1 = Linear(connection_type="fully", input_layer_shape=[10, ], output_layer_shape=[10, ])
     expected_weights_shape = [10, 10]
     expected_forward_propagation_axes = 1
     self.assertEqual(expected_forward_propagation_axes, transformation1.forward_propagation_axes)
     self.assertEqual(expected_weights_shape, transformation1.weights_shape)
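Fully connected 1-D layers are the ordinary matrix-vector case; a quick shape check under the same tensordot reading used in the sketches above:

import numpy as np

w = np.zeros([10, 10])
x = np.zeros([10])
print(np.tensordot(w, x, axes=1).shape)  # (10,)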