Code example #1
def __init__(self, conv_param):
    """
    Args:
        conv_param: A dictionary containing the following keys
            - stride: The number of pixels between adjacent receptive fields in the horizontal and vertical directions
            - pad: The number of pixels that will be used to zero-pad the input
    """
    self.conv_layer = Conv(stride=conv_param['stride'],
                           pad=conv_param['pad'])
    self.relu_layer = ReLU()
Code example #2
def __init__(self, dropout_param=None):
    """
    Args:
        dropout_param: A dictionary with the following key(s):
            - prob: Probability for each neuron to drop out, required
            - seed: Seed integer for the random number generator, optional
    """
    self.affine_layer = Affine()
    self.relu_layer = ReLU()
    if dropout_param is not None:
        self.dropout_layer = Dropout(prob=dropout_param['prob'],
                                     seed=dropout_param.get('seed', None))
    else:
        self.dropout_layer = Dropout()
Code example #3
class AffineReLUDropout(object):
    def __init__(self, dropout_param=None):
        """
        Args:
            dropout_param: A dictionary with the following key(s):
                - prob: Probability for each neuron to drop out, required
                - seed: Seed integer for the random number generator, optional
        """
        self.affine_layer = Affine()
        self.relu_layer = ReLU()
        if dropout_param is not None:
            self.dropout_layer = Dropout(prob=dropout_param['prob'],
                                         seed=dropout_param.get('seed', None))
        else:
            self.dropout_layer = Dropout()

    def forward_pass(self, x, w, b, mode='train'):
        """ Performs forward propagation through affine, rectinfied linear unit, and dropout layers

        Args:
            x: Input
            w: Weights
            b: Bias
            mode: 'train' or 'test'

        Returns:
            dropout_out: Output from Dropout layer
        """
        affine_out = self.affine_layer.forward_pass(x, w, b)
        relu_out = self.relu_layer.forward_pass(affine_out)
        dropout_out = self.dropout_layer.forward_pass(relu_out, mode)

        return dropout_out

    def backward_pass(self, grad_out):
        """Performs back propagation through  affine, rectinfied linear unit, and dropout layers

        Args:
            grad_out: Upstream gradient

        Returns:
            grad_x: Gradient w.r.t. input
            grad_w: Gradient w.r.t. weight
            grad_b: Gradient w.r.t. bias
        """
        grad_dropout = self.dropout_layer.backward_pass(grad_out)
        grad_relu = self.relu_layer.backward_pass(grad_dropout)
        grad_x, grad_w, grad_b = self.affine_layer.backward_pass(grad_relu)

        return grad_x, grad_w, grad_b
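
A minimal usage sketch for the composite layer above, assuming the Affine, ReLU, and Dropout classes behave as in these examples (the module name layers is a hypothetical import path):

import numpy as np
from layers import AffineReLUDropout  # hypothetical import path

np.random.seed(0)
x = np.random.randn(4, 6)           # batch of 4 inputs with 6 features
w = np.random.randn(6, 3) * 0.01    # affine weights
b = np.zeros(3)                     # affine biases

layer = AffineReLUDropout(dropout_param={'prob': 0.5, 'seed': 231})
out = layer.forward_pass(x, w, b, mode='train')
grad_x, grad_w, grad_b = layer.backward_pass(np.ones_like(out))
print(out.shape, grad_x.shape, grad_w.shape, grad_b.shape)
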
Code example #4
def __init__(self, batch_norm_param=None):
    """
    Optional argument:
        batch_norm_param: A dictionary containing the following keys
            - eps: Constant for numeric stability, required
            - momentum: Constant for the running mean/variance calculation, required
            - running_mean: If the input has shape (N, D), an array of shape (D,), optional
            - running_var: If the input has shape (N, D), an array of shape (D,), optional
    """
    self.affine_layer = Affine()
    self.relu_layer = ReLU()
    if batch_norm_param is not None:
        self.batch_norm_layer = BatchNorm(
            eps=batch_norm_param['eps'],
            momentum=batch_norm_param['momentum'],
            running_mean=batch_norm_param.get('running_mean', None),
            running_var=batch_norm_param.get('running_var', None))
    else:
        self.batch_norm_layer = BatchNorm()
Code example #5
class ConvReLU(object):
    def __init__(self, conv_param):
        """
        Args:
            conv_param: A dictionary containing the following keys
                - stride: The number of pixels between adjacent receptive fields in the horizontal and vertical directions
                - pad: The number of pixels that will be used to zero-pad the input
        """
        self.conv_layer = Conv(stride=conv_param['stride'],
                               pad=conv_param['pad'])
        self.relu_layer = ReLU()

    def forward_pass(self, x, w, b):
        """
        Args:
            x: Input to convolutional layer
            w: Weights for convolutional layer
            b: Biases for convolutional layer

        Returns:
            relu_out: Output from the ReLU layer
        """
        conv_out = self.conv_layer.forward_pass(x, w, b)
        relu_out = self.relu_layer.forward_pass(conv_out)

        return relu_out

    def backward_pass(self, grad_out):
        """
        Args:
            grad_out: Upstream gradient

        Returns:
            grad_x: Gradients w.r.t. input to convolutional layer
            grad_w: Gradient w.r.t. weights to convolutional layer
            grad_b: Gradient w.r.t. biases to convolutional layer
        """
        grad_relu = self.relu_layer.backward_pass(grad_out)
        grad_x, grad_w, grad_b = self.conv_layer.backward_pass(grad_relu)

        return grad_x, grad_w, grad_b
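
A minimal usage sketch for ConvReLU, assuming the Conv layer uses the conventional (N, C, H, W) input layout and (F, C, HH, WW) filter layout (an assumption; the module name layers is a hypothetical import path):

import numpy as np
from layers import ConvReLU  # hypothetical import path

np.random.seed(0)
x = np.random.randn(2, 3, 8, 8)         # 2 images, 3 channels, 8x8 pixels (assumed layout)
w = np.random.randn(4, 3, 3, 3) * 0.01  # 4 filters of size 3x3 over 3 channels
b = np.zeros(4)

layer = ConvReLU(conv_param={'stride': 1, 'pad': 1})
out = layer.forward_pass(x, w, b)       # with stride 1 and pad 1, the spatial size is preserved
grad_x, grad_w, grad_b = layer.backward_pass(np.ones_like(out))
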
Code example #6
class ReLUTest(unittest.TestCase):
    def setUp(self):
        self.layer = ReLU()

    def test_forward_pass(self):
        x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)
        output = self.layer.forward_pass(x)
        expected_output = np.array([[ 0.,          0.,          0.,          0.,        ],
                                    [ 0.,          0.,          0.04545455,  0.13636364,],
                                    [ 0.22727273,  0.31818182,  0.40909091,  0.5,       ]])

        self.assertAlmostEqual(rel_error(output, expected_output), 1e-9, places=2)

    def test_backward_pass(self):
        np.random.seed(231)
        x = np.random.randn(10, 10)
        grad_out = np.random.randn(*x.shape)
        num_grad_x = eval_numerical_gradient_array(lambda x: self.layer.forward_pass(x), x, grad_out)
        grad_x = self.layer.backward_pass(grad_out)

        self.assertAlmostEqual(rel_error(num_grad_x, grad_x), 1e-9, places=2)
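
The test relies on a rel_error helper that is not shown in these examples. A common definition, given here only as a sketch of what is assumed rather than the project's actual implementation, is the maximum elementwise relative error between two arrays:

import numpy as np

def rel_error(x, y):
    # Maximum relative error, guarded against division by zero.
    return np.max(np.abs(x - y) / np.maximum(1e-8, np.abs(x) + np.abs(y)))
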
Code example #7
class AffineReLU(object):
    def __init__(self):
        self.affine_layer = Affine()
        self.relu_layer = ReLU()

    def forward_pass(self, x, w, b):
        """Performs forward propagation through affine and ReLU layers

        Args:
            x: Input
            w: Weights
            b: Bias

        Returns:
            relu_out: Output from ReLU layer
        """
        affine_out = self.affine_layer.forward_pass(x, w, b)
        relu_out = self.relu_layer.forward_pass(affine_out)

        return relu_out

    def backward_pass(self, grad_out):
        """Performs back propagation through affine and ReLU layers

        Args:
            grad_out: Upstream gradient

        Returns:
            grad_x: Gradient w.r.t. input
            grad_w: Gradient w.r.t. weight
            grad_b: Gradient w.r.t. bias
        """
        grad_relu = self.relu_layer.backward_pass(grad_out)
        grad_x, grad_w, grad_b = self.affine_layer.backward_pass(grad_relu)

        return grad_x, grad_w, grad_b
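
A minimal gradient-check sketch for AffineReLU, reusing the rel_error and eval_numerical_gradient_array helpers assumed by the ReLU test above (all import paths here are hypothetical):

import numpy as np
from layers import AffineReLU                                         # hypothetical import path
from gradient_check import eval_numerical_gradient_array, rel_error  # hypothetical import path

np.random.seed(231)
x = np.random.randn(5, 4)
w = np.random.randn(4, 3)
b = np.random.randn(3)
grad_out = np.random.randn(5, 3)

layer = AffineReLU()
layer.forward_pass(x, w, b)
grad_x, grad_w, grad_b = layer.backward_pass(grad_out)

# Compare the analytic weight gradient against a numerical estimate.
num_grad_w = eval_numerical_gradient_array(
    lambda w: layer.forward_pass(x, w, b), w, grad_out)
print(rel_error(num_grad_w, grad_w))  # should be very small, on the order of 1e-9
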
Code example #8
def setUp(self):
    self.layer = ReLU()
Code example #9
def __init__(self):
    self.affine_layer = Affine()
    self.relu_layer = ReLU()
Code example #10
class AffineBatchNormReLU(object):
    def __init__(self, batch_norm_param=None):
        """
        Optional argument:
            batch_norm_param: A dictionary containing the following keys
                - eps: Constant for numeric stability, required
                - momentum: Constant for the running mean/variance calculation, required
                - running_mean: If the input has shape (N, D), an array of shape (D,), optional
                - running_var: If the input has shape (N, D), an array of shape (D,), optional
        """
        self.affine_layer = Affine()
        self.relu_layer = ReLU()
        if batch_norm_param is not None:
            self.batch_norm_layer = BatchNorm(
                eps=batch_norm_param['eps'],
                momentum=batch_norm_param['momentum'],
                running_mean=batch_norm_param.get('running_mean', None),
                running_var=batch_norm_param.get('running_var', None))
        else:
            self.batch_norm_layer = BatchNorm()

    def forward_pass(self, x, w, b, gamma, beta, mode='train'):
        """ Performs forward propagation through affine, batch normalization, and rectinfied linear unit layers

        Args:
            x: Input
            w: Weights
            b: Bias
            gamma: Scale factor
            beta: Shifting factor
            mode: 'train' or 'test'

        Returns:
            relu_out: Output from ReLU layer
        """
        affine_out = self.affine_layer.forward_pass(x, w, b)
        batch_norm_out = self.batch_norm_layer.forward_pass(
            affine_out, gamma, beta, mode)
        relu_out = self.relu_layer.forward_pass(batch_norm_out)

        return relu_out

    def backward_pass(self, grad_out):
        """Performs back propagation through affine, batch normalization, and rectinfied linear unit layers

        Args:
            grad_out: Upstream gradient

        Returns:
            grad_x: Gradient w.r.t. input
            grad_w: Gradient w.r.t. weight
            grad_b: Gradient w.r.t. bias
            grad_gamma: Gradient w.r.t. gamma constant
            grad_beta: Gradient w.r.t. beta constant
        """
        grad_relu = self.relu_layer.backward_pass(grad_out)
        grad_batch_norm, grad_gamma, grad_beta = self.batch_norm_layer.backward_pass(
            grad_relu)
        grad_x, grad_w, grad_b = self.affine_layer.backward_pass(
            grad_batch_norm)

        return grad_x, grad_w, grad_b, np.sum(grad_gamma), np.sum(grad_beta)
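
A minimal usage sketch for the composite layer above, showing training and test modes. The gamma/beta shapes and the module name layers are assumptions; the gamma and beta gradients come back summed, as in the backward_pass above:

import numpy as np
from layers import AffineBatchNormReLU  # hypothetical import path

np.random.seed(0)
x = np.random.randn(8, 6)
w = np.random.randn(6, 4) * 0.01
b = np.zeros(4)
gamma = np.ones(4)   # scale, one value per output feature (assumed shape)
beta = np.zeros(4)   # shift, one value per output feature (assumed shape)

layer = AffineBatchNormReLU(batch_norm_param={'eps': 1e-5, 'momentum': 0.9})

# Training mode updates the running mean/variance kept inside the BatchNorm layer.
train_out = layer.forward_pass(x, w, b, gamma, beta, mode='train')
grad_x, grad_w, grad_b, grad_gamma, grad_beta = layer.backward_pass(np.ones_like(train_out))

# Test mode normalizes with the accumulated running statistics instead.
test_out = layer.forward_pass(x, w, b, gamma, beta, mode='test')
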