Example #1
File: nn.py Project: sundarum10/ML
    def backward_propagation(self, daL, end=1, reg_flag=False, lambd=0.1):
        assert (1 <= end <= self.L)

        for l in range(self.L, end - 1, -1):
            params = self.map['L' + str(l)]

            # Get da
            if l == self.L:
                da = daL
            else:
                da = np.dot(self.map['L' + str(l + 1)]['W'].T,
                            self.map['L' + str(l + 1)]['dz'])

            # Get dz
            da_z = basic_func('d' + params['activation'], params['z'])
            dz = da * da_z
            params['dz'] = dz

            # Get dW, db
            a_prev = self.map['L' + str(l - 1)]['a']
            dz_w = a_prev.T

            # L2 regularization term: the gradient of (lambd/2) * ||W||^2 is lambd * W
            if reg_flag:
                regu_item_W = lambd * params['W']
                regu_item_b = lambd * params['b']
            else:
                regu_item_W = 0
                regu_item_b = 0

            params['dW'] = np.dot(dz, dz_w) + regu_item_W
            params['db'] = np.sum(dz, axis=1, keepdims=True) + regu_item_b
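The helper basic_func is not included in this listing; backward_propagation relies on the convention that 'd' + activation name selects the element-wise derivative. A minimal sketch of such a dispatcher, assuming sigmoid and relu activations; the actual helper in sundarum10/ML may differ:

import numpy as np

def basic_func(name, z):
    # Dispatch on the activation name; 'd' + name is its derivative.
    if name == 'sigmoid':
        return 1.0 / (1.0 + np.exp(-z))
    if name == 'dsigmoid':
        s = 1.0 / (1.0 + np.exp(-z))
        return s * (1.0 - s)
    if name == 'relu':
        return np.maximum(0.0, z)
    if name == 'drelu':
        return (z > 0).astype(z.dtype)
    raise ValueError('unknown activation: ' + name)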
Example #2
File: nn.py Project: sundarum10/ML
    def forward_propagation(self, X, start=1):
        assert (1 <= start <= self.L)

        self.map['L0']['a'] = X  # layer 0 caches the raw network input

        for l in range(start, self.L + 1):
            params = self.map['L' + str(l)]

            # Linear step z = W a_prev + b, then this layer's activation
            a_prev = self.map['L' + str(l - 1)]['a']
            params['z'] = np.dot(params['W'], a_prev) + params['b']
            params['a'] = basic_func(params['activation'], params['z'])

        aL = self.map['L' + str(self.L)]['a']

        return aL
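Both methods assume self.map keeps one dictionary per layer under the keys 'L0' through 'L' + str(self.L), each holding 'W', 'b', and 'activation' ('z', 'a', 'dz', 'dW', 'db' are filled in during propagation). A hypothetical builder for that structure, shown only to make the layout concrete; the project's real constructor is not part of this listing:

import numpy as np

def build_map(layer_dims, activations, seed=0):
    # layer_dims[0] is the input size; layer_dims[1:] are the layer widths.
    rng = np.random.default_rng(seed)
    nn_map = {'L0': {'a': None}}  # 'a' is overwritten with X by forward_propagation
    for l in range(1, len(layer_dims)):
        nn_map['L' + str(l)] = {
            'W': rng.standard_normal((layer_dims[l], layer_dims[l - 1])) * 0.01,
            'b': np.zeros((layer_dims[l], 1)),
            'activation': activations[l - 1],
        }
    return nn_map

# e.g. a 2-layer network: 3 inputs, 4 hidden relu units, 1 sigmoid output
nn_map = build_map([3, 4, 1], ['relu', 'sigmoid'])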
Example #3
File: cnn.py Project: sundarum10/ML
    def forward_propagation(self, start=1, end=1):
        '''
        Input:
            start -- integer, layer at which propagation begins
            end -- integer, layer at which propagation stops (inclusive)
        '''

        assert (1 <= start <= end <= self.L)

        for l in range(start, end+1):
            params = self.map['L'+str(l)]
            ops = params['op_list']

            A_prev = self.map['L'+str(l-1)]['cache']['A']

            for op in ops:
                if op == 'conv':
                    W = params['cache']['W']
                    b = params['cache']['b']
                    stride = params['conv']['stride']
                    p = params['conv']['pad']
                    p_val = params['conv']['val']

                    A_prev = self.convolution_forward(A_prev, W, b, stride, p, p_val)

                elif op == 'activation':
                    activation_name = params['activation']
                    A_prev = basic_func(activation_name, A_prev)

                elif op == 'pool':
                    pool_params = params['pool']
                    stride = pool_params['stride']
                    f_shape = (pool_params['n_fh'], pool_params['n_fw'])
                    mode = pool_params['mode']

                    A_prev = self.pool(A_prev, f_shape, stride, mode=mode)

            params['cache']['A'] = A_prev
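Each layer in cnn.py is driven by its 'op_list', so a layer is configured through the sub-dictionaries that each op reads. A hypothetical layer entry consistent with the lookups in the loop above; the key names come from the snippet, but the shapes and values are illustrative:

import numpy as np

layer = {
    'op_list': ['conv', 'activation', 'pool'],
    'cache': {
        'W': np.zeros((3, 3, 3, 8)),   # filter bank (f_h, f_w, n_C_prev, n_C)
        'b': np.zeros((1, 1, 1, 8)),   # one bias per output channel
        'A': None,                     # written at the end of the op loop
    },
    'conv': {'stride': 1, 'pad': 1, 'val': 0},   # pad width 1, fill value 0
    'activation': 'relu',
    'pool': {'stride': 2, 'n_fh': 2, 'n_fw': 2, 'mode': 'max'},
}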
Example #4
File: cnn.py Project: sundarum10/ML
    def softmax_backward(self, dA, A_prev):
        # Chain rule through the softmax: Jacobian times upstream gradient dA
        return np.dot(basic_func('dsoftmax', A_prev), dA)
Example #5
File: cnn.py Project: sundarum10/ML
    def softmax_forward(self, A_prev):
        return basic_func('softmax', A_prev)
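Examples #4 and #5 treat the softmax backward pass as a plain matrix product with basic_func('dsoftmax', ...), i.e. with the softmax Jacobian diag(s) - s s^T. A standalone sketch of that Jacobian for a single vector; whether the project's helper expects the pre-activation or the softmax output is not shown here, and this sketch assumes the pre-activation z:

import numpy as np

def softmax(z):
    e = np.exp(z - np.max(z))  # shift by the max for numerical stability
    return e / np.sum(e)

def dsoftmax(z):
    # Jacobian of softmax at z: diag(s) - s s^T
    s = softmax(z).reshape(-1, 1)
    return np.diagflat(s) - np.dot(s, s.T)

z = np.array([1.0, 2.0, 0.5])
dA = np.array([0.1, -0.2, 0.3])
dz = np.dot(dsoftmax(z), dA)   # mirrors softmax_backward above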