Example No. 1
def linear_activation_forward(A_prev, W, b, activation):
    # this variant assumes linear_forward returns only Z (no cache)
    z = linear_forward(A_prev, W, b)
    if activation == 'sigmoid':
        a, z = sigmoid(z)  # z is overwritten with the activation cache, then discarded
    else:
        a, z = relu(z)
    return a  # note: no cache is returned for the backward pass
Example No. 2
def linear_activation_forward(A_prev, W, b, activation):
    """
    实现Linear->Activation这一层的前向传播
    
    参数:
        A_prev - 来自上一层(或输入层)的激活,维度为(上一层的节点数量,示例数)
        W - 权重矩阵,numpy数组,维度为(当前层的节点数量,前一层的大小)
        b - 偏向量,numpy阵列,维度为(当前层节点的数量,1)
        activation - 选择在此层中使用的激活函数名,字符串类型,【“sigmoid”|“relu”】
    返回:
        A - 激活函数的输出,也称激活后的值
        cache - 一个包含“linear_cache”和“activation_cache”的字典,我们需要存储它,以用于有效的后向传播运算
        linear_cache为前向传播获得的A,W,b
    """

    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
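Every snippet in this collection leans on helper functions (`linear_forward`, `sigmoid`, `relu`) that are never shown. A minimal sketch of what they are assumed to look like, matching the tuple unpacking used above (each helper returns its output together with the cache the caller stores), could be:

import numpy as np

def linear_forward(A_prev, W, b):
    # linear step Z = W . A_prev + b; cache the inputs for the backward pass
    Z = np.dot(W, A_prev) + b
    cache = (A_prev, W, b)
    return Z, cache

def sigmoid(Z):
    # element-wise sigmoid; the cache is simply Z
    A = 1 / (1 + np.exp(-Z))
    return A, Z

def relu(Z):
    # element-wise ReLU; the cache is simply Z
    A = np.maximum(0, Z)
    return A, Z

Returning Z itself as the activation cache is enough for the backward pass to recompute the local gradient of the activation.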
Example No. 3
def linear_activation_forward(A_prev, W, b, activation):
    """activation part of forward prop

	Arguments:
		A_prev {np array} -- [(l-1 x m)]
		W {[np.array]} -- [(l x l-1)]
		b {[np.array]} -- [(1 x b)]
		activation {string} -- [relu or sigmoid]

	Returns:
		[np.array] -- [A , (A_prev, W , b, Z)]
	"""

    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)

    elif activation == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
Example No. 4
def linear_activation_forward(A_prev, W, b, activation):
    """
    Implement the forward propagation for the LINEAR->ACTIVATION layer

    Arguments:
    A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)
    activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"

    Returns:
    A -- the output of the activation function, also called the post-activation value
    cache -- a python tuple containing "linear_cache" and "activation_cache";
             stored for computing the backward pass efficiently
    """

    if activation == "sigmoid":
        # Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)

    elif activation == "relu":
        # Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
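Assuming helpers along the lines of the earlier sketch, a quick shape check of `linear_activation_forward` with hypothetical sizes (3 units in the previous layer, 1 unit in the current layer, 2 examples) might look like:

np.random.seed(1)
A_prev = np.random.randn(3, 2)   # (size of previous layer, number of examples)
W = np.random.randn(1, 3)        # (size of current layer, size of previous layer)
b = np.random.randn(1, 1)        # (size of current layer, 1)

A, cache = linear_activation_forward(A_prev, W, b, activation="sigmoid")
print(A.shape)                   # (1, 2)
linear_cache, activation_cache = cache
print(activation_cache.shape)    # (1, 2) -- the Z that sigmoid received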
Example No. 5
def linear_activation_forward(A_prev, W, b, activation):
    """
    Implement the forward propagation for the LINEAR->ACTIVATION layer.

    Arguments:
        A_prev - activations from the previous layer (or input data), of shape (size of previous layer, number of examples)
        W - weights matrix, numpy array of shape (size of current layer, size of previous layer)
        b - bias vector, numpy array of shape (size of current layer, 1)
        activation - the activation to be used in this layer, as a string: "sigmoid" | "relu"

    Returns:
        A - the output of the activation function, also called the post-activation value
        cache - a tuple containing "linear_cache" and "activation_cache", stored for computing the backward pass efficiently
    """

    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
Example No. 6
def linear_activation_forward(A_prev, W, b, activation):
    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)
    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)
    return A, cache
Example No. 7
def linear_activation_forward(A_prev, W, b, activation):

    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)

    elif activation == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    cache = (linear_cache, activation_cache)
    return A, cache
Example No. 8
def linear_activation_forward(A_prev, W, b, activation):
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == 'relu':
        A, activation_cache = relu(Z)
    elif activation == 'sigmoid':
        A, activation_cache = sigmoid(Z)
    else:
        A, activation_cache = Z,Z

    assert A.shape == Z.shape == (W.shape[0], A_prev.shape[1])
    cache = linear_cache, activation_cache
    return A, cache
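Example No. 8 is the only variant that falls back to the identity when the activation name is unrecognized, so the layer degenerates to a purely linear one. Reusing the hypothetical helpers and shapes from the earlier sketches, that behaviour can be checked like this:

A_prev, W, b = np.random.randn(3, 2), np.random.randn(1, 3), np.random.randn(1, 1)
A, (linear_cache, activation_cache) = linear_activation_forward(A_prev, W, b, 'linear')
# activation_cache holds Z, and with the identity fallback A is exactly Z
assert np.allclose(A, activation_cache)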
Example No. 9
def linear_activation_forward(A_prev, W, b, activation):
    '''
    Implement the forward propagation for the LINEAR->ACTIVATION layer
    '''
    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)
    return A, cache
Example No. 10
def linear_activation_forward(A_prev, W, b, activation):
    """
    A_prev means previous A, b. activation is activation method
    """
    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)
    elif activation == "softmax":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = softmax(Z)
    cache = (linear_cache, activation_cache)
    return A, cache
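Example No. 10 additionally dispatches to a `softmax` helper with the same `(A, cache)` return convention; a minimal, numerically stable sketch of that assumed helper could be:

import numpy as np

def softmax(Z):
    # column-wise softmax; subtract the per-column max for numerical stability
    Z_shifted = Z - np.max(Z, axis=0, keepdims=True)
    exp_Z = np.exp(Z_shifted)
    A = exp_Z / np.sum(exp_Z, axis=0, keepdims=True)
    return A, Z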
Example No. 11
 def linear_activation_forward(self, A_prev, W, b, activation):  # linear + activation for one layer
     """
     A_prev is the activation value from the previous layer, i.e. this layer's input.
     W and b are this layer's weights and bias.
     """
     if activation == "sigmoid":
         Z, linear_cache = self.linear_forward(A_prev, W, b)
         A, activation_cache = sigmoid(Z)
     elif activation == "relu":
         Z, linear_cache = self.linear_forward(A_prev, W, b)
         A, activation_cache = relu(Z)
     assert (A.shape == (W.shape[0], A_prev.shape[1]))
     # linear_cache holds (the previous layer's A, i.e. this layer's input, plus this layer's W and b); activation_cache is this layer's Z
     cache = (linear_cache, activation_cache)
     return A, cache
Example No. 12
def linear_activation_forward(A_prev, W, b, activation):
    '''
    Step 2: forward propagation with activation, saving the intermediate values used later:
    the linear cache (A_prev, W, b) and the activation cache Z.
    '''
    # linear part
    Z, linear_cache = linear_forward(A_prev, W, b)

    # activation part
    if activation == "relu":
        A, active_cache = utils.relu(Z)
    elif activation == "sigmoid":
        A, active_cache = utils.sigmoid(Z)
    cache = (linear_cache, active_cache)
    return A, cache
Example No. 13
def linear_activation_forward(A_pre, W, b, activation):
	"""
	Implement neural unit with the activation of Relu or sigmoid
	"""

	if activation == "Relu":

		Z = linear_forward(W,A_pre,b)
		A,activation_cache = relu(Z)

	elif activation == "sigmoid":

		Z = linear_forward(W,A_pre,b)
		A,activation_cache = sigmoid(Z)

		backward_used_cache = (A_pre,W,b)
		cache = (backward_used_cache,activation_cache)
	return A,cache
Example No. 14
def linear_activation_forward(A_prev, W, b, activationFn):
    """
    Returns:
    A -- the output of the activation function, also called the post-activation value
    cache -- a python tuple containing "A_prev", "W", "b" and the corresponding "Z"; stored for computing the backward pass efficiently
    """
    Z = np.dot(W, A_prev) + b
    assert (Z.shape == (W.shape[0], A_prev.shape[1]))
    cache = (A_prev, W, b, Z)

    if activationFn == "relu":
        A, activation_cache = utils.relu(Z)
    else:
        A, activation_cache = utils.sigmoid(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))

    return A, cache
Example No. 15
def forward_prop(X, parameters):

    caches = {}
    n_L = len(parameters) // 2
    caches['A0'] = X
    for i in range(1, n_L):
        caches['Z' + str(i)] = np.dot(
            parameters['W' + str(i)],
            caches['A' + str(i - 1)]) + parameters['b' + str(i)]
        caches['A' + str(i)] = relU(caches['Z' + str(i)])
    # print('WL:',parameters['W'+str(n_L)])
    # print('bL:',parameters['b'+str(n_L)])
    # print('AL-1:',caches['A'+str(n_L-1)])
    caches['Z' + str(n_L)] = np.dot(
        parameters['W' + str(n_L)],
        caches['A' + str(n_L - 1)]) + parameters['b' + str(n_L)]
    # print('ZL:',caches['Z'+str(n_L)])
    caches['A' + str(n_L)] = sigmoid(caches['Z' + str(n_L)])
    # print('AL:',caches['A'+str(n_L)])
    return caches
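Example No. 15 expects a `parameters` dictionary keyed "W1", "b1", ..., and its `relU`/`sigmoid` helpers return plain arrays rather than `(value, cache)` tuples. A hypothetical two-layer run under those assumptions might look like:

import numpy as np

def relU(z):
    return np.maximum(0, z)

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

np.random.seed(2)
parameters = {
    "W1": np.random.randn(3, 4) * 0.01, "b1": np.zeros((3, 1)),
    "W2": np.random.randn(1, 3) * 0.01, "b2": np.zeros((1, 1)),
}
X = np.random.randn(4, 5)            # 4 features, 5 examples
caches = forward_prop(X, parameters)
print(caches["A2"].shape)            # (1, 5) -- final sigmoid output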
Example No. 16
 def __linear_activation_forward(self, A_prev, W, b, activation):
     """
     A_prev -- activations from the previous layer, or input data (size of previous layer, number of examples)
     W -- weight matrix (size of current layer, size of previous layer)
     b -- bias vector (size of current layer, 1)
     activation -- the activation to use: "sigmoid", "relu" or "tanh"

     returns:
         A -- the output of the activation function
         cache -- a tuple containing the linear cache and the activation cache, used in back propagation
     """
     Z, linear_cache = self.__linear_forward(A_prev, W, b)
     if activation == "sigmoid":
         A, activation_cache = sigmoid(Z)
     elif activation == "relu":
         A, activation_cache = relu(Z)
     elif activation == "tanh":
         A, activation_cache = tanh_activate(Z)
     assert (A.shape == (W.shape[0], A_prev.shape[1]))
     cache = (linear_cache, activation_cache)
     return A, cache
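Example No. 16 also accepts "tanh" and delegates to a `tanh_activate` helper; a minimal sketch of that assumed helper, following the same `(A, cache)` convention, could be:

import numpy as np

def tanh_activate(Z):
    # element-wise hyperbolic tangent; the cache is simply Z
    A = np.tanh(Z)
    return A, Z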
Example No. 17
def forward_propagation(X,parameters):
    # print(type(parameters))
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    z1 = np.dot(W1, X) + b1
    a1 = relu(z1)
    z2 = np.dot(W2, a1) + b2
    a2 = relu(z2)
    z3 = np.dot(W3, a2) + b3
    a3 = sigmoid(z3)

    # save the forward-propagation values for the backward pass
    cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3)

    return a3, cache
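Example No. 17 hard-codes a three-layer LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID network and, like Example No. 15, assumes `relu`/`sigmoid` return plain arrays. A hypothetical run with layer sizes 4 -> 5 -> 3 -> 1 and 8 examples could be:

import numpy as np

def relu(z):
    return np.maximum(0, z)

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

np.random.seed(3)
parameters = {
    "W1": np.random.randn(5, 4) * 0.01, "b1": np.zeros((5, 1)),
    "W2": np.random.randn(3, 5) * 0.01, "b2": np.zeros((3, 1)),
    "W3": np.random.randn(1, 3) * 0.01, "b3": np.zeros((1, 1)),
}
X = np.random.randn(4, 8)                    # 4 features, 8 examples
a3, cache = forward_propagation(X, parameters)
print(a3.shape)                              # (1, 8)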
Example No. 18
def linear_activation_forward(A_prev, W, b, activation):
    '''
    Implement the forward propagation for the LINEAR->ACTIVATION layer.
    :param A_prev: activations from the previous layer (or input data), of shape (size of previous layer, number of examples)
    :param W: weights matrix, numpy array of shape (size of current layer, size of previous layer)
    :param b: bias vector, numpy array of shape (size of current layer, 1)
    :param activation: the activation to be used in this layer, as a string: 'sigmoid' | 'relu'
    :return:
    A - the output of the activation function, also called the post-activation value
    cache - a tuple containing 'linear_cache' and 'activation_cache', stored for computing the backward pass efficiently
    '''
    if activation == 'sigmoid':
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation == 'relu':
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)
    return A, cache
Example No. 19
def linear_activation_forward(A, W, b, activation):
    """
    :param A: activation from previous layers
    :param W: weight values
    :param b: bias values
    :param activation: activation function
    :return:
    A: output of activation function
    cache: tuple containing linear and activation cache
    """
    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A, W, b)
        A, activation_cache = sigmoid(Z)

    elif activation == "relu":
        Z, linear_cache = linear_forward(A, W, b)
        A, activation_cache = relu(Z)

    cache = (linear_cache, activation_cache)

    return A, cache
Example No. 20
def linear_activation_forward(A_prev, W, b, activation):
    """
    实现【LINEAR -> ACTIVATION】线性激活部分。

    :param A_prev: 上一层(或输入层)激活后的值,维度(l-1层单元数, 样本数)
    :param W: 权重矩阵,numpy数组,维度为(l层单元数, l-1层单元数)
    :param b: 偏置向量,numpy向量,维度为(l层单元数, 1)
    :param activation: 本层使用的激活函数,字符串类型,{"sigmoid" | "relu"}

    :returns:
        A: 激活函数的输出,也称激活后的值
        cache: 包含“linear_cache”和“activation_cache”的字典,我们需要存储它以有效地计算后向传递
    """

    # sigmoid activation
    if activation == "sigmoid":

        # this layer's Z, linear_cache = (previous layer's A, this layer's W, this layer's b)
        Z, linear_cache = linear_forward(A_prev, W, b)
        # this layer's A (sigmoid), activation_cache = (this layer's Z)
        A, activation_cache = sigmoid(Z)

    # relu activation
    elif activation == "relu":

        # this layer's Z, linear_cache = (previous layer's A, this layer's W, this layer's b)
        Z, linear_cache = linear_forward(A_prev, W, b)
        # this layer's A (ReLU), activation_cache = (this layer's Z)
        A, activation_cache = relu(Z)

    # sanity-check the output shape
    assert (A.shape == (W.shape[0], A_prev.shape[1]))

    # cache = (previous layer's A, this layer's W, this layer's b, this layer's Z), made of two parts:
    # linear_cache = (previous layer's A, this layer's W, this layer's b)
    # activation_cache = (this layer's Z)
    cache = (linear_cache, activation_cache)

    return A, cache
Example No. 21
def linear_activation_forward(A_prev, W, b, activation):
    """
    实现LINEAR->ACTIVATION这一层的前向传播
    参数:
    A_prev-来自上一层的(或者输入层)的激活,维度(上一层节点数量,示例数)
    W - 权重矩阵,numpy数组,维度(当前层的节点数量,前一层的大小)
    b - 偏向量,维度(当前层的节点数量,1)
    activation -选择在该层使用的激活函数,字符串类型[sigmoid|relu]
    返回:
     A -激活函数的输出,也称为激活后的值
     cache - 一个包含linear_cache和activation_cache的字典,存储塔用来有效的计算后向传递
    """
    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
Example No. 22
def linear_activation_forward(A_prev, W, b, activation):
    """
         在上一个方法中实现了Z=W*X+b,在此方法中加上激活函数sigmoid or Relu
    :param A_prev: 上一层的激活,维度为(上一层的节点数量,示例数)
    :param W:
    :param b:
    :param activation:选择在此层中使用的激活函数名,字符串类型,【"sigmoid" | "relu"】
    :return:
        A - 激活函数的输出,也称为激活后的值
        cache - 一个包含“linear_cache”和“activation_cache”的字典,我们需要存储它以有效地计算后向传递
        linear_cache- 存着A,W,b,  也就是上一层的激活值A_prev以及W、b
        activation_cache 存着Z  Z=W*A_prev+b  activation_cache是linear_cache线性运算之后的值

    """
    if activation == "sigmoid":
        Z, linear_cache = line_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        Z, linear_cache = line_forward(A_prev, W, b)
        A, activation_cache = relu(Z)
    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)
    return A, cache
Example No. 23
def linear_activation_forward(A_prev, W, b, activation):
    '''
    Implement the forward propagation for the LINEAR->ACTIVATION layer.
    :param A_prev: activations from the previous layer (or input data), of shape (size of previous layer, number of examples)
    :param W: weights matrix, numpy array of shape (size of current layer, size of previous layer)
    :param b: bias vector, numpy array of shape (size of current layer, 1)
    :param activation: the activation to be used in this layer, as a string: [sigmoid, relu]
    :return:
        A: the output of the activation function, also called the post-activation value
        cache: a tuple containing 'linear_cache' and 'activation_cache', stored for computing the backward pass efficiently
    '''
    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W,
                                         b)  # linear_cache = (A, W, b)
        A, activation_cache = sigmoid(Z)  # activation_cache = Z
    elif activation == "relu":
        Z, linear_cache = linear_forward(A_prev, W,
                                         b)  # linear_cache = (A, W, b)
        A, activation_cache = relu(Z)  # activation_cache = Z

    assert (A.shape == (W.shape[0], A.shape[1]))
    cache = (linear_cache, activation_cache)  # (A,W,b,Z),其实是个一列表

    return A, cache
Example No. 24
def linear_activation_forward(A_prev, W, b, activation):
    """
    实现LINEAR->AVTIVATION这一层前向传播
    
    参数:
        A_prev: 来自上一层(或输入层)的激活,维度为(上一层的节点数量,示例数)
        W:权重矩阵,numpy数组,维度为(当前层的节点数量,前一层大小)
        b:偏向量,numpy阵列,维度为(当前层的节点数量,1)
        activation:此层集合函数类型,字符串类型【"sigmoid"|"relu"】
    返回:
        A:激活函数的输出,也称为激活后的值
        cache:一个包含“linear_cache”和“activation_cache”的字典,我们需要存储它以有效地计算后向传递
    """
    if (activation == "sigmoid"):
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif (activation == "relu"):
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
Example No. 25
def linear_activation_forward(A_prev, W, b, activation):
  if activation == "sigmoid":
    Z = np.dot(W, A_prev) + b
    linear_cache = (A_prev, W, b)
    A, activation_cache = sigmoid(Z)