Ejemplo n.º 1
0
def forward_propaganda(X, parameters):
    """Run one forward pass of a 2-layer net (tanh hidden, sigmoid output).

    X -- input matrix of shape (n_x, m)
    parameters -- sequence unpacking to (w1, b1, w2, b2)

    Returns the tuple (Z1, A1, Z2, A2) of pre-activations and activations.
    """
    w1, b1, w2, b2 = parameters
    hidden_pre = w1 @ X + b1
    hidden_act = np.tanh(hidden_pre)
    output_pre = w2 @ hidden_act + b2
    output_act = sigmoid(output_pre)
    return hidden_pre, hidden_act, output_pre, output_act
Ejemplo n.º 2
0
def forward_propagation(X, parameters):
    """Two-layer forward pass: tanh hidden layer, sigmoid output.

    Argument:
    X -- input matrix of shape (n_x, m)
    parameters -- dict from initialize_parameters() with keys
                  "W1", "b1", "W2", "b2"

    Returns:
    A2 -- sigmoid activations of the output layer, shape (1, m)
    cache -- dict with keys "z1", "A1", "z2", "A2"
    """
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]

    # Hidden layer (tanh) followed by output layer (sigmoid).
    hidden_pre = np.dot(W1, X) + b1
    hidden_act = np.tanh(hidden_pre)
    output_pre = np.dot(W2, hidden_act) + b2
    output_act = sigmoid(output_pre)

    # One prediction per column of X.
    assert output_act.shape == (1, X.shape[1])

    # NOTE: cache keys mix lowercase z's with uppercase A's by design of the caller.
    cache = {"z1": hidden_pre, "A1": hidden_act, "z2": output_pre, "A2": output_act}

    return output_act, cache
Ejemplo n.º 3
0
def forward_propagation(X, parameters):
    """Perform forward propagation through a 2-layer network.

    Parameters
    ----------
    X : numpy array
        Input data of shape (n_x, m).
    parameters : dict
        Weights and biases under keys "W1", "b1", "W2", "b2".

    Returns
    -------
    A2 : sigmoid output of the second activation.
    cache : dict containing Z1, A1, Z2, A2.
    """
    # Hidden layer: affine transform followed by tanh.
    Z1 = np.dot(parameters["W1"], X) + parameters["b1"]
    A1 = np.tanh(Z1)

    # Output layer: affine transform followed by sigmoid.
    Z2 = np.dot(parameters["W2"], A1) + parameters["b2"]
    A2 = sigmoid(Z2)

    # Keep the intermediates around for backpropagation.
    return A2, dict(Z1=Z1, A1=A1, Z2=Z2, A2=A2)
Ejemplo n.º 4
0
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network (tanh hidden, sigmoid output).

    Argument:
    X -- input data of size (n_x, m)
    parameters -- python dict with keys 'W1', 'b1', 'W2', 'b2'

    Returns:
    A2 -- the sigmoid output of the second activation, shape (1, m)
    cache -- dict containing 'Z1', 'A1', 'Z2' and 'A2'
    """
    W1, b1, W2, b2 = (parameters[k] for k in ("W1", "b1", "W2", "b2"))

    Z1 = W1 @ X + b1
    A1 = np.tanh(Z1)
    Z2 = W2 @ A1 + b2
    A2 = sigmoid(Z2)

    # One probability per example.
    assert A2.shape == (1, X.shape[1])

    cache = dict(zip(("Z1", "A1", "Z2", "A2"), (Z1, A1, Z2, A2)))
    return A2, cache
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network.

    Argument:
    X -- input data of size (n_x, m)
    parameters -- dict produced by the initialization function

    Returns:
    A2 -- sigmoid output of the second activation, shape (1, m)
    cache -- dict containing "Z1", "A1", "Z2" and "A2"
    """
    # Pull the weights and biases out of the parameter dict.
    W1, b1 = parameters['W1'], parameters['b1']
    W2, b2 = parameters['W2'], parameters['b2']

    # Layer 1 (tanh) then layer 2 (sigmoid probabilities).
    pre1 = np.dot(W1, X) + b1
    act1 = np.tanh(pre1)
    pre2 = np.dot(W2, act1) + b2
    act2 = sigmoid(pre2)

    assert act2.shape == (1, X.shape[1])

    return act2, {"Z1": pre1, "A1": act1, "Z2": pre2, "A2": act2}
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network (tanh hidden, sigmoid output).

    Argument:
    X -- input data of size (n_x, m)
    parameters -- dict with keys "W1", "b1", "W2", "b2"

    Returns:
    A2 -- sigmoid output of the second activation
    cache -- dict containing "Z1", "A1", "Z2" and "A2"
    """
    W1, b1, W2, b2 = [parameters[name] for name in ("W1", "b1", "W2", "b2")]

    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    # Shape assertion intentionally omitted: the original disabled it as
    # hard-coded for a specific dataset.
    return A2, {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network.

    Argument:
    X -- input data of size (n_x, m), where m is the training-set size
    parameters -- dict from the initialization function

    Returns:
    A2 -- sigmoid output of the second activation, shape (1, m)
    cache -- dict of "Z1", "A1", "Z2", "A2"; these intermediates are
             reused by backpropagation via the chain rule.
    """
    W1, b1 = parameters['W1'], parameters['b1']
    W2, b2 = parameters['W2'], parameters['b2']

    # tanh non-linearity in the hidden layer, sigmoid at the output.
    Z1 = W1 @ X + b1
    A1 = np.tanh(Z1)
    Z2 = W2 @ A1 + b2
    A2 = sigmoid(Z2)

    assert A2.shape == (1, X.shape[1])

    return A2, {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network (tanh hidden, sigmoid output).

    Argument:
    X -- input data of size (n_x, m)
    parameters -- dict with keys "W1", "b1", "W2", "b2"

    Returns:
    A2 -- sigmoid output of the second activation, shape (1, m)
    cache -- dict containing "Z1", "A1", "Z2" and "A2"
    """
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    # Shapes: Z1 (n_h, m) = (n_h, n_x)·(n_x, m) + (n_h, 1) broadcast;
    #         Z2 (n_y, m) = (n_y, n_h)·(n_h, m) + (n_y, 1) broadcast.
    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    assert A2.shape == (1, X.shape[1])

    return A2, {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network.

    Arguments:
    X -- input data of shape (n_x, m)
    parameters -- output of the initialization function (initialize_parameters)

    Returns:
    A2 -- value after the second activation, computed with sigmoid()
    cache -- dict holding "Z1", "A1", "Z2" and "A2"
    """
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]

    # Forward computation of A2.
    hidden_pre = np.dot(W1, X) + b1
    hidden_act = np.tanh(hidden_pre)
    output_pre = np.dot(W2, hidden_act) + b2
    output_act = sigmoid(output_pre)

    # Assert the output has one row and one column per example.
    assert output_act.shape == (1, X.shape[1])

    cache = {"Z1": hidden_pre,
             "A1": hidden_act,
             "Z2": output_pre,
             "A2": output_act}

    return (output_act, cache)
Ejemplo n.º 10
0
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network (tanh hidden, sigmoid output).

    Argument:
    X -- input data of size (n_x, m)
    parameters -- dict with keys 'W1', 'b1', 'W2', 'b2'

    Returns:
    A2 -- sigmoid output of the second activation, shape (1, m)
    cache -- dict containing "Z1", "A1", "Z2" and "A2"
    """
    # Compute probabilities A2, reading the parameters inline.
    Z1 = parameters['W1'] @ X + parameters['b1']
    A1 = np.tanh(Z1)
    Z2 = parameters['W2'] @ A1 + parameters['b2']
    A2 = sigmoid(Z2)

    assert A2.shape == (1, X.shape[1])

    return A2, dict(Z1=Z1, A1=A1, Z2=Z2, A2=A2)
Ejemplo n.º 11
0
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network (tanh hidden, sigmoid output).

    Argument:
    X -- input data of size (n_x, m)
    parameters -- dict from the initialization function

    Returns:
    A2 -- sigmoid output of the second activation, shape (1, m)
    cache -- dict containing "Z1", "A1", "Z2" and "A2"
    """
    names = ("W1", "b1", "W2", "b2")
    W1, b1, W2, b2 = (parameters[n] for n in names)

    # Compute A2 (probabilities): tanh hidden layer, sigmoid output.
    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    assert A2.shape == (1, X.shape[1])

    return A2, {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
Ejemplo n.º 12
0
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network (tanh hidden, sigmoid output).

    :param X: input data of shape (n_x, m)
    :param parameters: dict from the initialization function
                       (initialize_parameters) with keys 'W1', 'b1', 'W2', 'b2'
    :return:
    A2 -- value after the second activation, computed with sigmoid()
    cache -- dict holding 'Z1', 'A1', 'Z2' and 'A2'
    """
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    # Forward computation of A2
    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)
    # Sanity-check the output shape
    assert(A2.shape == (1, X.shape[1]))
    cache = {
        'Z1': Z1,
        'A1': A1,
        'Z2': Z2,  # BUG FIX: key was duplicated as 'Z1', so Z2 silently
                   # overwrote Z1 and the 'Z2' entry was missing entirely
        'A2': A2
    }
    return (A2, cache)
Ejemplo n.º 13
0
def forward_propagation1(X, parameters):
    """Forward pass using sigmoid activations in BOTH layers.

    NOTE(review): the hidden layer uses sigmoid rather than the tanh used
    by the other variants in this file — presumably intentional; confirm.

    Returns (A2, cache) where cache holds "Z1", "A1", "Z2", "A2".
    """
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]

    Z1 = np.dot(W1, X) + b1
    A1 = sigmoid(Z1)          # sigmoid hidden activation (not tanh)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    assert A2.shape == (1, X.shape[1])

    return A2, {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
Ejemplo n.º 14
0
    def predict(self, x):
        """Forward-propagate x through the trained 2-layer net; return A2.

        x -- input of shape (2, m); the leading dimension is checked below.
        """
        assert self.is_trained
        print(x.shape)
        assert x.shape == (2, x.shape[1])

        hidden = np.tanh(np.dot(self.W1, x) + self.b1)
        return sigmoid(np.dot(self.W2, hidden) + self.b2)
Ejemplo n.º 15
0
 def evolution(self, X):
     """Classify inputs X: forward pass, then round the sigmoid outputs.

     :param X: input test data, shape (n_x, m)
     :return: rounded 0/1 predictions, one per column of X
     """
     hidden = np.tanh(np.dot(self.W1, X) + self.B1)
     output = sigmoid(np.dot(self.W2, hidden) + self.B2)
     return np.round(output)
def forward_propagation(X, parameters):
    """Minimal forward pass: tanh hidden layer, sigmoid output.

    Returns (A2, cache) with cache holding "Z1", "A1", "Z2", "A2".
    """
    Z1 = np.dot(parameters['W1'], X) + parameters['b1']
    A1 = np.tanh(Z1)
    Z2 = np.dot(parameters['W2'], A1) + parameters['b2']
    A2 = sigmoid(Z2)
    return A2, {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
Ejemplo n.º 17
0
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network.

    NOTE: unlike sibling variants, this one returns (cache, A2) —
    cache FIRST — and callers rely on that order.
    """
    W1 = parameters['W1']
    W2 = parameters['W2']
    b1 = parameters['b1']
    b2 = parameters['b2']
    Z1 = W1 @ X + b1   # (n_h, m)
    A1 = np.tanh(Z1)
    Z2 = W2 @ A1 + b2  # (n_y, m)
    A2 = sigmoid(Z2)
    return {'Z1': Z1, 'A1': A1, 'Z2': Z2, 'A2': A2}, A2
Ejemplo n.º 18
0
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer net; returns (A2, cache of intermediates)."""
    W1, b1 = parameters['W1'], parameters['b1']
    W2, b2 = parameters['W2'], parameters['b2']

    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    assert A2.shape == (1, X.shape[1])

    return A2, {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
Ejemplo n.º 19
0
def forward_propagation(X, b1, W1, b2, W2):
    """Forward pass of a 2-layer net (tanh hidden, sigmoid output).

    Arguments:
    X -- input data of shape (n_x, m)
    b1, W1 -- hidden-layer bias and weights
    b2, W2 -- output-layer bias and weights

    Returns:
    the tuple (Z1, A1, Z2, A2) of both layers' pre-activations and
    activations — NOT (A2, cache) as in similar functions elsewhere.
    """
    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    # A1 = ReLu(Z1)   # disabled alternative: ReLu as the hidden activation
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    return Z1, A1, Z2, A2
Ejemplo n.º 20
0
    def forward_propagation(self):
        """Forward pass over self.X; caches activations and the current cost.

        Side effects: sets self.A1, self.A2 and self.current_cost, and
        calls self.validate(). Returns self to allow chaining.
        """
        hidden_pre = np.dot(self.W1, self.X) + self.b1
        self.A1 = np.tanh(hidden_pre)

        output_pre = np.dot(self.W2, self.A1) + self.b2
        self.A2 = sigmoid(output_pre)
        self.validate()

        # Binary cross-entropy averaged over the sample count.
        cost = -1.0 / self.sample_amount * np.sum(
            np.log(self.A2) * self.Y + (1 - self.Y) * np.log(1 - self.A2))
        self.current_cost = np.squeeze(cost)

        return self
Ejemplo n.º 21
0
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network; returns (A2, cache)."""
    # Read parameters inline to keep the data flow compact.
    Z1 = np.dot(parameters["W1"], X) + parameters["b1"]
    A1 = np.tanh(Z1)
    Z2 = np.dot(parameters["W2"], A1) + parameters["b2"]
    A2 = sigmoid(Z2)

    assert A2.shape == (1, X.shape[1])

    return A2, {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
Ejemplo n.º 22
0
def forward_propagation(X, params):
    """Forward pass; NOTE params uses capital-B bias keys ('B1', 'B2')."""
    W1, B1, W2, B2 = (params[k] for k in ('W1', 'B1', 'W2', 'B2'))

    Z1 = W1 @ X + B1
    A1 = np.tanh(Z1)
    Z2 = W2 @ A1 + B2
    A2 = sigmoid(Z2)

    return A2, {'Z1': Z1, 'A1': A1, 'Z2': Z2, 'A2': A2}
Ejemplo n.º 23
0
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer net (tanh hidden, sigmoid output).

    Arguments:
    X -- input data of shape (n_x, m)
    parameters -- dict with keys "W1", "b1", "W2", "b2"

    Returns:
    A2 -- sigmoid output of the second activation
    cache -- dict containing "Z1", "A1", "Z2", "A2"
    """
    # BUG FIX: removed np.random.seed(2). This function uses no randomness,
    # and reseeding NumPy's global RNG on every call silently clobbered any
    # random state the caller relied on.
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    cache = {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
    return A2, cache
Ejemplo n.º 24
0
def forward_propagation(X, parameters):
    """Forward pass that multiplies by the TRANSPOSED weight matrices.

    NOTE(review): W1.T / W2.T implies the weights are stored as
    (n_in, n_out) rather than the usual (n_out, n_in) — confirm against
    the matching initialization code.
    """
    W1, b1 = parameters['W1'], parameters['b1']
    W2, b2 = parameters['W2'], parameters['b2']

    hidden_pre = np.dot(W1.T, X) + b1
    hidden_act = np.tanh(hidden_pre)

    output_pre = np.dot(W2.T, hidden_act) + b2
    output_act = sigmoid(output_pre)

    cache = {'Z1': hidden_pre, 'A1': hidden_act, 'Z2': output_pre, 'A2': output_act}

    return output_act, cache
Ejemplo n.º 25
0
def forward_propagation(X, parameters):
    """Forward pass; NOTE parameters uses lowercase weight keys 'w1'/'w2'."""
    w1, b1 = parameters['w1'], parameters['b1']
    w2, b2 = parameters['w2'], parameters['b2']

    # Forward propagation: tanh hidden layer, sigmoid output.
    z_hid = np.dot(w1, X) + b1
    a_hid = np.tanh(z_hid)
    z_out = np.dot(w2, a_hid) + b2
    a_out = sigmoid(z_out)

    # Sanity-check the output shape.
    assert a_out.shape == (1, X.shape[1])

    # Cache keys are uppercase even though the parameter keys are not.
    return (a_out, {'Z1': z_hid, 'A1': a_hid, 'Z2': z_out, 'A2': a_out})
Ejemplo n.º 26
0
def forward_propagation(X, parameters):
    """Forward pass (no shape assertion in this variant); returns (A2, cache)."""
    # Retrieve the parameters.
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]

    # Forward-propagation computation.
    hidden_pre = np.dot(W1, X) + b1
    hidden_act = np.tanh(hidden_pre)
    output_pre = np.dot(W2, hidden_act) + b2
    output_act = sigmoid(output_pre)

    return output_act, {"Z1": hidden_pre, "A1": hidden_act,
                        "Z2": output_pre, "A2": output_act}
Ejemplo n.º 27
0
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer net; returns the tuple (A2, cache)."""
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    # Input layer -> hidden layer (tanh)
    Z1 = W1 @ X + b1
    A1 = np.tanh(Z1)
    # Hidden layer -> output layer (sigmoid)
    Z2 = W2 @ A1 + b2
    A2 = sigmoid(Z2)
    # Guard against an unexpected output shape
    assert A2.shape == (1, X.shape[1])

    return (A2, {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2})
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network (tanh hidden, sigmoid output).

    Returns (A2, cache) where cache holds 'Z1', 'A1', 'Z2', 'A2'.
    """
    W1, b1, W2, b2 = (parameters[k] for k in ('W1', 'b1', 'W2', 'b2'))

    # Forward propagation.
    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)
    assert A2.shape == (1, X.shape[1])
    return A2, {'Z1': Z1, 'A1': A1, 'Z2': Z2, 'A2': A2}
Ejemplo n.º 29
0
def forward_propagation(x, parameters):
    """Forward pass; NOTE the cache uses lowercase 'z1'/'z2' keys."""
    W1, b1 = parameters['W1'], parameters['b1']
    W2, b2 = parameters['W2'], parameters['b2']

    pre1 = np.dot(W1, x) + b1
    act1 = np.tanh(pre1)

    pre2 = np.dot(W2, act1) + b2
    act2 = sigmoid(pre2)

    assert act2.shape == (1, x.shape[1])

    return act2, {"z1": pre1, "A1": act1, "z2": pre2, "A2": act2}
Ejemplo n.º 30
0
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer net (tanh hidden, sigmoid output).

    Also appends each A2 row to the module-level list ``liA2`` —
    presumably for logging/plotting across iterations; confirm where
    liA2 is defined and cleared.

    Arguments:
    X -- input data of shape (n_x, m)
    parameters -- dict with keys "W1", "b1", "W2", "b2"

    Returns:
    (A2, cache) -- sigmoid output and a dict of the intermediates.
    """
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    #print("W2",W2)
    #print("A1",A1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)
    # Side effect: record this pass's outputs in the global liA2 list.
    liA2.append(A2.tolist()[0])

    assert (A2.shape == (1, X.shape[1]))
    cache = {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}

    return (A2, cache)
Ejemplo n.º 31
0
def forward_propagation(X, parameters):
    """Forward pass of a 2-layer network (tanh hidden, sigmoid output).

    Returns:
    A2 -- output-layer sigmoid activations, shape (1, m)
    cache -- dict of the intermediates "Z1", "A1", "Z2", "A2"
    """
    W1, b1, W2, b2 = (parameters[k] for k in ('W1', 'b1', 'W2', 'b2'))

    Z1 = W1 @ X + b1
    A1 = np.tanh(Z1)
    Z2 = W2 @ A1 + b2
    A2 = sigmoid(Z2)

    # One probability per input column.
    assert A2.shape == (1, X.shape[1])

    return A2, {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
Ejemplo n.º 32
0
def forward_prop(X, params):
    """Forward pass of a 2-layer net; returns (A2, cache of intermediates)."""
    # Read parameters inline: tanh hidden layer, sigmoid output.
    Z1 = params['W1'] @ X + params['b1']
    A1 = np.tanh(Z1)
    Z2 = params['W2'] @ A1 + params['b2']
    A2 = sigmoid(Z2)

    assert A2.shape == (1, X.shape[1])

    return A2, {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}