def test_Sigmoid():
    X = np.random.random((3, 2))
    # Compare against the closed-form sigmoid and its derivative directly;
    # a bare assert is preferred over the fragile `is True` identity check.
    assert np.array_equal(1 / (1 + np.exp(-X)), Sigmoid.activation(X))
    assert np.array_equal(
        (1 / (1 + np.exp(-X))) * (1 - (1 / (1 + np.exp(-X)))),
        Sigmoid.derivative(X))
    def loss(X, Y, W):
        """
        Calculate error by cosine similarity method

        PARAMETERS
        ==========

        X:ndarray(dtype=float,ndim=1)
          input vector
        Y:ndarray(dtype=float)
          output vector
        W:ndarray(dtype=float)
          Weights

         RETURNS
         =======

         Percentage of error in the actural value and predicted value
         """
        sigmoid = Sigmoid()
        H = sigmoid.activation(np.dot(X, W).T)
        DP = np.sum(np.dot(H, Y))
        S = DP/((np.sum(np.square(H))**(0.5))*(np.sum(np.square(Y))**(0.5)))
        dissimilarity = 1-S
        return dissimilarity*(np.sum(np.square(Y))**(0.5))
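As a sanity check, the cosine-dissimilarity computation above can be reproduced with plain NumPy. A minimal standalone sketch, with illustrative data and the Sigmoid class replaced by its inline formula:

import numpy as np

X = np.array([[0.2, 0.4], [0.6, 0.8]])   # 2 samples, 2 features (illustrative)
W = np.array([[0.1], [0.3]])             # 2x1 weight column
Y = np.array([0.0, 1.0])                 # target outputs

H = 1 / (1 + np.exp(-np.dot(X, W).T))    # sigmoid activations, shape (1, 2)
DP = np.sum(np.dot(H, Y))                # dot product of H and Y
S = DP / (np.linalg.norm(H) * np.linalg.norm(Y))   # cosine similarity
print((1 - S) * np.linalg.norm(Y))       # dissimilarity scaled by |Y|, as above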
Example #3
    def predict(self, X):
        """
        Predict the Probabilistic Value of
        the Input, in accordance with the
        trained Logistic Regression Model.

        PARAMETERS
        ==========

        X: ndarray(dtype=float,ndim=1)
            1-D Array of Dataset's Input.

        prediction: ndarray(dtype=float,ndim=1)
            1-D Array of Predicted Values
            corresponding to each Input of
            Dataset.

        RETURNS
        =======

        ndarray(dtype=float,ndim=1)
            1-D Array of Probabilistic Values
            of whether the particular Input
            belongs to class 0 or class 1.
        """
        prediction = np.dot(X, self.weights).T
        sigmoid = Sigmoid()
        return sigmoid.activation(prediction)
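A standalone sketch of what predict() computes, with hypothetical weights and inputs (the Sigmoid activation is inlined so the snippet runs on its own):

import numpy as np

weights = np.array([[0.5], [-0.25]])     # hypothetical trained weights
X = np.array([[1.0, 2.0], [3.0, 0.5]])   # two inputs with two features each
prediction = np.dot(X, weights).T        # same linear step as predict()
print(1 / (1 + np.exp(-prediction)))     # probabilities in (0, 1)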
def test_Sigmoid():
    X = np.random.random((3, 2))
    # Same checks as above, written in an assert-free style; `not ...`
    # replaces the fragile `is not True` identity comparison.
    if not np.array_equal(1 / (1 + np.exp(-X)), Sigmoid.activation(X)):
        raise AssertionError
    if not np.array_equal(
            (1 / (1 + np.exp(-X))) * (1 - (1 / (1 + np.exp(-X)))),
            Sigmoid.derivative(X)):
        raise AssertionError
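Exact equality holds in these tests only because both sides evaluate the identical expression; when comparing against an independently coded implementation, a tolerance-based check is safer. A small sketch of that variant:

import numpy as np

X = np.random.random((3, 2))
direct = 1 / (1 + np.exp(-X))
via_tanh = 0.5 * (1 + np.tanh(X / 2))      # algebraically identical sigmoid
# The two differ by floating-point rounding, so np.array_equal may fail,
# while a tolerance-based comparison passes.
assert np.allclose(direct, via_tanh)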
Example #5
    def classify(self, X):
        """
        Classify the Input according to the
        Logistic Regression Model, i.e. in this
        case, as either class 0 or class 1.

        PARAMETERS
        ==========

        X: ndarray(dtype=float,ndim=1)
            1-D Array of Dataset's Input.

        prediction: ndarray(dtype=float,ndim=1)
            1-D Array of Predicted Values
            corresponding to their Inputs.

        actual_predictions: ndarray(dtype=int,ndim=1)
            1-D Array of Output, associated
            to each Input of Dataset,
            Predicted by Trained Logistic
            Regression Model.

        RETURNS
        =======

        ndarray
            1-D Array of Predicted classes
            (either 0 or 1) corresponding
            to their inputs.

        """
        prediction = np.dot(X, self.weights).T
        sigmoid = Sigmoid()
        prediction = sigmoid.activation(prediction)
        # Threshold the probabilities at 0.5 to obtain hard class labels.
        actual_predictions = np.zeros((1, X.shape[0]))
        for i in range(prediction.shape[1]):
            if prediction[0][i] > 0.5:
                actual_predictions[0][i] = 1

        return actual_predictions
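The thresholding loop above can also be written as a single vectorized comparison. A sketch, assuming prediction has the (1, n_samples) shape produced above:

import numpy as np

prediction = np.array([[0.2, 0.7, 0.5, 0.9]])        # illustrative probabilities
actual_predictions = (prediction > 0.5).astype(int)  # [[0 1 0 1]]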
    def derivative(X, Y, W):
        """
        Calculate derivative for logarithmic error method.

        PARAMETERS
        ==========

        X:ndarray(dtype=float,ndim=1)
          input vector
        Y:ndarray(dtype=float)
          output vector
        W:ndarray(dtype=float)
          Weights

        RETURNS
        =======

        Array of derivatives with respect to the weights.
        """
        M = X.shape[0]
        sigmoid = Sigmoid()
        H = sigmoid.activation(np.dot(X, W).T)
        # Gradient of the cross-entropy loss with respect to the weights.
        return (1/M)*(np.dot(X.T, (H-Y).T))
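The analytic gradient above can be validated against a finite-difference estimate. A self-contained sketch with illustrative data, inlining the sigmoid and the cross-entropy loss defined below:

import numpy as np

def _loss(X, Y, W):
    H = 1 / (1 + np.exp(-np.dot(X, W).T))
    return np.mean(-Y * np.log(H) - (1 - Y) * np.log(1 - H))

def _grad(X, Y, W):
    H = 1 / (1 + np.exp(-np.dot(X, W).T))
    return (1 / X.shape[0]) * np.dot(X.T, (H - Y).T)

X = np.array([[0.5, 1.0], [1.5, -0.5]])
Y = np.array([1.0, 0.0])
W = np.array([[0.1], [0.2]])
eps = 1e-6
W_eps = W.copy()
W_eps[0, 0] += eps                       # perturb one weight
numeric = (_loss(X, Y, W_eps) - _loss(X, Y, W)) / eps
print(numeric, _grad(X, Y, W)[0, 0])     # the two estimates should nearly agree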
    def loss(X, Y, W):
        """
        Calculate loss by logarithmic error method.

        PARAMETERS
        ==========

        X:ndarray(dtype=float,ndim=1)
          input vector
        Y:ndarray(dtype=float)
          output vector
        W:ndarray(dtype=float)
          Weights

        RETURNS
        =======

        Mean logarithmic (cross-entropy) loss.
        """
        M = X.shape[0]
        sigmoid = Sigmoid()
        H = sigmoid.activation(np.dot(X, W).T)
        return (1/M)*(np.sum((-Y)*np.log(H)-(1-Y)*np.log(1-H)))
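A quick numeric check of the cross-entropy formula, using hand-picked probabilities rather than the model pipeline:

import numpy as np

H = np.array([0.9, 0.2])     # predicted probabilities (illustrative)
Y = np.array([1.0, 0.0])     # true labels
M = Y.shape[0]
loss = (1 / M) * np.sum(-Y * np.log(H) - (1 - Y) * np.log(1 - H))
print(loss)                  # -(log 0.9 + log 0.8) / 2 ~= 0.1643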
    def loss(X, Y, W):
        """
        Calculate Root Mean Squared Log Loss.

        PARAMETERS
        ==========

        X:ndarray(dtype=float,ndim=1)
          input vector
        Y:ndarray(dtype=float)
          output vector
        W:ndarray(dtype=float)
          Weights

        RETURNS
        =======

        Root of the mean of squared logarithmic losses.
        """
        M = X.shape[0]
        sigmoid = Sigmoid()
        H = sigmoid.activation(np.dot(X, W).T)
        # Square the log differences before averaging, then take the root.
        return np.sqrt((1 / M) * np.sum(np.square(np.log(Y + 1) - np.log(H + 1))))
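A standalone sketch of the same root mean squared log error on raw arrays; np.log1p is the numerically stable form of log(1 + x), and the clip guard against non-positive predictions is an added assumption:

import numpy as np

H = np.array([0.8, 0.3])     # predictions (illustrative)
Y = np.array([1.0, 0.0])     # targets
diff = np.log1p(Y) - np.log1p(np.clip(H, 0.0, None))
print(np.sqrt(np.mean(np.square(diff))))   # RMSLE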
Example #9
    def Plot(self,
             X,
             Y,
             actual_predictions,
             optimizer=GradientDescent,
             epochs=25,
             zeros=False
             ):
        """
        Plots for Logistic Regression.

        PARAMETERS
        ==========

        X: ndarray(dtype=float,ndim=1)
            1-D Array of Dataset's Input.

        Y: ndarray(dtype=float,ndim=1)
            1-D Array of Dataset's Output.

        actual_predictions: ndarray(dtype=int,ndim=1)
            1-D Array of Output, associated
            to each Input of Dataset,
            Predicted by Trained Logistic
            Regression Model.

        optimizer: class
           Class of one of the Optimizers like
           AdamProp, SGD, MBGD, GradientDescent, etc.

        epochs: int
           Number of times the loop that calculates
           the loss and optimizes the weights will run.

        error: float
           The degree to which the predicted value
           deviates from the actual values, given by
           one of the chosen loss functions from loss_func.py .

        zeros: boolean
            Condition to initialize Weights as either
            zeroes or some random decimal values.

        RETURNS
        =======

        2-D graph of the Sigmoid curve,
        Comparison plot of true output and predicted output versus feature,
        2-D graph of loss versus number of iterations.
        """
        Plot = plt.figure(figsize=(8, 8))
        plot1 = Plot.add_subplot(2, 2, 1)
        plot2 = Plot.add_subplot(2, 2, 2)
        plot3 = Plot.add_subplot(2, 2, 3)

        # 2-D graph of Sigmoid curve.
        x = np.linspace(- max(X[:, 0]) - 2, max(X[:, 0]) + 2, 1000)
        plot1.set_title('Sigmoid curve')
        plot1.grid()
        sigmoid = Sigmoid()
        plot1.scatter(X.T[0], Y, color="red", marker="+", label="labels")
        plot1.plot(x, 0*x+0.5, linestyle="--", label="Decision bound, y=0.5")
        plot1.plot(x, sigmoid.activation(x),
                   color="green", label='Sigmoid function: 1 / (1 + e^-x)'
                   )
        plot1.legend()

        # Comparison plot of actual output and predicted output vs feature.
        plot2.set_title('Actual output and Predicted output versus Feature')
        plot2.set_xlabel("x")
        plot2.set_ylabel("y")
        plot2.scatter(X[:, 0], Y, color="orange", label='Actual output')
        plot2.grid()
        plot2.scatter(X[:, 0], actual_predictions,
                      color="blue", marker="+", label='Predicted output'
                      )
        plot2.legend()

        # 2-D graph of Loss versus Number of iterations.
        plot3.set_title("Loss versus Number of iterations")
        plot3.set_xlabel("iterations")
        plot3.set_ylabel("Cost")
        iterations = []
        cost = []
        # Re-initialize the weights and retrain, recording the loss per epoch.
        self.weights = generate_weights(X.shape[1], 1, zeros=zeros)
        for epoch in range(1, epochs + 1):
            iterations.append(epoch)
            self.weights = optimizer.iterate(X, Y, self.weights)
            error = optimizer.loss_func.loss(X, Y, self.weights)
            cost.append(error)
        plot3.plot(np.array(iterations), np.array(cost))

        plt.show()
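The third subplot can be reproduced without the optimizer classes. A self-contained sketch with a minimal inline gradient-descent step (learning rate, data, and epoch count are illustrative assumptions):

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 2))
Y = (X[:, 0] + X[:, 1] > 0).astype(float)   # linearly separable labels
W = np.zeros((2, 1))
lr, costs = 0.1, []
for _ in range(25):
    H = 1 / (1 + np.exp(-np.dot(X, W).T))   # forward pass
    costs.append(np.mean(-Y * np.log(H) - (1 - Y) * np.log(1 - H)))
    W -= lr * (1 / X.shape[0]) * np.dot(X.T, (H - Y).T)   # gradient step
plt.plot(range(1, 26), costs)
plt.title("Loss versus Number of iterations")
plt.xlabel("iterations")
plt.ylabel("Cost")
plt.show()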