Example No. 1
    def iterate(self):
        '''
        Propagation phase: run forward passes and delta-rule weight updates
        epoch by epoch until the average error falls within the tolerance.
        '''

        if self.algorithm == 0 or self.algorithm == 1:
            x = 0
            flag = True
            #repeat epochs until convergence
            while flag:
                print('******************** Start of Epoch {0} ********************'.format(x))
                errors = []
                for data in self.normalized:
                    #loop thru each data input
                    inputs = data[:-1]
                    d_output = data[-1:][0]
                    print("The inputs to the network are {0} and the desired output is {1}".format(inputs, d_output))

                    temp = np.matrix(inputs)
                    weights = np.matrix(self.weights[0]).T
                    product = np.dot(temp, weights)

                    if self.algorithm == 0:
                        activated = threshold_function(self.threshold, product.item())
                    else:
                        activated = line_equation(product.item())

                    print("The current weights and model outputs are {0} and {1} respectively".format(weights, activated))

                    error = d_output - activated
                    self.average_errors.append(error)
                    errors.append(error * error)
                    weight_change = temp.T * (self.learning_rate * error)
                    print("The model's error is {0} and the change in weights is {1}".format(error, weight_change))
                    self.weights = np.matrix(weight_change) + np.matrix(self.weights).T
                    self.weights = self.weights.T
                    print("The updated weights are:")
                    self.output_weights()
                print('******************** End of Epoch {0}   ********************'.format(x))

                average_error = np.mean(errors)
                print("The average error is {0}".format(average_error))
                if round(average_error, 1) <= self.tolerance:
                    print("The desired error tolerance has been achieved.")
                    print("Exiting...")
                    flag = False
                    break

                x += 1
        elif self.algorithm == 2:
            self.backprop()

		print("Testing completed successfully...")	
		print("The acceptable weights for the model are:")
		self.output_weights()	
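iterate relies on threshold_function and line_equation, which are not part of this snippet. A minimal sketch of what these activation helpers could look like, assuming a unit-step threshold and an identity (linear) activation; the actual definitions in the source may differ:

def threshold_function(threshold, value):
    #assumed step activation: fire 1 once the weighted sum reaches the threshold, else 0
    return 1 if value >= threshold else 0

def line_equation(value):
    #assumed linear (identity) activation used when self.algorithm == 1
    return value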
Example No. 2
    def test_data(self):
        '''Test data from a data file'''
        print(
            "*******************************TESTDATA*************************")
        inputs = []
        input_file = [self.bias, 7, 83, 78, 26, 71, 29.3, 0.767, 36]
        for i in input_file:
            inputs.append(round((float(i) / self.largest_data), 4))

        d_output = 1

        if self.algorithm == 0 or self.algorithm == 1:
            print(
                "The inputs to the network are {0} and the desired output is {1}"
                .format(inputs, d_output))

            temp = np.matrix(inputs)
            weights = np.matrix(self.weights[0]).T
            print(temp, weights)
            product = np.dot(temp, weights)

            if self.algorithm == 0:
                activated = threshold_function(self.threshold, product.item())
            else:
                activated = line_equation(product.item())

            print(
                "The current weights and model outputs are {0} and {1} respectively"
                .format(weights, activated))

        else:
            #backprop forward pass

            print(
                "The inputs to the network are {0} and the desired output is {1}"
                .format(inputs, d_output))

            input_0 = np.matrix(inputs)
            output_0 = sigmoid_function(input_0)
            print("The outputs of the input layer are {0}".format(output_0))
            weights = np.matrix(self.weights[0]).T
            input_1 = np.dot(weights, input_0.T)
            print("The inputs of the hidden layer are {0}".format(input_1))
            output_1 = sigmoid_function(input_1)
            print("The outputs of the hidden layer are {0}.".format(output_1))
            input_2 = np.dot(self.weights[1], output_1)
            print("The input to the output layer is {0}".format(input_2))
            output_2 = sigmoid_function(input_2.item())
            print("The output to the output layer is {0}".format(output_2))
Example No. 3
    def test(self):
        '''Test the model on the test set and report
        recall, precision, and F-score.'''
        #true class
        class_p = 1
        class_n = 0

        tp = 0
        fn = 0
        fp = 0
        tn = 0

        print("******************TESTING**************")

        for data in self.test_set:
            #loop thru each data input
            inputs = data[:-1]
            d_output = data[-1:][0]
            print(
                "The inputs to the network are {0} and the desired output is {1}"
                .format(inputs, d_output))

            if self.algorithm == 0 or self.algorithm == 1:

                temp = np.matrix(inputs)
                weights = np.matrix(self.weights[0]).T
                product = np.dot(temp, weights)

                if self.algorithm == 0:
                    m_output = threshold_function(self.threshold,
                                                  product.item())
                else:
                    m_output = line_equation(product.item())
                print("The models output is {0}".format(m_output))

            else:
                input_0 = np.matrix(inputs)
                output_0 = line_equation(input_0)
                print(
                    "The outputs of the input layer are {0}".format(output_0))
                weights = np.matrix(self.weights[0]).T
                input_1 = np.dot(weights, input_0.T)
                print("The inputs of the hidden layer are {0}".format(input_1))
                output_1 = sigmoid_function(input_1)
                print("The outputs of the hidden layer are {0}.".format(
                    output_1))
                input_2 = np.dot(self.weights[1], output_1)
                print("The input to the output layer is {0}".format(input_2))
                m_output = sigmoid_function(input_2.item())
                print("The output to the output layer is {0}".format(m_output))

            if m_output == d_output == class_p:
                tp += 1
            elif m_output == d_output == class_n:
                tn += 1
            elif m_output == class_p and d_output == class_n:
                fp += 1
            elif m_output == class_n and d_output == class_p:
                fn += 1
            else:
                print("An error has occured")

        print("**********COMPLETE***************")
        print("Analysis....")

        recall = tp / (tp + fn)
        precision = tp / (tp + fp)
        f_score = (2 * (precision * recall)) / (precision + recall)
        print("The recall is: {0}".format(recall))
        print("The precision is: {0}".format(precision))
        print("The F-score is: {0}".format(f_score))
Example No. 4
    def backprop(self):
        '''
        Backpropagation learning and training: a single hidden layer with
        sigmoid activations, trained until the average error falls within
        the tolerance.
        '''

        x = 0
        flag = True
        #repeat epochs until convergence
        while flag:
            print(
                '******************** Start of Epoch {0} ********************'.
                format(x))
            errors = []
            for data in self.normalized:
                #loop thru each data input
                inputs = data[:-1]
                d_output = data[-1:][0]
                print(
                    "The inputs to the network are {0} and the desired output is {1}"
                    .format(inputs, d_output))

                input_0 = np.matrix(inputs)
                output_0 = line_equation(input_0)
                print(
                    "The outputs of the input layer are {0}".format(output_0))
                weights = np.matrix(self.weights[0]).T
                input_1 = np.dot(weights, input_0.T)
                print("The inputs of the hidden layer are {0}".format(input_1))
                output_1 = sigmoid_function(input_1)
                print("The outputs of the hidden layer are {0}.".format(
                    output_1))
                input_2 = np.dot(self.weights[1], output_1)
                print("The input to the output layer is {0}".format(input_2))
                output_2 = sigmoid_function(input_2.item())
                print("The output to the output layer is {0}".format(output_2))

                error = d_output - output_2
                self.average_errors.append(error)
                errors.append(error * error)

                #calculate weight for the second weights

                d = (d_output - output_2) * output_2 * (1 - output_2)
                weight_change_w = self.learning_rate * d * output_1

                #calculate weights for the first weights

                w = np.matrix(self.weights[1]).T * d
                a, b, g, h = (w[0].item(), w[1].item(), output_1[0].item(),
                              output_1[1].item())

                weight_change_v = [[a * g * (1 - g)], [b * h * (1 - h)]]
                weight_change_v = np.matrix(weight_change_v).T
                #update weights
                print("Updating weights.......")

                self.weights[0] = ((output_0.T * weight_change_v) *
                                   self.learning_rate) + self.weights[0]

                self.weights[1] = np.matrix(
                    self.weights[1]).T + weight_change_w
                self.weights[1] = self.weights[1].T

                print("The updated weights are:")
                self.output_weights()
            print(
                '******************** End of Epoch {0}   ********************'.
                format(x))

            average_error = np.mean(errors)
            print("The average error is, {0}".format(average_error))
            if round(average_error, 1) <= self.tolerance:
                print("The desired error tolerance has been achieved.")
                print("Exiting... ")
                flag = False
                break

            x += 1
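The weight updates in backprop use the delta rule for sigmoid units, where the derivative is written in terms of the unit's own output. A small sketch with a hypothetical sigmoid_derivative helper and made-up values, isolating the output-layer delta d computed in the loop above:

def sigmoid_derivative(output):
    #derivative of the logistic sigmoid expressed through its output: o * (1 - o)
    return output * (1 - output)

d_output, output_2 = 1.0, 0.73       #hypothetical target and model output
d = (d_output - output_2) * sigmoid_derivative(output_2)   #0.27 * 0.73 * 0.27, roughly 0.053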