Code Example #1
	def evaluate(self):
		""" Evaluate the model on itself
		"""

		## Evaluate on the dev dataset instead of the test set, so the x_test call is commented out
		#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)
		self.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)
		print("%s score = %f\n" %(self.modelName, self.model_score[1]))

		## Saving actual vs predicted labels
		## np.argmax returns the index where it sees a 1 in the row
		#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)
		y_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)

		## vstack stacks them as 2 rows, so we transpose to get them as columns
		#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T
		output_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T

		self.outputFile = (self.resultDir + '/' + self.configName + '/'
			+ self.modelName + '_' + str(len(self.hiddenLayer)) + 'HL_'
			+ str(self.history.epoch[-1]+1) + 'epochs_'
			+ '{0:.2f}'.format(self.model_score[1]*100).replace('.', 'p')
			+ '_acc_' + "outputPredict.csv")

		np.savetxt(self.outputFile, output_predict, fmt="%5.0f", delimiter=",")

		## Error analysis of the predictions
		errorAnalysis(self.outputFile)

		return self.model_score
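
A minimal standalone sketch (not part of the example above) of what the two NumPy calls do: np.argmax(..., axis=1) recovers the class index from each one-hot row, and np.vstack((...)).T places the actual and predicted indices side by side as columns.

import numpy as np

y_oh = np.array([[0, 1, 0],
                 [1, 0, 0],
                 [0, 0, 1]])             # one-hot labels for classes 1, 0, 2
y_true = np.argmax(y_oh, axis=1)         # -> array([1, 0, 2])
y_pred = np.array([1, 2, 2])             # stand-in for a model's predictions
output_predict = np.vstack((y_true, y_pred)).T
print(output_predict)                    # rows of [actual, predicted]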
Code Example #2
    def evaluateTest(self):
        """ Evaluate the model on itself
		"""

        ## Evaluate on the held-out test dataset
        self.model_score = self.model.evaluate(self.x_test,
                                               self.y_test_oh,
                                               batch_size=2048)
        print("%s score = %f\n" % (self.modelName, self.model_score[1]))

        ## Saving actual vs predicted labels
        ## np.argmax returns the index where it sees a 1 in the row
        y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048),
                           axis=1)

        ## vstack stacks them as 2 rows, so we transpose to get them as columns
        output_predict = np.vstack((np.argmax(self.y_test_oh,
                                              axis=1), y_pred)).T
        outputFile = self.resultDir + "/outputPredict.csv"
        np.savetxt(outputFile, output_predict, fmt="%5.0f", delimiter=",")

        ## Error analysis of the predictions
        errorAnalysis(outputFile)

        return self.model_score
Code Example #3
	def evaluate(self):
		""" Evaluate the model on itself
		"""

		## Evaluate on the dev dataset instead of the test set, so the x_test call is commented out
		#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)
		self.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)
		print("%s score = %f\n" %(self.modelName, self.model_score[1]))

		## Saving actual vs predicted labels
		## np.argmax returns the index where it sees a 1 in the row
		#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)
		y_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)

		## vstack stacks them as 2 rows, so we transpose to get them as columns
		#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T
		output_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T

		outputFile = (self.resultDir + '/' + self.modelName
			+ '_8HLw_1000_700_500_500_300_300_256_256'
			+ str(self.history.epoch[-1]+1) + 'epochs_' + 'Dropout_'
			+ str(self.drop1).replace('.', 'p') + '_' + str(self.drop2).replace('.', 'p')
			+ '_' + str(self.drop3).replace('.', 'p') + '_' + str(self.drop4).replace('.', 'p')
			+ '_' + str(self.drop5).replace('.', 'p') + '_' + str(self.drop6).replace('.', 'p')
			+ '_' + str(self.drop7).replace('.', 'p') + '_' + str(self.drop8).replace('.', 'p')
			+ "_outputPredict.csv")

		np.savetxt(outputFile, output_predict, fmt="%5.0f", delimiter=",")

		## Error analysis of the predictions
		errorAnalysis(outputFile)

		return self.model_score
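
A hedged sketch (not in the original class) of an equivalent file-name builder that replaces the eight dropout concatenations with a single join, assuming the same attributes (self.resultDir, self.modelName, self.history, self.drop1 … self.drop8) exist:

	def buildOutputFile(self):
		## Hypothetical helper: produces the same name as the concatenation above
		drops = (self.drop1, self.drop2, self.drop3, self.drop4,
			self.drop5, self.drop6, self.drop7, self.drop8)
		dropTag = '_'.join(str(d).replace('.', 'p') for d in drops)  # 0.25 -> '0p25'
		epochs = self.history.epoch[-1] + 1
		return ('%s/%s_8HLw_1000_700_500_500_300_300_256_256%depochs_Dropout_%s_outputPredict.csv'
			% (self.resultDir, self.modelName, epochs, dropTag))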
Code Example #4
        ## Convert from one-hot to numerical prediction
        y_pred = np.argmax(model.predict(x_dev[:, :inputTraces],
                                         batch_size=256),
                           axis=1)

        ## vstack the actual and predicted output and take transpose
        output_predict = np.vstack((np.argmax(y_dev_oh, axis=1), y_pred)).T

        ## Save it to csv file for future analysis
        ## Split the modelName so that it has config and run number in the name
        outputFile = modelDir + "/" + devDataConfig + "/" + "dataOf_" + devDataConfig + "_modelOf_" + "_".join(
            modelName.split("_")[1:2]) + "_dev.csv"
        np.savetxt(outputFile, output_predict, fmt="%5.0f", delimiter=",")

        ## Error analysis
        errorAnalysis(outputFile)

        df = pd.read_csv(outputFile, header=None)

        error_df = df[df[0] != df[1]].astype('category')
        error_df[2] = error_df[0].astype(str).str.cat(error_df[1].astype(str),
                                                      sep="-")

        totalCount = df[0].count()
        errorCount = error_df[2].count()
        accuracy = ((totalCount - errorCount) / totalCount) * 100

        ## To get the accuracy of individual keys, count the number of rows in
        ## error_df for each key, subtract it from the total number of data
        ## elements for that key, and divide by that total.
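        ## Hedged sketch (not in the original snippet) of that per-key accuracy
        ## computation, assuming df (actual in column 0, predicted in column 1)
        ## and error_df (misclassified rows only) from above:
        perKeyTotal = df[0].value_counts()          # data elements per actual key
        perKeyErrors = error_df[0].value_counts()   # misclassified rows per key
        perKeyErrors = perKeyErrors.reindex(perKeyTotal.index, fill_value=0)
        perKeyAccuracy = ((perKeyTotal - perKeyErrors) / perKeyTotal) * 100
        print(perKeyAccuracy.sort_values())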