def test_calculate_probability(self):
     model = LogisticRegression.train(xs, ys)
     q, p = LogisticRegression.calculate(model, [6, -173.143442352])
     self.assertAlmostEqual(p, 0.993242, 6)
     self.assertAlmostEqual(q, 0.006758, 6)
     q, p = LogisticRegression.calculate(model, [309, -271.005880394])
     self.assertAlmostEqual(p, 0.000321, 6)
     self.assertAlmostEqual(q, 0.999679, 6)
Example #2
 def test_calculate_probability(self):
     model = LogisticRegression.train(xs, ys)
     q, p = LogisticRegression.calculate(model, [6, -173.143442352])
     self.assertAlmostEqual(p, 0.993242, places=6)
     self.assertAlmostEqual(q, 0.006758, places=6)
     q, p = LogisticRegression.calculate(model, [309, -271.005880394])
     self.assertAlmostEqual(p, 0.000321, places=6)
     self.assertAlmostEqual(q, 0.999679, places=6)
 def test_model_accuracy(self):
     correct = 0
     model = LogisticRegression.train(xs, ys)
     predictions = [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
     for i in range(len(predictions)):
         prediction = LogisticRegression.classify(model, xs[i])
         self.assertEqual(prediction, predictions[i])
         if prediction == ys[i]:
             correct += 1
     self.assertEqual(correct, 16)
Example #4
 def test_leave_one_out(self):
     correct = 0
     predictions = [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0]
     for i in range(len(predictions)):
         model = LogisticRegression.train(xs[:i] + xs[i + 1:], ys[:i] + ys[i + 1:])
         prediction = LogisticRegression.classify(model, xs[i])
         self.assertEqual(prediction, predictions[i])
         if prediction == ys[i]:
             correct += 1
     self.assertEqual(correct, 15)
Example #5
 def test_model_accuracy(self):
     correct = 0
     model = LogisticRegression.train(xs, ys)
     predictions = [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
     for i in range(len(predictions)):
         prediction = LogisticRegression.classify(model, xs[i])
         self.assertEqual(prediction, predictions[i])
         if prediction == ys[i]:
             correct += 1
     self.assertEqual(correct, 16)
Example #6
 def test_leave_one_out(self):
     correct = 0
     predictions = [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0]
     for i in range(len(predictions)):
         model = LogisticRegression.train(xs[:i] + xs[i + 1:], ys[:i] + ys[i + 1:])
         prediction = LogisticRegression.classify(model, xs[i])
         self.assertEqual(prediction, predictions[i])
         if prediction == ys[i]:
             correct += 1
     self.assertEqual(correct, 15)
 def test_classify(self):
     model = LogisticRegression.train(xs, ys)
     result = LogisticRegression.classify(model, [6, -173.143442352])
     self.assertEqual(result, 1)
     result = LogisticRegression.classify(model, [309, -271.005880394])
     self.assertEqual(result, 0)
 def test_calculate_model(self):
     model = LogisticRegression.train(xs, ys)
     beta = model.beta
     self.assertAlmostEqual(beta[0], 8.9830, 4)
     self.assertAlmostEqual(beta[1], -0.0360, 4)
     self.assertAlmostEqual(beta[2], 0.0218, 4)
Example #9
    def post(self):
        alldata = self.getRequestData()
        user = self.objUserInfo
        s = Entity.model(self.db)
        print(alldata)
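        # model_type 1: train a Bio.LogisticRegression model in-process and store its coefficients (beta)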
        if alldata['model_type'] == 1:
            xss = alldata['xs'].split()
            xs = []
            ys = []
            q = 0
            for i in xss:
                xs.append([float(i.split(',')[0]), float(i.split(',')[1])])
            for i in range(len(xs)):
                ys.append(int(alldata['ys'].split(',')[q]))
                q = q + 1
            print(len(xs), len(ys))
            model = LogisticRegression.train(xs, ys)
            if model.beta:
                lsData = {
                    "create_id": user['id'],
                    "name": alldata['name'],
                    "beta": str(model.beta),
                    "note": alldata['note']
                }
                id = s.save(lsData, table='public.logistis')
                self.response(id)
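        # model_type 2: write a small Python module that trains a Bio.kNN model on demand, and record its path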
        elif alldata['model_type'] == 2:
            xss = alldata['xs'].split()
            xs = []
            ys = []
            q = 0
            for i in xss:
                xs.append([float(i.split(',')[0]), float(i.split(',')[1])])
            for i in range(len(xs)):
                ys.append(int(alldata['ys'].split(',')[q]))
                q = q + 1
            print(xs, ys)
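            # draw random five-digit file names until one does not collide with an existing file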
            count = 1
            while count >= 0:
                rpath = str(random.randint(10000, 90000))
                pyfile = '/home/ubuntu/pythonff/mdt/mdt/mdtproject/trunk/service/data_mining/' + rpath + '.py'
                if not os.path.isfile(pyfile):
                    count = -1
                else:
                    count = 1

            f = open(pyfile, 'w')
            text = 'from Bio import kNN' + '\n' + 'class model():' + '\n' + '	def knn(self):' + '\n' + '		xs = ' + str(
                xs
            ) + '\n' + '		ys =' + str(ys) + '\n' + '		k=' + str(
                alldata['k']
            ) + '\n' + '		model = kNN.train(xs,ys,k)' + '\n' + '		return model'
            print(text)
            f.write(text)
            f.close()
            if os.path.isfile(pyfile):
                lsData = {
                    "create_id": user['id'],
                    "name": alldata['name'],
                    "file_name": rpath,
                    "packpath": pyfile,
                    "type": '2',
                    "note": alldata['note']
                }
                id = s.save(lsData, table='public.pymodel')
                self.response(id)
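        # model_type 3: same pattern, but the generated module trains a Bio.NaiveBayes model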
        elif alldata['model_type'] == 3:
            xss = alldata['xs'].split()
            xs = []
            ys = []
            q = 0
            for i in xss:
                xs.append([float(i.split(',')[0]), float(i.split(',')[1])])
            for i in range(len(xs)):
                ys.append(int(alldata['ys'].split(',')[q]))
                q = q + 1
            print(xs, ys)
            count = 1
            while count >= 0:
                rpath = str(random.randint(10000, 90000))
                pyfile = '/home/ubuntu/pythonff/mdt/mdt/mdtproject/trunk/service/data_mining/' + rpath + '.py'
                if not os.path.isfile(pyfile):
                    count = -1
                else:
                    count = 1
            f = open(pyfile, 'w')
            text = 'from Bio import NaiveBayes' + '\n' + 'class model():' + '\n' + '	def bayes(self):' + '\n' + '		xs = ' + str(
                xs
            ) + '\n' + '		ys =' + str(
                ys
            ) + '\n' + '		model = NaiveBayes.train(xs,ys)' + '\n' + '		return model'
            print(text)
            f.write(text)
            f.close()
            if os.path.isfile(pyfile):
                lsData = {
                    "create_id": user['id'],
                    "name": alldata['name'],
                    "file_name": rpath,
                    "packpath": pyfile,
                    "type": '3',
                    "note": alldata['note']
                }
                id = s.save(lsData, table='public.pymodel')
                self.response(id)
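Note that the handler never imports the generated module back; for model types 2 and 3 it only writes the file and stores its path as "packpath" in public.pymodel. A minimal sketch of how such a file could be consumed later (the module name "generated_model", the placeholder file name 12345.py, and the use of importlib are assumptions, not part of the handler above):

import importlib.util

# the "packpath" value stored in public.pymodel (12345 is a placeholder for the random name)
pyfile = '/home/ubuntu/pythonff/mdt/mdt/mdtproject/trunk/service/data_mining/12345.py'

spec = importlib.util.spec_from_file_location("generated_model", pyfile)
generated = importlib.util.module_from_spec(spec)
spec.loader.exec_module(generated)

knn_model = generated.model().knn()        # model_type 2: returns a trained Bio.kNN model
# bayes_model = generated.model().bayes()  # model_type 3: returns a trained Bio.NaiveBayes model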
Example #10
 def test_calculate_model_with_update_callback(self):
     model = LogisticRegression.train(xs, ys, update_fn=show_progress)
     beta = model.beta
     self.assertAlmostEqual(beta[0], 8.9830, places=4)
Example #11
 def test_classify(self):
     model = LogisticRegression.train(xs, ys)
     result = LogisticRegression.classify(model, [6, -173.143442352])
     self.assertEqual(result, 1)
     result = LogisticRegression.classify(model, [309, -271.005880394])
     self.assertEqual(result, 0)
Example #12
 def test_calculate_model(self):
     model = LogisticRegression.train(xs, ys)
     beta = model.beta
     self.assertAlmostEqual(beta[0],  8.9830, places=4)
     self.assertAlmostEqual(beta[1], -0.0360, places=4)
     self.assertAlmostEqual(beta[2],  0.0218, places=4)
Example #13
 def test_calculate_model_with_update_callback(self):
     model = LogisticRegression.train(xs, ys, update_fn=show_progress)
     beta = model.beta
     self.assertAlmostEqual(beta[0], 8.9830, places=4)
from Bio import LogisticRegression
import numpy as np


all_data = np.loadtxt("../datasets/iris/iris.data", delimiter=",",
                      dtype="float, float, float, float, S11")

xs = []
ys = []

# keep only Iris-setosa (label 0) and Iris-versicolor (label 1); skip Iris-virginica rows
for i in all_data:
    if 'virgi' not in str(i[-1]):
        xs.append([i[0], i[1], i[2], i[3]])
        if 'setosa' in str(i[-1]):
            ys.append(0)
        else:
            ys.append(1)

test_xs = xs.pop()
test_ys = ys.pop()


def show_progress(iteration, loglikelihood):
    print("Iteration:", iteration, "Log-likelihood function:", loglikelihood)

model = LogisticRegression.train(xs, ys, update_fn=show_progress)
print("This should be Iris-versic (1): {}".format(LogisticRegression.classify(model, test_xs)))