def xor_trainer():

    # All possible 2D inputs to an XOR gate and the corresponding outputs.

    data_xor = [[0, 0], [1, 0], [0, 1], [1, 1]]
    labels_xor = [0, 1, 1, 0]

    # Build a perceptron to try to learn XOR (XOR is not linearly separable, so a single perceptron cannot fit it exactly).

    classifier_xor = Perceptron(max_iter=40)
    classifier_xor.fit(data_xor, labels_xor)
    # print(classifier_xor.score(data_xor, labels_xor))

    # Even though an input like [0.5, 0.5] isn't a valid input to an XOR gate, we can use the
    # decision function to see how far each point on a grid lies from the separating line.

    x_xor = np.linspace(0, 1, 100)
    y_xor = np.linspace(0, 1, 100)

    point_grid_xor = list(product(x_xor, y_xor))

    distances_xor = classifier_xor.decision_function(point_grid_xor)

    # distances contains signed values; we only care about the absolute distance to the boundary.

    abs_distances_xor = [abs(i) for i in distances_xor]
    distances_matrix_xor = np.reshape(abs_distances_xor, (100, 100))

    return x_xor, y_xor, distances_matrix_xor
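
# Minimal usage sketch (not part of the original example): plot the distance heatmap
# returned by xor_trainer(), assuming the same imports as the AND-gate script below
# (numpy as np, matplotlib.pyplot as plt, Perceptron, itertools.product).
x_vals, y_vals, xor_matrix = xor_trainer()
heatmap_xor = plt.pcolormesh(x_vals, y_vals, xor_matrix)
plt.colorbar(heatmap_xor)
plt.show()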
Example 2
def ppn_classify(X_train,y_train,X_test,y_test):
	ppn = Perceptron(max_iter=100, eta0=0.0001, random_state=0)
	#ppn = Perceptron(**args['ppn_args'])
	ppn.fit(X_train,y_train)
	ppn_y_pred = ppn.predict(X_test)
	acc = accuracy_score(ppn_y_pred,y_test)
	f1 = f1_score(y_test, ppn_y_pred)
	y_scores = ppn.decision_function(X_test)
	auc = roc_auc_score(y_test,y_scores)
	strs = "PPN Test_acc: {:.6f}".format(acc)
	strs = strs + " F1-score: {:.6f}".format(f1)
	strs = strs + " AUC: {:.6f}".format(auc)
	print(strs)
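
# Hedged usage sketch for ppn_classify(); the synthetic dataset and its parameters are
# illustrative, not from the original code, and the metric/Perceptron imports used inside
# the function are assumed to be available in its module.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X_demo, y_demo = make_classification(n_samples=1000, n_features=20, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.2, random_state=0)
ppn_classify(X_tr, y_tr, X_te, y_te)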
def plotRocCurve_balanced(X, y, s1, m_itr):

    print()
    print()
    print("IN plotROC BALANCED")

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.1,
                                                        random_state=42)

    balancing_obj = SMOTE(random_state=42, k_neighbors=5)
    X_train, y_train = balancing_obj.fit_resample(X_train, y_train)

    perceptron = Perceptron(max_iter=m_itr).fit(X_train, y_train)
    predicted_labels_train = perceptron.predict(X_train)
    predicted_labels_test = perceptron.predict(X_test)

    confidence_score = perceptron.decision_function(X_test)
    confidence_score = preprocessing.scale(confidence_score)

    f1 = f1_score(y_test, predicted_labels_test)
    precision = precision_score(y_test, predicted_labels_test)
    recall = recall_score(y_test, predicted_labels_test)
    print('precision is: ', precision)
    print('recall is:', recall)
    print('f1 score is:', f1)

    false_positive_rate, true_positive_rate, thresholds = roc_curve(
        y_test, confidence_score, pos_label=1)
    area_curve = auc(false_positive_rate, true_positive_rate)

    plt.figure()
    plt.title('ROC, F1 = %0.2f' % f1 + ', max_iter: ' + str(m_itr))
    plt.plot(false_positive_rate,
             true_positive_rate,
             'b',
             label='AUC = %0.2f' % area_curve)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    #plt.xlim([0,1])
    #plt.ylim([0,1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    fig1 = plt.gcf()
    plt.show()
    fig1.savefig(s1, dpi=300)
Example 4
class _PerceptronImpl:
    def __init__(self, **hyperparams):
        self._hyperparams = hyperparams
        self._wrapped_model = Op(**self._hyperparams)

    def fit(self, X, y=None):
        if y is not None:
            self._wrapped_model.fit(X, y)
        else:
            self._wrapped_model.fit(X)
        return self

    def predict(self, X):
        return self._wrapped_model.predict(X)

    def decision_function(self, X):
        return self._wrapped_model.decision_function(X)
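
# Minimal sketch of the wrapper above; it assumes `Op` aliases sklearn's Perceptron
# (as in the original module) and uses a tiny illustrative dataset.
from sklearn.linear_model import Perceptron as Op  # assumption: Op is sklearn's Perceptron

model = _PerceptronImpl(max_iter=1000, tol=1e-3)
model.fit([[0.0, 0.0], [1.0, 1.0]], [0, 1])
print(model.predict([[0.9, 0.9]]))
print(model.decision_function([[0.9, 0.9]]))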
def plotRoc_feat_wise_percep(X, y, class_w, plt, m_itr=None):

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.1,
                                                        random_state=42)
    perceptron = Perceptron(class_weight={
        1: class_w
    }, max_iter=m_itr).fit(X_train, y_train)
    predicted_labels_test = perceptron.predict(X_test)

    confidence_score = perceptron.decision_function(X_test)
    confidence_score = preprocessing.scale(confidence_score)

    f1 = f1_score(y_test, predicted_labels_test)
    precision = precision_score(y_test, predicted_labels_test)
    recall = recall_score(y_test, predicted_labels_test)
    print('precision is: ', precision)
    print('recall is:', recall)
    print('f1 score is:', f1)

    false_positive_rate, true_positive_rate, thresholds = roc_curve(
        y_test, confidence_score, pos_label=1)
    area_curve = auc(false_positive_rate, true_positive_rate)

    plt.plot(false_positive_rate,
             true_positive_rate,
             label='AUC = %0.2f, F1 = %0.2f, feats = %d' %
                   (area_curve, f1, X_train.shape[1]))
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')

    return area_curve
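
# Hedged usage sketch for plotRoc_feat_wise_percep(): draw ROC curves for a few
# minority-class weights on one figure. The imbalanced synthetic data and the weight
# values are illustrative, and the function's own imports (train_test_split, Perceptron,
# preprocessing, roc_curve, auc, the sklearn metrics) are assumed to be in scope.
from sklearn.datasets import make_classification

X_imb, y_imb = make_classification(n_samples=2000, n_features=20,
                                   weights=[0.9, 0.1], random_state=42)
plt.figure()
for w in [1, 5, 10]:
    plotRoc_feat_wise_percep(X_imb, y_imb, class_w=w, plt=plt, m_itr=1000)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()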
from sklearn.linear_model import Perceptron
import matplotlib.pyplot as plt
import numpy as np
from itertools import product

data = [[0, 0], [0, 1], [1, 0], [1, 1]]
labels = [0, 0, 0, 1]

plt.scatter([point[0] for point in data], [point[1] for point in data],
            c=labels)
plt.show()

classifier = Perceptron(max_iter=40)
classifier.fit(data, labels)
print(classifier.score(data, labels))

x_values = np.linspace(0, 1, 100)
y_values = np.linspace(0, 1, 100)
point_grid = list(product(x_values, y_values))

distances = classifier.decision_function(point_grid)
abs_distances = [abs(distance) for distance in distances]
distances_matrix = np.reshape(abs_distances, (100, 100))

heatmap = plt.pcolormesh(x_values, y_values, distances_matrix)
plt.colorbar(heatmap)
plt.show()
Example 7
precision = []
recall = []
f1 = []
ap = []
tr = Helper.getTrueKeyphrases(
    'ake-datasets-master/datasets/500N-KPCrowd/references/test.reader.stem.json'
)
kfs = {}

for doc_index, doc_name in enumerate(test.keys()):
    params = calculateParameters(allCandidatesTest[doc_index],
                                 testStr[doc_index], bm25test[doc_name])

    predicted = p_classifier.predict(params)
    plane = p_classifier.decision_function(params)
    true = testTargets[doc_name]

    print('PERCEPTRON')
    print(predicted)
    print('[P2]', plane)
    print('REALITY')
    print(true)

    rnk = {
        list(bm25test[doc_name].keys())[i]: v
        for i, v in enumerate(plane) if v > 0
    }
    rnk = list(dict(Helper.dictToOrderedList(rnk, rev=True)).keys())
    p, r, f = calcResults(predicted, true)
    # store the per-document results in the lists initialised above
    precision.append(p)
    recall.append(r)
    f1.append(f)
    kfs[doc_name] = rnk
Example 8
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.metrics import hinge_loss
from msg2matrix import trainmsg, trainresult, testmsg, testresult
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
vectorizer = TfidfVectorizer(max_df=0.20,
                             min_df=5,
                             max_features=1000,
                             sublinear_tf=True)
vectorizer.fit(trainmsg)
dictionary = vectorizer.get_feature_names_out()  # get_feature_names() was removed in newer scikit-learn

train_matrix = mx.extract_features(trainmsg, dictionary)
test_matrix = mx.extract_features(testmsg, dictionary)

per = Perceptron(max_iter=5, eta0=1, class_weight="balanced")
per.fit(train_matrix, trainresult)
decision = per.decision_function(test_matrix)
hloss = hinge_loss(testresult, decision)
#Eout_per = 1 - per.score(test_matrix, testresult)

#clf = SVC(kernel='linear', C=0.1)
#clf.fit(train_matrix, trainresult)
#Eout_svm = 1 - clf.score(test_matrix, testresult)

#NB = MultinomialNB()
#NB.fit(train_matrix, trainresult)
#Eout_nb = 1 - NB.score(test_matrix, testresult)

#NBB = BernoulliNB()
#NBB.fit(train_matrix, trainresult)
#Eout_nbb = 1 - NBB.score(test_matrix, testresult)
Example 9
plt.imshow(perceptron_cm)
labels = ['negative', 'positive']
xlocations = np.array(range(len(labels)))
plt.xticks(xlocations, labels, rotation=0)
plt.yticks(xlocations, labels)
plt.title('Confusion matrix of the Perceptron classifier')
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.colorbar()
thresh = perceptron_cm.max() / 2.0  # colour threshold so the cell counts stay readable
for i, j in itertools.product(range(perceptron_cm.shape[0]), range(perceptron_cm.shape[1])):
    plt.text(j, i, perceptron_cm[i, j], horizontalalignment="center",
             color="white" if perceptron_cm[i, j] > thresh else "black")
plt.show()
print("Perceptron\n", classification_report(test_label, test_predict_per))
per_score = perceptron_model.decision_function(test_new)
per_auc = roc_auc_score(test_label, per_score)
print("Perceptron AUC: %.3f" %(per_auc))

'''
# SVM: Use cross-validation to select gamma and C
cv = StratifiedKFold(n_splits=5, shuffle=True)
C_range = np.logspace(-2, 1, 15)
gamma_range = np.logspace(-2, 1, 15)
ACC = np.zeros([15,15])
for i in range(len(gamma_range)):
    for j in range(len(C_range)):
        clf = svm.SVC(kernel='rbf', gamma=gamma_range[i], C=C_range[j])
        acc = cross_val_score(clf, train_new, train_label, cv = cv, scoring='accuracy')
        mean_acc = np.mean(acc)
        ACC[i][j] = mean_acc
Example 10
# training set: train_d aka x, (mon4, mon5); train_t aka y, (mon6)
# testing set: test_d aka x, (mon4, mon5, mon6); test_t aka y, (mon7)
train_d = dc(mon4)
train_d.extend(mon5)
test_d = dc(train_d)
test_d.extend(mon6)
train_t = dc(mon6)
test_t = dc(mon7)

# processing data
train_d = pp.process_activity(train_d)
train_t = pp.process_activity(mon6)
train_d = pp.normalization(train_d)

train_x,train_y = pp.get_train_data(train_d,train_t)
test_d = pp.process_activity(test_d)
test_d = pp.normalization(test_d)
test_t = pp.process_activity(test_t)

# use a perceptron to train the model
pcpt = Perceptron()
pcpt.fit(train_x, train_y)

# keep the 2000 test entries with the highest decision scores
result = heapq.nlargest(2000,test_d,lambda x:pcpt.decision_function(test_d[x]))

# calculating the quality of result
precision, recall, f1 = pp.get_comments(result, test_t)
print "Precision rate: %f\nRecall rate: %f\nF1: %f\n" % (precision,recall,f1)
Example 11
class Predator():

	"""
	Predators are created with a Grid object associated to them.

	You can initialize their brain either with an already trained perceptron, or with the weights for the perceptron.

	You can set predator confusion to True or False, and give them a mask for their field of vision.
	A mask is an np.array of shape (13,), and the default mask is np.ones(13).

	How masks work (a short sketch follows this class):
	-> Each digit tells us whether the corresponding section is activated. The sections are listed from left to right and represent 15° each. The last digit in the array is always set to 1, as it represents the bias.
	-> mask [0,0,0,1,1,1,1,1,1,0,0,0,1] means the predator sees at an angle of 90°.
	-> mask np.ones(13) means the predator sees at an angle of 180° (widest angle possible).
	-> mask [0,0,0,0,0,0,0,0,0,0,0,0,1] means the predator is blind.
	

	"""
	

	def __init__(self,grid,coefs=None,brain=None,confusion=True,mask=np.ones(13),view_mode='boolean'):

		self.grid = grid
		self.grid.add_animal(self)

		self.confusion = confusion
		self.view_mode = view_mode

		if brain: self.brain = brain
		else:
			self.brain = Perceptron(max_iter=1)
			self.initialize_brain(coefs)
		# randomly place the predator on the grid
		self.pos_x = random.random()*self.grid.width
		self.pos_y = random.random()*self.grid.height
		self.orientation = int(random.random()*360)
		# view is the input vector for the predator's perceptron
		# (12 sections + bias)
		self.view = np.zeros(13)
		# mask is used to impair the predator's vision
		# if it's a vector of ones, all information goes through
		# we can set some values to 0 to blind certain angles
		self.mask = mask
		# wait is for the digesting period
		# (the predator has to wait 10 iterations between 2 attacks)
		self.wait = 0
		self.fitness = 0

	
	def copy(self,grid=None):
		if grid: copy = Predator(grid)
		else: copy = Predator(self.grid)
		copy.brain = self.brain
		return copy


	def set(self,x,y,o):
		self.pos_x = x
		self.pos_y = y
		self.orientation = o

	
	def set_mask(self,mask):
		self.mask = mask


	def initialize_brain(self,coefs=None):
		# make random input and target vectors
		x = np.random.choice([0,1],(4,13))
		y = np.arange(4)
		# fit it (1 iteration) to initialize all parameters
		self.brain.fit(x,y)
		# can use pre-defined coefs (e.g. for inheritance)
		if coefs: self.brain.coef_ = coefs
		# otherwise, weights attributed randomly
		else: self.brain.coef_ = np.random.rand(4,13)*2-1
		

	def move(self,mode):

		if mode=='stay still': return

		elif mode=='turn right':
			self.orientation -=6
			self.orientation %= 360

		elif mode=='turn left':
			self.orientation +=6
			self.orientation %= 360
		
		angle = math.radians(self.orientation)
		new_x,new_y = new_position(self.pos_x,self.pos_y,3,angle)
		# normalizing the predator's position
		self.pos_x = new_x%self.grid.width
		self.pos_y = new_y%self.grid.height


	def pick_move(self):

		predictions = self.brain.decision_function(self.view.reshape(1,-1))
		predictions = predictions.flatten()
		# converting to a predict_proba format
		# (not necessary but easier to read in case we ever need to)
		predictions = np.array([1/(1+math.exp(-p)) for p in predictions])
		# pick most likely next move
		next_move = np.argmax(predictions)
		if next_move==0: self.move('stay still')
		elif next_move==1: self.move('turn right')
		elif next_move==2: self.move('turn left')
		elif next_move==3: self.move('move forward')
		else: print("\nCareful, orientation not recognized.\n")

		# update fitness at each iteration
		self.fitness += (50-self.grid.n_preys())
		# decrement the waiting period if it is activated
		if self.wait>0: self.wait-=1


	"""
	When the predator looks around, it lists all other animals and initiates its view to zeros.
	For each animal, if it is visible, the section is set to one (boolean view). We can toy with that by making each section the count of animals in it, which would give our predators a sense of density of preys in such and such directions.
	If an animal is close enough (5 units), the predator attempts an attack. The waiting period is set to 10.
	If predator confusion is activated, the look_around() function will return the number of preys that were visible at the time of the attack, and whether the attack was successful, so we can measure averages and deduce predator efficiency in evolution_main.py.

	"""


	# other animals = list of other animals on the grid
	def look_around(self):
		other_animals = [a for a in self.grid.inhabitants if a!=self]
		view = np.zeros(13)
		attack = False
		for animal in other_animals:
			see = self.scan(animal)
			if see: 
				if self.view_mode=='boolean': view[see]=1
				elif self.view_mode=='cummulative': view[see]+=1
			if self.can_attack(animal) and self.wait==0:
				# activate waiting period
				self.wait=10
				r = self.attack(animal)
				attack = True
				
		# update bias
		view[-1] = 1
		self.view = view
		if self.confusion and attack: return r


	"""
	The attack() function supports both values of predator confusion. If the confusion is activated, you can print out at each attack the number of preys visible, whether the attack was successful, and the remaining number of preys on the grid. Simply set the printing parameter to True to activate printing (useful when doing visualizations).

	"""

		
	def attack(self,animal,printing=False):
		if self.confusion:
			# counting the number of preys the predator can see
			n_visible = 1
			for animal_ in self.grid.inhabitants:
				# calculate distance between target and other preys
				if isinstance(animal_,Prey) and animal_!=animal:
					coordinates = (animal.pos_x,animal.pos_y,animal_.pos_x,animal_.pos_y)
					d = distance(*coordinates,self.grid.width,self.grid.height)
					# if distance<30 units, it adds to confusion
					if self.scan(animal_) and d<=30: n_visible+=1
			if printing: print(n_visible,'prey(s) in sight')
			if np.random.rand() < 1/n_visible: 
				animal.die()
				if printing: 
					print('Attack: successful')
					print(self.grid.n_preys(),'preys left\n')
				return n_visible,True
			else: 
				if printing: print('Attack: unsuccessful\n')
				return n_visible,False
		else: animal.die()


	def can_see(self,animal):
		# calculate distance
		d = distance(self.pos_x,self.pos_y,animal.pos_x,animal.pos_y,self.grid.width,self.grid.height)
		if d > 200: return False
		return True

	
	def can_attack(self,animal):
		# calculate distance
		d = distance(self.pos_x,self.pos_y,animal.pos_x,animal.pos_y,self.grid.width,self.grid.height)
		if d > 5: return False

		# calculate the angle -- check whether the prey is in the field of vision

		angle = math.radians(self.orientation)

		ox,oy = self.pos_x,self.pos_y
		px,py = self.pos_x+1,self.pos_y
		new_x = ox + math.cos(angle) * (px-ox) - math.sin(angle) * (py-oy)
		new_y = oy + math.sin(angle) * (px-ox) + math.cos(angle) * (py-oy)

		da = np.array([animal.pos_x - ox, animal.pos_y - oy])
		do = np.array([new_x - ox, new_y - oy])

		cosangle = np.dot(da,do) / (np.linalg.norm(da) * np.linalg.norm(do))
		aangle = math.degrees(math.acos(cosangle))
		det = do[0]*da[1] - do[1]*da[0]

		return self.mask[angle_to_section(aangle,det)]==1
			

	def scan(self,animal):
		if self.can_see(animal):

			angle = math.radians(self.orientation)

			ox,oy = self.pos_x,self.pos_y
			px,py = self.pos_x+1,self.pos_y
			new_x = ox + math.cos(angle) * (px-ox) - math.sin(angle) * (py-oy)
			new_y = oy + math.sin(angle) * (px-ox) + math.cos(angle) * (py-oy)

			da = np.array([animal.pos_x - ox, animal.pos_y - oy])
			do = np.array([new_x - ox, new_y - oy])

			cosangle = np.dot(da,do) / (np.linalg.norm(da) * np.linalg.norm(do))
			aangle = math.degrees(math.acos(cosangle))
			det = do[0]*da[1] - do[1]*da[0]

			if isinstance(animal,Prey): 
				section = angle_to_section(aangle,det)
				# check the prey is in one of the activated sections
				if self.mask[section]==1: return section
			return False

	"""
	The see_brain() function displays the weights in the predator's perceptrons for each possible action, so we can see roughly how the view affects them.

	"""


	def see_brain(self):
		weights = np.concatenate((np.arange(-6,0,1).astype(str),np.arange(1,7,1).astype(str)),axis=0)
		weights = np.concatenate((weights,np.array(['bias'])),axis=0)
		x = np.arange(13)
		y_still = self.brain.coef_[0]
		y_right = self.brain.coef_[1]
		y_left = self.brain.coef_[2]
		y_forward = self.brain.coef_[3]
		
		plt.title('Weights in Deciding to Stay Still')
		plt.xticks(x,weights)
		plt.xlabel('view section (left to right)')
		plt.ylabel('weight')
		p1 = plt.bar(x,y_still,alpha=1)
		plt.show()

		plt.title('Weights in Deciding to Turn Right')
		plt.xticks(x,weights)
		plt.xlabel('view section (left to right)')
		plt.ylabel('weight')
		p1 = plt.bar(x,y_right,alpha=1)
		plt.show()
		
		plt.title('Weights in Deciding to Turn Left')
		plt.xticks(x,weights)
		plt.xlabel('view section (left to right)')
		plt.ylabel('weight')
		p1 = plt.bar(x,y_left,alpha=1)
		plt.show()

		plt.title('Weights in Deciding to Go Forward')
		plt.xticks(x,weights)
		plt.xlabel('view section (left to right)')
		plt.ylabel('weight')
		p1 = plt.bar(x,y_forward,alpha=1)
		plt.show()
		


	def save(self,name):
		pickle.dump(self,open('predators/'+name+'.pkl','wb'))
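
# Short sketch (not from the original code) of the mask convention described in the
# Predator docstring: a 90° field of view activates the six central 15° sections plus
# the final bias slot. `grid` is an assumed, already constructed Grid instance.
mask_90 = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1])
# predator = Predator(grid, confusion=True, mask=mask_90)
# predator.look_around()
# predator.pick_move()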
class Prey():
    def __init__(self, grid, brain=None):

        self.grid = grid
        self.grid.add_animal(self)

        if brain: self.brain = brain
        else:
            self.brain = Perceptron(max_iter=1)
            self.initialize_brain()
        self.pos_x = random.random() * self.grid.width
        self.pos_y = random.random() * self.grid.height
        self.orientation = int(random.random() * 360)
        self.view = np.zeros(25)
        self.fitness = 0

    def copy(self):
        copy = Prey(self.grid)
        copy.brain = self.brain
        return copy

    def set(self, x, y, o):
        self.pos_x = x
        self.pos_y = y
        self.orientation = o

    def initialize_brain(self):
        x = np.random.choice([0, 1], (4, 25))
        y = np.arange(4)
        self.brain.fit(x, y)
        self.brain.coef_ = np.random.rand(4, 25) * 2 - 1

    def move(self, mode):

        if mode == 'stay still': return

        elif mode == 'turn right':
            self.orientation -= 8
            self.orientation %= 360

        elif mode == 'turn left':
            self.orientation += 8
            self.orientation %= 360

        angle = math.radians(self.orientation)
        self.pos_x, self.pos_y = new_position(self.pos_x, self.pos_y, 1, angle)

        self.pos_x = self.pos_x % self.grid.width
        self.pos_y = self.pos_y % self.grid.height

    def pick_move(self):

        predictions = self.brain.decision_function(self.view.reshape(1, -1))
        predictions = predictions.flatten()
        # convert to probability distribution
        predictions = np.array([1 / (1 + math.exp(-p)) for p in predictions])
        next_move = np.argmax(predictions)
        if next_move == 0: self.move('stay still')
        elif next_move == 1: self.move('turn right')
        elif next_move == 2: self.move('turn left')
        elif next_move == 3: self.move('move forward')
        else: print("\nCareful, orientation not recognized.\n")

        self.fitness += (self.grid.n_preys())

    # other animals = list of other animals on the grid
    def look_around(self):
        # define field of vision for the prey
        #	-> 100 units around but have to remember they can cross the borders
        other_animals = [a for a in self.grid.inhabitants if a != self]
        view = np.zeros(25)
        for animal in other_animals:
            see = self.scan(animal)
            if see: view[see] = 1
        view[-1] = 1
        self.view = view

    def can_see(self, animal):
        # distance
        d = distance(self.pos_x, self.pos_y, animal.pos_x, animal.pos_y,
                     self.grid.width, self.grid.height)
        if d > 100: return False
        return True

    # section_id = int starting at 0 indicating number of section from left side
    def scan(self, animal):
        if self.can_see(animal):

            angle = math.radians(self.orientation)

            ox, oy = self.pos_x, self.pos_y
            px, py = self.pos_x + 1, self.pos_y
            new_x = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py -
                                                                          oy)
            new_y = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py -
                                                                          oy)

            da = np.array([animal.pos_x - ox, animal.pos_y - oy])
            do = np.array([new_x - ox, new_y - oy])

            cosangle = np.dot(da,
                              do) / (np.linalg.norm(da) * np.linalg.norm(do))
            aangle = math.degrees(math.acos(cosangle))
            det = do[0] * da[1] - do[1] * da[0]

            section = angle_to_section(aangle, det)

            if isinstance(animal, Prey): return section
            else:
                if section: return section + 12
            return False

    def see_brain(self):
        weights = np.concatenate(
            (np.arange(-6, 0, 1).astype(str), np.arange(1, 7, 1).astype(str)),
            axis=0)
        weights = np.concatenate((weights, np.array(['bias'])), axis=0)
        x = np.arange(13)
        y_still = self.brain.coef_[0]
        y_right = self.brain.coef_[1]
        y_left = self.brain.coef_[2]
        y_forward = self.brain.coef_[3]

        plt.title('Weights in Deciding to Stay Still')
        plt.xticks(x, weights)
        plt.xlabel('view section (left to right)')
        plt.ylabel('weight')
        p1 = plt.bar(x,
                     np.concatenate((y_still[:12], np.array([y_still[-1]])),
                                    axis=0),
                     alpha=.5,
                     color='lime')
        p2 = plt.bar(x, y_still[12:], alpha=.5, color='orange')
        plt.scatter(x,
                    np.concatenate(
                        (y_still[:12], np.zeros(1)), axis=0) + y_still[12:],
                    c='k',
                    marker='.')
        plt.legend((p1[0], p2[0]), ('preys', 'predators'))
        plt.show()

        plt.title('Weights in Deciding to Turn Right')
        plt.xticks(x, weights)
        plt.xlabel('view section (left to right)')
        plt.ylabel('weight')
        p1 = plt.bar(x,
                     np.concatenate((y_right[:12], np.array([y_right[-1]])),
                                    axis=0),
                     alpha=.5,
                     color='lime')
        p2 = plt.bar(x, y_right[12:], alpha=.5, color='orange')
        plt.scatter(x,
                    np.concatenate(
                        (y_right[:12], np.zeros(1)), axis=0) + y_right[12:],
                    c='k',
                    marker='.')
        plt.legend((p1[0], p2[0]), ('preys', 'predators'))
        plt.show()

        plt.title('Weights in Deciding to Turn Left')
        plt.xticks(x, weights)
        plt.xlabel('view section (left to right)')
        plt.ylabel('weight')
        p1 = plt.bar(x,
                     np.concatenate((y_left[:12], np.array([y_left[-1]])),
                                    axis=0),
                     alpha=.5,
                     color='lime')
        p2 = plt.bar(x, y_left[12:], alpha=.5, color='orange')
        plt.scatter(x,
                    np.concatenate(
                        (y_left[:12], np.zeros(1)), axis=0) + y_left[12:],
                    c='k',
                    marker='.')
        plt.legend((p1[0], p2[0]), ('preys', 'predators'))
        plt.show()

        plt.title('Weights in Deciding to Go Forward')
        plt.xticks(x, weights)
        plt.xlabel('view section (left to right)')
        plt.ylabel('weight')
        p1 = plt.bar(x,
                     np.concatenate(
                         (y_forward[:12], np.array([y_forward[-1]])), axis=0),
                     alpha=.5,
                     color='lime')
        p2 = plt.bar(x, y_forward[12:], alpha=.5, color='orange')
        plt.scatter(x,
                    np.concatenate((y_forward[:12], np.zeros(1)), axis=0) +
                    y_forward[12:],
                    c='k',
                    marker='.')
        plt.legend((p1[0], p2[0]), ('preys', 'predators'))
        plt.show()

    def die(self):
        self.grid.delete_animal(self)

    def save(self, name):
        pickle.dump(self, open('preys/' + name + '.pkl', 'wb'))
y = train['target'].values

import numpy as np
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2)
np.shape(X_train)

from sklearn.linear_model import Perceptron
clf = Perceptron(tol=1e-3, eta0=1e-6, max_iter=10000, n_iter_no_change=10, random_state=2)
#clf.fit(X_train, y_train)
clf.fit(variables, target)

clf.score(variables, target)

from sklearn.metrics import roc_auc_score
roc_auc_score(target, clf.decision_function(variables))

from sklearn.decomposition import PCA
pca = PCA(n_components=100, whiten=True)
data = pca.fit_transform(X)


import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn.linear_model import Perceptron
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=10)
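
# Hedged sketch of the cross-validated ROC loop the comment above sets up. X and y are
# assumed to be the NumPy feature matrix and binary labels used earlier; np.interp is
# used in place of the deprecated scipy.interp.
tprs, aucs = [], []
mean_fpr = np.linspace(0, 1, 100)
for train_idx, test_idx in cv.split(X, y):
    fold_clf = Perceptron(max_iter=1000, tol=1e-3).fit(X[train_idx], y[train_idx])
    fold_scores = fold_clf.decision_function(X[test_idx])
    fpr, tpr, _ = roc_curve(y[test_idx], fold_scores)
    tprs.append(np.interp(mean_fpr, fpr, tpr))
    aucs.append(auc(fpr, tpr))
    plt.plot(fpr, tpr, alpha=0.3)
plt.plot(mean_fpr, np.mean(tprs, axis=0), label='mean AUC = %0.2f' % np.mean(aucs))
plt.plot([0, 1], [0, 1], 'r--')
plt.legend(loc='lower right')
plt.show()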
Example 14
idx = [22, 36, 70, 80]

plt.scatter(X[idx, 0], X[idx, 1], c='r', s=100, alpha=0.5)

for i in idx:

    plt.annotate(i, xy=(X[i, 0], X[i, 1] + 0.1))

plt.grid(False)

plt.show()

# bar chart

plt.bar(range(len(idx)), model.decision_function(X[idx]))

plt.xticks(range(len(idx)), idx)

plt.gca().xaxis.grid(False)

plt.title("Discriminant Function")

plt.show()

#----------------------

# 3D plot

from mpl_toolkits.mplot3d import Axes3D
def b_sampling_perc(X, Y, x_test, y_test):

    #print(X)
    start = time.process_time()
    count = 0
    beta = {0.001: 0.01, 0.01: 0.1, 0.1: 1}

    classifier = Perceptron()
    classifier.fit(X, Y)
    print(np.abs(classifier.decision_function(X)).shape)
    #print(np.argmin(np.abs(classifier.decision_function(X))))

    #sample = X[np.argmin(np.abs(classifier.decision_function(X)), axis=0)]
    #print(sample.shape)
    #print(X.shape)

    arr = classifier.decision_function(x_test)
    #  print(arr)
    score_train = classifier.score(X, Y)
    print(score_train)

    #     y_train = pd.DataFrame(y_train)
    #     y_test = pd.DataFrame(y_test)

    beta_t_array = []
    beta_score_array = []

    for key, value in beta.items():
        print(key, value)
        t = 0
        count_for = 0
        x_train = X.copy()
        y_train = Y.copy()

        for index, row in x_test.iterrows():

            if (count < 20):
                y_hat = (classifier.predict(x_test.iloc[count_for:(count_for +
                                                                   1), :]))

                z = ((random.random() * value) < (key / (key + abs(y_hat))))

                if (z == 1):
                    count += 1
                    t += 1
                    a = x_test.iloc[count_for:count_for + 1, :]
                    x_train = x_train.append(a)
                    lab = y_test[index]
                    lab_series = pd.Series(lab)
                    y_train = y_train.append(lab_series, ignore_index=True)
            else:
                classifier.fit(x_train, y_train)
                count = 0

            count_for += 1

        score_test = classifier.score(x_test, y_test)
        beta_t_array.append(t)
        beta_score_array.append(score_test)

    end = time.process_time()
    print("time takes for Perceptron= {} min".format((end - start) / 60))

    return (beta_t_array, beta_score_array)
## Scatter plot the data
plt.scatter(x, y, c=labels)
plt.show()

## Create perceptron object
classifier = Perceptron(max_iter=40)

## Fit the data
classifier.fit(data, labels)

## Look at the score
classifier.score(data, labels)

## Investigate decision function - This will help us determine how far we are from the boundary
classifier.decision_function([[0, 0], [1, 1], [0.5, 0.5]])

## Begin heat mapping our decision boundary
x_values = np.linspace(0, 1, 100)
y_values = np.linspace(0, 1, 100)

## Get all possible values of the x & y values
point_grid = list(product(x_values, y_values))

## Recall .decision_function with our point grid - This will get us distances around the boundary
distances = classifier.decision_function(point_grid)

## Get only positive distances
abs_distances = [abs(point) for point in distances]

## Reshape for heat mapping
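## The reshape and plotting step is missing here; a minimal sketch mirroring the
## AND-gate example above (assumes numpy as np and matplotlib.pyplot as plt):
distances_matrix = np.reshape(abs_distances, (100, 100))
heatmap = plt.pcolormesh(x_values, y_values, distances_matrix)
plt.colorbar(heatmap)
plt.show()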
Example 17
z = np.random.multivariate_normal(mu_z, sigma2_z, n_samples)
psi_z = np.c_[z[:, 0], z[:, 0]*z[:, 1]]
x = muf(z[:, 0], z[:, 1]) +\
       sigf(z[:, 0], z[:, 1]) * np.random.randn(n_samples)

x = np.heaviside(x, 1)
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_linclass_perceptron-implementation-step02): Perceptron

# +
perc_clf = Perceptron(class_weight=None)
perc_clf = perc_clf.fit(psi_z, x)

# Probabilities
p_perc = expit(perc_clf.decision_function(psi_z))

# Scores
s_0 = perc_clf.decision_function(psi_z[x == 0])
s_1 = perc_clf.decision_function(psi_z[x == 1])

# ROC curve and AUC
fpr_perc, tpr_perc, _ = roc_curve(x, p_perc)
auc_perc = auc(fpr_perc, tpr_perc)

# Error
err = np.mean((x-perc_clf.predict(psi_z))**2)

# Confusion matrix
cm_perc = confusion_matrix(x, perc_clf.predict(psi_z))
cm_perc = cm_perc / np.sum(cm_perc, axis=1, keepdims=True)  # normalize each row (true class)