import numpy as np

def find_min_brute(f, a, b, h):
    # Brute-force search: evaluate f on a grid with step h over [a, b)
    # and keep the grid point with the smallest value.
    lowest_value = f(a)
    x_position = a
    for i in np.arange(a, b, h):
        value = f(i)
        if value < lowest_value:
            lowest_value = value
            x_position = i
    print("Minimum at x =", x_position, "with value of:", lowest_value)
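# A minimal usage sketch for find_min_brute above; the quadratic test function,
# interval and step size are illustrative assumptions, not part of the original.
find_min_brute(lambda x: (x - 2.0) ** 2, a=0.0, b=5.0, h=0.01)
# Should report a minimum near x = 2 with a value close to 0.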
import copy
import random
import numpy as np

def znajdz_kolege(wyniki):  # roulette-wheel (rank-based) selection
    tablica = np.array(wyniki)
    temp = tablica.argsort()  # indices of the scores sorted from smallest to largest
    rank = np.empty_like(temp)
    rank[temp] = np.arange(len(tablica))  # rank of each score (0 = smallest)
    dopasowanie = [len(rank) - x for x in rank]  # selection weight: smallest score gets the largest weight
    c_wyniki = copy.deepcopy(dopasowanie)
    for i in range(1, len(c_wyniki)):
        c_wyniki[i] = dopasowanie[i] + c_wyniki[i - 1]  # cumulative weights
    prawd = [x / c_wyniki[-1] for x in c_wyniki]  # cumulative selection probabilities
    rand = random.random()
    for i in range(0, len(prawd)):
        if rand < prawd[i]:
            return i
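# A quick sanity check for znajdz_kolege above (the example scores and the
# sampling loop are illustrative assumptions).  With this rank-based roulette,
# smaller scores get larger weights, so index 0 below should be drawn most often.
from collections import Counter

wyniki = [1.0, 5.0, 2.5, 7.0]
wybory = Counter(znajdz_kolege(wyniki) for _ in range(10000))
print(wybory.most_common())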
import numpy as np
import pandas as pd

# pandas has no arange(); build a Series from NumPy's arange instead
df = pd.Series(np.arange(1, 10))
print(df)
import numpy as np
import matplotlib.pyplot as plt

# Label each row as 'pos' or 'neg' based on its compound sentiment score
df['comp_score'] = df['compound'].apply(lambda c: 'pos' if c >= 0 else 'neg')
# df.head()

# Count negative and positive labels and convert them to percentages
scores = list(df['comp_score'])
total = len(scores)
neg_count = sum(1 for s in scores if s == 'neg')
pos_count = total - neg_count
neg_pct = (neg_count / total) * 100
pos_pct = (pos_count / total) * 100

# Bar chart of the positive/negative split
objects = ('Positive', 'Negative')
y_pos = np.arange(len(objects))
performance = [pos_pct, neg_pct]

plt.rcdefaults()
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('% of people')
plt.title('Sentiment')
plt.show()
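# The 'compound' column used above is assumed to come from NLTK's VADER
# sentiment analyzer; this is a hypothetical sketch of that step (it assumes
# df has a text column named 'text'), not part of the original code.
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer

nltk.download('vader_lexicon')
sid = SentimentIntensityAnalyzer()
df['compound'] = df['text'].apply(lambda t: sid.polarity_scores(t)['compound'])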
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 11:02:27 2017

@author: IFPB
"""
import numpy as np
import matplotlib.pyplot as plt

y = [100, 12, 44]
x = np.arange(1)
espacamento = 0.5  # bar width

plt.bar(x, y[0], width=espacamento, color='y', label='total number of people')
plt.bar(x + 0.6, y[1], width=espacamento, color='r', label='% of people with more than 5 accidents (12%)')
plt.bar(x + 1.2, y[2], width=espacamento, color='b', label='% of people with 2 to 4 accidents (44%)')
plt.legend()
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.metrics import confusion_matrix

cls.fit(traning_x, traning_y)  # train the classifier on the training set

# Predict the labels of the test set
y_pred = cls.predict(test_x)
y_pred
# Compare with the actual values
test_y

# Count right and wrong predictions with a confusion matrix
# (it takes the actual labels and the predicted labels)
c_m = confusion_matrix(test_y, y_pred)
c_m

# Plot the decision regions over the training data
x_set, y_set = traning_x, traning_y
x1, x2 = np.meshgrid(
    np.arange(start=x_set[:, 0].min() - 1, stop=x_set[:, 0].max() + 1, step=0.01),
    np.arange(start=x_set[:, 1].min() - 1, stop=x_set[:, 1].max() + 1, step=0.01))
plt.contourf(x1, x2,
             cls.predict(np.array([x1.ravel(), x2.ravel()]).T).reshape(x1.shape),
             alpha=0.75, cmap=ListedColormap(('red', 'green')))
plt.xlim(x1.min(), x1.max())
plt.ylim(x2.min(), x2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i), label=j)
plt.title('K-NN (Training Set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
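# The variables cls, traning_x, traning_y, test_x and test_y above are defined
# elsewhere; this is a minimal setup sketch under the assumption that cls is a
# KNeighborsClassifier and that X, y (illustrative names) hold the
# Age / Estimated Salary features and the class labels.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier

traning_x, test_x, traning_y, test_y = train_test_split(X, y, test_size=0.25, random_state=0)
sc = StandardScaler()
traning_x = sc.fit_transform(traning_x)
test_x = sc.transform(test_x)
cls = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)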
import numpy as np
import matplotlib.pyplot as plt

figure = plt.figure(figsize=(27, 20))
j = 1
for i, feature1 in enumerate(features_list2[1:]):
    y_name = features_list2[0]
    key = feature1
    bin_step = 20
    all_data = df[[y_name, key]]
    # Drop rows where the current feature is NaN
    all_data = all_data[~np.isnan(all_data[key])]
    # Divide the feature's range into bins and histogram the two classes separately
    min_value = all_data[key].min()
    max_value = all_data[key].max()
    value_range = max_value - min_value
    bins = np.arange(min_value, max_value + value_range / bin_step, value_range / bin_step)
    y0 = all_data[all_data[y_name] == 0][key].reset_index(drop=True)
    y1 = all_data[all_data[y_name] == 1][key].reset_index(drop=True)
    ax = plt.subplot(len(features_list2) // 4 + 1, 4, j)
    ax.hist(y0, bins=bins, alpha=0.6, color='red', label='y0')
    ax.hist(y1, bins=bins, alpha=0.6, color='green', label='y1')
    ax.set_xlim(bins.min(), bins.max())
    ax.set_title(key)
    # ax.legend(framealpha=0.8)
    j += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()

# Names of the records with the largest salary and the largest bonus (candidate outliers)
outlier_name1 = str(df['name'][df.salary == df['salary'].max()])
outlier_name2 = str(df['name'][df.bonus == df['bonus'].max()])
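# A minimal, self-contained example of the inputs the loop above expects;
# the toy DataFrame, column names and features_list2 below are illustrative
# assumptions: the first entry of features_list2 is the binary target, the
# remaining entries are numeric features to histogram.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({
    'poi': rng.integers(0, 2, size=200),           # binary target (0 / 1)
    'salary': rng.normal(50000, 15000, size=200),  # numeric feature
    'bonus': rng.normal(10000, 5000, size=200),    # numeric feature
    'name': ['person_%d' % k for k in range(200)],
})
features_list2 = ['poi', 'salary', 'bonus']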